Russell King's ARM Linux kernel tree
path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 14
-rw-r--r-- drivers/Makefile | 4
-rw-r--r-- drivers/accel/Kconfig | 2
-rw-r--r-- drivers/accel/Makefile | 2
-rw-r--r-- drivers/accel/amdxdna/Kconfig | 18
-rw-r--r-- drivers/accel/amdxdna/Makefile | 24
-rw-r--r-- drivers/accel/amdxdna/TODO | 2
-rw-r--r-- drivers/accel/amdxdna/aie2_ctx.c | 944
-rw-r--r-- drivers/accel/amdxdna/aie2_error.c | 358
-rw-r--r-- drivers/accel/amdxdna/aie2_message.c | 779
-rw-r--r-- drivers/accel/amdxdna/aie2_msg_priv.h | 368
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.c | 1004
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.h | 296
-rw-r--r-- drivers/accel/amdxdna/aie2_pm.c | 108
-rw-r--r-- drivers/accel/amdxdna/aie2_psp.c | 146
-rw-r--r-- drivers/accel/amdxdna/aie2_smu.c | 136
-rw-r--r-- drivers/accel/amdxdna/aie2_solver.c | 380
-rw-r--r-- drivers/accel/amdxdna/aie2_solver.h | 155
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.c | 542
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.h | 159
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.c | 975
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.h | 84
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox.c | 571
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox.h | 124
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox_helper.c | 61
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox_helper.h | 42
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.c | 405
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.h | 146
-rw-r--r-- drivers/accel/amdxdna/amdxdna_sysfs.c | 67
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ubuf.c | 232
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ubuf.h | 19
-rw-r--r-- drivers/accel/amdxdna/npu1_regs.c | 114
-rw-r--r-- drivers/accel/amdxdna/npu2_regs.c | 113
-rw-r--r-- drivers/accel/amdxdna/npu4_regs.c | 134
-rw-r--r-- drivers/accel/amdxdna/npu5_regs.c | 113
-rw-r--r-- drivers/accel/amdxdna/npu6_regs.c | 114
-rw-r--r-- drivers/accel/drm_accel.c | 16
-rw-r--r-- drivers/accel/habanalabs/Kconfig | 25
-rw-r--r-- drivers/accel/habanalabs/common/Makefile | 5
-rw-r--r-- drivers/accel/habanalabs/common/command_submission.c | 2
-rw-r--r-- drivers/accel/habanalabs/common/context.c | 3
-rw-r--r-- drivers/accel/habanalabs/common/debugfs.c | 326
-rw-r--r-- drivers/accel/habanalabs/common/device.c | 52
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs.h | 56
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs_drv.c | 6
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs_ioctl.c | 19
-rw-r--r-- drivers/accel/habanalabs/common/hldio.c | 437
-rw-r--r-- drivers/accel/habanalabs/common/hldio.h | 146
-rw-r--r-- drivers/accel/habanalabs/common/memory.c | 34
-rw-r--r-- drivers/accel/habanalabs/common/memory_mgr.c | 5
-rw-r--r-- drivers/accel/habanalabs/common/sysfs.c | 17
-rw-r--r-- drivers/accel/habanalabs/gaudi/gaudi.c | 19
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2.c | 388
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2P.h | 9
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c | 2
-rw-r--r-- drivers/accel/ivpu/Kconfig | 10
-rw-r--r-- drivers/accel/ivpu/Makefile | 8
-rw-r--r-- drivers/accel/ivpu/ivpu_coredump.c | 39
-rw-r--r-- drivers/accel/ivpu/ivpu_coredump.h | 25
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 188
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 169
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 57
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 69
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.h | 12
-rw-r--r-- drivers/accel/ivpu/ivpu_fw_log.c | 113
-rw-r--r-- drivers/accel/ivpu/ivpu_fw_log.h | 17
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 147
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.h | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 128
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 30
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_40xx_reg.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.c | 168
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.h | 12
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_ip.c | 67
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_reg_io.h | 64
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 54
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.h | 11
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 673
-rw-r--r-- drivers/accel/ivpu/ivpu_job.h | 10
-rw-r--r-- drivers/accel/ivpu/ivpu_jsm_msg.c | 82
-rw-r--r-- drivers/accel/ivpu/ivpu_jsm_msg.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 218
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.h | 6
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.c | 157
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.h | 11
-rw-r--r-- drivers/accel/ivpu/ivpu_ms.c | 26
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 171
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.h | 4
-rw-r--r-- drivers/accel/ivpu/ivpu_sysfs.c | 108
-rw-r--r-- drivers/accel/ivpu/ivpu_trace.h | 73
-rw-r--r-- drivers/accel/ivpu/ivpu_trace_points.c | 9
-rw-r--r-- drivers/accel/ivpu/vpu_boot_api.h | 56
-rw-r--r-- drivers/accel/ivpu/vpu_jsm_api.h | 344
-rw-r--r-- drivers/accel/qaic/Kconfig | 1
-rw-r--r-- drivers/accel/qaic/Makefile | 1
-rw-r--r-- drivers/accel/qaic/mhi_controller.c | 392
-rw-r--r-- drivers/accel/qaic/mhi_controller.h | 2
-rw-r--r-- drivers/accel/qaic/qaic.h | 24
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 18
-rw-r--r-- drivers/accel/qaic/qaic_debugfs.c | 45
-rw-r--r-- drivers/accel/qaic/qaic_drv.c | 108
-rw-r--r-- drivers/accel/qaic/qaic_ras.c | 642
-rw-r--r-- drivers/accel/qaic/qaic_ras.h | 10
-rw-r--r-- drivers/accel/qaic/qaic_timesync.c | 6
-rw-r--r-- drivers/accel/qaic/sahara.c | 430
-rw-r--r-- drivers/accel/rocket/Kconfig | 24
-rw-r--r-- drivers/accel/rocket/Makefile | 10
-rw-r--r-- drivers/accel/rocket/rocket_core.c | 110
-rw-r--r-- drivers/accel/rocket/rocket_core.h | 64
-rw-r--r-- drivers/accel/rocket/rocket_device.c | 60
-rw-r--r-- drivers/accel/rocket/rocket_device.h | 30
-rw-r--r-- drivers/accel/rocket/rocket_drv.c | 290
-rw-r--r-- drivers/accel/rocket/rocket_drv.h | 32
-rw-r--r-- drivers/accel/rocket/rocket_gem.c | 181
-rw-r--r-- drivers/accel/rocket/rocket_gem.h | 34
-rw-r--r-- drivers/accel/rocket/rocket_job.c | 637
-rw-r--r-- drivers/accel/rocket/rocket_job.h | 52
-rw-r--r-- drivers/accel/rocket/rocket_registers.h | 4404
-rw-r--r-- drivers/accessibility/speakup/Makefile | 4
-rw-r--r-- drivers/accessibility/speakup/main.c | 18
-rw-r--r-- drivers/accessibility/speakup/synth.c | 2
-rw-r--r-- drivers/acpi/Kconfig | 23
-rw-r--r-- drivers/acpi/Makefile | 7
-rw-r--r-- drivers/acpi/ac.c | 2
-rw-r--r-- drivers/acpi/acpi_apd.c | 2
-rw-r--r-- drivers/acpi/acpi_dbg.c | 26
-rw-r--r-- drivers/acpi/acpi_extlog.c | 17
-rw-r--r-- drivers/acpi/acpi_lpit.c | 2
-rw-r--r-- drivers/acpi/acpi_mrrm.c | 185
-rw-r--r-- drivers/acpi/acpi_pad.c | 9
-rw-r--r-- drivers/acpi/acpi_pcc.c | 13
-rw-r--r-- drivers/acpi/acpi_pnp.c | 4
-rw-r--r-- drivers/acpi/acpi_processor.c | 6
-rw-r--r-- drivers/acpi/acpi_tad.c | 9
-rw-r--r-- drivers/acpi/acpi_video.c | 63
-rw-r--r-- drivers/acpi/acpica/acapps.h | 4
-rw-r--r-- drivers/acpi/acpica/accommon.h | 2
-rw-r--r-- drivers/acpi/acpica/acconvert.h | 2
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 2
-rw-r--r-- drivers/acpi/acpica/acdispat.h | 2
-rw-r--r-- drivers/acpi/acpica/acevents.h | 2
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 2
-rw-r--r-- drivers/acpi/acpica/achware.h | 4
-rw-r--r-- drivers/acpi/acpica/acinterp.h | 5
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 8
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 2
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r-- drivers/acpi/acpica/acobject.h | 2
-rw-r--r-- drivers/acpi/acpica/acopcode.h | 2
-rw-r--r-- drivers/acpi/acpica/acparser.h | 2
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 5
-rw-r--r-- drivers/acpi/acpica/acresrc.h | 2
-rw-r--r-- drivers/acpi/acpica/acstruct.h | 2
-rw-r--r-- drivers/acpi/acpica/actables.h | 2
-rw-r--r-- drivers/acpi/acpica/acutils.h | 2
-rw-r--r-- drivers/acpi/acpica/amlcode.h | 2
-rw-r--r-- drivers/acpi/acpica/amlresrc.h | 10
-rw-r--r-- drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 2
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r-- drivers/acpi/acpica/dsdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 2
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 26
-rw-r--r-- drivers/acpi/acpica/dsmthdat.c | 1
-rw-r--r-- drivers/acpi/acpica/dsobject.c | 2
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r-- drivers/acpi/acpica/dsutils.c | 9
-rw-r--r-- drivers/acpi/acpica/dswexec.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/dswscope.c | 2
-rw-r--r-- drivers/acpi/acpica/dswstate.c | 2
-rw-r--r-- drivers/acpi/acpica/evevent.c | 2
-rw-r--r-- drivers/acpi/acpica/evglock.c | 6
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r-- drivers/acpi/acpica/evhandler.c | 2
-rw-r--r-- drivers/acpi/acpica/evmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/evregion.c | 2
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r-- drivers/acpi/acpica/evxface.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 4
-rw-r--r-- drivers/acpi/acpica/exconcat.c | 2
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 2
-rw-r--r-- drivers/acpi/acpica/exconvrt.c | 6
-rw-r--r-- drivers/acpi/acpica/excreate.c | 2
-rw-r--r-- drivers/acpi/acpica/exdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/exdump.c | 2
-rw-r--r-- drivers/acpi/acpica/exfield.c | 2
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 2
-rw-r--r-- drivers/acpi/acpica/exmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/exmutex.c | 2
-rw-r--r-- drivers/acpi/acpica/exnames.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r-- drivers/acpi/acpica/exprep.c | 2
-rw-r--r-- drivers/acpi/acpica/exregion.c | 2
-rw-r--r-- drivers/acpi/acpica/exresnte.c | 2
-rw-r--r-- drivers/acpi/acpica/exresolv.c | 2
-rw-r--r-- drivers/acpi/acpica/exresop.c | 2
-rw-r--r-- drivers/acpi/acpica/exserial.c | 8
-rw-r--r-- drivers/acpi/acpica/exstore.c | 2
-rw-r--r-- drivers/acpi/acpica/exstoren.c | 2
-rw-r--r-- drivers/acpi/acpica/exstorob.c | 2
-rw-r--r-- drivers/acpi/acpica/exsystem.c | 2
-rw-r--r-- drivers/acpi/acpica/extrace.c | 57
-rw-r--r-- drivers/acpi/acpica/exutils.c | 2
-rw-r--r-- drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r-- drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r-- drivers/acpi/acpica/hwgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r-- drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 2
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r-- drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r-- drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r-- drivers/acpi/acpica/nsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/acpica/nsnames.c | 2
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 2
-rw-r--r-- drivers/acpi/acpica/nspredef.c | 2
-rw-r--r-- drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r-- drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r-- drivers/acpi/acpica/nsrepair2.c | 4
-rw-r--r-- drivers/acpi/acpica/nsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/nswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r-- drivers/acpi/acpica/psargs.c | 2
-rw-r--r-- drivers/acpi/acpica/psloop.c | 2
-rw-r--r-- drivers/acpi/acpica/psobject.c | 54
-rw-r--r-- drivers/acpi/acpica/psopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/psopinfo.c | 6
-rw-r--r-- drivers/acpi/acpica/psparse.c | 2
-rw-r--r-- drivers/acpi/acpica/psscope.c | 2
-rw-r--r-- drivers/acpi/acpica/pstree.c | 2
-rw-r--r-- drivers/acpi/acpica/psutils.c | 2
-rw-r--r-- drivers/acpi/acpica/pswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/psxface.c | 2
-rw-r--r-- drivers/acpi/acpica/rsaddr.c | 13
-rw-r--r-- drivers/acpi/acpica/rscalc.c | 22
-rw-r--r-- drivers/acpi/acpica/rslist.c | 12
-rw-r--r-- drivers/acpi/acpica/tbdata.c | 2
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r-- drivers/acpi/acpica/tbfind.c | 6
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r-- drivers/acpi/acpica/tbprint.c | 10
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r-- drivers/acpi/acpica/utaddress.c | 2
-rw-r--r-- drivers/acpi/acpica/utalloc.c | 2
-rw-r--r-- drivers/acpi/acpica/utascii.c | 2
-rw-r--r-- drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r-- drivers/acpi/acpica/utcache.c | 4
-rw-r--r-- drivers/acpi/acpica/utcksum.c | 2
-rw-r--r-- drivers/acpi/acpica/utcopy.c | 2
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 2
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 2
-rw-r--r-- drivers/acpi/acpica/uteval.c | 2
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 2
-rw-r--r-- drivers/acpi/acpica/uthex.c | 2
-rw-r--r-- drivers/acpi/acpica/utids.c | 2
-rw-r--r-- drivers/acpi/acpica/utinit.c | 2
-rw-r--r-- drivers/acpi/acpica/utlock.c | 2
-rw-r--r-- drivers/acpi/acpica/utobject.c | 2
-rw-r--r-- drivers/acpi/acpica/utosi.c | 2
-rw-r--r-- drivers/acpi/acpica/utpredef.c | 2
-rw-r--r-- drivers/acpi/acpica/utprint.c | 9
-rw-r--r-- drivers/acpi/acpica/utresrc.c | 14
-rw-r--r-- drivers/acpi/acpica/uttrack.c | 2
-rw-r--r-- drivers/acpi/acpica/utuuid.c | 2
-rw-r--r-- drivers/acpi/acpica/utxface.c | 2
-rw-r--r-- drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r-- drivers/acpi/apei/Kconfig | 1
-rw-r--r-- drivers/acpi/apei/apei-internal.h | 2
-rw-r--r-- drivers/acpi/apei/einj-core.c | 509
-rw-r--r-- drivers/acpi/apei/einj-cxl.c | 10
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 8
-rw-r--r-- drivers/acpi/apei/ghes.c | 217
-rw-r--r-- drivers/acpi/arm64/agdi.c | 2
-rw-r--r-- drivers/acpi/arm64/dma.c | 5
-rw-r--r-- drivers/acpi/arm64/gtdt.c | 72
-rw-r--r-- drivers/acpi/arm64/init.c | 2
-rw-r--r-- drivers/acpi/arm64/iort.c | 19
-rw-r--r-- drivers/acpi/battery.c | 93
-rw-r--r-- drivers/acpi/bgrt.c | 4
-rw-r--r-- drivers/acpi/bus.c | 13
-rw-r--r-- drivers/acpi/button.c | 12
-rw-r--r-- drivers/acpi/cppc_acpi.c | 369
-rw-r--r-- drivers/acpi/device_pm.c | 12
-rw-r--r-- drivers/acpi/device_sysfs.c | 2
-rw-r--r-- drivers/acpi/dptf/dptf_pch_fivr.c | 3
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 6
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c | 13
-rw-r--r-- drivers/acpi/ec.c | 59
-rw-r--r-- drivers/acpi/event.c | 4
-rw-r--r-- drivers/acpi/evged.c | 2
-rw-r--r-- drivers/acpi/fan.h | 3
-rw-r--r-- drivers/acpi/fan_attr.c | 45
-rw-r--r-- drivers/acpi/fan_core.c | 41
-rw-r--r-- drivers/acpi/fan_hwmon.c | 8
-rw-r--r-- drivers/acpi/hed.c | 7
-rw-r--r-- drivers/acpi/internal.h | 32
-rw-r--r-- drivers/acpi/irq.c | 16
-rw-r--r-- drivers/acpi/mipi-disco-img.c | 3
-rw-r--r-- drivers/acpi/nfit/core.c | 11
-rw-r--r-- drivers/acpi/nfit/intel.c | 119
-rw-r--r-- drivers/acpi/numa/hmat.c | 112
-rw-r--r-- drivers/acpi/numa/srat.c | 133
-rw-r--r-- drivers/acpi/osi.c | 1
-rw-r--r-- drivers/acpi/osl.c | 34
-rw-r--r-- drivers/acpi/pci_irq.c | 3
-rw-r--r-- drivers/acpi/pci_link.c | 6
-rw-r--r-- drivers/acpi/pci_root.c | 6
-rw-r--r-- drivers/acpi/pfr_telemetry.c | 5
-rw-r--r-- drivers/acpi/pfr_update.c | 67
-rw-r--r-- drivers/acpi/platform_profile.c | 712
-rw-r--r-- drivers/acpi/power.c | 99
-rw-r--r-- drivers/acpi/pptt.c | 15
-rw-r--r-- drivers/acpi/prmt.c | 43
-rw-r--r-- drivers/acpi/proc.c | 17
-rw-r--r-- drivers/acpi/processor_driver.c | 18
-rw-r--r-- drivers/acpi/processor_idle.c | 186
-rw-r--r-- drivers/acpi/processor_perflib.c | 25
-rw-r--r-- drivers/acpi/processor_thermal.c | 52
-rw-r--r-- drivers/acpi/processor_throttling.c | 7
-rw-r--r-- drivers/acpi/property.c | 317
-rw-r--r-- drivers/acpi/resource.c | 62
-rw-r--r-- drivers/acpi/riscv/Kconfig | 7
-rw-r--r-- drivers/acpi/riscv/Makefile | 1
-rw-r--r-- drivers/acpi/riscv/cppc.c | 6
-rw-r--r-- drivers/acpi/riscv/init.c | 4
-rw-r--r-- drivers/acpi/riscv/init.h | 1
-rw-r--r-- drivers/acpi/riscv/irq.c | 75
-rw-r--r-- drivers/acpi/riscv/rimt.c | 520
-rw-r--r-- drivers/acpi/sbs.c | 4
-rw-r--r-- drivers/acpi/sbshc.c | 13
-rw-r--r-- drivers/acpi/scan.c | 35
-rw-r--r-- drivers/acpi/spcr.c | 13
-rw-r--r-- drivers/acpi/sysfs.c | 4
-rw-r--r-- drivers/acpi/tables.c | 22
-rw-r--r-- drivers/acpi/thermal.c | 28
-rw-r--r-- drivers/acpi/thermal_lib.c | 8
-rw-r--r-- drivers/acpi/utils.c | 7
-rw-r--r-- drivers/acpi/video_detect.c | 24
-rw-r--r-- drivers/acpi/viot.c | 2
-rw-r--r-- drivers/acpi/wakeup.c | 4
-rw-r--r-- drivers/acpi/x86/lpss.c | 3
-rw-r--r-- drivers/acpi/x86/utils.c | 95
-rw-r--r-- drivers/amba/bus.c | 13
-rw-r--r-- drivers/android/Kconfig | 31
-rw-r--r-- drivers/android/Makefile | 5
-rw-r--r-- drivers/android/binder.c | 573
-rw-r--r-- drivers/android/binder/Makefile | 9
-rw-r--r-- drivers/android/binder/allocation.rs | 602
-rw-r--r-- drivers/android/binder/context.rs | 180
-rw-r--r-- drivers/android/binder/deferred_close.rs | 204
-rw-r--r-- drivers/android/binder/defs.rs | 182
-rw-r--r-- drivers/android/binder/error.rs | 99
-rw-r--r-- drivers/android/binder/freeze.rs | 388
-rw-r--r-- drivers/android/binder/node.rs | 1131
-rw-r--r-- drivers/android/binder/node/wrapper.rs | 78
-rw-r--r-- drivers/android/binder/page_range.rs | 734
-rw-r--r-- drivers/android/binder/page_range_helper.c | 24
-rw-r--r-- drivers/android/binder/page_range_helper.h | 15
-rw-r--r-- drivers/android/binder/process.rs | 1696
-rw-r--r-- drivers/android/binder/range_alloc/array.rs | 251
-rw-r--r-- drivers/android/binder/range_alloc/mod.rs | 329
-rw-r--r-- drivers/android/binder/range_alloc/tree.rs | 488
-rw-r--r-- drivers/android/binder/rust_binder.h | 23
-rw-r--r-- drivers/android/binder/rust_binder_events.c | 59
-rw-r--r-- drivers/android/binder/rust_binder_events.h | 36
-rw-r--r-- drivers/android/binder/rust_binder_internal.h | 87
-rw-r--r-- drivers/android/binder/rust_binder_main.rs | 627
-rw-r--r-- drivers/android/binder/rust_binderfs.c | 850
-rw-r--r-- drivers/android/binder/stats.rs | 89
-rw-r--r-- drivers/android/binder/thread.rs | 1596
-rw-r--r-- drivers/android/binder/trace.rs | 16
-rw-r--r-- drivers/android/binder/transaction.rs | 456
-rw-r--r-- drivers/android/binder_alloc.c | 407
-rw-r--r-- drivers/android/binder_alloc.h | 65
-rw-r--r-- drivers/android/binder_alloc_selftest.c | 306
-rw-r--r-- drivers/android/binder_internal.h | 26
-rw-r--r-- drivers/android/binder_netlink.c | 31
-rw-r--r-- drivers/android/binder_netlink.h | 20
-rw-r--r-- drivers/android/binder_trace.h | 60
-rw-r--r-- drivers/android/binderfs.c | 35
-rw-r--r-- drivers/android/dbitmap.h | 1
-rw-r--r-- drivers/android/tests/.kunitconfig | 7
-rw-r--r-- drivers/android/tests/Makefile | 6
-rw-r--r-- drivers/android/tests/binder_alloc_kunit.c | 572
-rw-r--r-- drivers/ata/Kconfig | 36
-rw-r--r-- drivers/ata/acard-ahci.c | 6
-rw-r--r-- drivers/ata/ahci.c | 163
-rw-r--r-- drivers/ata/ahci.h | 23
-rw-r--r-- drivers/ata/ahci_brcm.c | 5
-rw-r--r-- drivers/ata/ahci_ceva.c | 8
-rw-r--r-- drivers/ata/ahci_da850.c | 8
-rw-r--r-- drivers/ata/ahci_dm816.c | 4
-rw-r--r-- drivers/ata/ahci_dwc.c | 2
-rw-r--r-- drivers/ata/ahci_imx.c | 17
-rw-r--r-- drivers/ata/ahci_mtk.c | 2
-rw-r--r-- drivers/ata/ahci_mvebu.c | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 2
-rw-r--r-- drivers/ata/ahci_qoriq.c | 6
-rw-r--r-- drivers/ata/ahci_seattle.c | 2
-rw-r--r-- drivers/ata/ahci_st.c | 8
-rw-r--r-- drivers/ata/ahci_sunxi.c | 2
-rw-r--r-- drivers/ata/ahci_tegra.c | 2
-rw-r--r-- drivers/ata/ahci_xgene.c | 19
-rw-r--r-- drivers/ata/ata_generic.c | 2
-rw-r--r-- drivers/ata/ata_piix.c | 7
-rw-r--r-- drivers/ata/libahci.c | 18
-rw-r--r-- drivers/ata/libahci_platform.c | 38
-rw-r--r-- drivers/ata/libata-acpi.c | 28
-rw-r--r-- drivers/ata/libata-core.c | 193
-rw-r--r-- drivers/ata/libata-eh.c | 440
-rw-r--r-- drivers/ata/libata-pmp.c | 26
-rw-r--r-- drivers/ata/libata-sata.c | 89
-rw-r--r-- drivers/ata/libata-scsi.c | 645
-rw-r--r-- drivers/ata/libata-sff.c | 40
-rw-r--r-- drivers/ata/libata-transport.c | 4
-rw-r--r-- drivers/ata/libata-zpodd.c | 3
-rw-r--r-- drivers/ata/libata.h | 27
-rw-r--r-- drivers/ata/pata_acpi.c | 2
-rw-r--r-- drivers/ata/pata_ali.c | 10
-rw-r--r-- drivers/ata/pata_amd.c | 4
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 2
-rw-r--r-- drivers/ata/pata_artop.c | 4
-rw-r--r-- drivers/ata/pata_atiixp.c | 2
-rw-r--r-- drivers/ata/pata_atp867x.c | 2
-rw-r--r-- drivers/ata/pata_cs5536.c | 2
-rw-r--r-- drivers/ata/pata_efar.c | 2
-rw-r--r-- drivers/ata/pata_ep93xx.c | 6
-rw-r--r-- drivers/ata/pata_falcon.c | 4
-rw-r--r-- drivers/ata/pata_ftide010.c | 2
-rw-r--r-- drivers/ata/pata_gayle.c | 6
-rw-r--r-- drivers/ata/pata_hpt366.c | 2
-rw-r--r-- drivers/ata/pata_hpt37x.c | 4
-rw-r--r-- drivers/ata/pata_hpt3x2n.c | 2
-rw-r--r-- drivers/ata/pata_icside.c | 2
-rw-r--r-- drivers/ata/pata_imx.c | 2
-rw-r--r-- drivers/ata/pata_it8213.c | 4
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 2
-rw-r--r-- drivers/ata/pata_jmicron.c | 2
-rw-r--r-- drivers/ata/pata_macio.c | 14
-rw-r--r-- drivers/ata/pata_marvell.c | 2
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 2
-rw-r--r-- drivers/ata/pata_mpiix.c | 2
-rw-r--r-- drivers/ata/pata_ns87410.c | 2
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 9
-rw-r--r-- drivers/ata/pata_of_platform.c | 2
-rw-r--r-- drivers/ata/pata_oldpiix.c | 4
-rw-r--r-- drivers/ata/pata_opti.c | 2
-rw-r--r-- drivers/ata/pata_optidma.c | 6
-rw-r--r-- drivers/ata/pata_parport/pata_parport.c | 4
-rw-r--r-- drivers/ata/pata_pcmcia.c | 4
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 16
-rw-r--r-- drivers/ata/pata_piccolo.c | 2
-rw-r--r-- drivers/ata/pata_platform.c | 2
-rw-r--r-- drivers/ata/pata_pxa.c | 8
-rw-r--r-- drivers/ata/pata_radisys.c | 2
-rw-r--r-- drivers/ata/pata_rb532_cf.c | 2
-rw-r--r-- drivers/ata/pata_rdc.c | 8
-rw-r--r-- drivers/ata/pata_sis.c | 2
-rw-r--r-- drivers/ata/pata_sl82c105.c | 2
-rw-r--r-- drivers/ata/pata_triflex.c | 2
-rw-r--r-- drivers/ata/pata_via.c | 11
-rw-r--r-- drivers/ata/pdc_adma.c | 2
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 4
-rw-r--r-- drivers/ata/sata_fsl.c | 8
-rw-r--r-- drivers/ata/sata_gemini.c | 34
-rw-r--r-- drivers/ata/sata_gemini.h | 1
-rw-r--r-- drivers/ata/sata_highbank.c | 15
-rw-r--r-- drivers/ata/sata_inic162x.c | 2
-rw-r--r-- drivers/ata/sata_mv.c | 16
-rw-r--r-- drivers/ata/sata_nv.c | 30
-rw-r--r-- drivers/ata/sata_promise.c | 4
-rw-r--r-- drivers/ata/sata_qstor.c | 4
-rw-r--r-- drivers/ata/sata_rcar.c | 4
-rw-r--r-- drivers/ata/sata_sil.c | 2
-rw-r--r-- drivers/ata/sata_sil24.c | 13
-rw-r--r-- drivers/ata/sata_sis.c | 2
-rw-r--r-- drivers/ata/sata_svw.c | 4
-rw-r--r-- drivers/ata/sata_sx4.c | 45
-rw-r--r-- drivers/ata/sata_uli.c | 4
-rw-r--r-- drivers/ata/sata_via.c | 7
-rw-r--r-- drivers/ata/sata_vsc.c | 2
-rw-r--r-- drivers/atm/atmtcp.c | 15
-rw-r--r-- drivers/atm/fore200e.c | 2
-rw-r--r-- drivers/atm/idt77105.c | 4
-rw-r--r-- drivers/atm/idt77252.c | 9
-rw-r--r-- drivers/atm/iphase.c | 2
-rw-r--r-- drivers/atm/lanai.c | 6
-rw-r--r-- drivers/atm/nicstar.c | 2
-rw-r--r-- drivers/atm/suni.c | 2
-rw-r--r-- drivers/auxdisplay/Kconfig | 3
-rw-r--r-- drivers/auxdisplay/cfag12864b.c | 12
-rw-r--r-- drivers/auxdisplay/cfag12864bfb.c | 2
-rw-r--r-- drivers/auxdisplay/charlcd.c | 5
-rw-r--r-- drivers/auxdisplay/charlcd.h | 5
-rw-r--r-- drivers/auxdisplay/hd44780.c | 21
-rw-r--r-- drivers/auxdisplay/hd44780_common.c | 24
-rw-r--r-- drivers/auxdisplay/hd44780_common.h | 4
-rw-r--r-- drivers/auxdisplay/ht16k33.c | 12
-rw-r--r-- drivers/auxdisplay/img-ascii-lcd.c | 12
-rw-r--r-- drivers/auxdisplay/lcd2s.c | 14
-rw-r--r-- drivers/auxdisplay/line-display.c | 14
-rw-r--r-- drivers/auxdisplay/max6959.c | 2
-rw-r--r-- drivers/auxdisplay/panel.c | 21
-rw-r--r-- drivers/auxdisplay/seg-led-gpio.c | 7
-rw-r--r-- drivers/base/Kconfig | 6
-rw-r--r-- drivers/base/Makefile | 2
-rw-r--r-- drivers/base/arch_numa.c | 4
-rw-r--r-- drivers/base/arch_topology.c | 84
-rw-r--r-- drivers/base/auxiliary.c | 176
-rw-r--r-- drivers/base/base.h | 27
-rw-r--r-- drivers/base/bus.c | 13
-rw-r--r-- drivers/base/cacheinfo.c | 79
-rw-r--r-- drivers/base/class.c | 46
-rw-r--r-- drivers/base/component.c | 32
-rw-r--r-- drivers/base/core.c | 513
-rw-r--r-- drivers/base/cpu.c | 17
-rw-r--r-- drivers/base/dd.c | 9
-rw-r--r-- drivers/base/devcoredump.c | 22
-rw-r--r-- drivers/base/devres.c | 76
-rw-r--r-- drivers/base/devtmpfs.c | 181
-rw-r--r-- drivers/base/driver.c | 9
-rw-r--r-- drivers/base/faux.c | 261
-rw-r--r-- drivers/base/firmware_loader/Kconfig | 4
-rw-r--r-- drivers/base/firmware_loader/builtin/main.c | 2
-rw-r--r-- drivers/base/firmware_loader/fallback_table.c | 8
-rw-r--r-- drivers/base/firmware_loader/main.c | 68
-rw-r--r-- drivers/base/firmware_loader/sysfs.c | 8
-rw-r--r-- drivers/base/firmware_loader/sysfs.h | 2
-rw-r--r-- drivers/base/init.c | 1
-rw-r--r-- drivers/base/memory.c | 181
-rw-r--r-- drivers/base/module.c | 17
-rw-r--r-- drivers/base/node.c | 196
-rw-r--r-- drivers/base/physical_location.c | 9
-rw-r--r-- drivers/base/platform-msi.c | 1
-rw-r--r-- drivers/base/platform.c | 16
-rw-r--r-- drivers/base/power/Makefile | 1
-rw-r--r-- drivers/base/power/clock_ops.c | 73
-rw-r--r-- drivers/base/power/common.c | 30
-rw-r--r-- drivers/base/power/generic_ops.c | 24
-rw-r--r-- drivers/base/power/main.c | 556
-rw-r--r-- drivers/base/power/qos.c | 1
-rw-r--r-- drivers/base/power/runtime-test.c | 253
-rw-r--r-- drivers/base/power/runtime.c | 301
-rw-r--r-- drivers/base/power/sysfs.c | 33
-rw-r--r-- drivers/base/power/wakeirq.c | 26
-rw-r--r-- drivers/base/power/wakeup.c | 22
-rw-r--r-- drivers/base/power/wakeup_stats.c | 2
-rw-r--r-- drivers/base/property.c | 79
-rw-r--r-- drivers/base/regmap/Kconfig | 4
-rw-r--r-- drivers/base/regmap/internal.h | 13
-rw-r--r-- drivers/base/regmap/regcache-maple.c | 10
-rw-r--r-- drivers/base/regmap/regcache-rbtree.c | 10
-rw-r--r-- drivers/base/regmap/regcache.c | 22
-rw-r--r-- drivers/base/regmap/regmap-debugfs.c | 10
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 144
-rw-r--r-- drivers/base/regmap/regmap-kunit.c | 47
-rw-r--r-- drivers/base/regmap/regmap-mmio.c | 1
-rw-r--r-- drivers/base/regmap/regmap-sdw-mbq.c | 219
-rw-r--r-- drivers/base/regmap/regmap.c | 45
-rw-r--r-- drivers/base/swnode.c | 11
-rw-r--r-- drivers/base/test/Kconfig | 1
-rw-r--r-- drivers/base/test/platform-device-test.c | 41
-rw-r--r-- drivers/base/topology.c | 114
-rw-r--r-- drivers/base/trace.h | 6
-rw-r--r-- drivers/bcma/driver_gpio.c | 6
-rw-r--r-- drivers/bcma/host_soc.c | 2
-rw-r--r-- drivers/block/Kconfig | 82
-rw-r--r-- drivers/block/Makefile | 6
-rw-r--r-- drivers/block/amiflop.c | 21
-rw-r--r-- drivers/block/aoe/aoe.h | 1
-rw-r--r-- drivers/block/aoe/aoeblk.c | 5
-rw-r--r-- drivers/block/aoe/aoecmd.c | 12
-rw-r--r-- drivers/block/aoe/aoedev.c | 22
-rw-r--r-- drivers/block/aoe/aoemain.c | 4
-rw-r--r-- drivers/block/ataflop.c | 18
-rw-r--r-- drivers/block/brd.c | 322
-rw-r--r-- drivers/block/drbd/Kconfig | 2
-rw-r--r-- drivers/block/drbd/drbd_int.h | 39
-rw-r--r-- drivers/block/drbd/drbd_main.c | 64
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 3
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 272
-rw-r--r-- drivers/block/drbd/drbd_req.c | 3
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 62
-rw-r--r-- drivers/block/floppy.c | 70
-rw-r--r-- drivers/block/loop.c | 532
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 55
-rw-r--r-- drivers/block/nbd.c | 151
-rw-r--r-- drivers/block/null_blk/main.c | 228
-rw-r--r-- drivers/block/null_blk/null_blk.h | 7
-rw-r--r-- drivers/block/null_blk/zoned.c | 22
-rw-r--r-- drivers/block/pktcdvd.c | 2916
-rw-r--r-- drivers/block/ps3disk.c | 7
-rw-r--r-- drivers/block/rbd.c | 7
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 11
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 9
-rw-r--r-- drivers/block/rnull.rs | 73
-rw-r--r-- drivers/block/rnull/Kconfig | 13
-rw-r--r-- drivers/block/rnull/Makefile | 3
-rw-r--r-- drivers/block/rnull/configfs.rs | 262
-rw-r--r-- drivers/block/rnull/rnull.rs | 104
-rw-r--r-- drivers/block/sunvdc.c | 28
-rw-r--r-- drivers/block/swim.c | 8
-rw-r--r-- drivers/block/swim3.c | 26
-rw-r--r-- drivers/block/ublk_drv.c | 2095
-rw-r--r-- drivers/block/virtio_blk.c | 124
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 7
-rw-r--r-- drivers/block/z2ram.c | 1
-rw-r--r-- drivers/block/zloop.c | 1386
-rw-r--r-- drivers/block/zram/Kconfig | 1
-rw-r--r-- drivers/block/zram/backend_deflate.c | 12
-rw-r--r-- drivers/block/zram/backend_lz4.c | 2
-rw-r--r-- drivers/block/zram/backend_lz4hc.c | 2
-rw-r--r-- drivers/block/zram/backend_zstd.c | 13
-rw-r--r-- drivers/block/zram/zcomp.c | 63
-rw-r--r-- drivers/block/zram/zcomp.h | 19
-rw-r--r-- drivers/block/zram/zram_drv.c | 1198
-rw-r--r-- drivers/block/zram/zram_drv.h | 25
-rw-r--r-- drivers/bluetooth/Kconfig | 12
-rw-r--r-- drivers/bluetooth/bfusb.c | 5
-rw-r--r-- drivers/bluetooth/bluecard_cs.c | 6
-rw-r--r-- drivers/bluetooth/bpa10x.c | 4
-rw-r--r-- drivers/bluetooth/btbcm.c | 15
-rw-r--r-- drivers/bluetooth/btintel.c | 517
-rw-r--r-- drivers/bluetooth/btintel.h | 30
-rw-r--r-- drivers/bluetooth/btintel_pcie.c | 1473
-rw-r--r-- drivers/bluetooth/btintel_pcie.h | 144
-rw-r--r-- drivers/bluetooth/btmrvl_main.c | 3
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 4
-rw-r--r-- drivers/bluetooth/btmtk.c | 49
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 48
-rw-r--r-- drivers/bluetooth/btmtkuart.c | 8
-rw-r--r-- drivers/bluetooth/btnxpuart.c | 646
-rw-r--r-- drivers/bluetooth/btqca.c | 229
-rw-r--r-- drivers/bluetooth/btqca.h | 9
-rw-r--r-- drivers/bluetooth/btqcomsmd.c | 4
-rw-r--r-- drivers/bluetooth/btrtl.c | 20
-rw-r--r-- drivers/bluetooth/btsdio.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 720
-rw-r--r-- drivers/bluetooth/h4_recv.h | 153
-rw-r--r-- drivers/bluetooth/hci_aml.c | 5
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 31
-rw-r--r-- drivers/bluetooth/hci_bcm4377.c | 12
-rw-r--r-- drivers/bluetooth/hci_bcsp.c | 7
-rw-r--r-- drivers/bluetooth/hci_h5.c | 8
-rw-r--r-- drivers/bluetooth/hci_intel.c | 14
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 27
-rw-r--r-- drivers/bluetooth/hci_ll.c | 6
-rw-r--r-- drivers/bluetooth/hci_nokia.c | 4
-rw-r--r-- drivers/bluetooth/hci_qca.c | 148
-rw-r--r-- drivers/bluetooth/hci_serdev.c | 8
-rw-r--r-- drivers/bluetooth/hci_uart.h | 9
-rw-r--r-- drivers/bluetooth/hci_vhci.c | 78
-rw-r--r-- drivers/bluetooth/virtio_bt.c | 10
-rw-r--r-- drivers/bus/Kconfig | 6
-rw-r--r-- drivers/bus/Makefile | 1
-rw-r--r-- drivers/bus/brcmstb_gisb.c | 10
-rw-r--r-- drivers/bus/fsl-mc/dpmcp.c | 22
-rw-r--r-- drivers/bus/fsl-mc/dprc-driver.c | 10
-rw-r--r-- drivers/bus/fsl-mc/dprc.c | 4
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-allocator.c | 26
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 77
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-private.h | 8
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-uapi.c | 11
-rw-r--r-- drivers/bus/fsl-mc/mc-io.c | 39
-rw-r--r-- drivers/bus/fsl-mc/mc-sys.c | 2
-rw-r--r-- drivers/bus/hisi_lpc.c | 2
-rw-r--r-- drivers/bus/imx-aipstz.c | 108
-rw-r--r-- drivers/bus/mhi/ep/main.c | 37
-rw-r--r-- drivers/bus/mhi/ep/ring.c | 16
-rw-r--r-- drivers/bus/mhi/host/boot.c | 216
-rw-r--r-- drivers/bus/mhi/host/debugfs.c | 3
-rw-r--r-- drivers/bus/mhi/host/init.c | 15
-rw-r--r-- drivers/bus/mhi/host/internal.h | 23
-rw-r--r-- drivers/bus/mhi/host/main.c | 50
-rw-r--r-- drivers/bus/mhi/host/pci_generic.c | 289
-rw-r--r-- drivers/bus/mhi/host/pm.c | 61
-rw-r--r-- drivers/bus/mhi/host/trace.h | 25
-rw-r--r-- drivers/bus/moxtet.c | 7
-rw-r--r-- drivers/bus/omap-ocp2scp.c | 2
-rw-r--r-- drivers/bus/omap_l3_smx.c | 2
-rw-r--r-- drivers/bus/qcom-ssc-block-bus.c | 36
-rw-r--r-- drivers/bus/simple-pm-bus.c | 24
-rw-r--r-- drivers/bus/sun50i-de2.c | 2
-rw-r--r-- drivers/bus/sunxi-rsb.c | 2
-rw-r--r-- drivers/bus/tegra-aconnect.c | 2
-rw-r--r-- drivers/bus/tegra-gmi.c | 2
-rw-r--r-- drivers/bus/ti-pwmss.c | 2
-rw-r--r-- drivers/bus/ti-sysc.c | 73
-rw-r--r-- drivers/bus/ts-nbus.c | 2
-rw-r--r-- drivers/cache/sifive_ccache.c | 10
-rw-r--r-- drivers/cdrom/cdrom.c | 15
-rw-r--r-- drivers/cdrom/gdrom.c | 4
-rw-r--r-- drivers/cdx/Kconfig | 2
-rw-r--r-- drivers/cdx/Makefile | 2
-rw-r--r-- drivers/cdx/cdx.c | 26
-rw-r--r-- drivers/cdx/cdx_msi.c | 7
-rw-r--r-- drivers/cdx/controller/Kconfig | 2
-rw-r--r-- drivers/cdx/controller/bitfield.h | 90
-rw-r--r-- drivers/cdx/controller/cdx_controller.c | 36
-rw-r--r-- drivers/cdx/controller/cdx_rpmsg.c | 5
-rw-r--r-- drivers/cdx/controller/mcdi.c | 43
-rw-r--r-- drivers/cdx/controller/mcdi.h | 242
-rw-r--r-- drivers/cdx/controller/mcdi_functions.c | 1
-rw-r--r-- drivers/cdx/controller/mcdi_functions.h | 3
-rw-r--r-- drivers/cdx/controller/mcdid.h | 63
-rw-r--r-- drivers/char/Kconfig | 5
-rw-r--r-- drivers/char/Makefile | 1
-rw-r--r-- drivers/char/adi.c | 2
-rw-r--r-- drivers/char/agp/amd64-agp.c | 18
-rw-r--r-- drivers/char/agp/intel-gtt.c | 55
-rw-r--r-- drivers/char/agp/nvidia-agp.c | 1
-rw-r--r-- drivers/char/apm-emulation.c | 5
-rw-r--r-- drivers/char/dtlk.c | 6
-rw-r--r-- drivers/char/hangcheck-timer.c | 2
-rw-r--r-- drivers/char/hpet.c | 8
-rw-r--r-- drivers/char/hw_random/Kconfig | 54
-rw-r--r-- drivers/char/hw_random/Makefile | 2
-rw-r--r-- drivers/char/hw_random/airoha-trng.c | 243
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 12
-rw-r--r-- drivers/char/hw_random/bcm74110-rng.c | 125
-rw-r--r-- drivers/char/hw_random/cctrng.c | 3
-rw-r--r-- drivers/char/hw_random/cn10k-rng.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 11
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 2
-rw-r--r-- drivers/char/hw_random/histb-rng.c | 2
-rw-r--r-- drivers/char/hw_random/imx-rngc.c | 69
-rw-r--r-- drivers/char/hw_random/ingenic-rng.c | 2
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 6
-rw-r--r-- drivers/char/hw_random/mtk-rng.c | 12
-rw-r--r-- drivers/char/hw_random/mxc-rnga.c | 2
-rw-r--r-- drivers/char/hw_random/n2-drv.c | 2
-rw-r--r-- drivers/char/hw_random/n2rng.h | 4
-rw-r--r-- drivers/char/hw_random/npcm-rng.c | 10
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 2
-rw-r--r-- drivers/char/hw_random/omap3-rom-rng.c | 1
-rw-r--r-- drivers/char/hw_random/rockchip-rng.c | 320
-rw-r--r-- drivers/char/hw_random/stm32-rng.c | 79
-rw-r--r-- drivers/char/hw_random/timeriomem-rng.c | 7
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 6
-rw-r--r-- drivers/char/ipmi/Kconfig | 7
-rw-r--r-- drivers/char/ipmi/Makefile | 1
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 6
-rw-r--r-- drivers/char/ipmi/ipmb_dev_int.c | 5
-rw-r--r-- drivers/char/ipmi/ipmi_devintf.c | 5
-rw-r--r-- drivers/char/ipmi/ipmi_ipmb.c | 4
-rw-r--r-- drivers/char/ipmi/ipmi_kcs_sm.c | 16
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 1360
-rw-r--r-- drivers/char/ipmi/ipmi_powernv.c | 19
-rw-r--r-- drivers/char/ipmi/ipmi_poweroff.c | 8
-rw-r--r-- drivers/char/ipmi/ipmi_si.h | 17
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 201
-rw-r--r-- drivers/char/ipmi/ipmi_si_ls2k.c | 189
-rw-r--r-- drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_pci.c | 54
-rw-r--r-- drivers/char/ipmi/ipmi_si_platform.c | 29
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 24
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 156
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 6
-rw-r--r-- drivers/char/ipmi/kcs_bmc_npcm7xx.c | 2
-rw-r--r-- drivers/char/ipmi/ssif_bmc.c | 10
-rw-r--r-- drivers/char/mem.c | 39
-rw-r--r-- drivers/char/misc.c | 62
-rw-r--r-- drivers/char/misc_minor_kunit.c | 689
-rw-r--r-- drivers/char/powernv-op-panel.c | 2
-rw-r--r-- drivers/char/random.c | 66
-rw-r--r-- drivers/char/sonypi.c | 13
-rw-r--r-- drivers/char/tlclk.c | 36
-rw-r--r-- drivers/char/tpm/Kconfig | 33
-rw-r--r-- drivers/char/tpm/Makefile | 3
-rw-r--r-- drivers/char/tpm/eventlog/acpi.c | 15
-rw-r--r-- drivers/char/tpm/eventlog/common.c | 46
-rw-r--r-- drivers/char/tpm/eventlog/of.c | 8
-rw-r--r-- drivers/char/tpm/eventlog/tpm1.c | 7
-rw-r--r-- drivers/char/tpm/st33zp24/st33zp24.c | 2
-rw-r--r-- drivers/char/tpm/tpm-buf.c | 26
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 20
-rw-r--r-- drivers/char/tpm/tpm-dev-common.c | 9
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 94
-rw-r--r-- drivers/char/tpm/tpm.h | 2
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 158
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 285
-rw-r--r-- drivers/char/tpm/tpm_atmel.c | 66
-rw-r--r-- drivers/char/tpm/tpm_atmel.h | 140
-rw-r--r-- drivers/char/tpm/tpm_crb.c | 107
-rw-r--r-- drivers/char/tpm/tpm_crb_ffa.c | 414
-rw-r--r-- drivers/char/tpm/tpm_crb_ffa.h | 23
-rw-r--r-- drivers/char/tpm/tpm_ftpm_tee.c | 90
-rw-r--r-- drivers/char/tpm/tpm_ftpm_tee.h | 5
-rw-r--r-- drivers/char/tpm/tpm_i2c_atmel.c | 3
-rw-r--r-- drivers/char/tpm/tpm_i2c_infineon.c | 3
-rw-r--r-- drivers/char/tpm/tpm_i2c_nuvoton.c | 3
-rw-r--r-- drivers/char/tpm/tpm_ibmvtpm.c | 21
-rw-r--r-- drivers/char/tpm/tpm_infineon.c | 3
-rw-r--r-- drivers/char/tpm/tpm_loongson.c | 84
-rw-r--r-- drivers/char/tpm/tpm_nsc.c | 3
-rw-r--r-- drivers/char/tpm/tpm_ppi.c | 137
-rw-r--r-- drivers/char/tpm/tpm_svsm.c | 121
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 2
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 27
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 3
-rw-r--r-- drivers/char/tpm/tpm_tis_i2c_cr50.c | 152
-rw-r--r-- drivers/char/tpm/tpm_tis_synquacer.c | 2
-rw-r--r-- drivers/char/tpm/tpm_vtpm_proxy.c | 4
-rw-r--r-- drivers/char/tpm/xen-tpmfront.c | 3
-rw-r--r-- drivers/char/virtio_console.c | 24
-rw-r--r-- drivers/char/xilinx_hwicap/xilinx_hwicap.c | 2
-rw-r--r-- drivers/char/xillybus/xillybus_core.c | 3
-rw-r--r-- drivers/char/xillybus/xillybus_of.c | 2
-rw-r--r-- drivers/clk/.kunitconfig | 1
-rw-r--r-- drivers/clk/Kconfig | 47
-rw-r--r-- drivers/clk/Makefile | 20
-rw-r--r-- drivers/clk/actions/owl-common.c | 1
-rw-r--r-- drivers/clk/actions/owl-composite.c | 8
-rw-r--r-- drivers/clk/actions/owl-divider.c | 13
-rw-r--r-- drivers/clk/actions/owl-factor.c | 12
-rw-r--r-- drivers/clk/actions/owl-pll.c | 25
-rw-r--r-- drivers/clk/analogbits/wrpll-cln28hpc.c | 2
-rw-r--r-- drivers/clk/at91/Makefile | 1
-rw-r--r-- drivers/clk/at91/clk-audio-pll.c | 42
-rw-r--r-- drivers/clk/at91/clk-h32mx.c | 33
-rw-r--r-- drivers/clk/at91/clk-master.c | 5
-rw-r--r-- drivers/clk/at91/clk-peripheral.c | 48
-rw-r--r-- drivers/clk/at91/clk-pll.c | 12
-rw-r--r-- drivers/clk/at91/clk-plldiv.c | 34
-rw-r--r-- drivers/clk/at91/clk-sam9x60-pll.c | 113
-rw-r--r-- drivers/clk/at91/clk-usb.c | 20
-rw-r--r-- drivers/clk/at91/pmc.c | 1
-rw-r--r-- drivers/clk/at91/pmc.h | 1
-rw-r--r-- drivers/clk/at91/sam9x60.c | 2
-rw-r--r-- drivers/clk/at91/sam9x7.c | 26
-rw-r--r-- drivers/clk/at91/sama7d65.c | 1379
-rw-r--r-- drivers/clk/at91/sama7g5.c | 2
-rw-r--r-- drivers/clk/at91/sckc.c | 24
-rw-r--r-- drivers/clk/axs10x/i2s_pll_clock.c | 14
-rw-r--r-- drivers/clk/axs10x/pll_clock.c | 12
-rw-r--r-- drivers/clk/baikal-t1/ccu-div.c | 27
-rw-r--r-- drivers/clk/baikal-t1/ccu-pll.c | 14
-rw-r--r-- drivers/clk/baikal-t1/clk-ccu-div.c | 2
-rw-r--r-- drivers/clk/baikal-t1/clk-ccu-pll.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm2835.c | 19
-rw-r--r-- drivers/clk/bcm/clk-bcm53573-ilp.c | 2
-rw-r--r-- drivers/clk/bcm/clk-iproc-asiu.c | 25
-rw-r--r-- drivers/clk/bcm/clk-kona.c | 21
-rw-r--r-- drivers/clk/bcm/clk-kona.h | 2
-rw-r--r-- drivers/clk/bcm/clk-raspberrypi.c | 106
-rw-r--r-- drivers/clk/berlin/berlin2-avpll.c | 2
-rw-r--r-- drivers/clk/clk-apple-nco.c | 17
-rw-r--r-- drivers/clk/clk-asm9260.c | 4
-rw-r--r-- drivers/clk/clk-ast2600.c | 2
-rw-r--r-- drivers/clk/clk-axi-clkgen.c | 181
-rw-r--r-- drivers/clk/clk-axm5516.c | 1
-rw-r--r-- drivers/clk/clk-bm1880.c | 21
-rw-r--r-- drivers/clk/clk-cdce706.c | 18
-rw-r--r-- drivers/clk/clk-cdce925.c | 52
-rw-r--r-- drivers/clk/clk-clps711x.c | 2
-rw-r--r-- drivers/clk/clk-cs2000-cp.c | 14
-rw-r--r-- drivers/clk/clk-devres.c | 9
-rw-r--r-- drivers/clk/clk-divider.c | 39
-rw-r--r-- drivers/clk/clk-en7523.c | 327
-rw-r--r-- drivers/clk/clk-ep93xx.c | 27
-rw-r--r-- drivers/clk/clk-eyeq.c | 859
-rw-r--r-- drivers/clk/clk-fixed-factor.c | 27
-rw-r--r-- drivers/clk/clk-fractional-divider.c | 25
-rw-r--r-- drivers/clk/clk-gate.c | 2
-rw-r--r-- drivers/clk/clk-gemini.c | 15
-rw-r--r-- drivers/clk/clk-gpio.c | 205
-rw-r--r-- drivers/clk/clk-highbank.c | 26
-rw-r--r-- drivers/clk/clk-hsdk-pll.c | 14
-rw-r--r-- drivers/clk/clk-lan966x.c | 78
-rw-r--r-- drivers/clk/clk-lmk04832.c | 57
-rw-r--r-- drivers/clk/clk-loongson1.c | 12
-rw-r--r-- drivers/clk/clk-loongson2.c | 133
-rw-r--r-- drivers/clk/clk-max9485.c | 27
-rw-r--r-- drivers/clk/clk-milbeaut.c | 22
-rw-r--r-- drivers/clk/clk-multiplier.c | 12
-rw-r--r-- drivers/clk/clk-nomadik.c | 5
-rw-r--r-- drivers/clk/clk-npcm8xx.c | 430
-rw-r--r-- drivers/clk/clk-pwm.c | 49
-rw-r--r-- drivers/clk/clk-qoriq.c | 6
-rw-r--r-- drivers/clk/clk-rp1.c | 2462
-rw-r--r-- drivers/clk/clk-rpmi.c | 620
-rw-r--r-- drivers/clk/clk-s2mps11.c | 13
-rw-r--r-- drivers/clk/clk-scmi.c | 66
-rw-r--r-- drivers/clk/clk-scpi.c | 18
-rw-r--r-- drivers/clk/clk-si514.c | 26
-rw-r--r-- drivers/clk/clk-si521xx.c | 14
-rw-r--r-- drivers/clk/clk-si5341.c | 22
-rw-r--r-- drivers/clk/clk-si5351.c | 6
-rw-r--r-- drivers/clk/clk-si544.c | 12
-rw-r--r-- drivers/clk/clk-si570.c | 28
-rw-r--r-- drivers/clk/clk-sp7021.c | 46
-rw-r--r-- drivers/clk/clk-sparx5.c | 10
-rw-r--r-- drivers/clk/clk-stm32f4.c | 183
-rw-r--r-- drivers/clk/clk-tps68470.c | 12
-rw-r--r-- drivers/clk/clk-twl.c | 69
-rw-r--r-- drivers/clk/clk-versaclock3.c | 129
-rw-r--r-- drivers/clk/clk-versaclock5.c | 73
-rw-r--r-- drivers/clk/clk-versaclock7.c | 32
-rw-r--r-- drivers/clk/clk-vt8500.c | 59
-rw-r--r-- drivers/clk/clk-wm831x.c | 14
-rw-r--r-- drivers/clk/clk-xgene.c | 45
-rw-r--r-- drivers/clk/clk.c | 101
-rw-r--r-- drivers/clk/clk_kunit_helpers.c | 30
-rw-r--r-- drivers/clk/clk_test.c | 547
-rw-r--r-- drivers/clk/clkdev.c | 9
-rw-r--r-- drivers/clk/davinci/Makefile | 2
-rw-r--r-- drivers/clk/davinci/pll-da830.c | 71
-rw-r--r-- drivers/clk/davinci/pll.c | 67
-rw-r--r-- drivers/clk/davinci/pll.h | 2
-rw-r--r-- drivers/clk/davinci/psc-da830.c | 118
-rw-r--r-- drivers/clk/davinci/psc.c | 13
-rw-r--r-- drivers/clk/davinci/psc.h | 7
-rw-r--r-- drivers/clk/hisilicon/clk-hi3660-stub.c | 18
-rw-r--r-- drivers/clk/hisilicon/clk-hi6220-stub.c | 12
-rw-r--r-- drivers/clk/hisilicon/clkdivider-hi6220.c | 12
-rw-r--r-- drivers/clk/hisilicon/clkgate-separated.c | 16
-rw-r--r-- drivers/clk/imgtec/clk-boston.c | 6
-rw-r--r-- drivers/clk/imx/clk-busy.c | 8
-rw-r--r-- drivers/clk/imx/clk-composite-8m.c | 16
-rw-r--r-- drivers/clk/imx/clk-composite-93.c | 7
-rw-r--r-- drivers/clk/imx/clk-cpu.c | 10
-rw-r--r-- drivers/clk/imx/clk-fixup-div.c | 10
-rw-r--r-- drivers/clk/imx/clk-fixup-mux.c | 2
-rw-r--r-- drivers/clk/imx/clk-frac-pll.c | 20
-rw-r--r-- drivers/clk/imx/clk-fracn-gppll.c | 27
-rw-r--r-- drivers/clk/imx/clk-gate-exclusive.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx5.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx8-acm.c | 6
-rw-r--r-- drivers/clk/imx/clk-imx8mp-audiomix.c | 9
-rw-r--r-- drivers/clk/imx/clk-imx8mp.c | 156
-rw-r--r-- drivers/clk/imx/clk-imx8qxp-lpcg.c | 1
-rw-r--r-- drivers/clk/imx/clk-imx93.c | 89
-rw-r--r-- drivers/clk/imx/clk-imx95-blk-ctl.c | 170
-rw-r--r-- drivers/clk/imx/clk-lpcg-scu.c | 41
-rw-r--r-- drivers/clk/imx/clk-pfd.c | 18
-rw-r--r-- drivers/clk/imx/clk-pll14xx.c | 31
-rw-r--r-- drivers/clk/imx/clk-pllv2.c | 23
-rw-r--r-- drivers/clk/imx/clk-pllv3.c | 72
-rw-r--r-- drivers/clk/imx/clk-pllv4.c | 29
-rw-r--r-- drivers/clk/imx/clk-scu.c | 41
-rw-r--r-- drivers/clk/ingenic/cgu.c | 12
-rw-r--r-- drivers/clk/ingenic/cgu.h | 2
-rw-r--r-- drivers/clk/ingenic/jz4780-cgu.c | 24
-rw-r--r-- drivers/clk/ingenic/x1000-cgu.c | 19
-rw-r--r-- drivers/clk/keystone/sci-clk.c | 5
-rw-r--r-- drivers/clk/keystone/syscon-clk.c | 13
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates.h | 8
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_multiple.dtso | 16
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso | 20
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_null.dtso | 14
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso | 18
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_one.dtso | 14
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso | 18
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso | 16
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso | 20
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_u64_one.dtso | 14
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso | 18
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_without.dtso | 13
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso | 17
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_zero.dtso | 12
-rw-r--r-- drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso | 16
-rw-r--r-- drivers/clk/kunit_clk_hw_get_dev_of_node.dtso | 10
-rw-r--r-- drivers/clk/mediatek/Kconfig | 123
-rw-r--r-- drivers/clk/mediatek/Makefile | 18
-rw-r--r-- drivers/clk/mediatek/clk-gate.c | 117
-rw-r--r-- drivers/clk/mediatek/clk-gate.h | 3
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-aud.c | 10
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-bdp.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-img.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-mm.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-vdec.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-apmixedsys.c | 137
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-imgsys.c | 57
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-infracfg.c | 107
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-mfgcfg.c | 61
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-pericfg.c | 124
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-topckgen.c | 394
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-vdecsys.c | 79
-rw-r--r-- drivers/clk/mediatek/clk-mt6735-vencsys.c | 53
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-aud.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-cam.c | 17
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-img.c | 18
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-ipe.c | 14
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-topckgen.c | 9
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vdo1.c | 11
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-infra_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-apmixedsys.c | 204
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-disp0.c | 170
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-disp1.c | 170
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c | 118
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mcu.c | 167
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mdpsys.c | 186
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mfg.c | 150
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ovl0.c | 154
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ovl1.c | 154
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-peri_ao.c | 142
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-pextp.c | 131
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-topckgen.c | 985
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-topckgen2.c | 568
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ufs_ao.c | 108
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vdec.c | 253
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vdisp_ao.c | 80
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-venc.c | 236
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vlpckgen.c | 725
-rw-r--r-- drivers/clk/mediatek/clk-mtk.c | 16
-rw-r--r-- drivers/clk/mediatek/clk-mtk.h | 22
-rw-r--r-- drivers/clk/mediatek/clk-mux.c | 122
-rw-r--r-- drivers/clk/mediatek/clk-mux.h | 87
-rw-r--r-- drivers/clk/mediatek/clk-pll.c | 58
-rw-r--r-- drivers/clk/mediatek/clk-pll.h | 11
-rw-r--r-- drivers/clk/mediatek/clk-pllfh.c | 2
-rw-r--r-- drivers/clk/meson/Kconfig | 32
-rw-r--r-- drivers/clk/meson/Makefile | 1
-rw-r--r-- drivers/clk/meson/a1-peripherals.c | 1189
-rw-r--r-- drivers/clk/meson/a1-peripherals.h | 46
-rw-r--r-- drivers/clk/meson/a1-pll.c | 154
-rw-r--r-- drivers/clk/meson/a1-pll.h | 28
-rw-r--r-- drivers/clk/meson/axg-aoclk.c | 177
-rw-r--r-- drivers/clk/meson/axg-audio.c | 605
-rw-r--r-- drivers/clk/meson/axg-audio.h | 70
-rw-r--r-- drivers/clk/meson/axg.c | 463
-rw-r--r-- drivers/clk/meson/axg.h | 105
-rw-r--r-- drivers/clk/meson/c3-peripherals.c | 2247
-rw-r--r-- drivers/clk/meson/c3-pll.c | 278
-rw-r--r-- drivers/clk/meson/clk-cpu-dyndiv.c | 5
-rw-r--r-- drivers/clk/meson/clk-dualdiv.c | 8
-rw-r--r-- drivers/clk/meson/clk-mpll.c | 23
-rw-r--r-- drivers/clk/meson/clk-mpll.h | 1
-rw-r--r-- drivers/clk/meson/clk-phase.c | 19
-rw-r--r-- drivers/clk/meson/clk-pll.c | 23
-rw-r--r-- drivers/clk/meson/clk-pll.h | 1
-rw-r--r-- drivers/clk/meson/clk-regmap.c | 63
-rw-r--r-- drivers/clk/meson/clk-regmap.h | 24
-rw-r--r-- drivers/clk/meson/g12a-aoclk.c | 274
-rw-r--r-- drivers/clk/meson/g12a.c | 2415
-rw-r--r-- drivers/clk/meson/g12a.h | 130
-rw-r--r-- drivers/clk/meson/gxbb-aoclk.c | 144
-rw-r--r-- drivers/clk/meson/gxbb.c | 1125
-rw-r--r-- drivers/clk/meson/gxbb.h | 115
-rw-r--r-- drivers/clk/meson/meson-aoclk.c | 41
-rw-r--r-- drivers/clk/meson/meson-aoclk.h | 4
-rw-r--r-- drivers/clk/meson/meson-clkc-utils.c | 90
-rw-r--r-- drivers/clk/meson/meson-clkc-utils.h | 89
-rw-r--r-- drivers/clk/meson/meson-eeclk.c | 64
-rw-r--r-- drivers/clk/meson/meson-eeclk.h | 26
-rw-r--r-- drivers/clk/meson/meson8-ddr.c | 71
-rw-r--r-- drivers/clk/meson/meson8b.c | 1021
-rw-r--r-- drivers/clk/meson/meson8b.h | 80
-rw-r--r-- drivers/clk/meson/s4-peripherals.c | 1554
-rw-r--r-- drivers/clk/meson/s4-peripherals.h | 56
-rw-r--r-- drivers/clk/meson/s4-pll.c | 155
-rw-r--r-- drivers/clk/meson/s4-pll.h | 38
-rw-r--r-- drivers/clk/meson/sclk-div.c | 9
-rw-r--r-- drivers/clk/meson/vclk.c | 8
-rw-r--r-- drivers/clk/meson/vid-pll-div.c | 5
-rw-r--r-- drivers/clk/microchip/clk-core.c | 57
-rw-r--r-- drivers/clk/microchip/clk-mpfs.c | 2
-rw-r--r-- drivers/clk/mmp/Kconfig | 10
-rw-r--r-- drivers/clk/mmp/Makefile | 5
-rw-r--r-- drivers/clk/mmp/clk-audio.c | 18
-rw-r--r-- drivers/clk/mmp/clk-frac.c | 82
-rw-r--r-- drivers/clk/mmp/clk-gate.c | 2
-rw-r--r-- drivers/clk/mmp/clk-of-mmp2.c | 26
-rw-r--r-- drivers/clk/mmp/clk-of-pxa168.c | 4
-rw-r--r-- drivers/clk/mmp/clk-of-pxa1928.c | 6
-rw-r--r-- drivers/clk/mmp/clk-of-pxa910.c | 4
-rw-r--r-- drivers/clk/mmp/clk-pxa1908-apbc.c | 130
-rw-r--r-- drivers/clk/mmp/clk-pxa1908-apbcp.c | 82
-rw-r--r-- drivers/clk/mmp/clk-pxa1908-apmu.c | 128
-rw-r--r-- drivers/clk/mmp/clk-pxa1908-mpmu.c | 112
-rw-r--r-- drivers/clk/mmp/clk.h | 10
-rw-r--r-- drivers/clk/mmp/pwr-island.c | 2
-rw-r--r-- drivers/clk/mstar/clk-msc313-cpupll.c | 18
-rw-r--r-- drivers/clk/mvebu/ap-cpu-clk.c | 12
-rw-r--r-- drivers/clk/mvebu/armada-37xx-periph.c | 15
-rw-r--r-- drivers/clk/mvebu/armada-xp.c | 5
-rw-r--r-- drivers/clk/mvebu/clk-corediv.c | 18
-rw-r--r-- drivers/clk/mvebu/clk-cpu.c | 12
-rw-r--r-- drivers/clk/mvebu/dove-divider.c | 16
-rw-r--r-- drivers/clk/mxs/clk-div.c | 10
-rw-r--r-- drivers/clk/mxs/clk-frac.c | 16
-rw-r--r-- drivers/clk/mxs/clk-ref.c | 16
-rw-r--r-- drivers/clk/nuvoton/Kconfig | 4
-rw-r--r-- drivers/clk/nuvoton/clk-ma35d1-divider.c | 12
-rw-r--r-- drivers/clk/nuvoton/clk-ma35d1-pll.c | 28
-rw-r--r-- drivers/clk/nxp/clk-lpc18xx-ccu.c | 2
-rw-r--r-- drivers/clk/nxp/clk-lpc18xx-cgu.c | 20
-rw-r--r-- drivers/clk/nxp/clk-lpc32xx.c | 60
-rw-r--r-- drivers/clk/pistachio/clk-pll.c | 20
-rw-r--r-- drivers/clk/qcom/Kconfig | 304
-rw-r--r-- drivers/clk/qcom/Makefile | 30
-rw-r--r-- drivers/clk/qcom/a53-pll.c | 1
-rw-r--r-- drivers/clk/qcom/a7-pll.c | 3
-rw-r--r-- drivers/clk/qcom/apcs-sdx55.c | 6
-rw-r--r-- drivers/clk/qcom/apss-ipq-pll.c | 4
-rw-r--r-- drivers/clk/qcom/apss-ipq5424.c | 265
-rw-r--r-- drivers/clk/qcom/camcc-milos.c | 2161
-rw-r--r-- drivers/clk/qcom/camcc-qcs615.c | 1597
-rw-r--r-- drivers/clk/qcom/camcc-sa8775p.c | 1960
-rw-r--r-- drivers/clk/qcom/camcc-sc7180.c | 2
-rw-r--r-- drivers/clk/qcom/camcc-sc7280.c | 2
-rw-r--r-- drivers/clk/qcom/camcc-sc8180x.c | 2889
-rw-r--r-- drivers/clk/qcom/camcc-sc8280xp.c | 2
-rw-r--r-- drivers/clk/qcom/camcc-sdm845.c | 1
-rw-r--r-- drivers/clk/qcom/camcc-sm4450.c | 3
-rw-r--r-- drivers/clk/qcom/camcc-sm6350.c | 19
-rw-r--r-- drivers/clk/qcom/camcc-sm7150.c | 1
-rw-r--r-- drivers/clk/qcom/camcc-sm8150.c | 4
-rw-r--r-- drivers/clk/qcom/camcc-sm8250.c | 58
-rw-r--r-- drivers/clk/qcom/camcc-sm8450.c | 347
-rw-r--r-- drivers/clk/qcom/camcc-sm8550.c | 87
-rw-r--r-- drivers/clk/qcom/camcc-sm8650.c | 85
-rw-r--r-- drivers/clk/qcom/camcc-x1e80100.c | 74
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.c | 665
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.h | 24
-rw-r--r-- drivers/clk/qcom/clk-branch.c | 4
-rw-r--r-- drivers/clk/qcom/clk-cbf-8996.c | 1
-rw-r--r-- drivers/clk/qcom/clk-cpu-8996.c | 1
-rw-r--r-- drivers/clk/qcom/clk-rcg.c | 3
-rw-r--r-- drivers/clk/qcom/clk-rcg.h | 2
-rw-r--r-- drivers/clk/qcom/clk-rcg2.c | 254
-rw-r--r-- drivers/clk/qcom/clk-regmap-divider.c | 27
-rw-r--r-- drivers/clk/qcom/clk-rpm.c | 37
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 136
-rw-r--r-- drivers/clk/qcom/clk-smd-rpm.c | 129
-rw-r--r-- drivers/clk/qcom/clk-spmi-pmic-div.c | 25
-rw-r--r-- drivers/clk/qcom/common.c | 101
-rw-r--r-- drivers/clk/qcom/common.h | 14
-rw-r--r-- drivers/clk/qcom/dispcc-glymur.c | 1982
-rw-r--r-- drivers/clk/qcom/dispcc-milos.c | 974
-rw-r--r-- drivers/clk/qcom/dispcc-qcm2290.c | 5
-rw-r--r-- drivers/clk/qcom/dispcc-qcs615.c | 792
-rw-r--r-- drivers/clk/qcom/dispcc-sc7180.c | 1
-rw-r--r-- drivers/clk/qcom/dispcc-sc7280.c | 9
-rw-r--r-- drivers/clk/qcom/dispcc-sc8280xp.c | 7
-rw-r--r-- drivers/clk/qcom/dispcc-sdm845.c | 2
-rw-r--r-- drivers/clk/qcom/dispcc-sm4450.c | 3
-rw-r--r-- drivers/clk/qcom/dispcc-sm6115.c | 5
-rw-r--r-- drivers/clk/qcom/dispcc-sm6125.c | 1
-rw-r--r-- drivers/clk/qcom/dispcc-sm6350.c | 11
-rw-r--r-- drivers/clk/qcom/dispcc-sm6375.c | 1
-rw-r--r-- drivers/clk/qcom/dispcc-sm7150.c | 1
-rw-r--r-- drivers/clk/qcom/dispcc-sm8250.c | 2
-rw-r--r-- drivers/clk/qcom/dispcc-sm8450.c | 71
-rw-r--r-- drivers/clk/qcom/dispcc-sm8550.c | 23
-rw-r--r-- drivers/clk/qcom/dispcc-sm8750.c | 1961
-rw-r--r-- drivers/clk/qcom/dispcc0-sa8775p.c | 1480
-rw-r--r-- drivers/clk/qcom/dispcc1-sa8775p.c | 1480
-rw-r--r-- drivers/clk/qcom/gcc-glymur.c | 8616
-rw-r--r-- drivers/clk/qcom/gcc-ipq4019.c | 14
-rw-r--r-- drivers/clk/qcom/gcc-ipq5018.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-ipq5332.c | 382
-rw-r--r-- drivers/clk/qcom/gcc-ipq5424.c | 3316
-rw-r--r-- drivers/clk/qcom/gcc-ipq6018.c | 64
-rw-r--r-- drivers/clk/qcom/gcc-ipq8074.c | 6
-rw-r--r-- drivers/clk/qcom/gcc-ipq9574.c | 343
-rw-r--r-- drivers/clk/qcom/gcc-mdm9607.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-milos.c | 3225
-rw-r--r-- drivers/clk/qcom/gcc-msm8917.c | 617
-rw-r--r-- drivers/clk/qcom/gcc-msm8939.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-msm8953.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-msm8960.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-msm8974.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-qcm2290.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-qcs404.c | 3
-rw-r--r-- drivers/clk/qcom/gcc-qcs615.c | 3034
-rw-r--r-- drivers/clk/qcom/gcc-qcs8300.c | 3640
-rw-r--r-- drivers/clk/qcom/gcc-sar2130p.c | 2366
-rw-r--r-- drivers/clk/qcom/gcc-sc8280xp.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-sdm660.c | 74
-rw-r--r-- drivers/clk/qcom/gcc-sdm845.c | 43
-rw-r--r-- drivers/clk/qcom/gcc-sm6350.c | 28
-rw-r--r-- drivers/clk/qcom/gcc-sm8150.c | 6
-rw-r--r-- drivers/clk/qcom/gcc-sm8450.c | 181
-rw-r--r-- drivers/clk/qcom/gcc-sm8550.c | 8
-rw-r--r-- drivers/clk/qcom/gcc-sm8650.c | 14
-rw-r--r-- drivers/clk/qcom/gcc-sm8750.c | 3275
-rw-r--r-- drivers/clk/qcom/gcc-x1e80100.c | 50
-rw-r--r-- drivers/clk/qcom/gdsc.c | 98
-rw-r--r-- drivers/clk/qcom/gdsc.h | 1
-rw-r--r-- drivers/clk/qcom/gpucc-milos.c | 562
-rw-r--r-- drivers/clk/qcom/gpucc-msm8998.c | 3
-rw-r--r-- drivers/clk/qcom/gpucc-qcs615.c | 531
-rw-r--r-- drivers/clk/qcom/gpucc-sa8775p.c | 55
-rw-r--r-- drivers/clk/qcom/gpucc-sar2130p.c | 503
-rw-r--r-- drivers/clk/qcom/gpucc-sc7180.c | 3
-rw-r--r-- drivers/clk/qcom/gpucc-sc7280.c | 1
-rw-r--r-- drivers/clk/qcom/gpucc-sc8280xp.c | 3
-rw-r--r-- drivers/clk/qcom/gpucc-sdm660.c | 5
-rw-r--r-- drivers/clk/qcom/gpucc-sdm845.c | 1
-rw-r--r-- drivers/clk/qcom/gpucc-sm4450.c | 1
-rw-r--r-- drivers/clk/qcom/gpucc-sm6350.c | 11
-rw-r--r-- drivers/clk/qcom/gpucc-sm8150.c | 3
-rw-r--r-- drivers/clk/qcom/gpucc-sm8250.c | 3
-rw-r--r-- drivers/clk/qcom/gpucc-sm8350.c | 2
-rw-r--r-- drivers/clk/qcom/gpucc-sm8450.c | 50
-rw-r--r-- drivers/clk/qcom/gpucc-x1p42100.c | 587
-rw-r--r-- drivers/clk/qcom/hfpll.c | 1
-rw-r--r-- drivers/clk/qcom/ipq-cmn-pll.c | 468
-rw-r--r-- drivers/clk/qcom/kpss-xcc.c | 1
-rw-r--r-- drivers/clk/qcom/krait-cc.c | 1
-rw-r--r-- drivers/clk/qcom/lpassaudiocc-sc7280.c | 29
-rw-r--r-- drivers/clk/qcom/lpasscc-sc8280xp.c | 4
-rw-r--r-- drivers/clk/qcom/lpasscc-sdm845.c | 1
-rw-r--r-- drivers/clk/qcom/lpasscc-sm6115.c | 85
-rw-r--r-- drivers/clk/qcom/lpasscorecc-sc7180.c | 5
-rw-r--r-- drivers/clk/qcom/lpasscorecc-sc7280.c | 1
-rw-r--r-- drivers/clk/qcom/mmcc-apq8084.c | 2
-rw-r--r-- drivers/clk/qcom/mmcc-msm8960.c | 65
-rw-r--r-- drivers/clk/qcom/mmcc-msm8974.c | 2
-rw-r--r-- drivers/clk/qcom/mmcc-msm8994.c | 3
-rw-r--r-- drivers/clk/qcom/mmcc-msm8996.c | 4
-rw-r--r-- drivers/clk/qcom/mmcc-msm8998.c | 3
-rw-r--r-- drivers/clk/qcom/mmcc-sdm660.c | 10
-rw-r--r-- drivers/clk/qcom/nsscc-ipq9574.c | 3110
-rw-r--r-- drivers/clk/qcom/tcsrcc-glymur.c | 313
-rw-r--r-- drivers/clk/qcom/tcsrcc-sm8550.c | 18
-rw-r--r-- drivers/clk/qcom/tcsrcc-sm8650.c | 8
-rw-r--r-- drivers/clk/qcom/tcsrcc-sm8750.c | 141
-rw-r--r-- drivers/clk/qcom/tcsrcc-x1e80100.c | 4
-rw-r--r-- drivers/clk/qcom/videocc-milos.c | 403
-rw-r--r-- drivers/clk/qcom/videocc-qcs615.c | 338
-rw-r--r-- drivers/clk/qcom/videocc-sa8775p.c | 584
-rw-r--r-- drivers/clk/qcom/videocc-sc7180.c | 2
-rw-r--r-- drivers/clk/qcom/videocc-sdm845.c | 4
-rw-r--r-- drivers/clk/qcom/videocc-sm6350.c | 355
-rw-r--r-- drivers/clk/qcom/videocc-sm7150.c | 4
-rw-r--r-- drivers/clk/qcom/videocc-sm8150.c | 4
-rw-r--r-- drivers/clk/qcom/videocc-sm8350.c | 6
-rw-r--r-- drivers/clk/qcom/videocc-sm8450.c | 96
-rw-r--r-- drivers/clk/qcom/videocc-sm8550.c | 93
-rw-r--r-- drivers/clk/ralink/clk-mtmips.c | 55
-rw-r--r-- drivers/clk/renesas/Kconfig | 27
-rw-r--r-- drivers/clk/renesas/Makefile | 5
-rw-r--r-- drivers/clk/renesas/clk-mstp.c | 20
-rw-r--r-- drivers/clk/renesas/clk-r8a73a4.c | 1
-rw-r--r-- drivers/clk/renesas/clk-r8a7778.c | 1
-rw-r--r-- drivers/clk/renesas/clk-vbattb.c | 205
-rw-r--r-- drivers/clk/renesas/r7s9210-cpg-mssr.c | 17
-rw-r--r-- drivers/clk/renesas/r8a77970-cpg-mssr.c | 8
-rw-r--r-- drivers/clk/renesas/r8a779a0-cpg-mssr.c | 9
-rw-r--r-- drivers/clk/renesas/r8a779g0-cpg-mssr.c | 6
-rw-r--r-- drivers/clk/renesas/r8a779h0-cpg-mssr.c | 17
-rw-r--r-- drivers/clk/renesas/r9a06g032-clocks.c | 29
-rw-r--r-- drivers/clk/renesas/r9a07g043-cpg.c | 147
-rw-r--r-- drivers/clk/renesas/r9a07g044-cpg.c | 221
-rw-r--r-- drivers/clk/renesas/r9a08g045-cpg.c | 244
-rw-r--r-- drivers/clk/renesas/r9a09g011-cpg.c | 117
-rw-r--r-- drivers/clk/renesas/r9a09g047-cpg.c | 390
-rw-r--r-- drivers/clk/renesas/r9a09g056-cpg.c | 360
-rw-r--r-- drivers/clk/renesas/r9a09g057-cpg.c | 383
-rw-r--r-- drivers/clk/renesas/r9a09g077-cpg.c | 295
-rw-r--r-- drivers/clk/renesas/rcar-cpg-lib.c | 1
-rw-r--r-- drivers/clk/renesas/rcar-gen2-cpg.c | 5
-rw-r--r-- drivers/clk/renesas/rcar-gen2-cpg.h | 3
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.c | 7
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.h | 3
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.c | 6
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.h | 3
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.c | 212
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.h | 32
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.c | 680
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.h | 83
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.c | 512
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.h | 181
-rw-r--r-- drivers/clk/rockchip/Kconfig | 14
-rw-r--r-- drivers/clk/rockchip/Makefile | 4
-rw-r--r-- drivers/clk/rockchip/clk-cpu.c | 6
-rw-r--r-- drivers/clk/rockchip/clk-ddr.c | 13
-rw-r--r-- drivers/clk/rockchip/clk-gate-grf.c | 105
-rw-r--r-- drivers/clk/rockchip/clk-half-divider.c | 12
-rw-r--r-- drivers/clk/rockchip/clk-mmc-phase.c | 28
-rw-r--r-- drivers/clk/rockchip/clk-pll.c | 46
-rw-r--r-- drivers/clk/rockchip/clk-rk3036.c | 11
-rw-r--r-- drivers/clk/rockchip/clk-rk3188.c | 4
-rw-r--r-- drivers/clk/rockchip/clk-rk3288.c | 2
-rw-r--r-- drivers/clk/rockchip/clk-rk3328.c | 8
-rw-r--r-- drivers/clk/rockchip/clk-rk3368.c | 2
-rw-r--r-- drivers/clk/rockchip/clk-rk3528.c | 1187
-rw-r--r-- drivers/clk/rockchip/clk-rk3562.c | 1101
-rw-r--r-- drivers/clk/rockchip/clk-rk3568.c | 5
-rw-r--r-- drivers/clk/rockchip/clk-rk3576.c | 62
-rw-r--r-- drivers/clk/rockchip/clk-rk3588.c | 121
-rw-r--r-- drivers/clk/rockchip/clk-rv1126.c | 2
-rw-r--r-- drivers/clk/rockchip/clk.c | 140
-rw-r--r-- drivers/clk/rockchip/clk.h | 180
-rw-r--r-- drivers/clk/rockchip/gate-link.c | 85
-rw-r--r-- drivers/clk/rockchip/rst-rk3528.c | 306
-rw-r--r-- drivers/clk/rockchip/rst-rk3562.c | 429
-rw-r--r-- drivers/clk/samsung/Makefile | 5
-rw-r--r-- drivers/clk/samsung/clk-artpec8.c | 1044
-rw-r--r-- drivers/clk/samsung/clk-cpu.c | 16
-rw-r--r-- drivers/clk/samsung/clk-exynos-audss.c | 3
-rw-r--r-- drivers/clk/samsung/clk-exynos-clkout.c | 1
-rw-r--r-- drivers/clk/samsung/clk-exynos2200.c | 3928
-rw-r--r-- drivers/clk/samsung/clk-exynos3250.c | 6
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 78
-rw-r--r-- drivers/clk/samsung/clk-exynos4412-isp.c | 4
-rw-r--r-- drivers/clk/samsung/clk-exynos5-subcmu.c | 1
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 3
-rw-r--r-- drivers/clk/samsung/clk-exynos5260.c | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos5410.c | 4
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos5433.c | 4
-rw-r--r-- drivers/clk/samsung/clk-exynos7.c | 3
-rw-r--r-- drivers/clk/samsung/clk-exynos7870.c | 1829
-rw-r--r-- drivers/clk/samsung/clk-exynos7885.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos850.c | 4
-rw-r--r-- drivers/clk/samsung/clk-exynos8895.c | 2803
-rw-r--r-- drivers/clk/samsung/clk-exynos990.c | 2717
-rw-r--r-- drivers/clk/samsung/clk-exynosautov9.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynosautov920.c | 699
-rw-r--r-- drivers/clk/samsung/clk-fsd.c | 51
-rw-r--r-- drivers/clk/samsung/clk-gs101.c | 24
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 185
-rw-r--r-- drivers/clk/samsung/clk-pll.h | 8
-rw-r--r-- drivers/clk/samsung/clk-s3c64xx.c | 3
-rw-r--r-- drivers/clk/samsung/clk-s5pv210-audss.c | 3
-rw-r--r-- drivers/clk/samsung/clk-s5pv210.c | 1
-rw-r--r-- drivers/clk/samsung/clk.c | 6
-rw-r--r-- drivers/clk/samsung/clk.h | 1
-rw-r--r-- drivers/clk/sifive/fu540-prci.h | 2
-rw-r--r-- drivers/clk/sifive/fu740-prci.h | 2
-rw-r--r-- drivers/clk/sifive/sifive-prci.c | 11
-rw-r--r-- drivers/clk/sifive/sifive-prci.h | 4
-rw-r--r-- drivers/clk/socfpga/clk-pll-a10.c | 2
-rw-r--r-- drivers/clk/socfpga/clk-pll-s10.c | 6
-rw-r--r-- drivers/clk/socfpga/clk-pll.c | 4
-rw-r--r-- drivers/clk/sophgo/Kconfig | 19
-rw-r--r-- drivers/clk/sophgo/Makefile | 2
-rw-r--r-- drivers/clk/sophgo/clk-cv1800.c | 2
-rw-r--r-- drivers/clk/sophgo/clk-cv18xx-ip.c | 10
-rw-r--r-- drivers/clk/sophgo/clk-sg2042-clkgen.c | 19
-rw-r--r-- drivers/clk/sophgo/clk-sg2042-pll.c | 32
-rw-r--r-- drivers/clk/sophgo/clk-sg2044-pll.c | 628
-rw-r--r-- drivers/clk/sophgo/clk-sg2044.c | 1812
-rw-r--r-- drivers/clk/spacemit/Kconfig | 19
-rw-r--r-- drivers/clk/spacemit/Makefile | 5
-rw-r--r-- drivers/clk/spacemit/ccu-k1.c | 1209
-rw-r--r-- drivers/clk/spacemit/ccu_common.h | 48
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.c | 86
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.h | 50
-rw-r--r-- drivers/clk/spacemit/ccu_mix.c | 270
-rw-r--r-- drivers/clk/spacemit/ccu_mix.h | 223
-rw-r--r-- drivers/clk/spacemit/ccu_pll.c | 159
-rw-r--r-- drivers/clk/spacemit/ccu_pll.h | 86
-rw-r--r-- drivers/clk/spear/clk-aux-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-frac-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-gpt-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-vco-pll.c | 23
-rw-r--r-- drivers/clk/spear/spear1340_clock.c | 2
-rw-r--r-- drivers/clk/sprd/div.c | 13
-rw-r--r-- drivers/clk/sprd/gate.h | 2
-rw-r--r-- drivers/clk/sprd/pll.c | 8
-rw-r--r-- drivers/clk/sprd/ums512-clk.c | 4
-rw-r--r-- drivers/clk/st/clk-flexgen.c | 80
-rw-r--r-- drivers/clk/st/clkgen-fsyn.c | 33
-rw-r--r-- drivers/clk/st/clkgen-pll.c | 38
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7100-audio.c | 14
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-aon.c | 14
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-isp.c | 14
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-pll.c | 2
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-stg.c | 14
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-sys.c | 16
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh7110-vout.c | 14
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh71x0.c | 12
-rw-r--r-- drivers/clk/starfive/clk-starfive-jh71x0.h | 4
-rw-r--r-- drivers/clk/stm32/Kconfig | 15
-rw-r--r-- drivers/clk/stm32/Makefile | 1
-rw-r--r-- drivers/clk/stm32/clk-stm32-core.c | 28
-rw-r--r-- drivers/clk/stm32/clk-stm32mp1.c | 15
-rw-r--r-- drivers/clk/stm32/clk-stm32mp21.c | 1586
-rw-r--r-- drivers/clk/stm32/stm32mp21_rcc.h | 651
-rw-r--r-- drivers/clk/sunxi-ng/Kconfig | 59
-rw-r--r-- drivers/clk/sunxi-ng/Makefile | 6
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 50
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun4i-a10.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a100.c | 10
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 17
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 8
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 137
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h616.h | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c | 469
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c | 249
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c1701
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c35
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c42
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h7
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h18
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c12
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c26
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c55
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h79
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c45
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c12
-rw-r--r--drivers/clk/sunxi/Kconfig10
-rw-r--r--drivers/clk/tegra/Kconfig2
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c10
-rw-r--r--drivers/clk/tegra/clk-bpmp.c4
-rw-r--r--drivers/clk/tegra/clk-dfll.c2
-rw-r--r--drivers/clk/tegra/clk-divider.c28
-rw-r--r--drivers/clk/tegra/clk-periph.c10
-rw-r--r--drivers/clk/tegra/clk-pll.c52
-rw-r--r--drivers/clk/tegra/clk-super.c9
-rw-r--r--drivers/clk/tegra/clk-tegra114.c30
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c158
-rw-r--r--drivers/clk/tegra/clk-tegra210-emc.c24
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/clk/tegra/clk.h3
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c681
-rw-r--r--drivers/clk/ti/autoidle.c2
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c12
-rw-r--r--drivers/clk/ti/clk.c32
-rw-r--r--drivers/clk/ti/clkt_dpll.c36
-rw-r--r--drivers/clk/ti/clock.h6
-rw-r--r--drivers/clk/ti/composite.c6
-rw-r--r--drivers/clk/ti/divider.c12
-rw-r--r--drivers/clk/ti/dpll.c10
-rw-r--r--drivers/clk/ti/dpll3xxx.c7
-rw-r--r--drivers/clk/ti/dpll44xx.c89
-rw-r--r--drivers/clk/ti/fapll.c48
-rw-r--r--drivers/clk/ti/mux.c4
-rw-r--r--drivers/clk/ux500/clk-prcmu.c14
-rw-r--r--drivers/clk/versatile/clk-icst.c74
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c16
-rw-r--r--drivers/clk/visconti/pll.c19
-rw-r--r--drivers/clk/x86/clk-cgu.c35
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c479
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c48
-rw-r--r--drivers/clk/zynq/pll.c12
-rw-r--r--drivers/clk/zynqmp/divider.c23
-rw-r--r--drivers/clk/zynqmp/pll.c24
-rw-r--r--drivers/clocksource/Kconfig45
-rw-r--r--drivers/clocksource/Makefile6
-rw-r--r--drivers/clocksource/arm_arch_timer.c694
-rw-r--r--drivers/clocksource/arm_arch_timer_mmio.c440
-rw-r--r--drivers/clocksource/arm_global_timer.c45
-rw-r--r--drivers/clocksource/clps711x-timer.c23
-rw-r--r--drivers/clocksource/dw_apb_timer.c39
-rw-r--r--drivers/clocksource/exynos_mct.c3
-rw-r--r--drivers/clocksource/hyperv_timer.c31
-rw-r--r--drivers/clocksource/i8253.c4
-rw-r--r--drivers/clocksource/ingenic-sysost.c27
-rw-r--r--drivers/clocksource/jcore-pit.c15
-rw-r--r--drivers/clocksource/mips-gic-timer.c45
-rw-r--r--drivers/clocksource/renesas-ostm.c4
-rw-r--r--drivers/clocksource/scx200_hrt.c1
-rw-r--r--drivers/clocksource/sh_cmt.c84
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c1
-rw-r--r--drivers/clocksource/timer-cs5535.c1
-rw-r--r--drivers/clocksource/timer-econet-en751221.c216
-rw-r--r--drivers/clocksource/timer-gxp.c2
-rw-r--r--drivers/clocksource/timer-nxp-pit.c382
-rw-r--r--drivers/clocksource/timer-nxp-stm.c497
-rw-r--r--drivers/clocksource/timer-orion.c2
-rw-r--r--drivers/clocksource/timer-qcom.c1
-rw-r--r--drivers/clocksource/timer-ralink.c150
-rw-r--r--drivers/clocksource/timer-riscv.c6
-rw-r--r--drivers/clocksource/timer-rtl-otto.c42
-rw-r--r--drivers/clocksource/timer-stm32-lp.c98
-rw-r--r--drivers/clocksource/timer-sun5i.c4
-rw-r--r--drivers/clocksource/timer-tegra.c1
-rw-r--r--drivers/clocksource/timer-tegra186.c120
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c8
-rw-r--r--drivers/clocksource/timer-ti-dm.c129
-rw-r--r--drivers/clocksource/timer-vf-pit.c194
-rw-r--r--drivers/comedi/Kconfig9
-rw-r--r--drivers/comedi/comedi_buf.c155
-rw-r--r--drivers/comedi/comedi_fops.c178
-rw-r--r--drivers/comedi/comedi_internal.h1
-rw-r--r--drivers/comedi/drivers.c43
-rw-r--r--drivers/comedi/drivers/Makefile1
-rw-r--r--drivers/comedi/drivers/adl_pci7250.c220
-rw-r--r--drivers/comedi/drivers/adl_pci9118.c4
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/comedi_test.c20
-rw-r--r--drivers/comedi/drivers/das16.c7
-rw-r--r--drivers/comedi/drivers/das16m1.c3
-rw-r--r--drivers/comedi/drivers/das6402.c3
-rw-r--r--drivers/comedi/drivers/jr3_pci.c5
-rw-r--r--drivers/comedi/drivers/ni_atmio.c2
-rw-r--r--drivers/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/comedi/drivers/pcl726.c3
-rw-r--r--drivers/comedi/drivers/pcl812.c3
-rw-r--r--drivers/counter/104-quad-8.c2
-rw-r--r--drivers/counter/counter-chrdev.c2
-rw-r--r--drivers/counter/counter-core.c14
-rw-r--r--drivers/counter/ftm-quaddec.c3
-rw-r--r--drivers/counter/i8254.c4
-rw-r--r--drivers/counter/intel-qep.c12
-rw-r--r--drivers/counter/interrupt-cnt.c19
-rw-r--r--drivers/counter/microchip-tcb-capture.c206
-rw-r--r--drivers/counter/rz-mtu3-cnt.c2
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c26
-rw-r--r--drivers/counter/stm32-timer-cnt.c26
-rw-r--r--drivers/counter/ti-ecap-capture.c23
-rw-r--r--drivers/counter/ti-eqep.c36
-rw-r--r--drivers/cpufreq/Kconfig32
-rw-r--r--drivers/cpufreq/Kconfig.arm34
-rw-r--r--drivers/cpufreq/Kconfig.powerpc25
-rw-r--r--drivers/cpufreq/Kconfig.x8612
-rw-r--r--drivers/cpufreq/Makefile8
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c72
-rw-r--r--drivers/cpufreq/airoha-cpufreq.c153
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h75
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c216
-rw-r--r--drivers/cpufreq/amd-pstate.c1259
-rw-r--r--drivers/cpufreq/amd-pstate.h71
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c84
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c10
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c7
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c1
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c13
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c350
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c10
-rw-r--r--drivers/cpufreq/cpufreq-dt.c39
-rw-r--r--drivers/cpufreq/cpufreq-dt.h2
-rw-r--r--drivers/cpufreq/cpufreq.c667
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c24
-rw-r--r--drivers/cpufreq/cpufreq_governor.c45
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c28
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.h23
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c1
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c3
-rw-r--r--drivers/cpufreq/e_powersaver.c7
-rw-r--r--drivers/cpufreq/elanfreq.c2
-rw-r--r--drivers/cpufreq/freq_table.c43
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c5
-rw-r--r--drivers/cpufreq/intel_pstate.c619
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c3
-rw-r--r--drivers/cpufreq/longhaul.c28
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c5
-rw-r--r--drivers/cpufreq/loongson3_cpufreq.c20
-rw-r--r--drivers/cpufreq/maple-cpufreq.c242
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c143
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c28
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c3
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernow-k6.c1
-rw-r--r--drivers/cpufreq/powernow-k7.c15
-rw-r--r--drivers/cpufreq/powernow-k8.c3
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c25
-rw-r--r--drivers/cpufreq/powernv-trace.h44
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c173
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h33
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c102
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c150
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c52
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c97
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c3
-rw-r--r--drivers/cpufreq/raspberrypi-cpufreq.c2
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs222
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c11
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c4
-rw-r--r--drivers/cpufreq/sc520_freq.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c120
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c25
-rw-r--r--drivers/cpufreq/sh-cpufreq.c7
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c2
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c3
-rw-r--r--drivers/cpufreq/speedstep-centrino.c1
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-lib.c12
-rw-r--r--drivers/cpufreq/speedstep-lib.h10
-rw-r--r--drivers/cpufreq/speedstep-smi.c1
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c54
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c49
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c49
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c3
-rw-r--r--drivers/cpufreq/ti-cpufreq.c22
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c3
-rw-r--r--drivers/cpufreq/virtual-cpufreq.c332
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/cpuidle-arm.c8
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c2
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c17
-rw-r--r--drivers/cpuidle/cpuidle-psci.c112
-rw-r--r--drivers/cpuidle/cpuidle-psci.h4
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c1
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c15
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c33
-rw-r--r--drivers/cpuidle/cpuidle.c20
-rw-r--r--drivers/cpuidle/driver.c4
-rw-r--r--drivers/cpuidle/dt_idle_states.c14
-rw-r--r--drivers/cpuidle/governors/menu.c319
-rw-r--r--drivers/cpuidle/governors/teo.c278
-rw-r--r--drivers/cpuidle/sysfs.c34
-rw-r--r--drivers/crypto/Kconfig76
-rw-r--r--drivers/crypto/Makefile10
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c154
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c54
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c296
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h31
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c49
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c110
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c103
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h17
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c10
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c4
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c800
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h28
-rw-r--r--drivers/crypto/atmel-aes.c8
-rw-r--r--drivers/crypto/atmel-ecc.c2
-rw-r--r--drivers/crypto/atmel-sha.c9
-rw-r--r--drivers/crypto/atmel-sha204a.c10
-rw-r--r--drivers/crypto/atmel-tdes.c8
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c12
-rw-r--r--drivers/crypto/bcm/cipher.c34
-rw-r--r--drivers/crypto/bcm/spu.c7
-rw-r--r--drivers/crypto/bcm/spu2.c3
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/blob_gen.c3
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c3
-rw-r--r--drivers/crypto/caam/caampkc.c11
-rw-r--r--drivers/crypto/caam/ctrl.c22
-rw-r--r--drivers/crypto/caam/debugfs.c2
-rw-r--r--drivers/crypto/caam/debugfs.h2
-rw-r--r--drivers/crypto/caam/intern.h5
-rw-r--r--drivers/crypto/caam/jr.c5
-rw-r--r--drivers/crypto/caam/qi.c18
-rw-r--r--drivers/crypto/cavium/Makefile3
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c6
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c2
-rw-r--r--drivers/crypto/cavium/zip/Makefile12
-rw-r--r--drivers/crypto/cavium/zip/common.h222
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c301
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.h79
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.c200
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_device.c202
-rw-r--r--drivers/crypto/cavium/zip/zip_device.h108
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.c223
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c651
-rw-r--r--drivers/crypto/cavium/zip/zip_main.h120
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.c114
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.h78
-rw-r--r--drivers/crypto/cavium/zip/zip_regs.h1347
-rw-r--r--drivers/crypto/ccp/Makefile3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c15
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c13
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c3
-rw-r--r--drivers/crypto/ccp/ccp-ops.c174
-rw-r--r--drivers/crypto/ccp/dbc.c53
-rw-r--r--drivers/crypto/ccp/hsti.c8
-rw-r--r--drivers/crypto/ccp/psp-dev.c20
-rw-r--r--drivers/crypto/ccp/psp-dev.h8
-rw-r--r--drivers/crypto/ccp/sev-dev.c597
-rw-r--r--drivers/crypto/ccp/sev-dev.h9
-rw-r--r--drivers/crypto/ccp/sfs.c311
-rw-r--r--drivers/crypto/ccp/sfs.h47
-rw-r--r--drivers/crypto/ccp/sp-dev.c14
-rw-r--r--drivers/crypto/ccp/sp-pci.c20
-rw-r--r--drivers/crypto/ccp/sp-platform.c2
-rw-r--r--drivers/crypto/ccree/cc_aead.c4
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c54
-rw-r--r--drivers/crypto/ccree/cc_cipher.c6
-rw-r--r--drivers/crypto/ccree/cc_driver.c2
-rw-r--r--drivers/crypto/ccree/cc_hash.c32
-rw-r--r--drivers/crypto/ccree/cc_pm.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c261
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/exynos-rng.c2
-rw-r--r--drivers/crypto/gemini/sl3516-ce-core.c2
-rw-r--r--drivers/crypto/geode-aes.c2
-rw-r--r--drivers/crypto/hisilicon/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/debugfs.c5
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h23
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c414
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c376
-rw-r--r--drivers/crypto/hisilicon/qm.c668
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c5
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h91
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c841
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h11
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c340
-rw-r--r--drivers/crypto/hisilicon/sgl.c15
-rw-r--r--drivers/crypto/hisilicon/trng/trng.c2
-rw-r--r--drivers/crypto/hisilicon/zip/Makefile2
-rw-r--r--drivers/crypto/hisilicon/zip/dae_main.c277
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h26
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c13
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c419
-rw-r--r--drivers/crypto/img-hash.c53
-rw-r--r--drivers/crypto/inside-secure/Makefile1
-rw-r--r--drivers/crypto/inside-secure/eip93/Kconfig20
-rw-r--r--drivers/crypto/inside-secure/eip93/Makefile5
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.c711
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.h38
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aes.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.c413
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.h60
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.c822
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.h24
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-des.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.c875
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.h82
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.c501
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.h151
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-regs.h335
-rw-r--r--drivers/crypto/inside-secure/safexcel.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel.h1
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c104
-rw-r--r--drivers/crypto/intel/iaa/Makefile2
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c251
-rw-r--r--drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c5
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-aes-core.c2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-ecc.c2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c15
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.c4
-rw-r--r--drivers/crypto/intel/qat/Kconfig19
-rw-r--r--drivers/crypto/intel/qat/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c47
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_drv.c25
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c41
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c29
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c950
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h168
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_drv.c226
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c6
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c45
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c6
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_drv.c47
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile75
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h87
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.c238
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.h49
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c205
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h38
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c42
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dbgfs.c13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.c64
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.h17
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_fw_config.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c70
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c83
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c305
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c117
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c71
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h52
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c124
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c818
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h504
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c56
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h17
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c258
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h198
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c52
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h36
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c86
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c24
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.c19
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.c71
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c91
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_debug.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h27
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h99
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h318
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h33
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c203
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c165
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.h6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c92
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_req.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c9
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_hal.c15
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c457
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c8
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c45
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c2
-rw-r--r--drivers/crypto/loongson/Kconfig5
-rw-r--r--drivers/crypto/loongson/Makefile1
-rw-r--r--drivers/crypto/loongson/loongson-rng.c209
-rw-r--r--drivers/crypto/marvell/Kconfig6
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c56
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h9
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c31
-rw-r--r--drivers/crypto/marvell/cesa/hash.c12
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c53
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c16
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c103
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h40
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c45
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h128
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c25
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h15
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c34
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c19
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c159
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c60
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c21
-rw-r--r--drivers/crypto/mxs-dcp.c30
-rw-r--r--drivers/crypto/n2_asm.S96
-rw-r--r--drivers/crypto/n2_core.c2168
-rw-r--r--drivers/crypto/n2_core.h232
-rw-r--r--drivers/crypto/nx/nx-842.c33
-rw-r--r--drivers/crypto/nx/nx-842.h15
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c16
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c8
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c128
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c33
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c109
-rw-r--r--drivers/crypto/nx/nx-sha256.c130
-rw-r--r--drivers/crypto/nx/nx-sha512.c143
-rw-r--r--drivers/crypto/nx/nx.c53
-rw-r--r--drivers/crypto/nx/nx.h14
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c66
-rw-r--r--drivers/crypto/omap-aes.h8
-rw-r--r--drivers/crypto/omap-des.c60
-rw-r--r--drivers/crypto/omap-sham.c32
-rw-r--r--drivers/crypto/padlock-sha.c478
-rw-r--r--drivers/crypto/qce/aead.c2
-rw-r--r--drivers/crypto/qce/core.c129
-rw-r--r--drivers/crypto/qce/core.h9
-rw-r--r--drivers/crypto/qce/dma.c22
-rw-r--r--drivers/crypto/qce/dma.h3
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/qce/skcipher.c2
-rw-r--r--drivers/crypto/qcom-rng.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c54
-rw-r--r--drivers/crypto/s5p-sss.c64
-rw-r--r--drivers/crypto/sa2ul.c67
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/starfive/jh7110-aes.c12
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.c7
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c19
-rw-r--r--drivers/crypto/starfive/jh7110-rsa.c2
-rw-r--r--drivers/crypto/stm32/Kconfig9
-rw-r--r--drivers/crypto/stm32/Makefile1
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c480
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c39
-rw-r--r--drivers/crypto/stm32/stm32-hash.c3
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/tegra/tegra-se-aes.c399
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c339
-rw-r--r--drivers/crypto/tegra/tegra-se-key.c29
-rw-r--r--drivers/crypto/tegra/tegra-se-main.c22
-rw-r--r--drivers/crypto/tegra/tegra-se.h39
-rw-r--r--drivers/crypto/ti/Kconfig14
-rw-r--r--drivers/crypto/ti/Makefile3
-rw-r--r--drivers/crypto/ti/dthev2-aes.c413
-rw-r--r--drivers/crypto/ti/dthev2-common.c217
-rw-r--r--drivers/crypto/ti/dthev2-common.h101
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c104
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c8
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c38
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c17
-rw-r--r--drivers/crypto/xilinx/Makefile1
-rw-r--r--drivers/crypto/xilinx/xilinx-trng.c405
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c2
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c100
-rw-r--r--drivers/cxl/Kconfig88
-rw-r--r--drivers/cxl/Makefile20
-rw-r--r--drivers/cxl/acpi.c140
-rw-r--r--drivers/cxl/core/Makefile4
-rw-r--r--drivers/cxl/core/cdat.c163
-rw-r--r--drivers/cxl/core/core.h80
-rw-r--r--drivers/cxl/core/edac.c2109
-rw-r--r--drivers/cxl/core/features.c703
-rw-r--r--drivers/cxl/core/hdm.c672
-rw-r--r--drivers/cxl/core/mbox.c331
-rw-r--r--drivers/cxl/core/mce.c65
-rw-r--r--drivers/cxl/core/mce.h20
-rw-r--r--drivers/cxl/core/memdev.c226
-rw-r--r--drivers/cxl/core/pci.c266
-rw-r--r--drivers/cxl/core/pmem.c27
-rw-r--r--drivers/cxl/core/pmu.c2
-rw-r--r--drivers/cxl/core/port.c488
-rw-r--r--drivers/cxl/core/ras.c126
-rw-r--r--drivers/cxl/core/region.c1412
-rw-r--r--drivers/cxl/core/regs.c138
-rw-r--r--drivers/cxl/core/suspend.c4
-rw-r--r--drivers/cxl/core/trace.h458
-rw-r--r--drivers/cxl/cxl.h151
-rw-r--r--drivers/cxl/cxlmem.h168
-rw-r--r--drivers/cxl/cxlpci.h8
-rw-r--r--drivers/cxl/mem.c8
-rw-r--r--drivers/cxl/pci.c142
-rw-r--r--drivers/cxl/pmem.c83
-rw-r--r--drivers/cxl/port.c91
-rw-r--r--drivers/dax/cxl.c2
-rw-r--r--drivers/dax/dax-private.h26
-rw-r--r--drivers/dax/device.c33
-rw-r--r--drivers/dax/hmem/hmem.c1
-rw-r--r--drivers/dax/kmem.c11
-rw-r--r--drivers/dax/pmem.c1
-rw-r--r--drivers/dax/pmem/Makefile7
-rw-r--r--drivers/dax/pmem/pmem.c10
-rw-r--r--drivers/dax/super.c5
-rw-r--r--drivers/devfreq/Kconfig11
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq-event.c8
-rw-r--r--drivers/devfreq/devfreq.c23
-rw-r--r--drivers/devfreq/event/exynos-nocp.c2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c94
-rw-r--r--drivers/devfreq/exynos-bus.c5
-rw-r--r--drivers/devfreq/governor_userspace.c6
-rw-r--r--drivers/devfreq/hisi_uncore_freq.c658
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c7
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c40
-rw-r--r--drivers/dibs/Kconfig23
-rw-r--r--drivers/dibs/Makefile8
-rw-r--r--drivers/dibs/dibs_loopback.c361
-rw-r--r--drivers/dibs/dibs_loopback.h57
-rw-r--r--drivers/dibs/dibs_main.c278
-rw-r--r--drivers/dma-buf/dma-buf.c334
-rw-r--r--drivers/dma-buf/dma-fence-array.c28
-rw-r--r--drivers/dma-buf/dma-fence-chain.c7
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c160
-rw-r--r--drivers/dma-buf/dma-fence.c177
-rw-r--r--drivers/dma-buf/dma-heap.c4
-rw-r--r--drivers/dma-buf/dma-resv.c17
-rw-r--r--drivers/dma-buf/heaps/Kconfig10
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c44
-rw-r--r--drivers/dma-buf/heaps/system_heap.c48
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c268
-rw-r--r--drivers/dma-buf/st-dma-fence.c6
-rw-r--r--drivers/dma-buf/sw_sync.c43
-rw-r--r--drivers/dma-buf/sync_debug.c70
-rw-r--r--drivers/dma-buf/sync_debug.h2
-rw-r--r--drivers/dma-buf/sync_file.c24
-rw-r--r--drivers/dma-buf/udmabuf.c305
-rw-r--r--drivers/dma/Kconfig58
-rw-r--r--drivers/dma/Makefile5
-rw-r--r--drivers/dma/acpi-dma.c43
-rw-r--r--drivers/dma/altera-msgdma.c2
-rw-r--r--drivers/dma/amd/Kconfig28
-rw-r--r--drivers/dma/amd/Makefile2
-rw-r--r--drivers/dma/amd/ae4dma/Makefile10
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-dev.c157
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-pci.c156
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma.h102
-rw-r--r--drivers/dma/amd/ptdma/Makefile (renamed from drivers/dma/ptdma/Makefile)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-debugfs.c143
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dev.c (renamed from drivers/dma/ptdma/ptdma-dev.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dmaengine.c659
-rw-r--r--drivers/dma/amd/ptdma/ptdma-pci.c (renamed from drivers/dma/ptdma/ptdma-pci.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma.h (renamed from drivers/dma/ptdma/ptdma.h)5
-rw-r--r--drivers/dma/amd/qdma/qdma.c52
-rw-r--r--drivers/dma/apple-admac.c9
-rw-r--r--drivers/dma/arm-dma350.c660
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/at_xdmac.c10
-rw-r--r--drivers/dma/bcm-sba-raid.c2
-rw-r--r--drivers/dma/bcm2835-dma.c24
-rw-r--r--drivers/dma/bestcomm/bestcomm.c2
-rw-r--r--drivers/dma/cv1800b-dmamux.c259
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmaengine.c46
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c2
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c40
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c65
-rw-r--r--drivers/dma/dw/acpi.c6
-rw-r--r--drivers/dma/dw/internal.h8
-rw-r--r--drivers/dma/dw/pci.c12
-rw-r--r--drivers/dma/dw/platform.c10
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c15
-rw-r--r--drivers/dma/ep93xx_dma.c3
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c5
-rw-r--r--drivers/dma/fsl-edma-common.c66
-rw-r--r--drivers/dma/fsl-edma-common.h22
-rw-r--r--drivers/dma/fsl-edma-main.c292
-rw-r--r--drivers/dma/fsl-qdma.c5
-rw-r--r--drivers/dma/fsl_raid.c2
-rw-r--r--drivers/dma/fsldma.c22
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/idma64.c2
-rw-r--r--drivers/dma/idxd/Makefile2
-rw-r--r--drivers/dma/idxd/cdev.c28
-rw-r--r--drivers/dma/idxd/compat.c2
-rw-r--r--drivers/dma/idxd/defaults.c6
-rw-r--r--drivers/dma/idxd/device.c14
-rw-r--r--drivers/dma/idxd/idxd.h17
-rw-r--r--drivers/dma/idxd/init.c682
-rw-r--r--drivers/dma/idxd/irq.c85
-rw-r--r--drivers/dma/idxd/registers.h70
-rw-r--r--drivers/dma/idxd/submit.c6
-rw-r--r--drivers/dma/idxd/sysfs.c16
-rw-r--r--drivers/dma/img-mdc-dma.c4
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c9
-rw-r--r--drivers/dma/ioat/dca.c8
-rw-r--r--drivers/dma/ioat/dma.c5
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/ioat/hw.h3
-rw-r--r--drivers/dma/ioat/init.c4
-rw-r--r--drivers/dma/k3dma.c2
-rw-r--r--drivers/dma/loongson2-apb-dma.c705
-rw-r--r--drivers/dma/ls2x-apb-dma.c705
-rw-r--r--drivers/dma/mcf-edma-main.c2
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c12
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c2
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c2
-rw-r--r--drivers/dma/milbeaut-hdmac.c2
-rw-r--r--drivers/dma/milbeaut-xdmac.c2
-rw-r--r--drivers/dma/mmp_pdma.c291
-rw-r--r--drivers/dma/mmp_tdma.c4
-rw-r--r--drivers/dma/moxart-dma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c2
-rw-r--r--drivers/dma/mv_xor.c28
-rw-r--r--drivers/dma/mv_xor_v2.c4
-rw-r--r--drivers/dma/nbpfaxi.c26
-rw-r--r--drivers/dma/owl-dma.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c6
-rw-r--r--drivers/dma/ptdma/Kconfig13
-rw-r--r--drivers/dma/ptdma/ptdma-debugfs.c106
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c411
-rw-r--r--drivers/dma/pxa_dma.c6
-rw-r--r--drivers/dma/qcom/bam_dma.c10
-rw-r--r--drivers/dma/qcom/gpi.c42
-rw-r--r--drivers/dma/qcom/hidma.c2
-rw-r--r--drivers/dma/qcom/qcom_adm.c2
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c4
-rw-r--r--drivers/dma/sh/Kconfig10
-rw-r--r--drivers/dma/sh/rcar-dmac.c6
-rw-r--r--drivers/dma/sh/rz-dmac.c113
-rw-r--r--drivers/dma/sh/shdma-base.c29
-rw-r--r--drivers/dma/sh/shdmac.c19
-rw-r--r--drivers/dma/sh/usb-dmac.c4
-rw-r--r--drivers/dma/sprd-dma.c2
-rw-r--r--drivers/dma/st_fdma.c2
-rw-r--r--drivers/dma/stm32/stm32-dma.c12
-rw-r--r--drivers/dma/stm32/stm32-dma3.c131
-rw-r--r--drivers/dma/stm32/stm32-mdma.c8
-rw-r--r--drivers/dma/sun4i-dma.c254
-rw-r--r--drivers/dma/sun6i-dma.c5
-rw-r--r--drivers/dma/tegra186-gpc-dma.c12
-rw-r--r--drivers/dma/tegra20-apb-dma.c2
-rw-r--r--drivers/dma/tegra210-adma.c287
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dma/ti/cppi41.c2
-rw-r--r--drivers/dma/ti/edma.c24
-rw-r--r--drivers/dma/ti/k3-udma-glue.c15
-rw-r--r--drivers/dma/ti/k3-udma.c127
-rw-r--r--drivers/dma/ti/omap-dma.c2
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/txx9dmac.c4
-rw-r--r--drivers/dma/uniphier-mdmac.c2
-rw-r--r--drivers/dma/uniphier-xdmac.c2
-rw-r--r--drivers/dma/xgene-dma.c2
-rw-r--r--drivers/dma/xilinx/xdma.c10
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c123
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c2
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c9
-rw-r--r--drivers/dpll/Kconfig6
-rw-r--r--drivers/dpll/Makefile2
-rw-r--r--drivers/dpll/dpll_core.c52
-rw-r--r--drivers/dpll/dpll_core.h3
-rw-r--r--drivers/dpll/dpll_netlink.c335
-rw-r--r--drivers/dpll/dpll_netlink.h2
-rw-r--r--drivers/dpll/dpll_nl.c16
-rw-r--r--drivers/dpll/dpll_nl.h1
-rw-r--r--drivers/dpll/zl3073x/Kconfig39
-rw-r--r--drivers/dpll/zl3073x/Makefile10
-rw-r--r--drivers/dpll/zl3073x/core.c1266
-rw-r--r--drivers/dpll/zl3073x/core.h427
-rw-r--r--drivers/dpll/zl3073x/devlink.c390
-rw-r--r--drivers/dpll/zl3073x/devlink.h15
-rw-r--r--drivers/dpll/zl3073x/dpll.c2376
-rw-r--r--drivers/dpll/zl3073x/dpll.h48
-rw-r--r--drivers/dpll/zl3073x/flash.c666
-rw-r--r--drivers/dpll/zl3073x/flash.h29
-rw-r--r--drivers/dpll/zl3073x/fw.c419
-rw-r--r--drivers/dpll/zl3073x/fw.h52
-rw-r--r--drivers/dpll/zl3073x/i2c.c76
-rw-r--r--drivers/dpll/zl3073x/prop.c358
-rw-r--r--drivers/dpll/zl3073x/prop.h34
-rw-r--r--drivers/dpll/zl3073x/regs.h314
-rw-r--r--drivers/dpll/zl3073x/spi.c76
-rw-r--r--drivers/edac/Kconfig81
-rw-r--r--drivers/edac/Makefile10
-rw-r--r--drivers/edac/a72_edac.c225
-rw-r--r--drivers/edac/altera_edac.c24
-rw-r--r--drivers/edac/altera_edac.h2
-rw-r--r--drivers/edac/amd64_edac.c169
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/amd8111_edac.c596
-rw-r--r--drivers/edac/amd8111_edac.h118
-rw-r--r--drivers/edac/amd8131_edac.c358
-rw-r--r--drivers/edac/amd8131_edac.h107
-rw-r--r--drivers/edac/armada_xp_edac.c4
-rw-r--r--drivers/edac/aspeed_edac.c2
-rw-r--r--drivers/edac/bluefield_edac.c182
-rw-r--r--drivers/edac/cell_edac.c281
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/debugfs.c5
-rw-r--r--drivers/edac/dmc520_edac.c2
-rw-r--r--drivers/edac/ecs.c207
-rw-r--r--drivers/edac/edac_device.c185
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c30
-rw-r--r--drivers/edac/fsl_ddr_edac.c141
-rw-r--r--drivers/edac/fsl_ddr_edac.h13
-rw-r--r--drivers/edac/highbank_l2_edac.c2
-rw-r--r--drivers/edac/highbank_mc_edac.c2
-rw-r--r--drivers/edac/i10nm_base.c554
-rw-r--r--drivers/edac/i5000_edac.c8
-rw-r--r--drivers/edac/i5400_edac.c3
-rw-r--r--drivers/edac/i7300_edac.c7
-rw-r--r--drivers/edac/ie31200_edac.c686
-rw-r--r--drivers/edac/igen6_edac.c175
-rw-r--r--drivers/edac/layerscape_edac.c3
-rw-r--r--drivers/edac/loongson_edac.c157
-rw-r--r--drivers/edac/mce_amd.c19
-rw-r--r--drivers/edac/mem_repair.c357
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/npcm_edac.c2
-rw-r--r--drivers/edac/octeon_edac-l2c.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c2
-rw-r--r--drivers/edac/octeon_edac-pc.c2
-rw-r--r--drivers/edac/octeon_edac-pci.c2
-rw-r--r--drivers/edac/pnd2_edac.c4
-rw-r--r--drivers/edac/qcom_edac.c14
-rw-r--r--drivers/edac/scrub.c210
-rw-r--r--drivers/edac/skx_base.c44
-rw-r--r--drivers/edac/skx_common.c178
-rw-r--r--drivers/edac/skx_common.h91
-rw-r--r--drivers/edac/synopsys_edac.c99
-rw-r--r--drivers/edac/ti_edac.c2
-rw-r--r--drivers/edac/versal_edac.c2
-rw-r--r--drivers/edac/versalnet_edac.c960
-rw-r--r--drivers/edac/xgene_edac.c19
-rw-r--r--drivers/edac/zynqmp_edac.c2
-rw-r--r--drivers/eisa/Makefile10
-rw-r--r--drivers/eisa/eisa-bus.c2
-rw-r--r--drivers/extcon/Kconfig13
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-adc-jack.c4
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-fsa9480.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c2
-rw-r--r--drivers/extcon/extcon-intel-mrfld.c2
-rw-r--r--drivers/extcon/extcon-max14526.c302
-rw-r--r--drivers/extcon/extcon-max3355.c2
-rw-r--r--drivers/extcon/extcon-max77843.c2
-rw-r--r--drivers/extcon/extcon-ptn5150.c2
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/extcon/extcon-rtk-type-c.c4
-rw-r--r--drivers/extcon/extcon-usb-gpio.c2
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c2
-rw-r--r--drivers/firewire/Kconfig2
-rw-r--r--drivers/firewire/core-card.c499
-rw-r--r--drivers/firewire/core-cdev.c83
-rw-r--r--drivers/firewire/core-device.c46
-rw-r--r--drivers/firewire/core-topology.c93
-rw-r--r--drivers/firewire/core-transaction.c234
-rw-r--r--drivers/firewire/core.h24
-rw-r--r--drivers/firewire/device-attribute-test.c2
-rw-r--r--drivers/firewire/net.c4
-rw-r--r--drivers/firewire/ohci.c529
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/firmware/Kconfig32
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/arm_ffa/bus.c23
-rw-r--r--drivers/firmware/arm_ffa/driver.c581
-rw-r--r--drivers/firmware/arm_scmi/Kconfig13
-rw-r--r--drivers/firmware/arm_scmi/Makefile1
-rw-r--r--drivers/firmware/arm_scmi/bus.c197
-rw-r--r--drivers/firmware/arm_scmi/clock.c33
-rw-r--r--drivers/firmware/arm_scmi/common.h60
-rw-r--r--drivers/firmware/arm_scmi/driver.c286
-rw-r--r--drivers/firmware/arm_scmi/notify.c39
-rw-r--r--drivers/firmware/arm_scmi/perf.c46
-rw-r--r--drivers/firmware/arm_scmi/protocols.h2
-rw-r--r--drivers/firmware/arm_scmi/quirks.c327
-rw-r--r--drivers/firmware/arm_scmi/quirks.h52
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c90
-rw-r--r--drivers/firmware/arm_scmi/scmi_power_control.c22
-rw-r--r--drivers/firmware/arm_scmi/shmem.c85
-rw-r--r--drivers/firmware/arm_scmi/transports/mailbox.c23
-rw-r--r--drivers/firmware/arm_scmi/transports/optee.c21
-rw-r--r--drivers/firmware/arm_scmi/transports/smc.c14
-rw-r--r--drivers/firmware/arm_scmi/transports/virtio.c19
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Kconfig25
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Makefile2
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c5
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c276
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c263
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c120
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx95.rst853
-rw-r--r--drivers/firmware/arm_scmi/voltage.c2
-rw-r--r--drivers/firmware/arm_scpi.c5
-rw-r--r--drivers/firmware/arm_sdei.c13
-rw-r--r--drivers/firmware/broadcom/bcm47xx_sprom.c2
-rw-r--r--drivers/firmware/cirrus/Kconfig15
-rw-r--r--drivers/firmware/cirrus/Makefile2
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c139
-rw-r--r--drivers/firmware/cirrus/test/Makefile23
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_bin.c203
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c725
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c367
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_utils.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c478
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin.c2556
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c595
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c688
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c3281
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c1838
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c2669
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c2211
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c1347
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_tests.c14
-rw-r--r--drivers/firmware/dmi-sysfs.c28
-rw-r--r--drivers/firmware/dmi_scan.c4
-rw-r--r--drivers/firmware/efi/Kconfig47
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/cper-arm.c2
-rw-r--r--drivers/firmware/efi/cper-x86.c2
-rw-r--r--drivers/firmware/efi/cper.c6
-rw-r--r--drivers/firmware/efi/cper_cxl.c39
-rw-r--r--drivers/firmware/efi/cper_cxl.h66
-rw-r--r--drivers/firmware/efi/dev-path-parser.c4
-rw-r--r--drivers/firmware/efi/efi-init.c29
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c59
-rw-r--r--drivers/firmware/efi/efibc.c2
-rw-r--r--drivers/firmware/efi/embedded-firmware.c4
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile21
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot24
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c22
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c59
-rw-r--r--drivers/firmware/efi/libstub/efistub.h25
-rw-r--r--drivers/firmware/efi/libstub/file.c22
-rw-r--r--drivers/firmware/efi/libstub/gop.c323
-rw-r--r--drivers/firmware/efi/libstub/intrinsics.c26
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c4
-rw-r--r--drivers/firmware/efi/libstub/mem.c20
-rw-r--r--drivers/firmware/efi/libstub/pci.c34
-rw-r--r--drivers/firmware/efi/libstub/printk.c4
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c11
-rw-r--r--drivers/firmware/efi/libstub/relocate.c13
-rw-r--r--drivers/firmware/efi/libstub/tpm.c9
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c233
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-gzip.c68
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-zstd.c49
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S32
-rw-r--r--drivers/firmware/efi/libstub/zboot.c65
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds14
-rw-r--r--drivers/firmware/efi/memattr.c18
-rw-r--r--drivers/firmware/efi/memmap.c3
-rw-r--r--drivers/firmware/efi/mokvar-table.c59
-rw-r--r--drivers/firmware/efi/ovmf-debug-log.c111
-rw-r--r--drivers/firmware/efi/rci2-table.c2
-rw-r--r--drivers/firmware/efi/stmm/tee_stmm_efi.c61
-rw-r--r--drivers/firmware/efi/sysfb_efi.c2
-rw-r--r--drivers/firmware/efi/test/efi_test.c4
-rw-r--r--drivers/firmware/efi/tpm.c26
-rw-r--r--drivers/firmware/efi/vars.c16
-rw-r--r--drivers/firmware/google/cbmem.c8
-rw-r--r--drivers/firmware/google/coreboot_table.c2
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c14
-rw-r--r--drivers/firmware/google/gsmi.c10
-rw-r--r--drivers/firmware/google/memconsole.c2
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/imx/Kconfig24
-rw-r--r--drivers/firmware/imx/Makefile2
-rw-r--r--drivers/firmware/imx/imx-dsp.c2
-rw-r--r--drivers/firmware/imx/imx-scu.c1
-rw-r--r--drivers/firmware/imx/sm-cpu.c85
-rw-r--r--drivers/firmware/imx/sm-lmm.c91
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/firmware/memmap.c2
-rw-r--r--drivers/firmware/meson/Kconfig2
-rw-r--r--drivers/firmware/meson/meson_sm.c7
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c48
-rw-r--r--drivers/firmware/mtk-adsp-ipc.c9
-rw-r--r--drivers/firmware/psci/psci.c49
-rw-r--r--drivers/firmware/psci/psci_checker.c4
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c18
-rw-r--r--drivers/firmware/qcom/qcom_scm-smc.c6
-rw-r--r--drivers/firmware/qcom/qcom_scm.c469
-rw-r--r--drivers/firmware/qcom/qcom_scm.h16
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.c68
-rw-r--r--drivers/firmware/qemu_fw_cfg.c6
-rw-r--r--drivers/firmware/raspberrypi.c2
-rw-r--r--drivers/firmware/samsung/Kconfig14
-rw-r--r--drivers/firmware/samsung/Makefile4
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c239
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.h29
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c766
-rw-r--r--drivers/firmware/samsung/exynos-acpm.h23
-rw-r--r--drivers/firmware/smccc/kvm_guest.c76
-rw-r--r--drivers/firmware/smccc/smccc.c18
-rw-r--r--drivers/firmware/smccc/soc_id.c80
-rw-r--r--drivers/firmware/stratix10-rsu.c2
-rw-r--r--drivers/firmware/stratix10-svc.c25
-rw-r--r--drivers/firmware/sysfb.c45
-rw-r--r--drivers/firmware/sysfb_simplefb.c31
-rw-r--r--drivers/firmware/tegra/Kconfig5
-rw-r--r--drivers/firmware/tegra/Makefile1
-rw-r--r--drivers/firmware/tegra/bpmp-private.h6
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c14
-rw-r--r--drivers/firmware/tegra/bpmp.c20
-rw-r--r--drivers/firmware/thead,th1520-aon.c250
-rw-r--r--drivers/firmware/ti_sci.c550
-rw-r--r--drivers/firmware/ti_sci.h146
-rw-r--r--drivers/firmware/turris-mox-rwtm.c277
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c162
-rw-r--r--drivers/firmware/xilinx/zynqmp.c179
-rw-r--r--drivers/fpga/altera-cvp.c2
-rw-r--r--drivers/fpga/altera-fpga2sdram.c2
-rw-r--r--drivers/fpga/altera-freeze-bridge.c2
-rw-r--r--drivers/fpga/altera-hps2fpga.c2
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c117
-rw-r--r--drivers/fpga/dfl-afu-error.c59
-rw-r--r--drivers/fpga/dfl-afu-main.c286
-rw-r--r--drivers/fpga/dfl-afu-region.c51
-rw-r--r--drivers/fpga/dfl-afu.h26
-rw-r--r--drivers/fpga/dfl-fme-br.c32
-rw-r--r--drivers/fpga/dfl-fme-error.c98
-rw-r--r--drivers/fpga/dfl-fme-main.c103
-rw-r--r--drivers/fpga/dfl-fme-pr.c86
-rw-r--r--drivers/fpga/dfl-fme-region.c8
-rw-r--r--drivers/fpga/dfl-pci.c16
-rw-r--r--drivers/fpga/dfl.c445
-rw-r--r--drivers/fpga/dfl.h140
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c4
-rw-r--r--drivers/fpga/of-fpga-region.c2
-rw-r--r--drivers/fpga/socfpga-a10.c2
-rw-r--r--drivers/fpga/stratix10-soc.c2
-rw-r--r--drivers/fpga/tests/fpga-bridge-test.c1
-rw-r--r--drivers/fpga/tests/fpga-mgr-test.c2
-rw-r--r--drivers/fpga/tests/fpga-region-test.c1
-rw-r--r--drivers/fpga/versal-fpga.c2
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fpga/zynq-fpga.c12
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/fsi/fsi-master-aspeed.c2
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c13
-rw-r--r--drivers/fsi/fsi-master-gpio.c2
-rw-r--r--drivers/fsi/fsi-occ.c2
-rw-r--r--drivers/fwctl/Kconfig33
-rw-r--r--drivers/fwctl/Makefile6
-rw-r--r--drivers/fwctl/main.c421
-rw-r--r--drivers/fwctl/mlx5/Makefile4
-rw-r--r--drivers/fwctl/mlx5/main.c418
-rw-r--r--drivers/fwctl/pds/Makefile4
-rw-r--r--drivers/fwctl/pds/main.c535
-rw-r--r--drivers/gpio/Kconfig207
-rw-r--r--drivers/gpio/Makefile16
-rw-r--r--drivers/gpio/TODO118
-rw-r--r--drivers/gpio/dev-sync-probe.c97
-rw-r--r--drivers/gpio/dev-sync-probe.h25
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c4
-rw-r--r--drivers/gpio/gpio-104-idio-16.c2
-rw-r--r--drivers/gpio/gpio-74x164.c103
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c32
-rw-r--r--drivers/gpio/gpio-adnp.c136
-rw-r--r--drivers/gpio/gpio-adp5520.c10
-rw-r--r--drivers/gpio/gpio-adp5585.c368
-rw-r--r--drivers/gpio/gpio-aggregator.c1520
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c12
-rw-r--r--drivers/gpio/gpio-altera.c194
-rw-r--r--drivers/gpio/gpio-amd-fch.c5
-rw-r--r--drivers/gpio/gpio-amd8111.c4
-rw-r--r--drivers/gpio/gpio-amdpt.c52
-rw-r--r--drivers/gpio/gpio-arizona.c9
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c86
-rw-r--r--drivers/gpio/gpio-aspeed.c716
-rw-r--r--drivers/gpio/gpio-ath79.c88
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 127
-rw-r--r--  drivers/gpio/gpio-bd71815.c | 13
-rw-r--r--  drivers/gpio/gpio-bd71828.c | 13
-rw-r--r--  drivers/gpio/gpio-bd9571mwv.c | 6
-rw-r--r--  drivers/gpio/gpio-blzp1600.c | 290
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 129
-rw-r--r--  drivers/gpio/gpio-bt8xx.c | 46
-rw-r--r--  drivers/gpio/gpio-cadence.c | 61
-rw-r--r--  drivers/gpio/gpio-cgbc.c | 200
-rw-r--r--  drivers/gpio/gpio-clps711x.c | 28
-rw-r--r--  drivers/gpio/gpio-creg-snps.c | 8
-rw-r--r--  drivers/gpio/gpio-cros-ec.c | 11
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 13
-rw-r--r--  drivers/gpio/gpio-cs5535.c | 4
-rw-r--r--  drivers/gpio/gpio-da9052.c | 32
-rw-r--r--  drivers/gpio/gpio-da9055.c | 12
-rw-r--r--  drivers/gpio/gpio-davinci.c | 48
-rw-r--r--  drivers/gpio/gpio-dln2.c | 7
-rw-r--r--  drivers/gpio/gpio-ds4520.c | 6
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 167
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c | 7
-rw-r--r--  drivers/gpio/gpio-elkhartlake.c | 2
-rw-r--r--  drivers/gpio/gpio-em.c | 8
-rw-r--r--  drivers/gpio/gpio-en7523.c | 36
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 35
-rw-r--r--  drivers/gpio/gpio-exar.c | 22
-rw-r--r--  drivers/gpio/gpio-f7188x.c | 11
-rw-r--r--  drivers/gpio/gpio-ftgpio010.c | 89
-rw-r--r--  drivers/gpio/gpio-ge.c | 25
-rw-r--r--  drivers/gpio/gpio-gpio-mm.c | 2
-rw-r--r--  drivers/gpio/gpio-graniterapids.c | 56
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 167
-rw-r--r--  drivers/gpio/gpio-gw-pld.c | 4
-rw-r--r--  drivers/gpio/gpio-hisi.c | 48
-rw-r--r--  drivers/gpio/gpio-hlwd.c | 107
-rw-r--r--  drivers/gpio/gpio-htc-egpio.c | 14
-rw-r--r--  drivers/gpio/gpio-i8255.c | 2
-rw-r--r--  drivers/gpio/gpio-ich.c | 10
-rw-r--r--  drivers/gpio/gpio-idio-16.c | 5
-rw-r--r--  drivers/gpio/gpio-idt3243x.c | 47
-rw-r--r--  drivers/gpio/gpio-imx-scu.c | 45
-rw-r--r--  drivers/gpio/gpio-it87.c | 9
-rw-r--r--  drivers/gpio/gpio-ixp4xx.c | 72
-rw-r--r--  drivers/gpio/gpio-janz-ttl.c | 4
-rw-r--r--  drivers/gpio/gpio-kempld.c | 5
-rw-r--r--  drivers/gpio/gpio-latch.c | 64
-rw-r--r--  drivers/gpio/gpio-ljca.c | 32
-rw-r--r--  drivers/gpio/gpio-logicvc.c | 9
-rw-r--r--  drivers/gpio/gpio-loongson-64bit.c | 299
-rw-r--r--  drivers/gpio/gpio-loongson.c | 6
-rw-r--r--  drivers/gpio/gpio-loongson1.c | 40
-rw-r--r--  drivers/gpio/gpio-lp3943.c | 11
-rw-r--r--  drivers/gpio/gpio-lp873x.c | 10
-rw-r--r--  drivers/gpio/gpio-lp87565.c | 13
-rw-r--r--  drivers/gpio/gpio-lpc18xx.c | 29
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 18
-rw-r--r--  drivers/gpio/gpio-macsmc.c | 292
-rw-r--r--  drivers/gpio/gpio-madera.c | 16
-rw-r--r--  drivers/gpio/gpio-max3191x.c | 34
-rw-r--r--  drivers/gpio/gpio-max730x.c | 24
-rw-r--r--  drivers/gpio/gpio-max732x.c | 11
-rw-r--r--  drivers/gpio/gpio-max7360.c | 257
-rw-r--r--  drivers/gpio/gpio-max77620.c | 11
-rw-r--r--  drivers/gpio/gpio-max77650.c | 12
-rw-r--r--  drivers/gpio/gpio-max77759.c | 530
-rw-r--r--  drivers/gpio/gpio-mb86s7x.c | 8
-rw-r--r--  drivers/gpio/gpio-mc33880.c | 7
-rw-r--r--  drivers/gpio/gpio-menz127.c | 89
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 17
-rw-r--r--  drivers/gpio/gpio-ml-ioh.c | 4
-rw-r--r--  drivers/gpio/gpio-mlxbf.c | 25
-rw-r--r--  drivers/gpio/gpio-mlxbf2.c | 85
-rw-r--r--  drivers/gpio/gpio-mlxbf3.c | 105
-rw-r--r--  drivers/gpio/gpio-mm-lantiq.c | 12
-rw-r--r--  drivers/gpio/gpio-mmio.c | 485
-rw-r--r--  drivers/gpio/gpio-mockup.c | 10
-rw-r--r--  drivers/gpio/gpio-moxtet.c | 14
-rw-r--r--  drivers/gpio/gpio-mpc5200.c | 86
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 162
-rw-r--r--  drivers/gpio/gpio-mpfs.c | 191
-rw-r--r--  drivers/gpio/gpio-mpsse.c | 525
-rw-r--r--  drivers/gpio/gpio-msc313.c | 4
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 80
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 43
-rw-r--r--  drivers/gpio/gpio-mxc.c | 97
-rw-r--r--  drivers/gpio/gpio-mxs.c | 35
-rw-r--r--  drivers/gpio/gpio-nct6694.c | 499
-rw-r--r--  drivers/gpio/gpio-nomadik.c | 34
-rw-r--r--  drivers/gpio/gpio-npcm-sgpio.c | 8
-rw-r--r--  drivers/gpio/gpio-octeon.c | 5
-rw-r--r--  drivers/gpio/gpio-omap.c | 14
-rw-r--r--  drivers/gpio/gpio-palmas.c | 24
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 244
-rw-r--r--  drivers/gpio/gpio-pca9570.c | 3
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 42
-rw-r--r--  drivers/gpio/gpio-pch.c | 4
-rw-r--r--  drivers/gpio/gpio-pci-idio-16.c | 19
-rw-r--r--  drivers/gpio/gpio-pcie-idio-24.c | 19
-rw-r--r--  drivers/gpio/gpio-pisosr.c | 16
-rw-r--r--  drivers/gpio/gpio-pl061.c | 6
-rw-r--r--  drivers/gpio/gpio-pmic-eic-sprd.c | 7
-rw-r--r--  drivers/gpio/gpio-pxa.c | 9
-rw-r--r--  drivers/gpio/gpio-raspberrypi-exp.c | 8
-rw-r--r--  drivers/gpio/gpio-rc5t583.c | 17
-rw-r--r--  drivers/gpio/gpio-rcar.c | 77
-rw-r--r--  drivers/gpio/gpio-rda.c | 35
-rw-r--r--  drivers/gpio/gpio-rdc321x.c | 6
-rw-r--r--  drivers/gpio/gpio-realtek-otto.c | 41
-rw-r--r--  drivers/gpio/gpio-reg.c | 10
-rw-r--r--  drivers/gpio/gpio-regmap.c | 101
-rw-r--r--  drivers/gpio/gpio-rockchip.c | 40
-rw-r--r--  drivers/gpio/gpio-rtd.c | 4
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 7
-rw-r--r--  drivers/gpio/gpio-sama5d2-piobu.c | 6
-rw-r--r--  drivers/gpio/gpio-sch.c | 7
-rw-r--r--  drivers/gpio/gpio-sch311x.c | 6
-rw-r--r--  drivers/gpio/gpio-sifive.c | 74
-rw-r--r--  drivers/gpio/gpio-sim.c | 245
-rw-r--r--  drivers/gpio/gpio-siox.c | 9
-rw-r--r--  drivers/gpio/gpio-sloppy-logic-analyzer.c | 8
-rw-r--r--  drivers/gpio/gpio-sodaville.c | 24
-rw-r--r--  drivers/gpio/gpio-spacemit-k1.c | 307
-rw-r--r--  drivers/gpio/gpio-spear-spics.c | 19
-rw-r--r--  drivers/gpio/gpio-sprd.c | 6
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 66
-rw-r--r--  drivers/gpio/gpio-stp-xway.c | 8
-rw-r--r--  drivers/gpio/gpio-syscon.c | 31
-rw-r--r--  drivers/gpio/gpio-tangier.c | 6
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 79
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 9
-rw-r--r--  drivers/gpio/gpio-tegra.c | 8
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 104
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 14
-rw-r--r--  drivers/gpio/gpio-timberdale.c | 17
-rw-r--r--  drivers/gpio/gpio-tpic2810.c | 23
-rw-r--r--  drivers/gpio/gpio-tps65086.c | 14
-rw-r--r--  drivers/gpio/gpio-tps65218.c | 29
-rw-r--r--  drivers/gpio/gpio-tps65219.c | 130
-rw-r--r--  drivers/gpio/gpio-tps6586x.c | 13
-rw-r--r--  drivers/gpio/gpio-tps65910.c | 19
-rw-r--r--  drivers/gpio/gpio-tps65912.c | 15
-rw-r--r--  drivers/gpio/gpio-tps68470.c | 12
-rw-r--r--  drivers/gpio/gpio-tqmx86.c | 214
-rw-r--r--  drivers/gpio/gpio-ts4800.c | 39
-rw-r--r--  drivers/gpio/gpio-ts4900.c | 18
-rw-r--r--  drivers/gpio/gpio-ts5500.c | 6
-rw-r--r--  drivers/gpio/gpio-twl4030.c | 30
-rw-r--r--  drivers/gpio/gpio-twl6040.c | 27
-rw-r--r--  drivers/gpio/gpio-uniphier.c | 14
-rw-r--r--  drivers/gpio/gpio-usbio.c | 248
-rw-r--r--  drivers/gpio/gpio-vf610.c | 125
-rw-r--r--  drivers/gpio/gpio-viperboard.c | 126
-rw-r--r--  drivers/gpio/gpio-virtio.c | 43
-rw-r--r--  drivers/gpio/gpio-virtuser.c | 168
-rw-r--r--  drivers/gpio/gpio-visconti.c | 27
-rw-r--r--  drivers/gpio/gpio-vx855.c | 7
-rw-r--r--  drivers/gpio/gpio-wcd934x.c | 16
-rw-r--r--  drivers/gpio/gpio-wcove.c | 12
-rw-r--r--  drivers/gpio/gpio-winbond.c | 14
-rw-r--r--  drivers/gpio/gpio-wm831x.c | 19
-rw-r--r--  drivers/gpio/gpio-wm8350.c | 13
-rw-r--r--  drivers/gpio/gpio-wm8994.c | 12
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 119
-rw-r--r--  drivers/gpio/gpio-xgene.c | 4
-rw-r--r--  drivers/gpio/gpio-xgs-iproc.c | 38
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 195
-rw-r--r--  drivers/gpio/gpio-xlp.c | 8
-rw-r--r--  drivers/gpio/gpio-xra1403.c | 17
-rw-r--r--  drivers/gpio/gpio-xtensa.c | 11
-rw-r--r--  drivers/gpio/gpio-zevio.c | 10
-rw-r--r--  drivers/gpio/gpio-zynq.c | 9
-rw-r--r--  drivers/gpio/gpio-zynqmp-modepin.c | 8
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 1424
-rw-r--r--  drivers/gpio/gpiolib-acpi-quirks.c | 402
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 1723
-rw-r--r--  drivers/gpio/gpiolib-acpi.h | 15
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 444
-rw-r--r--  drivers/gpio/gpiolib-devres.c | 95
-rw-r--r--  drivers/gpio/gpiolib-legacy.c | 41
-rw-r--r--  drivers/gpio/gpiolib-of.c | 168
-rw-r--r--  drivers/gpio/gpiolib-of.h | 8
-rw-r--r--  drivers/gpio/gpiolib-swnode.c | 4
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 840
-rw-r--r--  drivers/gpio/gpiolib.c | 1030
-rw-r--r--  drivers/gpio/gpiolib.h | 63
-rw-r--r--  drivers/gpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 189
-rw-r--r--  drivers/gpu/drm/Kconfig.debug | 117
-rw-r--r--  drivers/gpu/drm/Makefile | 43
-rw-r--r--  drivers/gpu/drm/adp/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/adp/Makefile | 5
-rw-r--r--  drivers/gpu/drm/adp/adp-mipi.c | 277
-rw-r--r--  drivers/gpu/drm/adp/adp_drv.c | 613
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 284
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 240
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 591
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 174
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2459
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 368
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 420
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c | 241
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 419
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 896
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 528
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 149
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 231
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 275
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 147
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 1356
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 287
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 582
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 1015
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 292
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 426
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 296
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 241
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 151
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 504
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 1090
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 142
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 1009
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 836
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 101
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 426
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 500
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 116
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 690
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 319
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 176
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 390
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3806
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 444
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 573
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 1190
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm | 118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 1035
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 206
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 278
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 552
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 459
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 181
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 500
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v12_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 237
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 238
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 130
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 130
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 610
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 1100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 485
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_userqueue.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 297
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 486
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h | 144
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c | 100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nvd.h | 208
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 128
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 356
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 637
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 601
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 581
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 649
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 545
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 176
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_enums.h | 246
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sid.h | 1924
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 161
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h | 144
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 123
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 330
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.h | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 321
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 314
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 309
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 435
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 1355
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 887
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 947
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 1104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 869
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 818
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 1727
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 3232
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 202
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 1136
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 62
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 368
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 398
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 145
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 97
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 146
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 103
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 83
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 224
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 184
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 129
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2438
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 126
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 565
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 56
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 223
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 136
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 153
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 226
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 103
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c | 178
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dc_common.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/vector.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c | 118
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 150
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c | 141
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 433
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 465
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c | 167
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 999
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 206
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 181
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 521
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 135
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 184
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 629
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 814
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 126
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 171
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_fused_io.c | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_fused_io.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 188
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 149
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 112
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 201
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 124
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c | 432
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c | 130
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 138
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 82
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c | 239
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 529
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 218
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 155
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h | 77
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h | 401
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 107
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 86
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 5351
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c | 12413
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 334
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 365
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 466
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c | 307
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c | 1170
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c | 354
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c | 549
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h | 182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 324
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h | 161
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 258
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dsc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 187
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 72
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/Makefile | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c | 433
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 406
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 401
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 109
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 95
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 122
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 245
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 1369
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 117
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 343
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_service.h | 350
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c | 381
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq_types.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c | 72
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c | 81
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 155
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 299
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 303
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 535
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 114
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 155
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c | 385
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 127
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 239
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 73
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 123
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/Makefile | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c) | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 118
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 2192
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h | 73
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 193
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c | 304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c757
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c1726
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_debug.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl.c)513
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c2586
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c)232
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h)47
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c)68
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h)21
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h196
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h1188
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c85
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c117
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c147
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c129
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c276
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c385
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c29
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h19
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h9
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c307
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h11
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c182
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c84
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c48
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c73
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h26
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h39
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c9
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h5
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h269
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h169
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h6193
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h6193
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h15485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h61940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h26
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h48
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h22
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h39
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h6
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h50
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h74
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h47
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h7
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h449
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h47
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h104
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c337
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c1338
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h29
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c161
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c135
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c579
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c66
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c79
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c442
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c78
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h561
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c574
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c38
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c646
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h198
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h373
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h148
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h144
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c633
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c231
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c1510
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c84
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c105
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c106
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c107
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c252
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c450
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c936
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c1049
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h70
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c381
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c40
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c149
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c65
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c565
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c110
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h59
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h1
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile0
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/arm/Kconfig3
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c6
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c23
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c19
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/armada/Kconfig1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h11
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c12
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c118
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c11
-rw-r--r--drivers/gpu/drm/ast/Kconfig3
-rw-r--r--drivers/gpu/drm/ast/Makefile7
-rw-r--r--drivers/gpu/drm/ast/ast_2000.c149
-rw-r--r--drivers/gpu/drm/ast/ast_2100.c388
-rw-r--r--drivers/gpu/drm/ast/ast_2300.c1328
-rw-r--r--drivers/gpu/drm/ast/ast_2500.c569
-rw-r--r--drivers/gpu/drm/ast/ast_2600.c44
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c314
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c382
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c111
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h207
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c21
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h141
-rw-r--r--drivers/gpu/drm/ast/ast_main.c338
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c767
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2024
-rw-r--r--drivers/gpu/drm/ast/ast_post.h50
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h74
-rw-r--r--drivers/gpu/drm/ast/ast_sil164.c55
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h187
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.c241
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.h108
-rw-r--r--drivers/gpu/drm/ast/ast_vga.c55
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c8
-rw-r--r--drivers/gpu/drm/bridge/Kconfig59
-rw-r--r--drivers/gpu/drm/bridge/Makefile8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h59
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c84
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c57
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c446
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c43
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c42
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c286
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c52
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c151
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c18
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c18
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c444
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c115
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c28
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c23
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c17
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c8
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c33
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c23
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig10
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c91
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c20
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c53
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c54
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c46
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c23
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c21
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c14
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c930
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c448
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c27
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c21
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c17
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c366
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c26
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c14
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c32
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c14
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c24
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c19
-rw-r--r--drivers/gpu/drm/bridge/panel.c37
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c10
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c16
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c455
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c57
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c12
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c13
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c25
-rw-r--r--drivers/gpu/drm/bridge/ssd2825.c775
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig21
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-dp.c2095
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c10
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c1118
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h834
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c30
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c20
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c1032
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c22
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c151
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c78
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c54
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c (renamed from drivers/gpu/drm/i2c/tda998x_drv.c)66
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c13
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c22
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c306
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c293
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c115
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c19
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c21
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c203
-rw-r--r--drivers/gpu/drm/ci/arm64.config9
-rw-r--r--drivers/gpu/drm/ci/build.sh28
-rw-r--r--drivers/gpu/drm/ci/build.yml128
-rw-r--r--drivers/gpu/drm/ci/check-devicetrees.yml50
-rw-r--r--drivers/gpu/drm/ci/container.yml36
-rwxr-xr-xdrivers/gpu/drm/ci/dt-binding-check.sh19
-rwxr-xr-xdrivers/gpu/drm/ci/dtbs-check.sh22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml256
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh25
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml21
-rwxr-xr-xdrivers/gpu/drm/ci/kunit.sh16
-rw-r--r--drivers/gpu/drm/ci/kunit.yml37
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh103
-rwxr-xr-xdrivers/gpu/drm/ci/setup-llvm-links.sh13
-rw-r--r--drivers/gpu/drm/ci/test.yml151
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt34
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt299
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt46
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt113
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt44
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt40
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt35
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt118
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt212
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt98
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt6
-rwxr-xr-xdrivers/gpu/drm/ci/xfails/update-xfails.py204
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt31
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt598
-rw-r--r--drivers/gpu/drm/clients/Kconfig123
-rw-r--r--drivers/gpu/drm/clients/Makefile8
-rw-r--r--drivers/gpu/drm/clients/drm_client_internal.h25
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c98
-rw-r--r--drivers/gpu/drm/clients/drm_fbdev_client.c167
-rw-r--r--drivers/gpu/drm/clients/drm_log.c420
-rw-r--r--drivers/gpu/drm/display/Kconfig29
-rw-r--r--drivers/gpu/drm/display/Makefile11
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c393
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c5
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c54
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c8
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c834
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c385
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c33
-rw-r--r--drivers/gpu/drm/display/drm_dsc_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c195
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_helper.c193
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c65
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c171
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c506
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_aperture.c192
-rw-r--r--drivers/gpu/drm/drm_atomic.c63
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c496
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c1
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c37
-rw-r--r--drivers/gpu/drm/drm_auth.c65
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c307
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c60
-rw-r--r--drivers/gpu/drm/drm_buddy.c55
-rw-r--r--drivers/gpu/drm/drm_cache.c9
-rw-r--r--drivers/gpu/drm/drm_client.c163
-rw-r--r--drivers/gpu/drm/drm_client_event.c213
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c282
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c244
-rw-r--r--drivers/gpu/drm/drm_connector.c225
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h2
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c173
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c1
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h31
-rw-r--r--drivers/gpu/drm/drm_draw.c157
-rw-r--r--drivers/gpu/drm/drm_draw_internal.h56
-rw-r--r--drivers/gpu/drm/drm_drv.c215
-rw-r--r--drivers/gpu/drm/drm_edid.c358
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c182
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c131
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c360
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c171
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c226
-rw-r--r--drivers/gpu/drm/drm_file.c115
-rw-r--r--drivers/gpu/drm/drm_flip_work.c1
-rw-r--r--drivers/gpu/drm/drm_format_helper.c795
-rw-r--r--drivers/gpu/drm/drm_format_internal.h174
-rw-r--r--drivers/gpu/drm/drm_fourcc.c75
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c62
-rw-r--r--drivers/gpu/drm/drm_gem.c277
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c60
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c266
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c125
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c1633
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c500
-rw-r--r--drivers/gpu/drm/drm_internal.h28
-rw-r--r--drivers/gpu/drm/drm_ioctl.c52
-rw-r--r--drivers/gpu/drm/drm_managed.c9
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c17
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c230
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/drm_mode_config.c17
-rw-r--r--drivers/gpu/drm/drm_mode_object.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c14
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c20
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c95
-rw-r--r--drivers/gpu/drm/drm_pagemap.c882
-rw-r--r--drivers/gpu/drm/drm_panel.c269
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c156
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c68
-rw-r--r--drivers/gpu/drm/drm_panic.c365
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs251
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane.c53
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c73
-rw-r--r--drivers/gpu/drm/drm_print.c38
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c49
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c1
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c1
-rw-r--r--drivers/gpu/drm/drm_suballoc.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c57
-rw-r--r--drivers/gpu/drm/drm_sysfs.c47
-rw-r--r--drivers/gpu/drm/drm_vblank.c5
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c4
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c194
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c42
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c107
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c62
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c55
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h23
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c162
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c104
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h15
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c34
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c2
-rw-r--r--drivers/gpu/drm/gma500/Kconfig3
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c110
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c14
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c5
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c7
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h14
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/gud/Kconfig1
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c25
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c92
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h14
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c79
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c168
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h69
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h21
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c307
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h64
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c392
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h132
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c182
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c106
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h32
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c46
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c32
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c6
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm.h4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c12
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c206
-rw-r--r--drivers/gpu/drm/i2c/Kconfig36
-rw-r--r--drivers/gpu/drm/i2c/Makefile10
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug2
-rw-r--r--drivers/gpu/drm/i915/Makefile29
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c7
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c4
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c288
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.h25
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c235
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h7
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c115
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.h6
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.c97
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.h14
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c544
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h11
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1495
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm_regs.h257
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c635
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c394
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c1227
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h77
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c583
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c546
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c300
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h186
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c873
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1760
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h84
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg_regs.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c1033
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c183
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c113
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c450
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt_regs.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c87
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c334
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c845
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h72
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c2368
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h113
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3425
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h169
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h88
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c647
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c605
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h353
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c464
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.h38
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c1804
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c1281
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h110
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c250
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c1016
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h2934
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h413
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h405
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c65
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c933
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h493
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c371
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2871
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h76
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c126
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c322
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c1364
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c764
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c418
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c585
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1639
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h99
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c570
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c248
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h197
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c611
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c341
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c449
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c567
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c299
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c472
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c134
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h42
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c367
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c1129
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c120
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c225
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h64
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_shim.h137
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c393
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c710
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c688
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c243
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c111
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c227
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h45
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c468
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h56
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c339
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c323
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c731
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit_regs.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c148
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c1750
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h94
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c142
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c302
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c544
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c1237
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c350
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c364
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c273
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c622
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h72
-rw-r--r--drivers/gpu/drm/i915/display/intel_tdf.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h30
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c365
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c97
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h121
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c202
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h19
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c756
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h31
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c1237
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h16
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c2011
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h67
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c236
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h13
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c207
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.c50
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.h156
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c68
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c133
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c164
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c86
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c25
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c75
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c87
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h144
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c82
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c101
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c67
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c59
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c8
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README6
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c10
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c50
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c25
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c25
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c117
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c135
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c92
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h55
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c140
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c76
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h23
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c171
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_active.c25
-rw-r--r--drivers/gpu/drm/i915/i915_active.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c34
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c282
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h147
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c210
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h17
-rw-r--r--drivers/gpu/drm/i915/i915_gtt_view_types.h59
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c40
-rw-r--r--drivers/gpu/drm/i915/i915_iosf_mbi.h6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c648
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h42
-rw-r--r--drivers/gpu/drm/i915/i915_list_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c8
-rw-r--r--drivers/gpu/drm/i915/i915_module.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c34
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c215
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h17
-rw-r--r--drivers/gpu/drm/i915/i915_ptr_util.h66
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3413
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h126
-rw-r--r--drivers/gpu/drm/i915/i915_request.c25
-rw-r--r--drivers/gpu/drm/i915/i915_request.h9
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c140
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c13
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c9
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.c36
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h30
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c30
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h212
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c34
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h33
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h52
-rw-r--r--drivers/gpu/drm/i915/i915_wait_util.h119
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c45
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.c44
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c35
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h10
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c76
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c277
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h4
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c15
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c30
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h15
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c14
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h17
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c74
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h23
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c28
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.h49
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c21
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h29
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c16
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c196
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h33
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c5
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c287
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h89
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.c160
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.h25
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.c229
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.h37
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb_reg.h180
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c251
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h122
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband_reg.h180
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c6
-rw-r--r--drivers/gpu/drm/imagination/Kconfig3
-rw-r--r--drivers/gpu/drm/imagination/Makefile4
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.c51
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.h21
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c135
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h67
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c47
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.h1
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c67
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c32
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c35
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c34
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c317
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c53
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-crtc.c6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c2
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dtg.c4
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-scaler.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm.h14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c205
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c146
-rw-r--r--drivers/gpu/drm/imx/lcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c7
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c21
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c2
-rw-r--r--drivers/gpu/drm/kmb/Kconfig1
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c7
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h1
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c4
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lib/drm_random.h2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c7
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c6
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c34
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h6
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c19
-rw-r--r--drivers/gpu/drm/loongson/Kconfig3
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c20
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c3
-rw-r--r--drivers/gpu/drm/mcde/Kconfig1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c8
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c12
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig10
-rw-r--r--drivers/gpu/drm/mediatek/Makefile8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c66
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c152
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c53
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c200
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp_reg.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c470
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c317
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c100
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c537
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_padding.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c28
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h5
-rw-r--r--drivers/gpu/drm/meson/Kconfig3
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c15
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h2
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c16
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c47
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c226
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h13
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig5
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h12
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c204
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c84
-rw-r--r--drivers/gpu/drm/msm/Kconfig41
-rw-r--r--drivers/gpu/drm/msm/Makefile30
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c19
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c23
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c67
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c286
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c517
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h44
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c684
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h178
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c80
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h57
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c216
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h26
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c477
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c103
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h36
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c237
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h113
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h86
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h494
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h205
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h184
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h212
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h328
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h61
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h65
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h254
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h453
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h408
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h46
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c198
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c431
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c691
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h97
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c71
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c246
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c253
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h140
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c215
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h49
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c31
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c52
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c231
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c180
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c741
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c525
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h82
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c13
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c92
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c88
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c38
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c20
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c2
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c31
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c592
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h57
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c326
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h29
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1341
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h128
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c1098
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h55
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c73
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h10
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c1102
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c185
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h35
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c449
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h48
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c508
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h52
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.c28
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c34
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c268
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c44
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c67
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c24
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c56
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c22
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c33
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c294
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c264
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h59
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c199
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c393
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c93
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c25
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c12
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c17
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c136
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c390
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h136
-rw-r--r--drivers/gpu/drm/msm/msm_dsc_helper.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c51
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c146
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c569
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h302
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c71
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c412
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c1591
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c239
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h181
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h54
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c386
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c103
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h64
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c362
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h28
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h44
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c62
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c29
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h18
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c99
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.c172
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.h37
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml4077
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml158
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml429
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml16
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml600
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml223
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml1030
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml450
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml37
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml2
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdp5.xml16
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdss.xml38
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py199
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c11
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c30
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c (renamed from drivers/gpu/drm/i2c/ch7006_drv.c)34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c (renamed from drivers/gpu/drm/i2c/ch7006_mode.c)8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h (renamed from drivers/gpu/drm/i2c/ch7006_priv.h)11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c (renamed from drivers/gpu/drm/i2c/sil164_drv.c)37
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c145
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/tile.h63
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c158
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c209
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c98
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h87
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h220
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/log.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h157
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h262
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c89
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c215
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c78
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c159
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c1714
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c665
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/fw.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c2707
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c1793
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c327
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c617
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c2205
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h225
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c698
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c267
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c333
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig14
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs71
-rw-r--r--drivers/gpu/drm/nova/file.rs69
-rw-r--r--drivers/gpu/drm/nova/gem.rs47
-rw-r--r--drivers/gpu/drm/nova/nova.rs17
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c150
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c16
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c15
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c166
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig176
-rw-r--r--drivers/gpu/drm/panel/Makefile16
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c78
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c115
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c118
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c392
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c307
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112b.c430
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c594
-rw-r--r--drivers/gpu/drm/panel/panel-hydis-hv101hd1.c188
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c221
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c545
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c12
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c11
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c11
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c23
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c204
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c11
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c17
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c12
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c356
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c16
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c11
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c20
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c27
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c210
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c14
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1704
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c10
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c18
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c12
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c12
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c14
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c493
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c103
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c283
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c329
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c250
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c81
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c342
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c768
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c102
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c981
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c119
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c13
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c70
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c539
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c133
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c13
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c13
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c118
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c11
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c134
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c81
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c190
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c264
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c442
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c11
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c188
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c7
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c76
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h34
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c200
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c188
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c60
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c152
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h38
-rw-r--r--drivers/gpu/drm/panthor/Makefile1
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c41
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c128
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h175
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c352
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c225
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h6
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c250
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h96
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c228
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h12
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c60
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.c125
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.h11
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c308
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h5
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h107
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c572
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h8
-rw-r--r--drivers/gpu/drm/pl111/Kconfig1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c5
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig4
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c9
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c68
-rw-r--r--drivers/gpu/drm/radeon/Kconfig4
-rw-r--r--drivers/gpu/drm/radeon/atombios.h5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c589
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c215
-rw-r--r--drivers/gpu/drm/radeon/r200.c34
-rw-r--r--drivers/gpu/drm/radeon/r300.c69
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c451
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c48
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c130
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h19
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c10
-rw-r--r--drivers/gpu/drm/radeon/rs400.c18
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c7
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c25
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c24
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c13
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c14
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c251
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h134
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig16
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c20
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c62
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c129
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c373
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h56
-rw-r--r--drivers/gpu/drm/renesas/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c8
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c14
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig32
-rw-r--r--drivers/gpu/drm/rockchip/Makefile3
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c215
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c304
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h10
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h4
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c146
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c508
-rw-r--r--drivers/gpu/drm/rockchip/dw_dp-rockchip.c150
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c270
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c626
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c490
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h349
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c319
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c1990
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h341
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c146
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2102
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c98
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h2
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h107
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c155
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c8
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h91
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c464
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c370
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h225
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c563
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig42
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1082
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)5
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)5
-rw-r--r--drivers/gpu/drm/solomon/Kconfig1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-i2c.c2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c12
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c16
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c15
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c17
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/Makefile2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c16
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c47
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c54
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c49
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c21
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c16
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c14
-rw-r--r--drivers/gpu/drm/stm/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/drv.c24
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c2
-rw-r--r--drivers/gpu/drm/stm/ltdc.c201
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/stm/lvds.c23
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c34
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c170
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c6
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h188
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c463
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c104
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c389
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c1144
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c883
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c650
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c22
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c13
-rw-r--r--drivers/gpu/drm/tegra/drm.c15
-rw-r--r--drivers/gpu/drm/tegra/drm.h14
-rw-r--r--drivers/gpu/drm/tegra/dsi.c8
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c7
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c102
-rw-r--r--drivers/gpu/drm/tegra/gem.c66
-rw-r--r--drivers/gpu/drm/tegra/gem.h21
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c41
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c6
-rw-r--r--drivers/gpu/drm/tegra/hub.c6
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c8
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c8
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/tests/Makefile6
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c379
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c521
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c30
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c12
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c10
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c547
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c17
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c22
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c334
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c376
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c31
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c1366
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h456
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c166
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c26
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c8
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Kconfig1
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c11
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c619
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h23
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h107
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c28
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h12
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c13
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c34
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c597
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h2
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig106
-rw-r--r--drivers/gpu/drm/tiny/Makefile9
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c834
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c14
-rw-r--r--drivers/gpu/drm/tiny/bochs.c438
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c663
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c764
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c52
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c5
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c5
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c1410
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c9
-rw-r--r--drivers/gpu/drm/tiny/pixpaper.c1165
-rw-r--r--drivers/gpu/drm/tiny/repaper.c25
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c669
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c1076
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c28
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c62
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c182
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c168
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h58
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c357
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c28
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c749
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c98
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c87
-rw-r--r--drivers/gpu/drm/tve200/Kconfig1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c12
-rw-r--r--drivers/gpu/drm/tyr/Kconfig19
-rw-r--r--drivers/gpu/drm/tyr/Makefile3
-rw-r--r--drivers/gpu/drm/tyr/driver.rs205
-rw-r--r--drivers/gpu/drm/tyr/file.rs56
-rw-r--r--drivers/gpu/drm/tyr/gem.rs18
-rw-r--r--drivers/gpu/drm/tyr/gpu.rs219
-rw-r--r--drivers/gpu/drm/tyr/regs.rs108
-rw-r--r--drivers/gpu/drm/tyr/tyr.rs22
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c29
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h21
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/Makefile3
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c21
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c130
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c127
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h88
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c46
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c60
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c159
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c93
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c69
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h12
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h55
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c278
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c23
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig1
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c37
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig3
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c22
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c294
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c32
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c160
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h110
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c207
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c362
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c640
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h222
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c1060
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c118
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c26
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c1146
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h298
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c95
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c32
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c19
-rw-r--r--drivers/gpu/drm/virtio/Kconfig1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c62
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h32
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c28
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c239
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c183
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c191
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c9
-rw-r--r--drivers/gpu/drm/vkms/Kconfig16
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile7
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c992
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c279
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c319
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c640
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h437
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h26
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c56
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c106
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h201
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c943
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h13
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c190
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c62
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c48
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig3
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c844
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h60
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c883
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c85
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c4
-rw-r--r--drivers/gpu/drm/xe/Kconfig60
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug28
-rw-r--r--drivers/gpu/drm/xe/Kconfig.profile1
-rw-r--r--drivers/gpu/drm/xe/Makefile80
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h41
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h76
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h8
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h99
-rw-r--r--drivers/gpu/drm/xe/abi/guc_capture_abi.h186
-rw-r--r--drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h17
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h77
-rw-r--r--drivers/gpu/drm/xe/abi/guc_log_abi.h75
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h63
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h20
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h84
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h3
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h67
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h84
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h)0
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h4
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h23
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h6
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c69
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c1
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c61
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c28
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.h24
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c41
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c432
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h29
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c73
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c26
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c228
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c145
-rw-r--r--drivers/gpu/drm/xe/display/xe_panic.c80
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c65
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c10
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mfx_commands.h28
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h22
-rw-r--r--drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h29
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h103
-rw-r--r--drivers/gpu/drm/xe/regs/xe_guc_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_hw_error_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h92
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h18
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h11
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h34
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pxp_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h32
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h22
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c308
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c39
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c333
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c776
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c130
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c320
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c28
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c90
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h14
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c39
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h5
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c1916
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h200
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c405
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h53
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c1010
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h39
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c237
-rw-r--r--drivers/gpu/drm/xe/xe_dep_job_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.c143
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c420
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h13
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_device.c655
-rw-r--r--drivers/gpu/drm/xe/xe_device.h89
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c245
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h358
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c93
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c93
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h3
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c965
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h25
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c69
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c263
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h10
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c71
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c134
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.h23
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c59
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c355
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h29
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c30
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c94
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c129
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c503
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h38
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c97
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c203
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c124
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c207
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c134
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c199
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c207
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c635
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c51
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c204
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c419
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h24
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h40
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c75
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c206
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c427
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c64
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c96
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c526
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h38
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c109
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h78
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c745
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h11
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c411
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c172
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h47
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c2017
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.h61
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h70
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c988
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h44
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c139
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c521
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h102
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h84
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c315
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c654
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c712
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h14
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h30
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c51
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c253
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c16
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c404
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c109
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c58
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h68
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c786
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.h4
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c346
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h64
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c463
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h8
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c94
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c838
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h48
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_macros.h12
-rw-r--r--drivers/gpu/drm/xe/xe_map.h18
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c225
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h6
-rw-r--r--drivers/gpu/drm/xe/xe_memirq_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c840
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h39
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c236
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h43
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c32
-rw-r--r--drivers/gpu/drm/xe/xe_module.c89
-rw-r--r--drivers/gpu/drm/xe/xe_module.h1
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c170
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c1352
-rw-r--r--drivers/gpu/drm/xe/xe_oa.h4
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c16
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c126
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c553
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c101
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h63
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c49
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h44
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c224
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c591
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.h18
-rw-r--r--drivers/gpu/drm/xe/xe_pmu_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1025
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c949
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.h35
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.c129
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c602
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.h22
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_types.h135
-rw-r--r--drivers/gpu/drm/xe/xe_query.c228
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c104
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c41
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h123
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c79
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c66
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h23
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c57
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c19
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c306
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c26
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c88
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h7
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c475
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h20
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c410
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h34
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c377
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h18
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c1520
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h389
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c9
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c85
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c135
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c254
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h18
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c12
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c268
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h88
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h40
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.h49
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h52
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c89
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c84
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c99
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h3
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c102
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c196
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c320
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c2074
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h165
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h24
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h214
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c233
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h11
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c225
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h15
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c317
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h28
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules54
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c7
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm.c15
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c3
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig11
-rw-r--r--drivers/gpu/drm/xlnx/Makefile1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c51
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c880
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c448
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c44
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h16
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c14
-rw-r--r--drivers/gpu/host1x/bus.c11
-rw-r--r--drivers/gpu/host1x/cdma.c7
-rw-r--r--drivers/gpu/host1x/context_bus.c2
-rw-r--r--drivers/gpu/host1x/debug.c9
-rw-r--r--drivers/gpu/host1x/debug.h1
-rw-r--r--drivers/gpu/host1x/dev.c160
-rw-r--r--drivers/gpu/host1x/dev.h6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c12
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c15
-rw-r--r--drivers/gpu/host1x/intr.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c48
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c23
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c108
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c73
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c48
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h2
-rw-r--r--drivers/gpu/ipu-v3/ipu-vdi.c11
-rw-r--r--drivers/gpu/nova-core/Kconfig16
-rw-r--r--drivers/gpu/nova-core/Makefile3
-rw-r--r--drivers/gpu/nova-core/dma.rs58
-rw-r--r--drivers/gpu/nova-core/driver.rs89
-rw-r--r--drivers/gpu/nova-core/falcon.rs613
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs32
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs54
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs116
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs19
-rw-r--r--drivers/gpu/nova-core/fb.rs147
-rw-r--r--drivers/gpu/nova-core/fb/hal.rs39
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga100.rs57
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga102.rs36
-rw-r--r--drivers/gpu/nova-core/fb/hal/tu102.rs58
-rw-r--r--drivers/gpu/nova-core/firmware.rs237
-rw-r--r--drivers/gpu/nova-core/firmware/booter.rs375
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs416
-rw-r--r--drivers/gpu/nova-core/firmware/gsp.rs243
-rw-r--r--drivers/gpu/nova-core/firmware/riscv.rs91
-rw-r--r--drivers/gpu/nova-core/gfw.rs71
-rw-r--r--drivers/gpu/nova-core/gpu.rs241
-rw-r--r--drivers/gpu/nova-core/gsp.rs22
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs137
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs7
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs29
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs1
-rw-r--r--drivers/gpu/nova-core/nova_core.rs28
-rw-r--r--drivers/gpu/nova-core/regs.rs346
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs960
-rw-r--r--drivers/gpu/nova-core/util.rs27
-rw-r--r--drivers/gpu/nova-core/vbios.rs1158
-rw-r--r--drivers/gpu/trace/Kconfig11
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/greybus/gb-beagleplay.c29
-rw-r--r--drivers/greybus/interface.c2
-rw-r--r--drivers/greybus/operation.c5
-rw-r--r--drivers/greybus/svc.c3
-rw-r--r--drivers/hid/Kconfig88
-rw-r--r--drivers/hid/Makefile10
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c35
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h5
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c70
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c78
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h3
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c40
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h24
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c35
-rw-r--r--drivers/hid/bpf/progs/Huion__Dial-2.bpf.c66
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c60
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c75
-rw-r--r--drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c531
-rw-r--r--drivers/hid/bpf/progs/Mistel__MD770.bpf.c154
-rw-r--r--drivers/hid/bpf/progs/Rapoo__M50-Plus-Silent.bpf.c148
-rw-r--r--drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c47
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c331
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c44
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_async.h219
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h19
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h36
-rw-r--r--  drivers/hid/hid-apple.c | 249
-rw-r--r--  drivers/hid/hid-appleir.c | 6
-rw-r--r--  drivers/hid/hid-appletb-bl.c | 204
-rw-r--r--  drivers/hid/hid-appletb-kbd.c | 519
-rw-r--r--  drivers/hid/hid-asus.c | 156
-rw-r--r--  drivers/hid/hid-core.c | 289
-rw-r--r--  drivers/hid/hid-corsair-void.c | 833
-rw-r--r--  drivers/hid/hid-cp2112.c | 73
-rw-r--r--  drivers/hid/hid-debug.c | 19
-rw-r--r--  drivers/hid/hid-elecom.c | 8
-rw-r--r--  drivers/hid/hid-generic.c | 3
-rw-r--r--  drivers/hid/hid-goodix-spi.c | 35
-rw-r--r--  drivers/hid/hid-google-hammer.c | 5
-rw-r--r--  drivers/hid/hid-haptic.c | 580
-rw-r--r--  drivers/hid/hid-haptic.h | 127
-rw-r--r--  drivers/hid/hid-hyperv.c | 62
-rw-r--r--  drivers/hid/hid-ids.h | 96
-rw-r--r--  drivers/hid/hid-input-test.c | 10
-rw-r--r--  drivers/hid/hid-input.c | 108
-rw-r--r--  drivers/hid/hid-kysona.c | 290
-rw-r--r--  drivers/hid/hid-lenovo.c | 119
-rw-r--r--  drivers/hid/hid-letsketch.c | 3
-rw-r--r--  drivers/hid/hid-lg-g15.c | 146
-rw-r--r--  drivers/hid/hid-lg4ff.c | 15
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 76
-rw-r--r--  drivers/hid/hid-magicmouse.c | 176
-rw-r--r--  drivers/hid/hid-mcp2200.c | 19
-rw-r--r--  drivers/hid/hid-mcp2221.c | 120
-rw-r--r--  drivers/hid/hid-multitouch.c | 189
-rw-r--r--  drivers/hid/hid-nintendo.c | 56
-rw-r--r--  drivers/hid/hid-ntrig.c | 3
-rw-r--r--  drivers/hid/hid-nvidia-shield.c | 2
-rw-r--r--  drivers/hid/hid-picolcd_fb.c | 6
-rw-r--r--  drivers/hid/hid-picolcd_lcd.c | 6
-rw-r--r--  drivers/hid/hid-plantronics.c | 144
-rw-r--r--  drivers/hid/hid-playstation.c | 1073
-rw-r--r--  drivers/hid/hid-prodikeys.c | 4
-rw-r--r--  drivers/hid/hid-quirks.c | 52
-rw-r--r--  drivers/hid/hid-roccat-arvo.c | 18
-rw-r--r--  drivers/hid/hid-roccat-common.h | 14
-rw-r--r--  drivers/hid/hid-roccat-isku.c | 12
-rw-r--r--  drivers/hid/hid-roccat-kone.c | 16
-rw-r--r--  drivers/hid/hid-roccat-koneplus.c | 32
-rw-r--r--  drivers/hid/hid-roccat-konepure.c | 2
-rw-r--r--  drivers/hid/hid-roccat-kovaplus.c | 26
-rw-r--r--  drivers/hid/hid-roccat-lua.c | 6
-rw-r--r--  drivers/hid/hid-roccat-pyra.c | 36
-rw-r--r--  drivers/hid/hid-roccat-ryos.c | 2
-rw-r--r--  drivers/hid/hid-roccat-savu.c | 2
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 4
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 21
-rw-r--r--  drivers/hid/hid-sony.c | 7
-rw-r--r--  drivers/hid/hid-steam.c | 95
-rw-r--r--  drivers/hid/hid-steelseries.c | 241
-rw-r--r--  drivers/hid/hid-thrustmaster.c | 9
-rw-r--r--  drivers/hid/hid-topre.c | 7
-rw-r--r--  drivers/hid/hid-uclogic-core.c | 79
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 146
-rw-r--r--  drivers/hid/hid-uclogic-params.h | 5
-rw-r--r--  drivers/hid/hid-uclogic-rdesc-test.c | 2
-rw-r--r--  drivers/hid/hid-uclogic-rdesc.c | 44
-rw-r--r--  drivers/hid/hid-uclogic-rdesc.h | 4
-rw-r--r--  drivers/hid/hid-universal-pidff.c | 204
-rw-r--r--  drivers/hid/hid-wiimote-core.c | 4
-rw-r--r--  drivers/hid/hid-winwing.c | 2
-rw-r--r--  drivers/hid/hidraw.c | 224
-rw-r--r--  drivers/hid/i2c-hid/Kconfig | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-acpi.c | 8
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 108
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-of-elan.c | 11
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-of.c | 6
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.h | 2
-rw-r--r--  drivers/hid/intel-ish-hid/Kconfig | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 3
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c | 32
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 63
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-fw-loader.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid-client.c | 30
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.c | 4
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.h | 11
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 3
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client-buffers.c | 21
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.c | 21
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.h | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/init.c | 30
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h | 18
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/loader.c | 35
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/loader.h | 34
-rw-r--r--  drivers/hid/intel-thc-hid/Kconfig | 42
-rw-r--r--  drivers/hid/intel-thc-hid/Makefile | 23
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c | 1046
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h | 227
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c | 166
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h | 14
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c | 248
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h | 20
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c | 1002
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h | 174
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c | 165
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h | 14
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c | 414
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h | 25
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c | 1719
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h | 133
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c | 1009
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h | 154
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h | 886
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c | 94
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h | 26
-rw-r--r--  drivers/hid/surface-hid/Kconfig | 2
-rw-r--r--  drivers/hid/surface-hid/surface_kbd.c | 2
-rw-r--r--  drivers/hid/usbhid/Kconfig | 3
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 34
-rw-r--r--  drivers/hid/usbhid/hid-pidff.c | 1059
-rw-r--r--  drivers/hid/usbhid/hid-pidff.h | 32
-rw-r--r--  drivers/hid/usbhid/usbkbd.c | 2
-rw-r--r--  drivers/hid/wacom.h | 8
-rw-r--r--  drivers/hid/wacom_sys.c | 99
-rw-r--r--  drivers/hid/wacom_wac.c | 27
-rw-r--r--  drivers/hid/wacom_wac.h | 9
-rw-r--r--  drivers/hsi/clients/ssi_protocol.c | 25
-rw-r--r--  drivers/hsi/controllers/omap_ssi_core.c | 2
-rw-r--r--  drivers/hsi/controllers/omap_ssi_port.c | 13
-rw-r--r--  drivers/hte/hte-tegra194-test.c | 4
-rw-r--r--  drivers/hv/Kconfig | 39
-rw-r--r--  drivers/hv/Makefile | 8
-rw-r--r--  drivers/hv/channel.c | 68
-rw-r--r--  drivers/hv/channel_mgmt.c | 62
-rw-r--r--  drivers/hv/connection.c | 32
-rw-r--r--  drivers/hv/hv.c | 100
-rw-r--r--  drivers/hv/hv_balloon.c | 33
-rw-r--r--  drivers/hv/hv_common.c | 295
-rw-r--r--  drivers/hv/hv_kvp.c | 12
-rw-r--r--  drivers/hv/hv_proc.c | 196
-rw-r--r--  drivers/hv/hv_snapshot.c | 11
-rw-r--r--  drivers/hv/hv_util.c | 13
-rw-r--r--  drivers/hv/hv_utils_transport.c | 10
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 24
-rw-r--r--  drivers/hv/mshv.h | 28
-rw-r--r--  drivers/hv/mshv_common.c | 140
-rw-r--r--  drivers/hv/mshv_eventfd.c | 833
-rw-r--r--  drivers/hv/mshv_eventfd.h | 71
-rw-r--r--  drivers/hv/mshv_irq.c | 124
-rw-r--r--  drivers/hv/mshv_portid_table.c | 83
-rw-r--r--  drivers/hv/mshv_root.h | 311
-rw-r--r--  drivers/hv/mshv_root_hv_call.c | 850
-rw-r--r--  drivers/hv/mshv_root_main.c | 2288
-rw-r--r--  drivers/hv/mshv_synic.c | 665
-rw-r--r--  drivers/hv/ring_buffer.c | 1
-rw-r--r--  drivers/hv/vmbus_drv.c | 309
-rw-r--r--  drivers/hwmon/Kconfig | 187
-rw-r--r--  drivers/hwmon/Makefile | 13
-rw-r--r--  drivers/hwmon/abituguru.c | 2
-rw-r--r--  drivers/hwmon/abituguru3.c | 4
-rw-r--r--  drivers/hwmon/acpi_power_meter.c | 854
-rw-r--r--  drivers/hwmon/ad7314.c | 10
-rw-r--r--  drivers/hwmon/adt7475.c | 20
-rw-r--r--  drivers/hwmon/aht10.c | 16
-rw-r--r--  drivers/hwmon/amc6821.c | 175
-rw-r--r--  drivers/hwmon/aquacomputer_d5next.c | 2
-rw-r--r--  drivers/hwmon/aspeed-g6-pwm-tach.c | 2
-rw-r--r--  drivers/hwmon/asus-ec-sensors.c | 418
-rw-r--r--  drivers/hwmon/asus_atk0110.c | 15
-rw-r--r--  drivers/hwmon/axi-fan-control.c | 2
-rw-r--r--  drivers/hwmon/cgbc-hwmon.c | 304
-rw-r--r--  drivers/hwmon/chipcap2.c | 63
-rw-r--r--  drivers/hwmon/coretemp.c | 76
-rw-r--r--  drivers/hwmon/corsair-cpro.c | 5
-rw-r--r--  drivers/hwmon/corsair-psu.c | 1
-rw-r--r--  drivers/hwmon/cros_ec_hwmon.c | 314
-rw-r--r--  drivers/hwmon/da9052-hwmon.c | 2
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 117
-rw-r--r--  drivers/hwmon/dme1737.c | 2
-rw-r--r--  drivers/hwmon/drivetemp.c | 8
-rw-r--r--  drivers/hwmon/emc2305.c | 217
-rw-r--r--  drivers/hwmon/f71805f.c | 2
-rw-r--r--  drivers/hwmon/f71882fg.c | 2
-rw-r--r--  drivers/hwmon/fam15h_power.c | 6
-rw-r--r--  drivers/hwmon/ftsteutates.c | 9
-rw-r--r--  drivers/hwmon/gpd-fan.c | 715
-rw-r--r--  drivers/hwmon/gpio-fan.c | 119
-rw-r--r--  drivers/hwmon/gsc-hwmon.c | 14
-rw-r--r--  drivers/hwmon/hp-wmi-sensors.c | 4
-rw-r--r--  drivers/hwmon/htu31.c | 350
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 4
-rw-r--r--  drivers/hwmon/hwmon.c | 110
-rw-r--r--  drivers/hwmon/i5500_temp.c | 8
-rw-r--r--  drivers/hwmon/i5k_amb.c | 2
-rw-r--r--  drivers/hwmon/ibmaem.c | 27
-rw-r--r--  drivers/hwmon/ina238.c | 620
-rw-r--r--  drivers/hwmon/ina2xx.c | 160
-rw-r--r--  drivers/hwmon/ina3221.c | 9
-rw-r--r--  drivers/hwmon/intel-m10-bmc-hwmon.c | 11
-rw-r--r--  drivers/hwmon/isl28022.c | 494
-rw-r--r--  drivers/hwmon/jc42.c | 6
-rw-r--r--  drivers/hwmon/k10temp.c | 26
-rw-r--r--  drivers/hwmon/kbatt.c | 147
-rw-r--r--  drivers/hwmon/kfan.c | 246
-rw-r--r--  drivers/hwmon/lenovo-ec-sensors.c | 34
-rw-r--r--  drivers/hwmon/lm75.c | 352
-rw-r--r--  drivers/hwmon/lm90.c | 84
-rw-r--r--  drivers/hwmon/ltc2991.c | 2
-rw-r--r--  drivers/hwmon/ltc2992.c | 26
-rw-r--r--  drivers/hwmon/ltc4282.c | 70
-rw-r--r--  drivers/hwmon/max197.c | 2
-rw-r--r--  drivers/hwmon/max31827.c | 2
-rw-r--r--  drivers/hwmon/max6639.c | 99
-rw-r--r--  drivers/hwmon/max77705-hwmon.c | 221
-rw-r--r--  drivers/hwmon/mc13783-adc.c | 2
-rw-r--r--  drivers/hwmon/mc33xs2410_hwmon.c | 178
-rw-r--r--  drivers/hwmon/mlxreg-fan.c | 47
-rw-r--r--  drivers/hwmon/nct6683.c | 9
-rw-r--r--  drivers/hwmon/nct6694-hwmon.c | 949
-rw-r--r--  drivers/hwmon/nct6775-core.c | 17
-rw-r--r--  drivers/hwmon/nct6775-i2c.c | 2
-rw-r--r--  drivers/hwmon/nct6775-platform.c | 7
-rw-r--r--  drivers/hwmon/nct7363.c | 447
-rw-r--r--  drivers/hwmon/npcm750-pwm-fan.c | 2
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 81
-rw-r--r--  drivers/hwmon/nzxt-kraken2.c | 9
-rw-r--r--  drivers/hwmon/nzxt-smart2.c | 8
-rw-r--r--  drivers/hwmon/occ/common.c | 240
-rw-r--r--  drivers/hwmon/occ/p9_sbe.c | 8
-rw-r--r--  drivers/hwmon/oxp-sensors.c | 716
-rw-r--r--  drivers/hwmon/pc87360.c | 2
-rw-r--r--  drivers/hwmon/pc87427.c | 2
-rw-r--r--  drivers/hwmon/peci/cputemp.c | 2
-rw-r--r--  drivers/hwmon/peci/dimmtemp.c | 12
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 93
-rw-r--r--  drivers/hwmon/pmbus/Makefile | 6
-rw-r--r--  drivers/hwmon/pmbus/acbel-fsg032.c | 2
-rw-r--r--  drivers/hwmon/pmbus/adm1266.c | 2
-rw-r--r--  drivers/hwmon/pmbus/adm1275.c | 19
-rw-r--r--  drivers/hwmon/pmbus/adp1050.c | 74
-rw-r--r--  drivers/hwmon/pmbus/bel-pfe.c | 2
-rw-r--r--  drivers/hwmon/pmbus/bpa-rs600.c | 2
-rw-r--r--  drivers/hwmon/pmbus/crps.c | 74
-rw-r--r--  drivers/hwmon/pmbus/delta-ahe50dc-fan.c | 2
-rw-r--r--  drivers/hwmon/pmbus/dps920ab.c | 9
-rw-r--r--  drivers/hwmon/pmbus/fsp-3y.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ibm-cffps.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ina233.c | 191
-rw-r--r--  drivers/hwmon/pmbus/inspur-ipsps.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ir35221.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ir36021.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ir38064.c | 2
-rw-r--r--  drivers/hwmon/pmbus/irps5401.c | 2
-rw-r--r--  drivers/hwmon/pmbus/isl68137.c | 221
-rw-r--r--  drivers/hwmon/pmbus/lm25066.c | 4
-rw-r--r--  drivers/hwmon/pmbus/lt3074.c | 122
-rw-r--r--  drivers/hwmon/pmbus/lt7182s.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ltc2978.c | 87
-rw-r--r--  drivers/hwmon/pmbus/ltc3815.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max15301.c | 3
-rw-r--r--  drivers/hwmon/pmbus/max16064.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max16601.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max20730.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max20751.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max31785.c | 2
-rw-r--r--  drivers/hwmon/pmbus/max34440.c | 121
-rw-r--r--  drivers/hwmon/pmbus/max8688.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp2856.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp2869.c | 659
-rw-r--r--  drivers/hwmon/pmbus/mp2888.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp2891.c | 6
-rw-r--r--  drivers/hwmon/pmbus/mp29502.c | 670
-rw-r--r--  drivers/hwmon/pmbus/mp2975.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp2993.c | 6
-rw-r--r--  drivers/hwmon/pmbus/mp5023.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp5920.c | 2
-rw-r--r--  drivers/hwmon/pmbus/mp5990.c | 69
-rw-r--r--  drivers/hwmon/pmbus/mp9941.c | 6
-rw-r--r--  drivers/hwmon/pmbus/mpq7932.c | 6
-rw-r--r--  drivers/hwmon/pmbus/mpq8785.c | 95
-rw-r--r--  drivers/hwmon/pmbus/pim4328.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pli1209bc.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pm6764tr.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pmbus.c | 4
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 23
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 566
-rw-r--r--  drivers/hwmon/pmbus/pxe1610.c | 2
-rw-r--r--  drivers/hwmon/pmbus/q54sj108a2.c | 2
-rw-r--r--  drivers/hwmon/pmbus/stpddc60.c | 2
-rw-r--r--  drivers/hwmon/pmbus/tda38640.c | 4
-rw-r--r--  drivers/hwmon/pmbus/tps25990.c | 436
-rw-r--r--  drivers/hwmon/pmbus/tps40422.c | 2
-rw-r--r--  drivers/hwmon/pmbus/tps53679.c | 39
-rw-r--r--  drivers/hwmon/pmbus/tps546d24.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ucd9000.c | 24
-rw-r--r--  drivers/hwmon/pmbus/ucd9200.c | 2
-rw-r--r--  drivers/hwmon/pmbus/xdp710.c | 2
-rw-r--r--  drivers/hwmon/pmbus/xdpe12284.c | 2
-rw-r--r--  drivers/hwmon/pmbus/xdpe152c4.c | 2
-rw-r--r--  drivers/hwmon/pmbus/zl6100.c | 2
-rw-r--r--  drivers/hwmon/powerz.c | 8
-rw-r--r--  drivers/hwmon/pt5161l.c | 46
-rw-r--r--  drivers/hwmon/pwm-fan.c | 75
-rw-r--r--  drivers/hwmon/qnap-mcu-hwmon.c | 363
-rw-r--r--  drivers/hwmon/raspberrypi-hwmon.c | 30
-rw-r--r--  drivers/hwmon/sa67mcu-hwmon.c | 161
-rw-r--r--  drivers/hwmon/sbrmi.c | 357
-rw-r--r--  drivers/hwmon/sbtsi_temp.c | 46
-rw-r--r--  drivers/hwmon/sch5636.c | 2
-rw-r--r--  drivers/hwmon/sch56xx-common.c | 4
-rw-r--r--  drivers/hwmon/sg2042-mcu.c | 46
-rw-r--r--  drivers/hwmon/sht15.c | 2
-rw-r--r--  drivers/hwmon/sht21.c | 15
-rw-r--r--  drivers/hwmon/sht3x.c | 67
-rw-r--r--  drivers/hwmon/sht4x.c | 184
-rw-r--r--  drivers/hwmon/sis5595.c | 2
-rw-r--r--  drivers/hwmon/sl28cpld-hwmon.c | 9
-rw-r--r--  drivers/hwmon/smsc47m1.c | 2
-rw-r--r--  drivers/hwmon/spd5118.c | 367
-rw-r--r--  drivers/hwmon/surface_fan.c | 10
-rw-r--r--  drivers/hwmon/sy7636a-hwmon.c | 1
-rw-r--r--  drivers/hwmon/tmp102.c | 27
-rw-r--r--  drivers/hwmon/tmp108.c | 81
-rw-r--r--  drivers/hwmon/tmp513.c | 17
-rw-r--r--  drivers/hwmon/tps23861.c | 33
-rw-r--r--  drivers/hwmon/ultra45_env.c | 2
-rw-r--r--  drivers/hwmon/via-cputemp.c | 2
-rw-r--r--  drivers/hwmon/via686a.c | 2
-rw-r--r--  drivers/hwmon/vt1211.c | 2
-rw-r--r--  drivers/hwmon/vt8231.c | 4
-rw-r--r--  drivers/hwmon/w83627ehf.c | 9
-rw-r--r--  drivers/hwmon/w83627hf.c | 2
-rw-r--r--  drivers/hwmon/w83781d.c | 2
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 43
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 94
-rw-r--r--  drivers/hwspinlock/u8500_hsem.c | 2
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 33
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 8
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c | 98
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-preload.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-preload.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-pstop.c | 83
-rw-r--r--  drivers/hwtracing/coresight/coresight-config.c | 8
-rw-r--r--  drivers/hwtracing/coresight/coresight-config.h | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-core.c | 486
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 46
-rw-r--r--  drivers/hwtracing/coresight/coresight-ctcu-core.c | 316
-rw-r--r--  drivers/hwtracing/coresight/coresight-ctcu.h | 39
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti-core.c | 46
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti-sysfs.c | 76
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti.h | 5
-rw-r--r--  drivers/hwtracing/coresight/coresight-dummy.c | 98
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 48
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 115
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h | 7
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-core.c | 100
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | 11
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 379
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 269
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 13
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 90
-rw-r--r--  drivers/hwtracing/coresight/coresight-kunit-tests.c | 74
-rw-r--r--  drivers/hwtracing/coresight/coresight-platform.c | 53
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 39
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 89
-rw-r--r--  drivers/hwtracing/coresight/coresight-self-hosted-trace.h | 9
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 65
-rw-r--r--  drivers/hwtracing/coresight/coresight-syscfg-configfs.c | 14
-rw-r--r--  drivers/hwtracing/coresight/coresight-syscfg.c | 77
-rw-r--r--  drivers/hwtracing/coresight/coresight-sysfs.c | 88
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-core.c | 378
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 158
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 250
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 108
-rw-r--r--  drivers/hwtracing/coresight/coresight-tnoc.c | 246
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpda.c | 42
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.c | 131
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.h | 33
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-trace-id.c | 65
-rw-r--r--  drivers/hwtracing/coresight/coresight-trace-id.h | 9
-rw-r--r--  drivers/hwtracing/coresight/coresight-trbe.c | 47
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.c | 14
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.h | 3
-rw-r--r--  drivers/hwtracing/intel_th/Kconfig | 1
-rw-r--r--  drivers/hwtracing/intel_th/acpi.c | 2
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 3
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 43
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 24
-rw-r--r--  drivers/hwtracing/stm/heartbeat.c | 6
-rw-r--r--  drivers/i2c/Makefile | 7
-rw-r--r--  drivers/i2c/algos/i2c-algo-bit.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-pca.c | 6
-rw-r--r--  drivers/i2c/algos/i2c-algo-pcf.c | 7
-rw-r--r--  drivers/i2c/busses/Kconfig | 138
-rw-r--r--  drivers/i2c/busses/Makefile | 21
-rw-r--r--  drivers/i2c/busses/i2c-ali1535.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-ali15x3.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-altera.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd-asf-plat.c | 370
-rw-r--r--  drivers/i2c/busses/i2c-amd-mp2-pci.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-amd-mp2-plat.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-amd756-s4882.c | 245
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-aspeed.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-at91-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-at91-master.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-au1550.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-axxia.c | 25
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 240
-rw-r--r--  drivers/i2c/busses/i2c-bcm-kona.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 452
-rw-r--r--  drivers/i2c/busses/i2c-cbus-gpio.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cgbc.c | 406
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cros-ec-tunnel.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 117
-rw-r--r--  drivers/i2c/busses/i2c-designware-amdisp.c | 207
-rw-r--r--  drivers/i2c/busses/i2c-designware-amdpsp.c | 36
-rw-r--r--  drivers/i2c/busses/i2c-designware-common.c | 88
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 19
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 64
-rw-r--r--  drivers/i2c/busses/i2c-designware-pcidrv.c | 47
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 91
-rw-r--r--  drivers/i2c/busses/i2c-designware-slave.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-digicolor.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-dln2.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 30
-rw-r--r--  drivers/i2c/busses/i2c-emev2.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 45
-rw-r--r--  drivers/i2c/busses/i2c-gpio.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-gxp.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-highlander.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-hix5hd2.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 417
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-img-scb.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-imx-lpi2c.c | 1059
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 567
-rw-r--r--  drivers/i2c/busses/i2c-iop3xx.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 317
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-k1.c | 621
-rw-r--r--  drivers/i2c/busses/i2c-keba.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-kempld.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-ljca.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-lpc2k.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-ls2x.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-mchp-pci1xxxx.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-meson.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-microchip-corei2c.c | 230
-rw-r--r--  drivers/i2c/busses/i2c-mlxbf.c | 195
-rw-r--r--  drivers/i2c/busses/i2c-mlxcpld.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 32
-rw-r--r--  drivers/i2c/busses/i2c-mt7621.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-nct6694.c | 196
-rw-r--r--  drivers/i2c/busses/i2c-nforce2-s4985.c | 240
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 92
-rw-r--r--  drivers/i2c/busses/i2c-npcm7xx.c | 482
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 277
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.h | 13
-rw-r--r--  drivers/i2c/busses/i2c-octeon-platdrv.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 66
-rw-r--r--  drivers/i2c/busses/i2c-opal.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c | 155
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-pci.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-platform.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pca-isa.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pca-platform.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 71
-rw-r--r--  drivers/i2c/busses/i2c-piix4.h | 44
-rw-r--r--  drivers/i2c/busses/i2c-pnx.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-powermac.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-qcom-cci.c | 27
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 104
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 54
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 32
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 237
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-robotfuzz-osif.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-rtl9300.c | 539
-rw-r--r--  drivers/i2c/busses/i2c-rzv2m.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sh7760.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-simtec.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis630.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-st.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-stm32.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-stm32f4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 65
-rw-r--r--  drivers/i2c/busses/i2c-sun6i-p2wi.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-synquacer.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-tegra-bpmp.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 126
-rw-r--r--  drivers/i2c/busses/i2c-thunderx-pcidrv.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-uniphier-f.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-uniphier.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-usbio.c | 320
-rw-r--r--  drivers/i2c/busses/i2c-versatile.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-via.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-viai2c-wmt.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 33
-rw-r--r--  drivers/i2c/busses/i2c-viperboard.c | 21
-rw-r--r--  drivers/i2c/busses/i2c-virtio.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 59
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 289
-rw-r--r--  drivers/i2c/busses/i2c-xlp9xx.c | 4
-rw-r--r--  drivers/i2c/busses/scx200_acb.c | 8
-rw-r--r--  drivers/i2c/i2c-atr.c | 588
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 25
-rw-r--r--  drivers/i2c/i2c-core-base.c | 92
-rw-r--r--  drivers/i2c/i2c-core-of-prober.c | 415
-rw-r--r--  drivers/i2c/i2c-core-of.c | 2
-rw-r--r--  drivers/i2c/i2c-core-slave.c | 11
-rw-r--r--  drivers/i2c/i2c-core-smbus.c | 14
-rw-r--r--  drivers/i2c/i2c-core.h | 9
-rw-r--r--  drivers/i2c/i2c-dev.c | 17
-rw-r--r--  drivers/i2c/i2c-mux.c | 15
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r--  drivers/i2c/i2c-slave-testunit.c | 26
-rw-r--r--  drivers/i2c/i2c-smbus.c | 43
-rw-r--r--  drivers/i2c/muxes/i2c-arb-gpio-challenge.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-demux-pinctrl.c | 12
-rw-r--r--  drivers/i2c/muxes/i2c-mux-gpio.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-gpmux.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-ltc4306.c | 10
-rw-r--r--  drivers/i2c/muxes/i2c-mux-mlxcpld.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-mule.c | 7
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca9541.c | 12
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 57
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pinctrl.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-reg.c | 4
-rw-r--r--  drivers/i3c/device.c | 11
-rw-r--r--  drivers/i3c/internals.h | 46
-rw-r--r--  drivers/i3c/master.c | 243
-rw-r--r--  drivers/i3c/master/Kconfig | 36
-rw-r--r--  drivers/i3c/master/Makefile | 2
-rw-r--r--  drivers/i3c/master/adi-i3c-master.c | 1019
-rw-r--r--  drivers/i3c/master/ast2600-i3c-master.c | 2
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c | 123
-rw-r--r--  drivers/i3c/master/dw-i3c-master.h | 1
-rw-r--r--  drivers/i3c/master/i3c-master-cdns.c | 97
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/Makefile | 1
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v1.c | 9
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v2.c | 7
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/core.c | 127
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dat_v1.c | 11
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dma.c | 123
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ext_caps.c | 11
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/hci.h | 8
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c | 151
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/pio.c | 77
-rw-r--r--  drivers/i3c/master/renesas-i3c.c | 1404
-rw-r--r--  drivers/i3c/master/svc-i3c-master.c | 428
-rw-r--r--  drivers/idle/Makefile | 5
-rw-r--r--  drivers/idle/intel_idle.c | 503
-rw-r--r--  drivers/iio/accel/adis16201.c | 6
-rw-r--r--  drivers/iio/accel/adis16209.c | 2
-rw-r--r--  drivers/iio/accel/adxl313.h | 33
-rw-r--r--  drivers/iio/accel/adxl313_core.c | 939
-rw-r--r--  drivers/iio/accel/adxl313_i2c.c | 8
-rw-r--r--  drivers/iio/accel/adxl313_spi.c | 8
-rw-r--r--  drivers/iio/accel/adxl345.h | 85
-rw-r--r--  drivers/iio/accel/adxl345_core.c | 1911
-rw-r--r--  drivers/iio/accel/adxl345_i2c.c | 6
-rw-r--r--  drivers/iio/accel/adxl345_spi.c | 11
-rw-r--r--  drivers/iio/accel/adxl355_core.c | 16
-rw-r--r--  drivers/iio/accel/adxl355_i2c.c | 2
-rw-r--r--  drivers/iio/accel/adxl355_spi.c | 2
-rw-r--r--  drivers/iio/accel/adxl367.c | 206
-rw-r--r--  drivers/iio/accel/adxl367_i2c.c | 4
-rw-r--r--  drivers/iio/accel/adxl367_spi.c | 6
-rw-r--r--  drivers/iio/accel/adxl372.c | 16
-rw-r--r--  drivers/iio/accel/adxl372_i2c.c | 4
-rw-r--r--  drivers/iio/accel/adxl372_spi.c | 4
-rw-r--r--  drivers/iio/accel/adxl380.c | 26
-rw-r--r--  drivers/iio/accel/adxl380_i2c.c | 2
-rw-r--r--  drivers/iio/accel/adxl380_spi.c | 2
-rw-r--r--  drivers/iio/accel/bma180.c | 24
-rw-r--r--  drivers/iio/accel/bma220_spi.c | 15
-rw-r--r--  drivers/iio/accel/bma400_core.c | 15
-rw-r--r--  drivers/iio/accel/bma400_i2c.c | 2
-rw-r--r--  drivers/iio/accel/bma400_spi.c | 2
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 22
-rw-r--r--  drivers/iio/accel/bmc150-accel-i2c.c | 8
-rw-r--r--  drivers/iio/accel/bmc150-accel-spi.c | 6
-rw-r--r--  drivers/iio/accel/bmc150-accel.h | 3
-rw-r--r--  drivers/iio/accel/bmi088-accel-core.c | 18
-rw-r--r--  drivers/iio/accel/bmi088-accel-i2c.c | 6
-rw-r--r--  drivers/iio/accel/bmi088-accel-spi.c | 6
-rw-r--r--  drivers/iio/accel/da280.c | 4
-rw-r--r--  drivers/iio/accel/da311.c | 2
-rw-r--r--  drivers/iio/accel/dmard06.c | 4
-rw-r--r--  drivers/iio/accel/dmard09.c | 4
-rw-r--r--  drivers/iio/accel/dmard10.c | 6
-rw-r--r--  drivers/iio/accel/fxls8962af-core.c | 78
-rw-r--r--  drivers/iio/accel/fxls8962af-i2c.c | 8
-rw-r--r--  drivers/iio/accel/fxls8962af-spi.c | 6
-rw-r--r--  drivers/iio/accel/fxls8962af.h | 2
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c | 15
-rw-r--r--  drivers/iio/accel/kionix-kx022a-i2c.c | 6
-rw-r--r--  drivers/iio/accel/kionix-kx022a-spi.c | 6
-rw-r--r--  drivers/iio/accel/kionix-kx022a.c | 272
-rw-r--r--  drivers/iio/accel/kionix-kx022a.h | 14
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 534
-rw-r--r--  drivers/iio/accel/kxsd9-i2c.c | 4
-rw-r--r--  drivers/iio/accel/kxsd9-spi.c | 4
-rw-r--r--  drivers/iio/accel/kxsd9.c | 17
-rw-r--r--  drivers/iio/accel/mc3230.c | 99
-rw-r--r--  drivers/iio/accel/mma7455_core.c | 14
-rw-r--r--  drivers/iio/accel/mma7455_i2c.c | 2
-rw-r--r--  drivers/iio/accel/mma7455_spi.c | 2
-rw-r--r--  drivers/iio/accel/mma7660.c | 8
-rw-r--r--  drivers/iio/accel/mma8452.c | 103
-rw-r--r--  drivers/iio/accel/mma9551.c | 39
-rw-r--r--  drivers/iio/accel/mma9551_core.c | 41
-rw-r--r--  drivers/iio/accel/mma9553.c | 65
-rw-r--r--  drivers/iio/accel/msa311.c | 61
-rw-r--r--  drivers/iio/accel/mxc4005.c | 17
-rw-r--r--  drivers/iio/accel/mxc6255.c | 3
-rw-r--r--  drivers/iio/accel/sca3000.c | 37
-rw-r--r--  drivers/iio/accel/sca3300.c | 27
-rw-r--r--  drivers/iio/accel/ssp_accel_sensor.c | 2
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 16
-rw-r--r--  drivers/iio/accel/st_accel_i2c.c | 8
-rw-r--r--  drivers/iio/accel/st_accel_spi.c | 6
-rw-r--r--  drivers/iio/accel/stk8312.c | 16
-rw-r--r--  drivers/iio/accel/stk8ba50.c | 18
-rw-r--r--  drivers/iio/adc/88pm886-gpadc.c | 393
-rw-r--r--  drivers/iio/adc/Kconfig | 348
-rw-r--r--  drivers/iio/adc/Makefile | 18
-rw-r--r--  drivers/iio/adc/ab8500-gpadc.c | 3
-rw-r--r--  drivers/iio/adc/ad4000.c | 758
-rw-r--r--  drivers/iio/adc/ad4030.c | 1228
-rw-r--r--  drivers/iio/adc/ad4080.c | 619
-rw-r--r--  drivers/iio/adc/ad4130.c | 150
-rw-r--r--  drivers/iio/adc/ad4170-4.c | 3027
-rw-r--r--  drivers/iio/adc/ad4695.c | 1209
-rw-r--r--  drivers/iio/adc/ad4851.c | 1317
-rw-r--r--  drivers/iio/adc/ad7091r-base.c | 21
-rw-r--r--  drivers/iio/adc/ad7091r-base.h | 2
-rw-r--r--  drivers/iio/adc/ad7091r5.c | 4
-rw-r--r--  drivers/iio/adc/ad7091r8.c | 8
-rw-r--r--  drivers/iio/adc/ad7124.c | 1280
-rw-r--r--  drivers/iio/adc/ad7173.c | 1147
-rw-r--r--  drivers/iio/adc/ad7191.c | 554
-rw-r--r--  drivers/iio/adc/ad7192.c | 135
-rw-r--r--  drivers/iio/adc/ad7266.c | 18
-rw-r--r--  drivers/iio/adc/ad7280a.c | 16
-rw-r--r--  drivers/iio/adc/ad7291.c | 2
-rw-r--r--  drivers/iio/adc/ad7298.c | 11
-rw-r--r--  drivers/iio/adc/ad7380.c | 1227
-rw-r--r--  drivers/iio/adc/ad7405.c | 253
-rw-r--r--  drivers/iio/adc/ad7476.c | 477
-rw-r--r--  drivers/iio/adc/ad7606.c | 1439
-rw-r--r--  drivers/iio/adc/ad7606.h | 249
-rw-r--r--  drivers/iio/adc/ad7606_bus_iface.h | 16
-rw-r--r--  drivers/iio/adc/ad7606_par.c | 150
-rw-r--r--  drivers/iio/adc/ad7606_spi.c | 526
-rw-r--r--  drivers/iio/adc/ad7625.c | 687
-rw-r--r--  drivers/iio/adc/ad7768-1.c | 1213
-rw-r--r--  drivers/iio/adc/ad7779.c | 1050
-rw-r--r--  drivers/iio/adc/ad7780.c | 4
-rw-r--r--  drivers/iio/adc/ad7791.c | 40
-rw-r--r--  drivers/iio/adc/ad7793.c | 87
-rw-r--r--  drivers/iio/adc/ad7887.c | 11
-rw-r--r--  drivers/iio/adc/ad7923.c | 15
-rw-r--r--  drivers/iio/adc/ad7944.c | 333
-rw-r--r--  drivers/iio/adc/ad7949.c | 11
-rw-r--r--  drivers/iio/adc/ad799x.c | 48
-rw-r--r--  drivers/iio/adc/ad9467.c | 40
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c | 523
-rw-r--r--  drivers/iio/adc/ade9000.c | 1799
-rw-r--r--  drivers/iio/adc/adi-axi-adc.c | 411
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c | 150
-rw-r--r--  drivers/iio/adc/at91_adc.c | 14
-rw-r--r--  drivers/iio/adc/axp20x_adc.c | 69
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 6
-rw-r--r--  drivers/iio/adc/bcm_iproc_adc.c | 12
-rw-r--r--  drivers/iio/adc/cpcap-adc.c | 8
-rw-r--r--  drivers/iio/adc/da9150-gpadc.c | 33
-rw-r--r--  drivers/iio/adc/dln2-adc.c | 45
-rw-r--r--  drivers/iio/adc/envelope-detector.c | 4
-rw-r--r--  drivers/iio/adc/ep93xx_adc.c | 2
-rw-r--r--  drivers/iio/adc/exynos_adc.c | 288
-rw-r--r--  drivers/iio/adc/fsl-imx25-gcq.c | 2
-rw-r--r--  drivers/iio/adc/gehc-pmc-adc.c | 228
-rw-r--r--  drivers/iio/adc/hi8435.c | 8
-rw-r--r--  drivers/iio/adc/hx711.c | 13
-rw-r--r--  drivers/iio/adc/imx7d_adc.c | 6
-rw-r--r--  drivers/iio/adc/imx8qxp-adc.c | 10
-rw-r--r--  drivers/iio/adc/imx93_adc.c | 30
-rw-r--r--  drivers/iio/adc/ina2xx-adc.c | 4
-rw-r--r--  drivers/iio/adc/industrialio-adc.c | 82
-rw-r--r--  drivers/iio/adc/intel_dc_ti_adc.c | 328
-rw-r--r--  drivers/iio/adc/intel_mrfld_adc.c | 6
-rw-r--r--  drivers/iio/adc/lp8788_adc.c | 18
-rw-r--r--  drivers/iio/adc/lpc18xx_adc.c | 2
-rw-r--r--  drivers/iio/adc/ltc2471.c | 2
-rw-r--r--  drivers/iio/adc/ltc2497-core.c | 19
-rw-r--r--  drivers/iio/adc/ltc2497.h | 2
-rw-r--r--  drivers/iio/adc/max1027.c | 37
-rw-r--r--  drivers/iio/adc/max1118.c | 6
-rw-r--r--  drivers/iio/adc/max11205.c | 2
-rw-r--r--  drivers/iio/adc/max11410.c | 78
-rw-r--r--  drivers/iio/adc/max1363.c | 247
-rw-r--r--  drivers/iio/adc/max34408.c | 3
-rw-r--r--  drivers/iio/adc/max77541-adc.c | 2
-rw-r--r--  drivers/iio/adc/max9611.c | 4
-rw-r--r--  drivers/iio/adc/mcp3564.c | 2
-rw-r--r--  drivers/iio/adc/mcp3911.c | 64
-rw-r--r--  drivers/iio/adc/men_z188_adc.c | 2
-rw-r--r--  drivers/iio/adc/meson_saradc.c | 87
-rw-r--r--  drivers/iio/adc/mp2629_adc.c | 6
-rw-r--r--  drivers/iio/adc/mt6359-auxadc.c | 442
-rw-r--r--  drivers/iio/adc/mt6360-adc.c | 7
-rw-r--r--  drivers/iio/adc/mt6370-adc.c | 2
-rw-r--r--  drivers/iio/adc/mt6577_auxadc.c | 3
-rw-r--r--  drivers/iio/adc/mxs-lradc-adc.c | 27
-rw-r--r--  drivers/iio/adc/nct7201.c | 501
-rw-r--r--  drivers/iio/adc/npcm_adc.c | 4
-rw-r--r--  drivers/iio/adc/pac1921.c | 152
-rw-r--r--  drivers/iio/adc/pac1934.c | 35
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 10
-rw-r--r--  drivers/iio/adc/qcom-pm8xxx-xoadc.c | 10
-rw-r--r--  drivers/iio/adc/qcom-spmi-adc5.c | 4
-rw-r--r--  drivers/iio/adc/qcom-spmi-iadc.c | 4
-rw-r--r--  drivers/iio/adc/qcom-spmi-rradc.c | 4
-rw-r--r--  drivers/iio/adc/qcom-spmi-vadc.c | 7
-rw-r--r--  drivers/iio/adc/qcom-vadc-common.c | 2
-rw-r--r--  drivers/iio/adc/rcar-gyroadc.c | 21
-rw-r--r--  drivers/iio/adc/rn5t618-adc.c | 8
-rw-r--r--  drivers/iio/adc/rockchip_saradc.c | 72
-rw-r--r--  drivers/iio/adc/rohm-bd79112.c | 556
-rw-r--r--  drivers/iio/adc/rohm-bd79124.c | 1146
-rw-r--r--  drivers/iio/adc/rtq6056.c | 55
-rw-r--r--  drivers/iio/adc/rzg2l_adc.c | 449
-rw-r--r--  drivers/iio/adc/sd_adc_modulator.c | 2
-rw-r--r--  drivers/iio/adc/spear_adc.c | 14
-rw-r--r--  drivers/iio/adc/stm32-adc-core.c | 24
-rw-r--r--  drivers/iio/adc/stm32-adc-core.h | 17
-rw-r--r--  drivers/iio/adc/stm32-adc.c | 181
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 106
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-core.c | 3
-rw-r--r--  drivers/iio/adc/stmpe-adc.c | 4
-rw-r--r--  drivers/iio/adc/sun20i-gpadc-iio.c | 44
-rw-r--r--  drivers/iio/adc/sun4i-gpadc-iio.c | 18
-rw-r--r--  drivers/iio/adc/ti-adc081c.c | 46
-rw-r--r--  drivers/iio/adc/ti-adc0832.c | 4
-rw-r--r--  drivers/iio/adc/ti-adc084s021.c | 19
-rw-r--r--  drivers/iio/adc/ti-adc108s102.c | 7
-rw-r--r--  drivers/iio/adc/ti-adc12138.c | 34
-rw-r--r--  drivers/iio/adc/ti-adc128s052.c | 188
-rw-r--r--  drivers/iio/adc/ti-adc161s626.c | 14
-rw-r--r--  drivers/iio/adc/ti-ads1015.c | 189
-rw-r--r--  drivers/iio/adc/ti-ads1100.c | 45
-rw-r--r--  drivers/iio/adc/ti-ads1119.c | 40
-rw-r--r--  drivers/iio/adc/ti-ads124s08.c | 10
-rw-r--r--  drivers/iio/adc/ti-ads1298.c | 11
-rw-r--r--  drivers/iio/adc/ti-ads131e08.c | 38
-rw-r--r--  drivers/iio/adc/ti-ads7138.c | 749
-rw-r--r--  drivers/iio/adc/ti-ads7924.c | 16
-rw-r--r--  drivers/iio/adc/ti-ads7950.c | 15
-rw-r--r--  drivers/iio/adc/ti-ads8688.c | 6
-rw-r--r--  drivers/iio/adc/ti-lmp92064.c | 12
-rw-r--r--  drivers/iio/adc/ti-tlc4541.c | 11
-rw-r--r--  drivers/iio/adc/ti-tsc2046.c | 21
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 15
-rw-r--r--  drivers/iio/adc/twl4030-madc.c | 8
-rw-r--r--  drivers/iio/adc/twl6030-gpadc.c | 4
-rw-r--r--  drivers/iio/adc/vf610_adc.c | 149
-rw-r--r--  drivers/iio/adc/viperboard_adc.c | 4
-rw-r--r--  drivers/iio/adc/xilinx-ams.c | 49
-rw-r--r--  drivers/iio/adc/xilinx-xadc-core.c | 6
-rw-r--r--  drivers/iio/adc/xilinx-xadc-events.c | 4
-rw-r--r--  drivers/iio/adc/xilinx-xadc.h | 2
-rw-r--r--  drivers/iio/addac/ad74115.c | 41
-rw-r--r--  drivers/iio/addac/ad74413r.c | 131
-rw-r--r--  drivers/iio/addac/stx104.c | 2
-rw-r--r--  drivers/iio/afe/iio-rescale.c | 6
-rw-r--r--  drivers/iio/amplifiers/ad8366.c | 8
-rw-r--r--  drivers/iio/amplifiers/ada4250.c | 59
-rw-r--r--  drivers/iio/amplifiers/hmc425a.c | 8
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-cb.c | 1
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dma.c | 40
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c | 149
-rw-r--r--  drivers/iio/buffer/industrialio-triggered-buffer.c | 2
-rw-r--r--  drivers/iio/cdc/ad7150.c | 6
-rw-r--r--  drivers/iio/cdc/ad7746.c | 4
-rw-r--r--  drivers/iio/chemical/Kconfig | 22
-rw-r--r--  drivers/iio/chemical/Makefile | 2
-rw-r--r--  drivers/iio/chemical/ags02ma.c | 4
-rw-r--r--  drivers/iio/chemical/atlas-ezo-sensor.c | 6
-rw-r--r--  drivers/iio/chemical/atlas-sensor.c | 19
-rw-r--r--  drivers/iio/chemical/bme680.h | 13
-rw-r--r--  drivers/iio/chemical/bme680_core.c | 487
-rw-r--r--  drivers/iio/chemical/bme680_i2c.c | 7
-rw-r--r--  drivers/iio/chemical/bme680_spi.c | 15
-rw-r--r--  drivers/iio/chemical/ccs811.c | 85
-rw-r--r--  drivers/iio/chemical/ens160_core.c | 43
-rw-r--r--  drivers/iio/chemical/ens160_i2c.c | 2
-rw-r--r--  drivers/iio/chemical/ens160_spi.c | 2
-rw-r--r--  drivers/iio/chemical/mhz19b.c | 316
-rw-r--r--  drivers/iio/chemical/pms7003.c | 9
-rw-r--r--  drivers/iio/chemical/scd30_core.c | 82
-rw-r--r--  drivers/iio/chemical/scd30_i2c.c | 2
-rw-r--r--  drivers/iio/chemical/scd30_serial.c | 2
-rw-r--r--  drivers/iio/chemical/scd4x.c | 15
-rw-r--r--  drivers/iio/chemical/sen0322.c | 161
-rw-r--r--  drivers/iio/chemical/sps30.c | 8
-rw-r--r--  drivers/iio/chemical/sps30_i2c.c | 2
-rw-r--r--  drivers/iio/chemical/sps30_serial.c | 2
-rw-r--r--  drivers/iio/chemical/sunrise_co2.c | 10
-rw-r--r--  drivers/iio/common/cros_ec_sensors/Kconfig | 9
-rw-r--r--  drivers/iio/common/cros_ec_sensors/Makefile | 4
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_activity.c | 307
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c | 2
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | 2
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 97
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c | 32
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h | 56
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 32
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 11
-rw-r--r--  drivers/iio/common/inv_sensors/inv_sensors_timestamp.c | 16
-rw-r--r--  drivers/iio/common/ms_sensors/ms_sensors_i2c.c | 24
-rw-r--r--  drivers/iio/common/scmi_sensors/scmi_iio.c | 21
-rw-r--r--  drivers/iio/common/ssp_sensors/ssp_dev.c | 18
-rw-r--r--  drivers/iio/common/ssp_sensors/ssp_iio.c | 20
-rw-r--r--  drivers/iio/common/ssp_sensors/ssp_spi.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_buffer.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 71
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_i2c.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_spi.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_trigger.c | 26
-rw-r--r--  drivers/iio/dac/Kconfig | 58
-rw-r--r--  drivers/iio/dac/Makefile | 5
-rw-r--r--  drivers/iio/dac/ad3530r.c | 519
-rw-r--r--  drivers/iio/dac/ad3552r-common.c | 294
-rw-r--r--  drivers/iio/dac/ad3552r-hs.c | 884
-rw-r--r--  drivers/iio/dac/ad3552r-hs.h | 27
-rw-r--r--  drivers/iio/dac/ad3552r.c | 602
-rw-r--r--  drivers/iio/dac/ad3552r.h | 232
-rw-r--r--  drivers/iio/dac/ad5064.c | 8
-rw-r--r--  drivers/iio/dac/ad5360.c | 4
-rw-r--r--  drivers/iio/dac/ad5380.c | 93
-rw-r--r--  drivers/iio/dac/ad5421.c | 4
-rw-r--r--  drivers/iio/dac/ad5446.c | 83
-rw-r--r--  drivers/iio/dac/ad5449.c | 2
-rw-r--r--  drivers/iio/dac/ad5504.c | 65
-rw-r--r--  drivers/iio/dac/ad5592r-base.c | 158
-rw-r--r--  drivers/iio/dac/ad5592r.c | 8
-rw-r--r--  drivers/iio/dac/ad5593r.c | 8
-rw-r--r--  drivers/iio/dac/ad5624r.h | 5
-rw-r--r--  drivers/iio/dac/ad5624r_spi.c | 75
-rw-r--r--  drivers/iio/dac/ad5686-spi.c | 10
-rw-r--r--  drivers/iio/dac/ad5686.c | 66
-rw-r--r--  drivers/iio/dac/ad5686.h | 6
-rw-r--r--  drivers/iio/dac/ad5696-i2c.c | 12
-rw-r--r--  drivers/iio/dac/ad5755.c | 15
-rw-r--r--  drivers/iio/dac/ad5758.c | 2
-rw-r--r--  drivers/iio/dac/ad5761.c | 113
-rw-r--r--  drivers/iio/dac/ad5764.c | 4
-rw-r--r--  drivers/iio/dac/ad5766.c | 8
-rw-r--r--  drivers/iio/dac/ad5770r.c | 50
-rw-r--r--  drivers/iio/dac/ad5791.c | 386
-rw-r--r--  drivers/iio/dac/ad7293.c | 85
-rw-r--r--  drivers/iio/dac/ad7303.c | 6
-rw-r--r--  drivers/iio/dac/ad8460.c | 957
-rw-r--r--  drivers/iio/dac/ad8801.c | 83
-rw-r--r--  drivers/iio/dac/ad9739a.c | 6
-rw-r--r--  drivers/iio/dac/adi-axi-dac.c | 556
-rw-r--r--  drivers/iio/dac/dpot-dac.c | 4
-rw-r--r--  drivers/iio/dac/ds4424.c | 6
-rw-r--r--  drivers/iio/dac/lpc18xx_dac.c | 8
-rw-r--r--  drivers/iio/dac/ltc1660.c | 4
-rw-r--r--  drivers/iio/dac/ltc2632.c | 75
-rw-r--r--  drivers/iio/dac/ltc2688.c | 60
-rw-r--r--  drivers/iio/dac/m62332.c | 2
-rw-r--r--  drivers/iio/dac/max517.c | 6
-rw-r--r--  drivers/iio/dac/max5522.c | 4
-rw-r--r--  drivers/iio/dac/max5821.c | 38
-rw-r--r--  drivers/iio/dac/mcp4725.c | 10
-rw-r--r--  drivers/iio/dac/mcp4728.c | 6
-rw-r--r--  drivers/iio/dac/mcp4821.c | 4
-rw-r--r--  drivers/iio/dac/mcp4922.c | 2
-rw-r--r--  drivers/iio/dac/rohm-bd79703.c | 246
-rw-r--r--  drivers/iio/dac/stm32-dac-core.c | 4
-rw-r--r--  drivers/iio/dac/stm32-dac.c | 25
-rw-r--r--  drivers/iio/dac/ti-dac082s085.c | 2
-rw-r--r--  drivers/iio/dac/ti-dac5571.c | 6
-rw-r--r--  drivers/iio/dac/ti-dac7311.c | 6
-rw-r--r--  drivers/iio/dac/ti-dac7612.c | 4
-rw-r--r--  drivers/iio/dac/vf610_dac.c | 29
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy.c | 119
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy.h | 2
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy_buffer.c | 27
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy_events.c | 32
-rw-r--r--  drivers/iio/filter/admv8818.c | 244
-rw-r--r--  drivers/iio/frequency/ad9523.c | 4
-rw-r--r--  drivers/iio/frequency/adf4350.c | 31
-rw-r--r--  drivers/iio/frequency/adf4371.c | 108
-rw-r--r--  drivers/iio/frequency/adf4377.c | 4
-rw-r--r--  drivers/iio/frequency/admv1013.c | 8
-rw-r--r--  drivers/iio/frequency/admv1014.c | 4
-rw-r--r--  drivers/iio/frequency/adrf6780.c | 4
-rw-r--r--  drivers/iio/gyro/Kconfig | 2
-rw-r--r--  drivers/iio/gyro/adis16080.c | 2
-rw-r--r--  drivers/iio/gyro/adis16136.c | 2
-rw-r--r--  drivers/iio/gyro/adis16260.c | 4
-rw-r--r--  drivers/iio/gyro/adxrs290.c | 16
-rw-r--r--  drivers/iio/gyro/adxrs450.c | 7
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 29
-rw-r--r--  drivers/iio/gyro/bmg160_i2c.c | 11
-rw-r--r--  drivers/iio/gyro/bmg160_spi.c | 12
-rw-r--r--  drivers/iio/gyro/fxas21002c_core.c | 18
-rw-r--r--  drivers/iio/gyro/fxas21002c_i2c.c | 2
-rw-r--r--  drivers/iio/gyro/fxas21002c_spi.c | 2
-rw-r--r--  drivers/iio/gyro/hid-sensor-gyro-3d.c | 12
-rw-r--r--  drivers/iio/gyro/itg3200_buffer.c | 2
-rw-r--r--  drivers/iio/gyro/mpu3050-core.c | 9
-rw-r--r--  drivers/iio/gyro/mpu3050-i2c.c | 5
-rw-r--r--  drivers/iio/gyro/ssp_gyro_sensor.c | 2
-rw-r--r--  drivers/iio/gyro/st_gyro_core.c | 6
-rw-r--r--  drivers/iio/gyro/st_gyro_i2c.c | 6
-rw-r--r--  drivers/iio/gyro/st_gyro_spi.c | 6
-rw-r--r--  drivers/iio/health/afe4403.c | 54
-rw-r--r--  drivers/iio/health/afe4404.c | 54
-rw-r--r--  drivers/iio/health/max30100.c | 5
-rw-r--r--  drivers/iio/health/max30102.c | 9
-rw-r--r--  drivers/iio/humidity/am2315.c | 8
-rw-r--r--  drivers/iio/humidity/dht11.c | 11
-rw-r--r--  drivers/iio/humidity/hdc100x.c | 70
-rw-r--r--  drivers/iio/humidity/hdc2010.c | 14
-rw-r--r--  drivers/iio/humidity/hid-sensor-humidity.c | 8
-rw-r--r--  drivers/iio/humidity/hts221.h | 2
-rw-r--r--  drivers/iio/humidity/hts221_buffer.c | 3
-rw-r--r--  drivers/iio/humidity/hts221_core.c | 93
-rw-r--r--  drivers/iio/humidity/hts221_i2c.c | 8
-rw-r--r--  drivers/iio/humidity/hts221_spi.c | 6
-rw-r--r--  drivers/iio/humidity/htu21.c | 6
-rw-r--r--  drivers/iio/imu/Kconfig | 28
-rw-r--r--  drivers/iio/imu/Makefile | 4
-rw-r--r--  drivers/iio/imu/adis.c | 64
-rw-r--r--  drivers/iio/imu/adis16400.c | 318
-rw-r--r--  drivers/iio/imu/adis16460.c | 6
-rw-r--r--  drivers/iio/imu/adis16475.c | 5
-rw-r--r--  drivers/iio/imu/adis16480.c | 79
-rw-r--r--  drivers/iio/imu/adis16550.c | 1147
-rw-r--r--  drivers/iio/imu/adis_buffer.c | 7
-rw-r--r--  drivers/iio/imu/adis_trigger.c | 2
-rw-r--r--  drivers/iio/imu/bmi160/bmi160.h | 2
-rw-r--r--  drivers/iio/imu/bmi160/bmi160_core.c | 38
-rw-r--r--  drivers/iio/imu/bmi160/bmi160_i2c.c | 10
-rw-r--r--  drivers/iio/imu/bmi160/bmi160_spi.c | 10
-rw-r--r--  drivers/iio/imu/bmi270/Kconfig | 33
-rw-r--r--  drivers/iio/imu/bmi270/Makefile | 7
-rw-r--r--  drivers/iio/imu/bmi270/bmi270.h | 25
-rw-r--r--  drivers/iio/imu/bmi270/bmi270_core.c | 1323
-rw-r--r--  drivers/iio/imu/bmi270/bmi270_i2c.c | 70
-rw-r--r--  drivers/iio/imu/bmi270/bmi270_spi.c | 94
-rw-r--r--  drivers/iio/imu/bmi323/bmi323.h | 1
-rw-r--r--  drivers/iio/imu/bmi323/bmi323_core.c | 71
-rw-r--r--  drivers/iio/imu/bmi323/bmi323_i2c.c | 2
-rw-r--r--  drivers/iio/imu/bmi323/bmi323_spi.c | 2
-rw-r--r--  drivers/iio/imu/bno055/bno055.c | 70
-rw-r--r--  drivers/iio/imu/bno055/bno055_i2c.c | 2
-rw-r--r--  drivers/iio/imu/bno055/bno055_ser_core.c | 2
-rw-r--r--  drivers/iio/imu/fxos8700_core.c | 1
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600.h | 66
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 412
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 85
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h | 10
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 262
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 98
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c | 24
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c | 27
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c | 28
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c | 17
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c | 56
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 92
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 22
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 6
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 6
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c | 21
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 2
-rw-r--r--  drivers/iio/imu/kmx61.c | 44
-rw-r--r--  drivers/iio/imu/smi240.c | 620
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/Kconfig | 18
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 2
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 6
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 61
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c | 8
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c | 10
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 73
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c | 6
-rw-r--r--  drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c | 4
-rw-r--r--  drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c | 10
-rw-r--r--  drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c | 8
-rw-r--r--  drivers/iio/industrialio-acpi.c | 48
-rw-r--r--  drivers/iio/industrialio-backend.c | 284
-rw-r--r--  drivers/iio/industrialio-buffer.c | 4
-rw-r--r--  drivers/iio/industrialio-core.c | 68
-rw-r--r--  drivers/iio/industrialio-event.c | 2
-rw-r--r--  drivers/iio/industrialio-gts-helper.c | 384
-rw-r--r--  drivers/iio/inkern.c | 103
-rw-r--r--  drivers/iio/light/Kconfig | 85
-rw-r--r--  drivers/iio/light/Makefile | 6
-rw-r--r--  drivers/iio/light/acpi-als.c | 21
-rw-r--r--  drivers/iio/light/adjd_s311.c | 12
-rw-r--r--  drivers/iio/light/adux1020.c | 18
-rw-r--r--  drivers/iio/light/al3000a.c | 210
-rw-r--r--  drivers/iio/light/al3010.c | 106
-rw-r--r--  drivers/iio/light/al3320a.c | 116
-rw-r--r--  drivers/iio/light/apds9160.c | 1592
-rw-r--r--  drivers/iio/light/apds9300.c | 23
-rw-r--r--  drivers/iio/light/apds9306.c | 60
-rw-r--r--  drivers/iio/light/apds9960.c | 16
-rw-r--r--  drivers/iio/light/as73211.c | 70
-rw-r--r--  drivers/iio/light/bh1745.c | 81
-rw-r--r--  drivers/iio/light/bh1750.c | 24
-rw-r--r--  drivers/iio/light/bh1780.c | 3
-rw-r--r--  drivers/iio/light/cm32181.c | 5
-rw-r--r--  drivers/iio/light/cm3232.c | 40
-rw-r--r--  drivers/iio/light/cm3323.c | 4
-rw-r--r--  drivers/iio/light/cm3605.c | 4
-rw-r--r--  drivers/iio/light/cm36651.c | 4
-rw-r--r--  drivers/iio/light/cros_ec_light_prox.c | 2
-rw-r--r--  drivers/iio/light/gp2ap002.c | 6
-rw-r--r--  drivers/iio/light/gp2ap020a00f.c | 9
-rw-r--r--  drivers/iio/light/hid-sensor-als.c | 17
-rw-r--r--  drivers/iio/light/hid-sensor-prox.c | 230
-rw-r--r--  drivers/iio/light/iqs621-als.c | 2
-rw-r--r--  drivers/iio/light/isl29018.c | 40
-rw-r--r--  drivers/iio/light/isl29028.c | 17
-rw-r--r--  drivers/iio/light/isl29125.c | 21
-rw-r--r--  drivers/iio/light/isl76682.c | 2
-rw-r--r--  drivers/iio/light/jsa1212.c | 5
-rw-r--r--  drivers/iio/light/lm3533-als.c | 8
-rw-r--r--  drivers/iio/light/ltr390.c | 547
-rw-r--r--  drivers/iio/light/ltr501.c | 225
-rw-r--r--  drivers/iio/light/ltrf216a.c | 6
-rw-r--r--  drivers/iio/light/max44000.c | 18
-rw-r--r--  drivers/iio/light/max44009.c | 2
-rw-r--r--  drivers/iio/light/opt3001.c | 196
-rw-r--r--  drivers/iio/light/opt4001.c | 5
-rw-r--r--  drivers/iio/light/opt4060.c | 1341
-rw-r--r--  drivers/iio/light/pa12203001.c | 15
-rw-r--r--  drivers/iio/light/rohm-bu27008.c | 1635
-rw-r--r--  drivers/iio/light/rohm-bu27034.c | 94
-rw-r--r--  drivers/iio/light/rpr0521.c | 95
-rw-r--r--  drivers/iio/light/si1145.c | 30
-rw-r--r--  drivers/iio/light/st_uvis25.h | 5
-rw-r--r--  drivers/iio/light/st_uvis25_core.c | 24
-rw-r--r--  drivers/iio/light/st_uvis25_i2c.c | 6
-rw-r--r--  drivers/iio/light/st_uvis25_spi.c | 6
-rw-r--r--  drivers/iio/light/stk3310.c | 23
-rw-r--r--  drivers/iio/light/tcs3414.c | 24
-rw-r--r--  drivers/iio/light/tcs3472.c | 25
-rw-r--r--  drivers/iio/light/tsl2563.c | 6
-rw-r--r--  drivers/iio/light/tsl2583.c | 16
-rw-r--r--  drivers/iio/light/tsl2591.c | 6
-rw-r--r--  drivers/iio/light/tsl2772.c | 10
-rw-r--r--  drivers/iio/light/us5182d.c | 20
-rw-r--r--  drivers/iio/light/vcnl4000.c | 109
-rw-r--r--  drivers/iio/light/vcnl4035.c | 61
-rw-r--r--  drivers/iio/light/veml3235.c | 547
-rw-r--r--  drivers/iio/light/veml6030.c | 905
-rw-r--r--  drivers/iio/light/veml6040.c | 7
-rw-r--r--  drivers/iio/light/veml6046x00.c | 1030
-rw-r--r--  drivers/iio/light/veml6070.c | 201
-rw-r--r--  drivers/iio/light/veml6075.c | 10
-rw-r--r--  drivers/iio/light/vl6180.c | 257
-rw-r--r--  drivers/iio/light/zopt2201.c | 42
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 39
-rw-r--r--  drivers/iio/magnetometer/Makefile | 5
-rw-r--r--  drivers/iio/magnetometer/af8133j.c | 13
-rw-r--r--  drivers/iio/magnetometer/ak8974.c | 20
-rw-r--r--  drivers/iio/magnetometer/ak8975.c | 11
-rw-r--r--  drivers/iio/magnetometer/als31300.c | 490
-rw-r--r--  drivers/iio/magnetometer/bmc150_magn.c | 47
-rw-r--r--  drivers/iio/magnetometer/bmc150_magn_i2c.c | 13
-rw-r--r--  drivers/iio/magnetometer/bmc150_magn_spi.c | 13
-rw-r--r--  drivers/iio/magnetometer/hid-sensor-magn-3d.c | 10
-rw-r--r--  drivers/iio/magnetometer/hmc5843.h | 4
-rw-r--r--  drivers/iio/magnetometer/hmc5843_core.c | 8
-rw-r--r--  drivers/iio/magnetometer/hmc5843_i2c.c | 4
-rw-r--r--  drivers/iio/magnetometer/hmc5843_spi.c | 3
-rw-r--r--  drivers/iio/magnetometer/mag3110.c | 167
-rw-r--r--  drivers/iio/magnetometer/mmc35240.c | 7
-rw-r--r--  drivers/iio/magnetometer/rm3100-core.c | 19
-rw-r--r--  drivers/iio/magnetometer/rm3100-i2c.c | 2
-rw-r--r--  drivers/iio/magnetometer/rm3100-spi.c | 3
-rw-r--r--  drivers/iio/magnetometer/si7210.c | 446
-rw-r--r--  drivers/iio/magnetometer/st_magn_core.c | 6
-rw-r--r--  drivers/iio/magnetometer/st_magn_i2c.c | 6
-rw-r--r--  drivers/iio/magnetometer/st_magn_spi.c | 6
-rw-r--r--  drivers/iio/magnetometer/tlv493d.c | 526
-rw-r--r--  drivers/iio/magnetometer/tmag5273.c | 9
-rw-r--r--  drivers/iio/magnetometer/yamaha-yas530.c | 25
-rw-r--r--  drivers/iio/multiplexer/iio-mux.c | 86
-rw-r--r--  drivers/iio/orientation/hid-sensor-incl-3d.c | 12
-rw-r--r--  drivers/iio/orientation/hid-sensor-rotation.c | 14
-rw-r--r--  drivers/iio/position/hid-sensor-custom-intel-hinge.c | 12
-rw-r--r--  drivers/iio/position/iqs624-pos.c | 2
-rw-r--r--  drivers/iio/potentiometer/ad5272.c | 4
-rw-r--r--  drivers/iio/potentiometer/ds1803.c | 5
-rw-r--r--  drivers/iio/potentiometer/max5432.c | 2
-rw-r--r--  drivers/iio/potentiometer/max5487.c | 2
-rw-r--r--  drivers/iio/potentiometer/mcp4018.c | 4
-rw-r--r--  drivers/iio/potentiometer/mcp41010.c | 4
-rw-r--r--  drivers/iio/potentiometer/mcp4131.c | 5
-rw-r--r--  drivers/iio/potentiometer/mcp4531.c | 4
-rw-r--r--  drivers/iio/potentiometer/tpl0102.c | 2
-rw-r--r--  drivers/iio/potentiostat/lmp91000.c | 8
-rw-r--r--  drivers/iio/pressure/abp060mg.c | 6
-rw-r--r--  drivers/iio/pressure/bmp280-core.c | 794
-rw-r--r--  drivers/iio/pressure/bmp280-i2c.c | 10
-rw-r--r--  drivers/iio/pressure/bmp280-regmap.c | 10
-rw-r--r--  drivers/iio/pressure/bmp280-spi.c | 16
-rw-r--r--  drivers/iio/pressure/bmp280.h | 64
-rw-r--r--  drivers/iio/pressure/cros_ec_baro.c | 2
-rw-r--r--  drivers/iio/pressure/dlhl60d.c | 62
-rw-r--r--  drivers/iio/pressure/dps310.c | 4
-rw-r--r--  drivers/iio/pressure/hid-sensor-press.c | 17
-rw-r--r--  drivers/iio/pressure/hp03.c | 2
-rw-r--r--  drivers/iio/pressure/hp206c.c | 4
-rw-r--r--  drivers/iio/pressure/hsc030pa.c | 6
-rw-r--r--  drivers/iio/pressure/hsc030pa.h | 2
-rw-r--r--  drivers/iio/pressure/hsc030pa_i2c.c | 6
-rw-r--r--  drivers/iio/pressure/hsc030pa_spi.c | 6
-rw-r--r--  drivers/iio/pressure/icp10100.c | 16
-rw-r--r--  drivers/iio/pressure/mpl115.c | 4
-rw-r--r--  drivers/iio/pressure/mpl115_i2c.c | 2
-rw-r--r--  drivers/iio/pressure/mpl115_spi.c | 4
-rw-r--r--  drivers/iio/pressure/mpl3115.c | 104
-rw-r--r--  drivers/iio/pressure/mprls0025pa.c | 2
-rw-r--r--  drivers/iio/pressure/mprls0025pa.h | 17
-rw-r--r--  drivers/iio/pressure/mprls0025pa_i2c.c | 11
-rw-r--r--  drivers/iio/pressure/mprls0025pa_spi.c | 6
-rw-r--r--  drivers/iio/pressure/ms5611_core.c | 12
-rw-r--r--  drivers/iio/pressure/ms5611_i2c.c | 2
-rw-r--r--  drivers/iio/pressure/ms5611_spi.c | 3
-rw-r--r--  drivers/iio/pressure/ms5637.c | 6
-rw-r--r--  drivers/iio/pressure/rohm-bm1390.c | 97
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 6
-rw-r--r--  drivers/iio/pressure/st_pressure_i2c.c | 8
-rw-r--r--  drivers/iio/pressure/st_pressure_spi.c | 6
-rw-r--r--  drivers/iio/pressure/zpa2326.c | 37
-rw-r--r--  drivers/iio/pressure/zpa2326_i2c.c | 3
-rw-r--r--  drivers/iio/pressure/zpa2326_spi.c | 6
-rw-r--r--  drivers/iio/proximity/Kconfig | 9
-rw-r--r--  drivers/iio/proximity/Makefile | 1
-rw-r--r--  drivers/iio/proximity/as3935.c | 10
-rw-r--r--  drivers/iio/proximity/aw96103.c | 4
-rw-r--r--  drivers/iio/proximity/cros_ec_mkbp_proximity.c | 21
-rw-r--r--  drivers/iio/proximity/d3323aa.c | 815
-rw-r--r--  drivers/iio/proximity/hx9023s.c | 118
-rw-r--r--  drivers/iio/proximity/irsd200.c | 46
-rw-r--r--  drivers/iio/proximity/isl29501.c | 20
-rw-r--r--  drivers/iio/proximity/mb1232.c | 19
-rw-r--r--  drivers/iio/proximity/ping.c | 6
-rw-r--r--  drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 21
-rw-r--r--  drivers/iio/proximity/srf04.c | 12
-rw-r--r--  drivers/iio/proximity/srf08.c | 22
-rw-r--r--  drivers/iio/proximity/sx9310.c | 27
-rw-r--r--  drivers/iio/proximity/sx9324.c | 43
-rw-r--r--  drivers/iio/proximity/sx9360.c | 22
-rw-r--r--  drivers/iio/proximity/sx9500.c | 47
-rw-r--r--  drivers/iio/proximity/sx_common.c | 37
-rw-r--r--  drivers/iio/proximity/sx_common.h | 8
-rw-r--r--  drivers/iio/proximity/vcnl3020.c | 20
-rw-r--r--  drivers/iio/proximity/vl53l0x-i2c.c | 187
-rw-r--r--  drivers/iio/resolver/ad2s1200.c | 5
-rw-r--r--  drivers/iio/resolver/ad2s1210.c | 24
-rw-r--r--  drivers/iio/resolver/ad2s90.c | 4
-rw-r--r--  drivers/iio/temperature/Kconfig | 10
-rw-r--r--  drivers/iio/temperature/hid-sensor-temperature.c | 13
-rw-r--r--  drivers/iio/temperature/ltc2983.c | 4
-rw-r--r--  drivers/iio/temperature/maxim_thermocouple.c | 61
-rw-r--r--  drivers/iio/temperature/mcp9600.c | 157
-rw-r--r--  drivers/iio/temperature/mlx90614.c | 1
-rw-r--r--  drivers/iio/temperature/mlx90632.c | 5
-rw-r--r--  drivers/iio/temperature/mlx90635.c | 9
-rw-r--r--  drivers/iio/temperature/tmp006.c | 127
-rw-r--r--  drivers/iio/temperature/tmp007.c | 4
-rw-r--r--  drivers/iio/temperature/tsys01.c | 6
-rw-r--r--  drivers/iio/temperature/tsys02d.c | 4
-rw-r--r--  drivers/iio/test/Kconfig | 14
-rw-r--r--  drivers/iio/test/Makefile | 1
-rw-r--r--  drivers/iio/test/iio-test-format.c | 2
-rw-r--r--  drivers/iio/test/iio-test-gts.c | 4
-rw-r--r--  drivers/iio/test/iio-test-multiply.c | 212
-rw-r--r--  drivers/iio/test/iio-test-rescale.c | 8
-rw-r--r--  drivers/iio/trigger/iio-trig-hrtimer.c | 4
-rw-r--r--  drivers/iio/trigger/iio-trig-interrupt.c | 2
-rw-r--r--  drivers/iio/trigger/stm32-lptimer-trigger.c | 78
-rw-r--r--  drivers/iio/trigger/stm32-timer-trigger.c | 76
-rw-r--r--  drivers/infiniband/Kconfig | 2
-rw-r--r--  drivers/infiniband/core/Makefile | 4
-rw-r--r--  drivers/infiniband/core/addr.c | 85
-rw-r--r--  drivers/infiniband/core/agent.c | 3
-rw-r--r--  drivers/infiniband/core/cache.c | 45
-rw-r--r--  drivers/infiniband/core/cm.c | 291
-rw-r--r--  drivers/infiniband/core/cm_trace.h | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 205
-rw-r--r--  drivers/infiniband/core/cma_priv.h | 4
-rw-r--r--  drivers/infiniband/core/cma_trace.h | 2
-rw-r--r--  drivers/infiniband/core/counters.c | 54
-rw-r--r--  drivers/infiniband/core/cq.c | 12
-rw-r--r--  drivers/infiniband/core/device.c | 216
-rw-r--r--  drivers/infiniband/core/iwcm.c | 33
-rw-r--r--  drivers/infiniband/core/mad.c | 506
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 76
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 43
-rw-r--r--  drivers/infiniband/core/nldev.c | 84
-rw-r--r--  drivers/infiniband/core/rdma_core.c | 41
-rw-r--r--  drivers/infiniband/core/rdma_core.h | 1
-rw-r--r--  drivers/infiniband/core/restrack.c | 2
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 30
-rw-r--r--  drivers/infiniband/core/sa_query.c | 283
-rw-r--r--  drivers/infiniband/core/sysfs.c | 15
-rw-r--r--  drivers/infiniband/core/ucaps.c | 267
-rw-r--r--  drivers/infiniband/core/ucma.c | 143
-rw-r--r--  drivers/infiniband/core/ud_header.c | 83
-rw-r--r--  drivers/infiniband/core/umem.c | 36
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 280
-rw-r--r--  drivers/infiniband/core/uverbs.h | 29
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 202
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 45
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c | 42
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c | 87
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_device.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_dmah.c | 145
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c | 172
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_qp.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 1
-rw-r--r--  drivers/infiniband/core/verbs.c | 20
-rw-r--r--  drivers/infiniband/hw/Makefile | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 84
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.c | 396
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.h | 36
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 206
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.h | 26
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 443
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 26
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 1196
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 227
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 35
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 84
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 56
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 63
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 278
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 15
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h | 109
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 14
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 8
-rw-r--r--  drivers/infiniband/hw/efa/efa.h | 13
-rw-r--r--  drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 80
-rw-r--r--  drivers/infiniband/hw/efa/efa_admin_defs.h | 4
-rw-r--r--  drivers/infiniband/hw/efa/efa_com.c | 18
-rw-r--r--  drivers/infiniband/hw/efa/efa_com.h | 6
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.c | 59
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.h | 15
-rw-r--r--  drivers/infiniband/hw/efa/efa_io_defs.h | 106
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 29
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 148
-rw-r--r--  drivers/infiniband/hw/erdma/Kconfig | 2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma.h | 14
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cm.c | 72
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cmdq.c | 26
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cq.c | 65
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_eq.c | 6
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_hw.h | 135
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_main.c | 62
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_qp.c | 301
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c | 689
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.h | 173
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 96
-rw-r--r--  drivers/infiniband/hw/hfi1/aspm.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 28
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c | 28
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.h | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/device.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 14
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/intr.c | 31
-rw-r--r--  drivers/infiniband/hw/hfi1/iowait.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/qsfp.c | 20
-rw-r--r--  drivers/infiniband/hw/hfi1/qsfp.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 25
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 13
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/Kconfig | 20
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 10
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_debugfs.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 55
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 125
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 511
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 27
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 44
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 149
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 103
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_restrack.c | 10
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_trace.h | 216
-rw-r--r--  drivers/infiniband/hw/ionic/Kconfig | 15
-rw-r--r--  drivers/infiniband/hw/ionic/Makefile | 9
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_admin.c | 1229
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_controlpath.c | 2679
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_datapath.c | 1399
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_fw.h | 1029
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_hw_stats.c | 484
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_ibdev.c | 440
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_ibdev.h | 517
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_lif_cfg.c | 111
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_lif_cfg.h | 66
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_pgtbl.c | 143
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_queue.c | 52
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_queue.h | 234
-rw-r--r--  drivers/infiniband/hw/ionic/ionic_res.h | 154
-rw-r--r--  drivers/infiniband/hw/irdma/Kconfig | 8
-rw-r--r--  drivers/infiniband/hw/irdma/Makefile | 4
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 5
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 1442
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h | 264
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.c | 18
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.h | 19
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 365
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_hw.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_if.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/icrdma_hw.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/icrdma_hw.h | 5
-rw-r--r--  drivers/infiniband/hw/irdma/icrdma_if.c | 343
-rw-r--r--  drivers/infiniband/hw/irdma/ig3rdma_hw.c | 170
-rw-r--r--  drivers/infiniband/hw/irdma/ig3rdma_hw.h | 32
-rw-r--r--  drivers/infiniband/hw/irdma/ig3rdma_if.c | 232
-rw-r--r--  drivers/infiniband/hw/irdma/irdma.h | 22
-rw-r--r--  drivers/infiniband/hw/irdma/main.c | 324
-rw-r--r--  drivers/infiniband/hw/irdma/main.h | 40
-rw-r--r--  drivers/infiniband/hw/irdma/osdep.h | 12
-rw-r--r--  drivers/infiniband/hw/irdma/pble.c | 22
-rw-r--r--  drivers/infiniband/hw/irdma/protos.h | 5
-rw-r--r--  drivers/infiniband/hw/irdma/puda.c | 19
-rw-r--r--  drivers/infiniband/hw/irdma/puda.h | 9
-rw-r--r--  drivers/infiniband/hw/irdma/type.h | 225
-rw-r--r--  drivers/infiniband/hw/irdma/uda_d.h | 5
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c | 303
-rw-r--r--  drivers/infiniband/hw/irdma/user.h | 267
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 238
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 844
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.h | 50
-rw-r--r--  drivers/infiniband/hw/irdma/virtchnl.c | 618
-rw-r--r--  drivers/infiniband/hw/irdma/virtchnl.h | 176
-rw-r--r--  drivers/infiniband/hw/mana/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/mana/ah.c | 58
-rw-r--r--  drivers/infiniband/hw/mana/counters.c | 179
-rw-r--r--  drivers/infiniband/hw/mana/counters.h | 62
-rw-r--r--  drivers/infiniband/hw/mana/cq.c | 252
-rw-r--r--  drivers/infiniband/hw/mana/device.c | 223
-rw-r--r--  drivers/infiniband/hw/mana/main.c | 197
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h | 261
-rw-r--r--  drivers/infiniband/hw/mana/mr.c | 136
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 257
-rw-r--r--  drivers/infiniband/hw/mana/shadow_queue.h | 115
-rw-r--r--  drivers/infiniband/hw/mana/wr.c | 168
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 60
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 290
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 15
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 229
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx5/data_direct.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 147
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.h | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/dm.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/dmah.c | 54
-rw-r--r--  drivers/infiniband/hw/mlx5/dmah.h | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 739
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.h | 21
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c | 15
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 347
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 139
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c257
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c170
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c65
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h1
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c30
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c27
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c408
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h3
-rw-r--r--drivers/infiniband/hw/qib/Kconfig17
-rw-r--r--drivers/infiniband/hw/qib/Makefile17
-rw-r--r--drivers/infiniband/hw/qib/qib.h1492
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h149
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h798
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c274
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.h45
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c906
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c798
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c271
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2401
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c548
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3533
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4596
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c8474
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1782
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c240
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2449
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h300
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c598
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c454
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h188
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2131
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c314
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1445
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c999
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c731
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c502
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c566
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c521
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c583
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c137
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1470
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1705
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h398
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c62
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c150
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c87
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c66
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c28
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h3
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c28
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig5
-rw-r--r--drivers/infiniband/sw/rxe/Makefile2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c63
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h41
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h72
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c71
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c73
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c578
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c25
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c48
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c58
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h51
-rw-r--r--drivers/infiniband/sw/siw/Kconfig5
-rw-r--r--drivers/infiniband/sw/siw/siw.h52
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c45
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c28
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c54
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c31
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c71
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c77
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c153
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c14
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c16
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile3
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/ff-core.c93
-rw-r--r--drivers/input/ff-memless.c25
-rw-r--r--drivers/input/gameport/gameport.c8
-rw-r--r--drivers/input/input-compat.c30
-rw-r--r--drivers/input/input-compat.h3
-rw-r--r--drivers/input/input-mt.c48
-rw-r--r--drivers/input/input-poller.c5
-rw-r--r--drivers/input/input.c522
-rw-r--r--drivers/input/joystick/db9.c34
-rw-r--r--drivers/input/joystick/fsia6b.c2
-rw-r--r--drivers/input/joystick/gamecon.c26
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c48
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c58
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c36
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c13
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/n64joy.c37
-rw-r--r--drivers/input/joystick/psxpad-spi.c6
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/turbografx.c26
-rw-r--r--drivers/input/joystick/walkera0701.c3
-rw-r--r--drivers/input/joystick/xpad.c311
-rw-r--r--drivers/input/keyboard/Kconfig51
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/adp5520-keys.c2
-rw-r--r--drivers/input/keyboard/adp5585-keys.c371
-rw-r--r--drivers/input/keyboard/adp5588-keys.c13
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1069
-rw-r--r--drivers/input/keyboard/applespi.c72
-rw-r--r--drivers/input/keyboard/atkbd.c26
-rw-r--r--drivers/input/keyboard/cap11xx.c12
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c8
-rw-r--r--drivers/input/keyboard/cypress-sf.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c3
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c10
-rw-r--r--drivers/input/keyboard/gpio_keys.c24
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c12
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/input/keyboard/imx_keypad.c32
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c17
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c2
-rw-r--r--drivers/input/keyboard/lm8323.c52
-rw-r--r--drivers/input/keyboard/locomokbd.c7
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c18
-rw-r--r--drivers/input/keyboard/maple_keyb.c9
-rw-r--r--drivers/input/keyboard/matrix_keypad.c57
-rw-r--r--drivers/input/keyboard/max7360-keypad.c308
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c45
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c43
-rw-r--r--drivers/input/keyboard/omap-keypad.c18
-rw-r--r--drivers/input/keyboard/omap4-keypad.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c546
-rw-r--r--drivers/input/keyboard/samsung-keypad.c137
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c30
-rw-r--r--drivers/input/keyboard/spear-keyboard.c80
-rw-r--r--drivers/input/keyboard/st-keyscan.c19
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c2
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c8
-rw-r--r--drivers/input/keyboard/sunkbd.c5
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c305
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c13
-rw-r--r--drivers/input/keyboard/tegra-kbc.c6
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c35
-rw-r--r--drivers/input/misc/88pm80x_onkey.c2
-rw-r--r--drivers/input/misc/Kconfig56
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/ad714x.c13
-rw-r--r--drivers/input/misc/adxl34x.c1
-rw-r--r--drivers/input/misc/ati_remote2.c57
-rw-r--r--drivers/input/misc/aw86927.c846
-rw-r--r--drivers/input/misc/cm109.c167
-rw-r--r--drivers/input/misc/cma3000_d0x.c17
-rw-r--r--drivers/input/misc/cs40l50-vibra.c9
-rw-r--r--drivers/input/misc/da7280.c26
-rw-r--r--drivers/input/misc/da9052_onkey.c4
-rw-r--r--drivers/input/misc/da9055_onkey.c4
-rw-r--r--drivers/input/misc/drv260x.c50
-rw-r--r--drivers/input/misc/drv2665.c46
-rw-r--r--drivers/input/misc/drv2667.c46
-rw-r--r--drivers/input/misc/gpio-beeper.c2
-rw-r--r--drivers/input/misc/hisi_powerkey.c2
-rw-r--r--drivers/input/misc/ibm-panel.c5
-rw-r--r--drivers/input/misc/ideapad_slidebar.c30
-rw-r--r--drivers/input/misc/ims-pcu.c8
-rw-r--r--drivers/input/misc/iqs269a.c55
-rw-r--r--drivers/input/misc/iqs626a.c24
-rw-r--r--drivers/input/misc/iqs7222.c90
-rw-r--r--drivers/input/misc/kxtj9.c16
-rw-r--r--drivers/input/misc/m68kspkr.c2
-rw-r--r--drivers/input/misc/max7360-rotary.c192
-rw-r--r--drivers/input/misc/max77693-haptic.c57
-rw-r--r--drivers/input/misc/max8997_haptic.c107
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c3
-rw-r--r--drivers/input/misc/mma8450.c16
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c10
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c2
-rw-r--r--drivers/input/misc/pcap_keys.c2
-rw-r--r--drivers/input/misc/pcf50633-input.c113
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c18
-rw-r--r--drivers/input/misc/powermate.c11
-rw-r--r--drivers/input/misc/pwm-beeper.c12
-rw-r--r--drivers/input/misc/qnap-mcu-input.c153
-rw-r--r--drivers/input/misc/regulator-haptic.c27
-rw-r--r--drivers/input/misc/rotary_encoder.c23
-rw-r--r--drivers/input/misc/soc_button_array.c4
-rw-r--r--drivers/input/misc/sparcspkr.c81
-rw-r--r--drivers/input/misc/tps65219-pwrbutton.c2
-rw-r--r--drivers/input/misc/tps6594-pwrbutton.c126
-rw-r--r--drivers/input/misc/twl4030-vibra.c11
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/misc/wistron_btns.c4
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/alps.c18
-rw-r--r--drivers/input/mouse/amimouse.c2
-rw-r--r--drivers/input/mouse/byd.c9
-rw-r--r--drivers/input/mouse/cyapa.c4
-rw-r--r--drivers/input/mouse/elan_i2c_core.c231
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c14
-rw-r--r--drivers/input/mouse/lifebook.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/psmouse-smbus.c28
-rw-r--r--drivers/input/mouse/synaptics.c69
-rw-r--r--drivers/input/mouse/synaptics.h3
-rw-r--r--drivers/input/rmi4/Kconfig15
-rw-r--r--drivers/input/rmi4/Makefile2
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h3
-rw-r--r--drivers/input/rmi4/rmi_bus.c7
-rw-r--r--drivers/input/rmi4/rmi_driver.c1
-rw-r--r--drivers/input/rmi4/rmi_driver.h2
-rw-r--r--drivers/input/rmi4/rmi_f03.c4
-rw-r--r--drivers/input/rmi4/rmi_f1a.c143
-rw-r--r--drivers/input/rmi4/rmi_f21.c179
-rw-r--r--drivers/input/rmi4/rmi_f34.c172
-rw-r--r--drivers/input/rmi4/rmi_f54.c2
-rw-r--r--drivers/input/serio/Kconfig4
-rw-r--r--drivers/input/serio/altera_ps2.c2
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/arc_ps2.c2
-rw-r--r--drivers/input/serio/ct82c710.c2
-rw-r--r--drivers/input/serio/gscps2.c122
-rw-r--r--drivers/input/serio/hil_mlc.c3
-rw-r--r--drivers/input/serio/hp_sdc.c3
-rw-r--r--drivers/input/serio/hyperv-keyboard.c38
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h127
-rw-r--r--drivers/input/serio/i8042-sparcio.h16
-rw-r--r--drivers/input/serio/i8042.c347
-rw-r--r--drivers/input/serio/ioc3kbd.c2
-rw-r--r--drivers/input/serio/libps2.c29
-rw-r--r--drivers/input/serio/maceps2.c2
-rw-r--r--drivers/input/serio/olpc_apsp.c2
-rw-r--r--drivers/input/serio/ps2-gpio.c8
-rw-r--r--drivers/input/serio/ps2mult.c25
-rw-r--r--drivers/input/serio/q40kbd.c12
-rw-r--r--drivers/input/serio/rpckbd.c2
-rw-r--r--drivers/input/serio/sa1111ps2.c8
-rw-r--r--drivers/input/serio/serio.c166
-rw-r--r--drivers/input/serio/serio_raw.c125
-rw-r--r--drivers/input/serio/serport.c27
-rw-r--r--drivers/input/serio/sun4i-ps2.c10
-rw-r--r--drivers/input/serio/userio.c139
-rw-r--r--drivers/input/serio/xilinx_ps2.c17
-rw-r--r--drivers/input/sparse-keymap.c4
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c86
-rw-r--r--drivers/input/touch-overlay.c278
-rw-r--r--drivers/input/touchscreen.c1
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c20
-rw-r--r--drivers/input/touchscreen/Kconfig36
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c4
-rw-r--r--drivers/input/touchscreen/ad7879.c14
-rw-r--r--drivers/input/touchscreen/ads7846.c16
-rw-r--r--drivers/input/touchscreen/apple_z2.c477
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c15
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c2
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c2
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c5
-rw-r--r--drivers/input/touchscreen/cyttsp5.c7
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c1
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c47
-rw-r--r--drivers/input/touchscreen/egalax_ts.c3
-rw-r--r--drivers/input/touchscreen/elo.c8
-rw-r--r--drivers/input/touchscreen/exc3000.c4
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/goodix.c50
-rw-r--r--drivers/input/touchscreen/goodix_berlin.h16
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c56
-rw-r--r--drivers/input/touchscreen/goodix_berlin_i2c.c14
-rw-r--r--drivers/input/touchscreen/goodix_berlin_spi.c48
-rw-r--r--drivers/input/touchscreen/himax_hx852x.c503
-rw-r--r--drivers/input/touchscreen/hynitron-cst816x.c253
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/imagis.c11
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c121
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c6
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c67
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c4
-rw-r--r--drivers/input/touchscreen/st1232.c35
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c13
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c4
-rw-r--r--drivers/input/touchscreen/sur40.c2
-rw-r--r--drivers/input/touchscreen/sx8654.c4
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c4
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c5
-rw-r--r--drivers/input/touchscreen/tsc2007.h2
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c44
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c7
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c2
-rw-r--r--drivers/input/touchscreen/wm9705.c1
-rw-r--r--drivers/input/touchscreen/wm9712.c1
-rw-r--r--drivers/input/touchscreen/wm9713.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c7
-rw-r--r--drivers/interconnect/core.c112
-rw-r--r--drivers/interconnect/icc-clk.c12
-rw-r--r--drivers/interconnect/imx/imx8mm.c2
-rw-r--r--drivers/interconnect/imx/imx8mn.c2
-rw-r--r--drivers/interconnect/imx/imx8mp.c2
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/mediatek/mt8183.c2
-rw-r--r--drivers/interconnect/mediatek/mt8195.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig56
-rw-r--r--drivers/interconnect/qcom/Makefile12
-rw-r--r--drivers/interconnect/qcom/glymur.c2543
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c29
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h9
-rw-r--r--drivers/interconnect/qcom/milos.c1931
-rw-r--r--drivers/interconnect/qcom/msm8909.c2
-rw-r--r--drivers/interconnect/qcom/msm8916.c2
-rw-r--r--drivers/interconnect/qcom/msm8937.c10
-rw-r--r--drivers/interconnect/qcom/msm8939.c2
-rw-r--r--drivers/interconnect/qcom/msm8953.c2
-rw-r--r--drivers/interconnect/qcom/msm8974.c2
-rw-r--r--drivers/interconnect/qcom/msm8976.c2
-rw-r--r--drivers/interconnect/qcom/msm8996.c2
-rw-r--r--drivers/interconnect/qcom/osm-l3.c47
-rw-r--r--drivers/interconnect/qcom/qcm2290.c2
-rw-r--r--drivers/interconnect/qcom/qcs404.c2
-rw-r--r--drivers/interconnect/qcom/qcs615.c1521
-rw-r--r--drivers/interconnect/qcom/qcs615.h128
-rw-r--r--drivers/interconnect/qcom/qcs8300.c2088
-rw-r--r--drivers/interconnect/qcom/qcs8300.h177
-rw-r--r--drivers/interconnect/qcom/qdu1000.c2
-rw-r--r--drivers/interconnect/qcom/sa8775p.c954
-rw-r--r--drivers/interconnect/qcom/sar2130p.c1930
-rw-r--r--drivers/interconnect/qcom/sc7180.c2
-rw-r--r--drivers/interconnect/qcom/sc7280.c7
-rw-r--r--drivers/interconnect/qcom/sc8180x.c8
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c3
-rw-r--r--drivers/interconnect/qcom/sdm660.c2
-rw-r--r--drivers/interconnect/qcom/sdm670.c2
-rw-r--r--drivers/interconnect/qcom/sdm845.c2
-rw-r--r--drivers/interconnect/qcom/sdx55.c2
-rw-r--r--drivers/interconnect/qcom/sdx65.c2
-rw-r--r--drivers/interconnect/qcom/sdx75.c2
-rw-r--r--drivers/interconnect/qcom/sm6115.c2
-rw-r--r--drivers/interconnect/qcom/sm6350.c2
-rw-r--r--drivers/interconnect/qcom/sm7150.c2
-rw-r--r--drivers/interconnect/qcom/sm8150.c2
-rw-r--r--drivers/interconnect/qcom/sm8250.c2
-rw-r--r--drivers/interconnect/qcom/sm8350.c2
-rw-r--r--drivers/interconnect/qcom/sm8450.c2
-rw-r--r--drivers/interconnect/qcom/sm8550.c2
-rw-r--r--drivers/interconnect/qcom/sm8650.c346
-rw-r--r--drivers/interconnect/qcom/sm8650.h1
-rw-r--r--drivers/interconnect/qcom/sm8750.c1705
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c2
-rw-r--r--drivers/interconnect/qcom/x1e80100.c2
-rw-r--r--drivers/interconnect/samsung/exynos.c7
-rw-r--r--drivers/iommu/Kconfig144
-rw-r--r--drivers/iommu/Makefile6
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h30
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h124
-rw-r--r--drivers/iommu/amd/debugfs.c378
-rw-r--r--drivers/iommu/amd/init.c829
-rw-r--r--drivers/iommu/amd/io_pgtable.c85
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c17
-rw-r--r--drivers/iommu/amd/iommu.c1308
-rw-r--r--drivers/iommu/amd/pasid.c11
-rw-r--r--drivers/iommu/amd/ppr.c2
-rw-r--r--drivers/iommu/apple-dart.c83
-rw-r--r--drivers/iommu/arm/Kconfig144
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile1
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c473
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c115
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c696
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h223
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c540
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c11
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c157
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c79
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c10
-rw-r--r--drivers/iommu/dma-iommu.c803
-rw-r--r--drivers/iommu/dma-iommu.h14
-rw-r--r--drivers/iommu/exynos-iommu.c27
-rw-r--r--drivers/iommu/fsl_pamu_domain.c2
-rw-r--r--drivers/iommu/hyperv-iommu.c41
-rw-r--r--drivers/iommu/intel/Kconfig2
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/cache.c105
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/debugfs.c29
-rw-r--r--drivers/iommu/intel/dmar.c33
-rw-r--r--drivers/iommu/intel/iommu.c1400
-rw-r--r--drivers/iommu/intel/iommu.h169
-rw-r--r--drivers/iommu/intel/irq_remapping.c134
-rw-r--r--drivers/iommu/intel/nested.c86
-rw-r--r--drivers/iommu/intel/pasid.c495
-rw-r--r--drivers/iommu/intel/pasid.h34
-rw-r--r--drivers/iommu/intel/perf.c10
-rw-r--r--drivers/iommu/intel/perf.h5
-rw-r--r--drivers/iommu/intel/prq.c396
-rw-r--r--drivers/iommu/intel/svm.c474
-rw-r--r--drivers/iommu/intel/trace.h5
-rw-r--r--drivers/iommu/io-pgfault.c1
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c149
-rw-r--r--drivers/iommu/io-pgtable-arm.c429
-rw-r--r--drivers/iommu/io-pgtable-dart.c156
-rw-r--r--drivers/iommu/iommu-pages.c119
-rw-r--r--drivers/iommu/iommu-pages.h195
-rw-r--r--drivers/iommu/iommu-priv.h23
-rw-r--r--drivers/iommu/iommu-sva.c19
-rw-r--r--drivers/iommu/iommu-sysfs.c2
-rw-r--r--drivers/iommu/iommu.c889
-rw-r--r--drivers/iommu/iommufd/Kconfig4
-rw-r--r--drivers/iommu/iommufd/Makefile8
-rw-r--r--drivers/iommu/iommufd/device.c674
-rw-r--r--drivers/iommu/iommufd/driver.c304
-rw-r--r--drivers/iommu/iommufd/eventq.c541
-rw-r--r--drivers/iommu/iommufd/fault.c441
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c171
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c162
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h31
-rw-r--r--drivers/iommu/iommufd/ioas.c259
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h339
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h92
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c11
-rw-r--r--drivers/iommu/iommufd/main.c306
-rw-r--r--drivers/iommu/iommufd/pages.c334
-rw-r--r--drivers/iommu/iommufd/selftest.c904
-rw-r--r--drivers/iommu/iommufd/vfio_compat.c13
-rw-r--r--drivers/iommu/iommufd/viommu.c430
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/iommu/ipmmu-vmsa.c34
-rw-r--r--drivers/iommu/msm_iommu.c58
-rw-r--r--drivers/iommu/mtk_iommu.c85
-rw-r--r--drivers/iommu/mtk_iommu_v1.c41
-rw-r--r--drivers/iommu/of_iommu.c15
-rw-r--r--drivers/iommu/omap-iommu.c57
-rw-r--r--drivers/iommu/riscv/Kconfig20
-rw-r--r--drivers/iommu/riscv/Makefile3
-rw-r--r--drivers/iommu/riscv/iommu-bits.h784
-rw-r--r--drivers/iommu/riscv/iommu-pci.c128
-rw-r--r--drivers/iommu/riscv/iommu-platform.c179
-rw-r--r--drivers/iommu/riscv/iommu.c1679
-rw-r--r--drivers/iommu/riscv/iommu.h89
-rw-r--r--drivers/iommu/rockchip-iommu.c84
-rw-r--r--drivers/iommu/s390-iommu.c565
-rw-r--r--drivers/iommu/sprd-iommu.c5
-rw-r--r--drivers/iommu/sun50i-iommu.c9
-rw-r--r--drivers/iommu/tegra-smmu.c115
-rw-r--r--drivers/iommu/virtio-iommu.c204
-rw-r--r--drivers/irqchip/Kconfig101
-rw-r--r--drivers/irqchip/Makefile12
-rw-r--r--drivers/irqchip/exynos-combiner.c2
-rw-r--r--drivers/irqchip/irq-aclint-sswi.c208
-rw-r--r--drivers/irqchip/irq-al-fic.c20
-rw-r--r--drivers/irqchip/irq-alpine-msi.c156
-rw-r--r--drivers/irqchip/irq-apple-aic.c15
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c62
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-intc.c139
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c258
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c19
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c37
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c293
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c24
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c38
-rw-r--r--drivers/irqchip/irq-clps711x.c6
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c5
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c59
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c5
-rw-r--r--drivers/irqchip/irq-econet-en751221.c310
-rw-r--r--drivers/irqchip/irq-ftintc010.c5
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.c376
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.h12
-rw-r--r--drivers/irqchip/irq-gic-v2m.c39
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c210
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c318
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c31
-rw-r--r--drivers/irqchip/irq-gic-v3.c72
-rw-r--r--drivers/irqchip/irq-gic-v4.c4
-rw-r--r--drivers/irqchip/irq-gic-v5-irs.c827
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c1233
-rw-r--r--drivers/irqchip/irq-gic-v5-iwb.c277
-rw-r--r--drivers/irqchip/irq-gic-v5.c1130
-rw-r--r--drivers/irqchip/irq-gic.c6
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c7
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c6
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c6
-rw-r--r--drivers/irqchip/irq-imx-intmux.c4
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c20
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c17
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c13
-rw-r--r--drivers/irqchip/irq-ingenic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c2
-rw-r--r--drivers/irqchip/irq-jcore-aic.c7
-rw-r--r--drivers/irqchip/irq-keystone.c17
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c20
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c18
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c215
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c2
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c11
-rw-r--r--drivers/irqchip/irq-loongson-pch-lpc.c9
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c30
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c4
-rw-r--r--drivers/irqchip/irq-ls-extirq.c4
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c57
-rw-r--r--drivers/irqchip/irq-ls1x.c4
-rw-r--r--drivers/irqchip/irq-madera.c2
-rw-r--r--drivers/irqchip/irq-mchp-eic.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c50
-rw-r--r--drivers/irqchip/irq-mips-cpu.c13
-rw-r--r--drivers/irqchip/irq-mips-gic.c290
-rw-r--r--drivers/irqchip/irq-mmp.c12
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c7
-rw-r--r--drivers/irqchip/irq-msi-lib.c37
-rw-r--r--drivers/irqchip/irq-msi-lib.h27
-rw-r--r--drivers/irqchip/irq-mst-intc.c4
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c5
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c35
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c5
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c28
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c8
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c27
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-nvic.c5
-rw-r--r--drivers/irqchip/irq-omap-intc.c4
-rw-r--r--drivers/irqchip/irq-or1k-pic.c4
-rw-r--r--drivers/irqchip/irq-orion.c6
-rw-r--r--drivers/irqchip/irq-owl-sirq.c4
-rw-r--r--drivers/irqchip/irq-partition-percpu.c2
-rw-r--r--drivers/irqchip/irq-pic32-evic.c8
-rw-r--r--drivers/irqchip/irq-pruss-intc.c9
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c5
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c12
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c12
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c10
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c58
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c636
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c10
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.c3
-rw-r--r--drivers/irqchip/irq-riscv-aplic-msi.c3
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c38
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c227
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c160
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h13
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-riscv-rpmi-sysmsi.c328
-rw-r--r--drivers/irqchip/irq-sa11x0.c2
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c340
-rw-r--r--drivers/irqchip/irq-sifive-plic.c23
-rw-r--r--drivers/irqchip/irq-sni-exiu.c6
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c4
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c25
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c12
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c4
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c97
-rw-r--r--drivers/irqchip/irq-tb10x.c21
-rw-r--r--drivers/irqchip/irq-tegra.c5
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c7
-rw-r--r--drivers/irqchip/irq-ts4800.c6
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c6
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c6
-rw-r--r--drivers/irqchip/irq-vic.c5
-rw-r--r--drivers/irqchip/irq-vt8500.c153
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c6
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c7
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c8
-rw-r--r--drivers/irqchip/irq-zevio.c4
-rw-r--r--drivers/irqchip/irqchip.c4
-rw-r--r--drivers/irqchip/qcom-pdc.c67
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/capi/capi.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c22
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c28
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c12
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c8
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c10
-rw-r--r--drivers/isdn/mISDN/core.c14
-rw-r--r--drivers/isdn/mISDN/core.h1
-rw-r--r--drivers/isdn/mISDN/dsp_core.c6
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c6
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c6
-rw-r--r--drivers/isdn/mISDN/fsm.c6
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c6
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/.kunitconfig4
-rw-r--r--drivers/leds/Kconfig73
-rw-r--r--drivers/leds/Makefile10
-rw-r--r--drivers/leds/blink/leds-bcm63138.c29
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c6
-rw-r--r--drivers/leds/flash/Kconfig12
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-aat1290.c2
-rw-r--r--drivers/leds/flash/leds-ktd2692.c5
-rw-r--r--drivers/leds/flash/leds-max77693.c2
-rw-r--r--drivers/leds/flash/leds-mt6360.c5
-rw-r--r--drivers/leds/flash/leds-mt6370-flash.c11
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c108
-rw-r--r--drivers/leds/flash/leds-rt8515.c8
-rw-r--r--drivers/leds/flash/leds-sgm3140.c10
-rw-r--r--drivers/leds/flash/leds-tps6131x.c815
-rw-r--r--drivers/leds/led-class-flash.c16
-rw-r--r--drivers/leds/led-class-multicolor.c5
-rw-r--r--drivers/leds/led-class.c48
-rw-r--r--drivers/leds/led-core.c78
-rw-r--r--drivers/leds/led-test.c132
-rw-r--r--drivers/leds/led-triggers.c17
-rw-r--r--drivers/leds/leds-88pm860x.c2
-rw-r--r--drivers/leds/leds-adp5520.c2
-rw-r--r--drivers/leds/leds-aw200xx.c9
-rw-r--r--drivers/leds/leds-bcm6328.c4
-rw-r--r--drivers/leds/leds-cht-wcove.c8
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cr0014114.c4
-rw-r--r--drivers/leds/leds-cros_ec.c21
-rw-r--r--drivers/leds/leds-da903x.c2
-rw-r--r--drivers/leds/leds-da9052.c2
-rw-r--r--drivers/leds/leds-el15203000.c14
-rw-r--r--drivers/leds/leds-expresswire.c12
-rw-r--r--drivers/leds/leds-gpio-register.c2
-rw-r--r--drivers/leds/leds-gpio.c20
-rw-r--r--drivers/leds/leds-is31fl319x.c8
-rw-r--r--drivers/leds/leds-is31fl32xx.c47
-rw-r--r--drivers/leds/leds-lm3532.c18
-rw-r--r--drivers/leds/leds-lm3533.c2
-rw-r--r--drivers/leds/leds-lm3697.c18
-rw-r--r--drivers/leds/leds-lp50xx.c34
-rw-r--r--drivers/leds/leds-lp5562.c25
-rw-r--r--drivers/leds/leds-lp55xx-common.c5
-rw-r--r--drivers/leds/leds-lp8860.c220
-rw-r--r--drivers/leds/leds-lp8864.c296
-rw-r--r--drivers/leds/leds-max5970.c5
-rw-r--r--drivers/leds/leds-max77650.c18
-rw-r--r--drivers/leds/leds-max77705.c275
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-mlxcpld.c1
-rw-r--r--drivers/leds/leds-mt6323.c2
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-nic78bx.c16
-rw-r--r--drivers/leds/leds-ns2.c7
-rw-r--r--drivers/leds/leds-pca9532.c9
-rw-r--r--drivers/leds/leds-pca955x.c379
-rw-r--r--drivers/leds/leds-pca963x.c11
-rw-r--r--drivers/leds/leds-pca995x.c2
-rw-r--r--drivers/leds/leds-powernv.c4
-rw-r--r--drivers/leds/leds-pwm.c33
-rw-r--r--drivers/leds/leds-qnap-mcu.c392
-rw-r--r--drivers/leds/leds-rb532.c2
-rw-r--r--drivers/leds/leds-regulator.c2
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c2
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-st1202.c416
-rw-r--r--drivers/leds/leds-sun50i-a100.c29
-rw-r--r--drivers/leds/leds-sunfire.c4
-rw-r--r--drivers/leds/leds-tca6507.c16
-rw-r--r--drivers/leds/leds-turris-omnia.c341
-rw-r--r--drivers/leds/leds-upboard.c126
-rw-r--r--drivers/leds/leds-wm831x-status.c2
-rw-r--r--drivers/leds/leds-wm8350.c2
-rw-r--r--drivers/leds/leds.h4
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c2
-rw-r--r--drivers/leds/rgb/leds-ktd202x.c8
-rw-r--r--drivers/leds/rgb/leds-mt6370-rgb.c55
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c5
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c20
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c23
-rw-r--r--drivers/leds/simatic/Kconfig (renamed from drivers/leds/simple/Kconfig)0
-rw-r--r--drivers/leds/simatic/Makefile (renamed from drivers/leds/simple/Makefile)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c)2
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-core.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-core.c)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c)2
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c)2
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio.h (renamed from drivers/leds/simple/simatic-ipc-leds-gpio.h)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds.c (renamed from drivers/leds/simple/simatic-ipc-leds.c)0
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c6
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c48
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c2
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c8
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c4
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/mac_hid.c2
-rw-r--r--drivers/macintosh/smu.c6
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/macintosh/via-pmu-led.c19
-rw-r--r--drivers/macintosh/windfarm_pm112.c2
-rw-r--r--drivers/macintosh/windfarm_pm121.c2
-rw-r--r--drivers/macintosh/windfarm_pm72.c2
-rw-r--r--drivers/macintosh/windfarm_pm81.c2
-rw-r--r--drivers/macintosh/windfarm_pm91.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c2
-rw-r--r--drivers/mailbox/Kconfig97
-rw-r--r--drivers/mailbox/Makefile18
-rw-r--r--drivers/mailbox/arm_mhu.c2
-rw-r--r--drivers/mailbox/arm_mhu_db.c2
-rw-r--r--drivers/mailbox/arm_mhuv2.c10
-rw-r--r--drivers/mailbox/arm_mhuv3.c2
-rw-r--r--drivers/mailbox/ast2700-mailbox.c235
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c2
-rw-r--r--drivers/mailbox/bcm74110-mailbox.c656
-rw-r--r--drivers/mailbox/cix-mailbox.c645
-rw-r--r--drivers/mailbox/cv1800-mailbox.c220
-rw-r--r--drivers/mailbox/exynos-mailbox.c157
-rw-r--r--drivers/mailbox/imx-mailbox.c27
-rw-r--r--drivers/mailbox/mailbox-altera.c4
-rw-r--r--drivers/mailbox/mailbox-mchp-ipc-sbi.c504
-rw-r--r--drivers/mailbox/mailbox-mpfs.c81
-rw-r--r--drivers/mailbox/mailbox-test.c4
-rw-r--r--drivers/mailbox/mailbox-th1520.c599
-rw-r--r--drivers/mailbox/mailbox.c267
-rw-r--r--drivers/mailbox/mailbox.h2
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c67
-rw-r--r--drivers/mailbox/mtk-gpueb-mailbox.c319
-rw-r--r--drivers/mailbox/omap-mailbox.c1
-rw-r--r--drivers/mailbox/pcc.c194
-rw-r--r--drivers/mailbox/pl320-ipc.c14
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c20
-rw-r--r--drivers/mailbox/qcom-cpucp-mbox.c2
-rw-r--r--drivers/mailbox/qcom-ipcc.c21
-rw-r--r--drivers/mailbox/riscv-sbi-mpxy-mbox.c1019
-rw-r--r--drivers/mailbox/stm32-ipcc.c2
-rw-r--r--drivers/mailbox/sun6i-msgbox.c4
-rw-r--r--drivers/mailbox/tegra-hsp.c80
-rw-r--r--drivers/mailbox/ti-msgmgr.c2
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c32
-rw-r--r--drivers/mcb/mcb-core.c36
-rw-r--r--drivers/mcb/mcb-lpc.c4
-rw-r--r--drivers/mcb/mcb-parse.c4
-rw-r--r--drivers/mcb/mcb-pci.c2
-rw-r--r--drivers/md/Kconfig47
-rw-r--r--drivers/md/Makefile7
-rw-r--r--drivers/md/bcache/alloc.c64
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c124
-rw-r--r--drivers/md/bcache/bset.h40
-rw-r--r--drivers/md/bcache/btree.c74
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/extents.c53
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/movinggc.c51
-rw-r--r--drivers/md/bcache/stats.c4
-rw-r--r--drivers/md/bcache/super.c90
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/bcache/util.h67
-rw-r--r--drivers/md/bcache/writeback.c23
-rw-r--r--drivers/md/dm-bio-prison-v1.c35
-rw-r--r--drivers/md/dm-bio-prison-v1.h24
-rw-r--r--drivers/md/dm-bufio.c257
-rw-r--r--drivers/md/dm-cache-background-tracker.c31
-rw-r--r--drivers/md/dm-cache-background-tracker.h9
-rw-r--r--drivers/md/dm-cache-metadata.c33
-rw-r--r--drivers/md/dm-cache-metadata.h3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c184
-rw-r--r--drivers/md/dm-clone-target.c4
-rw-r--r--drivers/md/dm-core.h6
-rw-r--r--drivers/md/dm-crypt.c147
-rw-r--r--drivers/md/dm-delay.c37
-rw-r--r--drivers/md/dm-dust.c4
-rw-r--r--drivers/md/dm-ebs-target.c12
-rw-r--r--drivers/md/dm-flakey.c123
-rw-r--r--drivers/md/dm-ima.c110
-rw-r--r--drivers/md/dm-integrity.c477
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-ioctl.c5
-rw-r--r--drivers/md/dm-linear.c11
-rw-r--r--drivers/md/dm-log-writes.c8
-rw-r--r--drivers/md/dm-mpath.c247
-rw-r--r--drivers/md/dm-path-selector.c8
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-pcache/Kconfig17
-rw-r--r--drivers/md/dm-pcache/Makefile3
-rw-r--r--drivers/md/dm-pcache/backing_dev.c374
-rw-r--r--drivers/md/dm-pcache/backing_dev.h127
-rw-r--r--drivers/md/dm-pcache/cache.c445
-rw-r--r--drivers/md/dm-pcache/cache.h635
-rw-r--r--drivers/md/dm-pcache/cache_dev.c303
-rw-r--r--drivers/md/dm-pcache/cache_dev.h70
-rw-r--r--drivers/md/dm-pcache/cache_gc.c170
-rw-r--r--drivers/md/dm-pcache/cache_key.c888
-rw-r--r--drivers/md/dm-pcache/cache_req.c836
-rw-r--r--drivers/md/dm-pcache/cache_segment.c305
-rw-r--r--drivers/md/dm-pcache/cache_writeback.c261
-rw-r--r--drivers/md/dm-pcache/dm_pcache.c497
-rw-r--r--drivers/md/dm-pcache/dm_pcache.h67
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h117
-rw-r--r--drivers/md/dm-pcache/segment.c61
-rw-r--r--drivers/md/dm-pcache/segment.h74
-rw-r--r--drivers/md/dm-ps-historical-service-time.c9
-rw-r--r--drivers/md/dm-ps-io-affinity.c7
-rw-r--r--drivers/md/dm-ps-queue-length.c9
-rw-r--r--drivers/md/dm-ps-round-robin.c9
-rw-r--r--drivers/md/dm-ps-service-time.c9
-rw-r--r--drivers/md/dm-raid.c95
-rw-r--r--drivers/md/dm-raid1.c14
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-rq.c6
-rw-r--r--drivers/md/dm-stripe.c23
-rw-r--r--drivers/md/dm-switch.c8
-rw-r--r--drivers/md/dm-table.c333
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-thin.c19
-rw-r--r--drivers/md/dm-unstripe.c4
-rw-r--r--drivers/md/dm-vdo/Kconfig1
-rw-r--r--drivers/md/dm-vdo/block-map.c15
-rw-r--r--drivers/md/dm-vdo/constants.h3
-rw-r--r--drivers/md/dm-vdo/data-vio.c53
-rw-r--r--drivers/md/dm-vdo/data-vio.h5
-rw-r--r--drivers/md/dm-vdo/dedupe.c34
-rw-r--r--drivers/md/dm-vdo/encodings.c22
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c3
-rw-r--r--drivers/md/dm-vdo/indexer/index-layout.c31
-rw-r--r--drivers/md/dm-vdo/indexer/index-session.c6
-rw-r--r--drivers/md/dm-vdo/indexer/indexer.h57
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c4
-rw-r--r--drivers/md/dm-vdo/indexer/volume.c24
-rw-r--r--drivers/md/dm-vdo/int-map.c28
-rw-r--r--drivers/md/dm-vdo/io-submitter.c8
-rw-r--r--drivers/md/dm-vdo/io-submitter.h18
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c7
-rw-r--r--drivers/md/dm-vdo/packer.c3
-rw-r--r--drivers/md/dm-vdo/packer.h2
-rw-r--r--drivers/md/dm-vdo/physical-zone.c2
-rw-r--r--drivers/md/dm-vdo/priority-table.c2
-rw-r--r--drivers/md/dm-vdo/recovery-journal.c2
-rw-r--r--drivers/md/dm-vdo/recovery-journal.h6
-rw-r--r--drivers/md/dm-vdo/repair.c2
-rw-r--r--drivers/md/dm-vdo/slab-depot.c212
-rw-r--r--drivers/md/dm-vdo/slab-depot.h13
-rw-r--r--drivers/md/dm-vdo/types.h3
-rw-r--r--drivers/md/dm-vdo/vdo.c15
-rw-r--r--drivers/md/dm-vdo/vio.c57
-rw-r--r--drivers/md/dm-vdo/vio.h13
-rw-r--r--drivers/md/dm-vdo/wait-queue.c2
-rw-r--r--drivers/md/dm-verity-fec.c69
-rw-r--r--drivers/md/dm-verity-target.c277
-rw-r--r--drivers/md/dm-verity-verify-sig.c17
-rw-r--r--drivers/md/dm-verity.h24
-rw-r--r--drivers/md/dm-writecache.c21
-rw-r--r--drivers/md/dm-zone.c102
-rw-r--r--drivers/md/dm-zoned-metadata.c50
-rw-r--r--drivers/md/dm-zoned-reclaim.c6
-rw-r--r--drivers/md/dm-zoned-target.c5
-rw-r--r--drivers/md/dm-zoned.h2
-rw-r--r--drivers/md/dm.c293
-rw-r--r--drivers/md/dm.h8
-rw-r--r--drivers/md/md-autodetect.c8
-rw-r--r--drivers/md/md-bitmap.c250
-rw-r--r--drivers/md/md-bitmap.h119
-rw-r--r--drivers/md/md-cluster.c40
-rw-r--r--drivers/md/md-cluster.h6
-rw-r--r--drivers/md/md-linear.c348
-rw-r--r--drivers/md/md-llbitmap.c1626
-rw-r--r--drivers/md/md.c1180
-rw-r--r--drivers/md/md.h155
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/dm-array.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c54
-rw-r--r--drivers/md/raid0.c53
-rw-r--r--drivers/md/raid1-10.c20
-rw-r--r--drivers/md/raid1.c448
-rw-r--r--drivers/md/raid1.h27
-rw-r--r--drivers/md/raid10.c326
-rw-r--r--drivers/md/raid10.h3
-rw-r--r--drivers/md/raid5-cache.c51
-rw-r--r--drivers/md/raid5-ppl.c24
-rw-r--r--drivers/md/raid5.c332
-rw-r--r--drivers/md/raid5.h6
-rw-r--r--drivers/media/cec/core/cec-adap.c5
-rw-r--r--drivers/media/cec/core/cec-api.c2
-rw-r--r--drivers/media/cec/core/cec-core.c7
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c62
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h8
-rw-r--r--drivers/media/cec/core/cec-pin.c48
-rw-r--r--drivers/media/cec/i2c/Kconfig10
-rw-r--r--drivers/media/cec/i2c/Makefile1
-rw-r--r--drivers/media/cec/i2c/tda9950.c (renamed from drivers/gpu/drm/i2c/tda9950.c)4
-rw-r--r--drivers/media/cec/platform/Kconfig2
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c69
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c7
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c2
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c2
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c3
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c2
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile6
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c16
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c6
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c7
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h4
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c13
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c2
-rw-r--r--drivers/media/common/b2c2/flexcop.c22
-rw-r--r--drivers/media/common/cx2341x.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c4
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c8
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c4
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c9
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/uvc.c8
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c11
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c79
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c25
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c2
-rw-r--r--drivers/media/dvb-core/dmxdev.c11
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c4
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c8
-rw-r--r--drivers/media/dvb-core/dvbdev.c16
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c7
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c8
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c12
-rw-r--r--drivers/media/dvb-frontends/dib8000.c10
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c10
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv6111.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c8
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c8
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/i2c/Kconfig149
-rw-r--r--drivers/media/i2c/Makefile10
-rw-r--r--drivers/media/i2c/adv7180.c385
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c17
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c10
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c101
-rw-r--r--drivers/media/i2c/adv7604.c127
-rw-r--r--drivers/media/i2c/adv7842.c135
-rw-r--r--drivers/media/i2c/alvium-csi2.c6
-rw-r--r--drivers/media/i2c/ar0521.c13
-rw-r--r--drivers/media/i2c/ccs-pll.c69
-rw-r--r--drivers/media/i2c/ccs-pll.h29
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c86
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c15
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c9
-rw-r--r--drivers/media/i2c/ccs/ccs.h2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/i2c/ds90ub913.c145
-rw-r--r--drivers/media/i2c/ds90ub953.c316
-rw-r--r--drivers/media/i2c/ds90ub953.h104
-rw-r--r--drivers/media/i2c/ds90ub960.c2353
-rw-r--r--drivers/media/i2c/dw9714.c62
-rw-r--r--drivers/media/i2c/dw9719.c113
-rw-r--r--drivers/media/i2c/dw9768.c14
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c34
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c9
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h1
-rw-r--r--drivers/media/i2c/gc0308.c7
-rw-r--r--drivers/media/i2c/gc0310.c783
-rw-r--r--drivers/media/i2c/gc05a2.c18
-rw-r--r--drivers/media/i2c/gc08a3.c18
-rw-r--r--drivers/media/i2c/gc2145.c46
-rw-r--r--drivers/media/i2c/hi556.c181
-rw-r--r--drivers/media/i2c/hi846.c11
-rw-r--r--drivers/media/i2c/hi847.c84
-rw-r--r--drivers/media/i2c/imx208.c93
-rw-r--r--drivers/media/i2c/imx214.c1485
-rw-r--r--drivers/media/i2c/imx219.c372
-rw-r--r--drivers/media/i2c/imx258.c105
-rw-r--r--drivers/media/i2c/imx274.c2
-rw-r--r--drivers/media/i2c/imx283.c57
-rw-r--r--drivers/media/i2c/imx290.c148
-rw-r--r--drivers/media/i2c/imx296.c7
-rw-r--r--drivers/media/i2c/imx319.c93
-rw-r--r--drivers/media/i2c/imx334.c1050
-rw-r--r--drivers/media/i2c/imx335.c35
-rw-r--r--drivers/media/i2c/imx355.c90
-rw-r--r--drivers/media/i2c/imx412.c49
-rw-r--r--drivers/media/i2c/imx415.c189
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c6
-rw-r--r--drivers/media/i2c/lt6911uxe.c707
-rw-r--r--drivers/media/i2c/max9286.c6
-rw-r--r--drivers/media/i2c/max96714.c9
-rw-r--r--drivers/media/i2c/max96717.c17
-rw-r--r--drivers/media/i2c/mt9m001.c5
-rw-r--r--drivers/media/i2c/mt9m111.c5
-rw-r--r--drivers/media/i2c/mt9m114.c252
-rw-r--r--drivers/media/i2c/mt9p031.c103
-rw-r--r--drivers/media/i2c/mt9t112.c11
-rw-r--r--drivers/media/i2c/mt9v032.c105
-rw-r--r--drivers/media/i2c/mt9v111.c9
-rw-r--r--drivers/media/i2c/og01a1b.c115
-rw-r--r--drivers/media/i2c/og0ve1b.c816
-rw-r--r--drivers/media/i2c/ov01a10.c6
-rw-r--r--drivers/media/i2c/ov02a10.c45
-rw-r--r--drivers/media/i2c/ov02c10.c999
-rw-r--r--drivers/media/i2c/ov02e10.c956
-rw-r--r--drivers/media/i2c/ov08d10.c82
-rw-r--r--drivers/media/i2c/ov08x40.c1650
-rw-r--r--drivers/media/i2c/ov13858.c69
-rw-r--r--drivers/media/i2c/ov13b10.c282
-rw-r--r--drivers/media/i2c/ov2659.c8
-rw-r--r--drivers/media/i2c/ov2680.c29
-rw-r--r--drivers/media/i2c/ov2685.c16
-rw-r--r--drivers/media/i2c/ov2735.c1109
-rw-r--r--drivers/media/i2c/ov2740.c171
-rw-r--r--drivers/media/i2c/ov4689.c15
-rw-r--r--drivers/media/i2c/ov5640.c16
-rw-r--r--drivers/media/i2c/ov5645.c282
-rw-r--r--drivers/media/i2c/ov5647.c9
-rw-r--r--drivers/media/i2c/ov5648.c10
-rw-r--r--drivers/media/i2c/ov5670.c114
-rw-r--r--drivers/media/i2c/ov5675.c98
-rw-r--r--drivers/media/i2c/ov5693.c23
-rw-r--r--drivers/media/i2c/ov5695.c16
-rw-r--r--drivers/media/i2c/ov6211.c793
-rw-r--r--drivers/media/i2c/ov64a40.c19
-rw-r--r--drivers/media/i2c/ov6650.c1149
-rw-r--r--drivers/media/i2c/ov7251.c37
-rw-r--r--drivers/media/i2c/ov772x.c2
-rw-r--r--drivers/media/i2c/ov7740.c13
-rw-r--r--drivers/media/i2c/ov8856.c102
-rw-r--r--drivers/media/i2c/ov8858.c13
-rw-r--r--drivers/media/i2c/ov8865.c53
-rw-r--r--drivers/media/i2c/ov9282.c34
-rw-r--r--drivers/media/i2c/ov9640.c5
-rw-r--r--drivers/media/i2c/ov9650.c7
-rw-r--r--drivers/media/i2c/ov9734.c84
-rw-r--r--drivers/media/i2c/rdacm20.c7
-rw-r--r--drivers/media/i2c/rdacm21.c7
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c9
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c19
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k5baf.c21
-rw-r--r--drivers/media/i2c/s5k6a3.c20
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7115.c14
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c120
-rw-r--r--drivers/media/i2c/tc358743.c287
-rw-r--r--drivers/media/i2c/tc358743_regs.h57
-rw-r--r--drivers/media/i2c/tc358746.c240
-rw-r--r--drivers/media/i2c/tda1997x.c11
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/thp7312.c9
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/tvaudio.c6
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/vd55g1.c1961
-rw-r--r--drivers/media/i2c/vd56g3.c1582
-rw-r--r--drivers/media/i2c/vgxy61.c32
-rw-r--r--drivers/media/i2c/video-i2c.c20
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-devnode.c6
-rw-r--r--drivers/media/mc/mc-entity.c20
-rw-r--r--drivers/media/mc/mc-request.c20
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile2
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c17
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c20
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c8
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c62
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c14
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.h2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h16
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c17
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c17
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h3
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/pci/cx18/cx18-io.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c68
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c15
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h2
-rw-r--r--drivers/media/pci/cx18/cx18-version.h2
-rw-r--r--drivers/media/pci/cx18/cx18-video.c2
-rw-r--r--drivers/media/pci/cx18/cx18-video.h2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c33
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c29
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.h1
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c50
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c102
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/Kconfig10
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h7
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c91
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-cpd.c30
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c217
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h30
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-com.c40
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c26
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c111
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c13
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c38
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.c314
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c23
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c4
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c84
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c17
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c42
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c126
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c16
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c21
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/mantis/mantis_core.h43
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.c10
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c29
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h13
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c1
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_in.c12
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c101
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.h5
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c50
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.h1
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c6
-rw-r--r--drivers/media/pci/pt3/pt3.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c20
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c28
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c30
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c27
-rw-r--r--drivers/media/pci/saa7164/saa7164.h12
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c6
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c20
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig16
-rw-r--r--drivers/media/pci/sta2x11/Makefile2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1272
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h29
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c13
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c4
-rw-r--r--drivers/media/pci/tw68/tw68.h2
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c4
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c2
-rw-r--r--drivers/media/pci/zoran/zoran.h6
-rw-r--r--drivers/media/pci/zoran/zoran_card.c6
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c37
-rw-r--r--drivers/media/pci/zoran/zr36016.c2
-rw-r--r--drivers/media/pci/zoran/zr36050.c2
-rw-r--r--drivers/media/pci/zoran/zr36060.c2
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c42
-rw-r--r--drivers/media/platform/amlogic/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/Makefile2
-rw-r--r--drivers/media/platform/amlogic/c3/Kconfig5
-rw-r--r--drivers/media/platform/amlogic/c3/Makefile5
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig18
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Makefile10
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c804
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-common.h340
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-core.c641
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c421
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c1008
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h618
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c892
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c326
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c842
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c827
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c29
-rw-r--r--drivers/media/platform/amphion/vdec.c294
-rw-r--r--drivers/media/platform/amphion/venc.c12
-rw-r--r--drivers/media/platform/amphion/vpu.h10
-rw-r--r--drivers/media/platform/amphion/vpu_color.c73
-rw-r--r--drivers/media/platform/amphion/vpu_core.c9
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c15
-rw-r--r--drivers/media/platform/amphion/vpu_defs.h12
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c4
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c123
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.h12
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c44
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.c4
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.h1
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c37
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h8
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c203
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c12
-rw-r--r--drivers/media/platform/broadcom/bcm2835-unicam.c46
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c212
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c2
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c55
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c47
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h7
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c32
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c373
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c339
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c62
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c43
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.h1
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h27
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5.h3
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c36
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.h5
-rw-r--r--drivers/media/platform/intel/pxa_camera.c4
-rw-r--r--drivers/media/platform/m2m-deinterlace.c30
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c2
-rw-r--r--drivers/media/platform/marvell/mcam-core.c11
-rw-r--r--drivers/media/platform/marvell/mmp-driver.c23
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c58
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c84
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h8
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c33
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h7
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_core.c2
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c33
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c77
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h3
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c540
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c25
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c27
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c4
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c36
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h7
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c652
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c3
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c39
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c8
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/microchip/microchip-csi2dc.c2
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c2
-rw-r--r--drivers/media/platform/microchip/microchip-sama5d2-isc.c2
-rw-r--r--drivers/media/platform/microchip/microchip-sama7g5-isc.c2
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c33
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/iommu.c7
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c49
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/vde.c2
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c21
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c248
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h6
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c422
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c11
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c4
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c155
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h21
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c18
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c282
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c161
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c182
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c28
-rw-r--r--drivers/media/platform/qcom/Kconfig1
-rw-r--r--drivers/media/platform/qcom/Makefile1
-rw-r--r--drivers/media/platform/qcom/camss/Makefile6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-340.c190
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-1.c19
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-7.c42
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-680.c422
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c60
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.c351
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.h25
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c258
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h56
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c1098
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c66
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h17
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-17x.c112
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-340.c320
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c19
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c274
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-680.c244
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen1.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen3.c193
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c314
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h61
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c84
-rw-r--r--drivers/media/platform/qcom/camss/camss.c2288
-rw-r--r--drivers/media/platform/qcom/camss/camss.h23
-rw-r--r--drivers/media/platform/qcom/iris/Kconfig13
-rw-r--r--drivers/media/platform/qcom/iris/Makefile32
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c784
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h123
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.c232
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c98
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c907
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h37
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c115
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.c176
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h156
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1.h16
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c1084
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h551
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c709
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2.h41
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c1199
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h196
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c292
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c984
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c317
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.h182
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h107
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h262
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c1082
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h550
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c366
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8650.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8750.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.c140
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c393
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c131
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c277
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h146
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c126
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h55
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c341
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.h19
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c478
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h25
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c579
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c718
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c41
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c473
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c1555
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h153
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c373
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h37
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_register_defines.h17
-rw-r--r--drivers/media/platform/qcom/venus/Kconfig3
-rw-r--r--drivers/media/platform/qcom/venus/core.c257
-rw-r--r--drivers/media/platform/qcom/venus/core.h40
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c94
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c102
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c59
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c149
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c62
-rw-r--r--drivers/media/platform/qcom/venus/vdec.h1
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c5
-rw-r--r--drivers/media/platform/qcom/venus/venc.c116
-rw-r--r--drivers/media/platform/qcom/venus/venc.h1
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c140
-rw-r--r--drivers/media/platform/raspberrypi/Kconfig1
-rw-r--r--drivers/media/platform/raspberrypi/Makefile1
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Kconfig1
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c211
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/Kconfig15
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/Makefile6
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe-fmts.h332
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe-trace.h202
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c2506
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.h43
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c586
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.h89
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/dphy.c181
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/dphy.h27
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.c605
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.h53
-rw-r--r--drivers/media/platform/renesas/Kconfig18
-rw-r--r--drivers/media/platform/renesas/Makefile2
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c1043
-rw-r--r--drivers/media/platform/renesas/rcar-fcp.c38
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c532
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Makefile4
-rw-r--r--drivers/media/platform/renesas/rcar-isp/csisp.c593
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c713
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c293
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c552
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h61
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c18
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c31
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c33
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c14
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c151
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h111
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h68
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c251
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c180
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c593
-rw-r--r--drivers/media/platform/renesas/sh_vou.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h5
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c32
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c31
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c108
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.c121
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.h29
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c190
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c38
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c51
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c68
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c634
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.h16
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c53
-rw-r--r--drivers/media/platform/rockchip/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/Makefile1
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c11
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c36
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h19
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c128
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c152
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h106
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c2
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Kconfig (renamed from drivers/staging/media/rkvdec/Kconfig)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Makefile (renamed from drivers/staging/media/rkvdec/Makefile)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c (renamed from drivers/staging/media/rkvdec/rkvdec-h264.c)64
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h (renamed from drivers/staging/media/rkvdec/rkvdec-regs.h)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c (renamed from drivers/staging/media/rkvdec/rkvdec-vp9.c)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c (renamed from drivers/staging/media/rkvdec/rkvdec.c)310
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h (renamed from drivers/staging/media/rkvdec/rkvdec.h)23
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c39
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c8
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c131
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c4
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.c9
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c4
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c12
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c21
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c29
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c12
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c40
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-core.c15
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c44
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c54
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c5
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h1
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c19
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.h4
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c48
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h7
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c36
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c40
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c10
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c48
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c43
-rw-r--r--drivers/media/platform/st/sti/delta/delta-debug.c8
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c47
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c42
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/Kconfig14
-rw-r--r--drivers/media/platform/st/stm32/Makefile1
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c35
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c1141
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c26
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/Makefile2
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c134
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c129
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c129
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c540
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c440
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c18
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c2
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c14
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c14
-rw-r--r--drivers/media/platform/synopsys/Kconfig3
-rw-r--r--drivers/media/platform/synopsys/Makefile2
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Kconfig36
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Makefile4
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c2746
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h396
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c275
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h43
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c6
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c269
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c159
-rw-r--r--drivers/media/platform/ti/cal/cal.c51
-rw-r--r--drivers/media/platform/ti/cal/cal.h4
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c6
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c4
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c79
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c10
-rw-r--r--drivers/media/platform/ti/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c56
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.h2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c16
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c13
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c37
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.c32
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.h3
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c25
-rw-r--r--drivers/media/platform/verisilicon/hantro.h15
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c15
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_regs.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c1
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c8
-rw-r--r--drivers/media/platform/verisilicon/hantro_h264.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c42
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c70
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c30
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c16
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c43
-rw-r--r--drivers/media/platform/via/via-camera.c4
-rw-r--r--drivers/media/platform/video-mux.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c12
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c9
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c2
-rw-r--r--drivers/media/radio/Kconfig21
-rw-r--r--drivers/media/radio/Makefile2
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-cadet.c4
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-si476x.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-timb.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c12
-rw-r--r--drivers/media/radio/wl128x/Kconfig15
-rw-r--r--drivers/media/radio/wl128x/Makefile7
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h229
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c1675
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h389
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c820
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h45
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c413
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.h24
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c604
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.h20
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/ati_remote.c6
-rw-r--r--drivers/media/rc/ene_ir.c4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c6
-rw-r--r--drivers/media/rc/gpio-ir-tx.c4
-rw-r--r--drivers/media/rc/igorplugusb.c6
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c2
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c9
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c4
-rw-r--r--drivers/media/rc/imon.c103
-rw-r--r--drivers/media/rc/imon_raw.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c2
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c7
-rw-r--r--drivers/media/rc/ir-spi.c40
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c42
-rw-r--r--drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c71
-rw-r--r--drivers/media/rc/lirc_dev.c22
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/meson-ir.c2
-rw-r--r--drivers/media/rc/mtk-cir.c2
-rw-r--r--drivers/media/rc/pwm-ir-tx.c8
-rw-r--r--drivers/media/rc/rc-core-priv.h4
-rw-r--r--drivers/media/rc/rc-ir-raw.c5
-rw-r--r--drivers/media/rc/rc-main.c10
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/serial_ir.c2
-rw-r--r--drivers/media/rc/st_rc.c2
-rw-r--r--drivers/media/rc/streamzap.c70
-rw-r--r--drivers/media/rc/sunxi-cir.c2
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c50
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c10
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c360
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c10
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c19
-rw-r--r--drivers/media/test-drivers/visl/visl-video.c22
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig13
-rw-r--r--drivers/media/test-drivers/vivid/Makefile5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c118
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c41
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c31
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c31
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c10
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c56
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c21
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/fc0013.c64
-rw-r--r--drivers/media/tuners/fc0013.h11
-rw-r--r--drivers/media/tuners/it913x.c2
-rw-r--r--drivers/media/tuners/mt2063.c2
-rw-r--r--drivers/media/tuners/mxl301rf.c2
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/tda18271-fe.c4
-rw-r--r--drivers/media/tuners/tea5761.c4
-rw-r--r--drivers/media/tuners/tea5767.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c20
-rw-r--r--drivers/media/tuners/tuner-types.c296
-rw-r--r--drivers/media/tuners/xc4000.c8
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/airspy/airspy.c4
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c6
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c23
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c21
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c60
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h18
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h5
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c20
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c19
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c4
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c6
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c9
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c16
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c20
-rw-r--r--drivers/media/usb/gspca/ov534.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c7
-rw-r--r--drivers/media/usb/gspca/vicam.c10
-rw-r--r--drivers/media/usb/hackrf/hackrf.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c30
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/msi2500/msi2500.c8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c29
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c167
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c109
-rw-r--r--drivers/media/usb/pwc/pwc-if.c5
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c6
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c48
-rw-r--r--drivers/media/usb/stk1160/stk1160.h7
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c6
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c1047
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c575
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c163
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c212
-rw-r--r--drivers/media/usb/uvc/uvc_status.c78
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c691
-rw-r--r--drivers/media/usb/uvc/uvc_video.c169
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h129
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c241
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c135
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c191
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c81
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c243
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c517
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c80
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c50
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c100
-rw-r--r--drivers/memory/Kconfig23
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/brcmstb_dpfe.c2
-rw-r--r--drivers/memory/brcmstb_memc.c58
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/emif.c3
-rw-r--r--drivers/memory/fsl-corenet-cf.c2
-rw-r--r--drivers/memory/fsl_ifc.c2
-rw-r--r--drivers/memory/jz4780-nemc.c2
-rw-r--r--drivers/memory/mtk-smi.c122
-rw-r--r--drivers/memory/omap-gpmc.c76
-rw-r--r--drivers/memory/renesas-rpc-if-regs.h147
-rw-r--r--drivers/memory/renesas-rpc-if.c716
-rw-r--r--drivers/memory/renesas-xspi-if-regs.h105
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c2
-rw-r--r--drivers/memory/stm32-fmc2-ebi.c2
-rw-r--r--drivers/memory/stm32_omm.c470
-rw-r--r--drivers/memory/tegra/Kconfig8
-rw-r--r--drivers/memory/tegra/Makefile2
-rw-r--r--drivers/memory/tegra/mc.c5
-rw-r--r--drivers/memory/tegra/mc.h9
-rw-r--r--drivers/memory/tegra/tegra186-emc.c7
-rw-r--r--drivers/memory/tegra/tegra186.c17
-rw-r--r--drivers/memory/tegra/tegra20-emc.c12
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c11
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memory/tegra/tegra264-bwmgr.h50
-rw-r--r--drivers/memory/tegra/tegra264.c313
-rw-r--r--drivers/memory/ti-aemif.c192
-rw-r--r--drivers/memory/ti-emif-pm.c2
-rw-r--r--drivers/memstick/core/memstick.c61
-rw-r--r--drivers/memstick/core/ms_block.c23
-rw-r--r--drivers/memstick/core/mspro_block.c16
-rw-r--r--drivers/memstick/host/jmb38x_ms.c7
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c9
-rw-r--r--drivers/memstick/host/tifm_ms.c9
-rw-r--r--drivers/message/fusion/mptfc.c16
-rw-r--r--drivers/message/fusion/mptlan.h3
-rw-r--r--drivers/message/fusion/mptsas.c28
-rw-r--r--drivers/message/fusion/mptscsih.c76
-rw-r--r--drivers/message/fusion/mptscsih.h8
-rw-r--r--drivers/message/fusion/mptspi.c21
-rw-r--r--drivers/mfd/88pm860x-core.c5
-rw-r--r--drivers/mfd/88pm886.c16
-rw-r--r--drivers/mfd/Kconfig346
-rw-r--r--drivers/mfd/Makefile26
-rw-r--r--drivers/mfd/aat2870-core.c4
-rw-r--r--drivers/mfd/ab8500-core.c5
-rw-r--r--drivers/mfd/ab8500-sysctrl.c2
-rw-r--r--drivers/mfd/adp5585.c738
-rw-r--r--drivers/mfd/arizona-irq.c9
-rw-r--r--drivers/mfd/as3722.c4
-rw-r--r--drivers/mfd/atmel-flexcom.c2
-rw-r--r--drivers/mfd/atmel-smc.c13
-rw-r--r--drivers/mfd/axp20x-i2c.c1
-rw-r--r--drivers/mfd/axp20x.c72
-rw-r--r--drivers/mfd/bcm590xx.c66
-rw-r--r--drivers/mfd/bq257xx.c99
-rw-r--r--drivers/mfd/cgbc-core.c428
-rw-r--r--drivers/mfd/cros_ec_dev.c37
-rw-r--r--drivers/mfd/cs40l50-core.c5
-rw-r--r--drivers/mfd/cs42l43-i2c.c10
-rw-r--r--drivers/mfd/cs42l43-sdw.c12
-rw-r--r--drivers/mfd/cs42l43.c83
-rw-r--r--drivers/mfd/cs42l43.h1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/da9052-spi.c2
-rw-r--r--drivers/mfd/da9063-i2c.c27
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/ene-kb3930.c2
-rw-r--r--drivers/mfd/exynos-lpass.c34
-rw-r--r--drivers/mfd/ezx-pcap.c33
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c8
-rw-r--r--drivers/mfd/hi655x-pmic.c8
-rw-r--r--drivers/mfd/intel-lpss-acpi.c4
-rw-r--r--drivers/mfd/intel-lpss-pci.c15
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel-m10-bmc-core.c10
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c2
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c177
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c10
-rw-r--r--drivers/mfd/ioc3.c2
-rw-r--r--drivers/mfd/ipaq-micro.c4
-rw-r--r--drivers/mfd/kempld-core.c38
-rw-r--r--drivers/mfd/loongson-se.c253
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/ls2k-bmc-core.c528
-rw-r--r--drivers/mfd/macsmc.c499
-rw-r--r--drivers/mfd/madera-core.c4
-rw-r--r--drivers/mfd/max14577.c1
-rw-r--r--drivers/mfd/max7360.c171
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77620.c5
-rw-r--r--drivers/mfd/max77705.c180
-rw-r--r--drivers/mfd/max77759.c690
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/max8925-i2c.c1
-rw-r--r--drivers/mfd/max8997-irq.c19
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/max8998-irq.c2
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mcp-sa11x0.c2
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/mt6358-irq.c5
-rw-r--r--drivers/mfd/mt6370.c2
-rw-r--r--drivers/mfd/mt6370.h2
-rw-r--r--drivers/mfd/mt6397-core.c54
-rw-r--r--drivers/mfd/mt6397-irq.c29
-rw-r--r--drivers/mfd/mxs-lradc.c2
-rw-r--r--drivers/mfd/nct6694.c388
-rw-r--r--drivers/mfd/ocelot-core.c6
-rw-r--r--drivers/mfd/ocelot-spi.c4
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/omap-usb-tll.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c255
-rw-r--r--drivers/mfd/pcf50633-core.c304
-rw-r--r--drivers/mfd/pcf50633-gpio.c92
-rw-r--r--drivers/mfd/pcf50633-irq.c312
-rw-r--r--drivers/mfd/qcom-pm8xxx.c8
-rw-r--r--drivers/mfd/qnap-mcu.c347
-rw-r--r--drivers/mfd/rk8xx-core.c18
-rw-r--r--drivers/mfd/rohm-bd71828.c64
-rw-r--r--drivers/mfd/rohm-bd96801.c678
-rw-r--r--drivers/mfd/rt5033.c10
-rw-r--r--drivers/mfd/rz-mtu3.c2
-rw-r--r--drivers/mfd/sec-acpm.c442
-rw-r--r--drivers/mfd/sec-common.c301
-rw-r--r--drivers/mfd/sec-core.c458
-rw-r--r--drivers/mfd/sec-core.h23
-rw-r--r--drivers/mfd/sec-i2c.c239
-rw-r--r--drivers/mfd/sec-irq.c494
-rw-r--r--drivers/mfd/simple-mfd-i2c.c33
-rw-r--r--drivers/mfd/sm501.c56
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c5
-rw-r--r--drivers/mfd/sta2x11-mfd.c645
-rw-r--r--drivers/mfd/stm32-lptimer.c34
-rw-r--r--drivers/mfd/stm32-timers.c34
-rw-r--r--drivers/mfd/stmfx.c5
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c16
-rw-r--r--drivers/mfd/stmpe.c13
-rw-r--r--drivers/mfd/stpmic1.c6
-rw-r--r--drivers/mfd/sun4i-gpadc.c1
-rw-r--r--drivers/mfd/syscon.c130
-rw-r--r--drivers/mfd/tc3589x.c6
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tps65010.c28
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps65219.c297
-rw-r--r--drivers/mfd/tps6586x.c6
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/tps6594-core.c147
-rw-r--r--drivers/mfd/tps6594-i2c.c10
-rw-r--r--drivers/mfd/tps6594-spi.c10
-rw-r--r--drivers/mfd/tqmx86.c115
-rw-r--r--drivers/mfd/twl-core.c26
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-irq.c5
-rw-r--r--drivers/mfd/twl6030-irq.c80
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c5
-rw-r--r--drivers/mfd/upboard-fpga.c324
-rw-r--r--drivers/mfd/vexpress-sysreg.c72
-rw-r--r--drivers/mfd/wcd934x.c2
-rw-r--r--drivers/mfd/wm831x-irq.c13
-rw-r--r--drivers/mfd/wm8994-irq.c4
-rw-r--r--drivers/misc/Kconfig41
-rw-r--r--drivers/misc/Makefile9
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/amd-sbi/Kconfig19
-rw-r--r--drivers/misc/amd-sbi/Makefile4
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c484
-rw-r--r--drivers/misc/amd-sbi/rmi-core.h74
-rw-r--r--drivers/misc/amd-sbi/rmi-hwmon.c120
-rw-r--r--drivers/misc/amd-sbi/rmi-i2c.c133
-rw-r--r--drivers/misc/apds990x.c13
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h1
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c6
-rw-r--r--drivers/misc/c2port/core.c27
-rw-r--r--drivers/misc/cardreader/alcor_pci.c15
-rw-r--r--drivers/misc/cardreader/rts5227.c13
-rw-r--r--drivers/misc/cardreader/rts5228.c12
-rw-r--r--drivers/misc/cardreader/rts5249.c16
-rw-r--r--drivers/misc/cardreader/rts5264.c100
-rw-r--r--drivers/misc/cardreader/rts5264.h7
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c48
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h4
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c33
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/cxl/Kconfig26
-rw-r--r--drivers/misc/cxl/Makefile14
-rw-r--r--drivers/misc/cxl/api.c532
-rw-r--r--drivers/misc/cxl/base.c126
-rw-r--r--drivers/misc/cxl/context.c362
-rw-r--r--drivers/misc/cxl/cxl.h1135
-rw-r--r--drivers/misc/cxl/cxllib.c271
-rw-r--r--drivers/misc/cxl/debugfs.c134
-rw-r--r--drivers/misc/cxl/fault.c341
-rw-r--r--drivers/misc/cxl/file.c699
-rw-r--r--drivers/misc/cxl/flash.c538
-rw-r--r--drivers/misc/cxl/guest.c1208
-rw-r--r--drivers/misc/cxl/hcalls.c643
-rw-r--r--drivers/misc/cxl/hcalls.h200
-rw-r--r--drivers/misc/cxl/irq.c450
-rw-r--r--drivers/misc/cxl/main.c383
-rw-r--r--drivers/misc/cxl/native.c1592
-rw-r--r--drivers/misc/cxl/of.c344
-rw-r--r--drivers/misc/cxl/pci.c2101
-rw-r--r--drivers/misc/cxl/sysfs.c771
-rw-r--r--drivers/misc/cxl/trace.c9
-rw-r--r--drivers/misc/cxl/trace.h691
-rw-r--r--drivers/misc/cxl/vphb.c309
-rw-r--r--drivers/misc/ds1682.c4
-rw-r--r--drivers/misc/dw-xdata-pcie.c5
-rw-r--r--drivers/misc/echo/Kconfig9
-rw-r--r--drivers/misc/echo/Makefile2
-rw-r--r--drivers/misc/echo/echo.c589
-rw-r--r--drivers/misc/echo/echo.h175
-rw-r--r--drivers/misc/echo/fir.h154
-rw-r--r--drivers/misc/echo/oslec.h81
-rw-r--r--drivers/misc/eeprom/Kconfig23
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c14
-rw-r--r--drivers/misc/eeprom/at25.c378
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c2
-rw-r--r--drivers/misc/eeprom/ee1004.c4
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c15
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c2
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c81
-rw-r--r--drivers/misc/eeprom/m24lr.c606
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/fastrpc.c244
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/hi6421v600-irq.c5
-rw-r--r--drivers/misc/hisi_hikey_usb.c5
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c14
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/keba/Kconfig13
-rw-r--r--drivers/misc/keba/Makefile1
-rw-r--r--drivers/misc/keba/cp500.c579
-rw-r--r--drivers/misc/keba/lan9252.c359
-rw-r--r--drivers/misc/lan966x_pci.c215
-rw-r--r--drivers/misc/lan966x_pci.dtso177
-rw-r--r--drivers/misc/lis3lv02d/Kconfig8
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c26
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/lkdtm/fortify.c6
-rw-r--r--drivers/misc/lkdtm/heap.c17
-rw-r--r--drivers/misc/lkdtm/kstack_erase.c150
-rw-r--r--drivers/misc/lkdtm/perms.c19
-rw-r--r--drivers/misc/lkdtm/stackleak.c150
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c108
-rw-r--r--drivers/misc/mei/Kconfig15
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus-fixup.c16
-rw-r--r--drivers/misc/mei/bus.c128
-rw-r--r--drivers/misc/mei/client.c108
-rw-r--r--drivers/misc/mei/client.h8
-rw-r--r--drivers/misc/mei/dma-ring.c8
-rw-r--r--drivers/misc/mei/gsc-me.c20
-rw-r--r--drivers/misc/mei/hbm.c135
-rw-r--r--drivers/misc/mei/hw-me-regs.h3
-rw-r--r--drivers/misc/mei/hw-me.c153
-rw-r--r--drivers/misc/mei/hw-txe.c105
-rw-r--r--drivers/misc/mei/hw-txe.h2
-rw-r--r--drivers/misc/mei/hw.h2
-rw-r--r--drivers/misc/mei/init.c66
-rw-r--r--drivers/misc/mei/interrupt.c49
-rw-r--r--drivers/misc/mei/main.c192
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/mei_lb.c312
-rw-r--r--drivers/misc/mei/pci-me.c23
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c38
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c28
-rw-r--r--drivers/misc/mei/vsc-tp.c138
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/misc/ntsync.c1001
-rw-r--r--drivers/misc/ocxl/sysfs.c16
-rw-r--r--drivers/misc/open-dice.c2
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c551
-rw-r--r--drivers/misc/rp1/Kconfig20
-rw-r--r--drivers/misc/rp1/Makefile3
-rw-r--r--drivers/misc/rp1/rp1-pci.dtso25
-rw-r--r--drivers/misc/rp1/rp1_pci.c333
-rw-r--r--drivers/misc/rpmb-core.c9
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c9
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c2
-rw-r--r--drivers/misc/sram.c16
-rw-r--r--drivers/misc/ti-st/Kconfig19
-rw-r--r--drivers/misc/ti-st/Makefile7
-rw-r--r--drivers/misc/ti-st/st_core.c918
-rw-r--r--drivers/misc/ti-st/st_kim.c839
-rw-r--r--drivers/misc/ti-st/st_ll.c156
-rw-r--r--drivers/misc/ti_fpc202.c435
-rw-r--r--drivers/misc/tps6594-esm.c2
-rw-r--r--drivers/misc/tps6594-pfsm.c36
-rw-r--r--drivers/misc/uacce/uacce.c40
-rw-r--r--drivers/misc/vcpu_stall_detector.c5
-rw-r--r--drivers/misc/vmw_balloon.c7
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c56
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c53
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c133
-rw-r--r--drivers/misc/xilinx_sdfec.c2
-rw-r--r--drivers/misc/xilinx_tmr_inject.c2
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/block.c156
-rw-r--r--drivers/mmc/core/bus.c56
-rw-r--r--drivers/mmc/core/card.h25
-rw-r--r--drivers/mmc/core/core.c164
-rw-r--r--drivers/mmc/core/core.h33
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c179
-rw-r--r--drivers/mmc/core/mmc_ops.c102
-rw-r--r--drivers/mmc/core/mmc_ops.h3
-rw-r--r--drivers/mmc/core/mmc_test.c32
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c2
-rw-r--r--drivers/mmc/core/pwrseq_sd8787.c2
-rw-r--r--drivers/mmc/core/pwrseq_simple.c51
-rw-r--r--drivers/mmc/core/queue.c10
-rw-r--r--drivers/mmc/core/quirks.h31
-rw-r--r--drivers/mmc/core/regulator.c111
-rw-r--r--drivers/mmc/core/sd.c115
-rw-r--r--drivers/mmc/core/sd.h4
-rw-r--r--drivers/mmc/core/sd_ops.c24
-rw-r--r--drivers/mmc/core/sd_ops.h3
-rw-r--r--drivers/mmc/core/sd_uhs2.c1304
-rw-r--r--drivers/mmc/core/sdio.c16
-rw-r--r--drivers/mmc/core/sdio_bus.c5
-rw-r--r--drivers/mmc/core/sdio_uart.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c20
-rw-r--r--drivers/mmc/host/Kconfig54
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/alcor.c33
-rw-r--r--drivers/mmc/host/atmel-mci.c42
-rw-r--r--drivers/mmc/host/au1xmmc.c36
-rw-r--r--drivers/mmc/host/bcm2835.c69
-rw-r--r--drivers/mmc/host/cavium-octeon.c4
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cavium.c10
-rw-r--r--drivers/mmc/host/cb710-mmc.c29
-rw-r--r--drivers/mmc/host/cqhci-crypto.c46
-rw-r--r--drivers/mmc/host/cqhci.h8
-rw-r--r--drivers/mmc/host/davinci_mmc.c63
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c2
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c56
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c2
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c10
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c11
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c2
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c20
-rw-r--r--drivers/mmc/host/dw_mmc-starfive.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c146
-rw-r--r--drivers/mmc/host/dw_mmc.h30
-rw-r--r--drivers/mmc/host/jz4740_mmc.c49
-rw-r--r--drivers/mmc/host/litex_mmc.c14
-rw-r--r--drivers/mmc/host/loongson2-mmc.c1030
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c15
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c9
-rw-r--r--drivers/mmc/host/mmc_spi.c17
-rw-r--r--drivers/mmc/host/mmci.c41
-rw-r--r--drivers/mmc/host/mmci.h2
-rw-r--r--drivers/mmc/host/moxart-mmc.c42
-rw-r--r--drivers/mmc/host/mtk-sd.c556
-rw-r--r--drivers/mmc/host/mvsdio.c34
-rw-r--r--drivers/mmc/host/mxcmmc.c47
-rw-r--r--drivers/mmc/host/mxs-mmc.c39
-rw-r--r--drivers/mmc/host/omap.c25
-rw-r--r--drivers/mmc/host/omap_hsmmc.c35
-rw-r--r--drivers/mmc/host/owl-mmc.c39
-rw-r--r--drivers/mmc/host/pxamci.c44
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c207
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c5
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c9
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c87
-rw-r--r--drivers/mmc/host/sdhci-acpi.c51
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c21
-rw-r--r--drivers/mmc/host/sdhci-cadence.c106
-rw-r--r--drivers/mmc/host/sdhci-dove.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c356
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c27
-rw-r--r--drivers/mmc/host/sdhci-iproc.c20
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c21
-rw-r--r--drivers/mmc/host/sdhci-msm.c222
-rw-r--r--drivers/mmc/host/sdhci-npcm.c17
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c87
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c14
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c52
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c141
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c21
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c2
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c308
-rw-r--r--drivers/mmc/host/sdhci-of-ma35d1.c25
-rw-r--r--drivers/mmc/host/sdhci-of-sparx5.c26
-rw-r--r--drivers/mmc/host/sdhci-omap.c49
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c138
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c574
-rw-r--r--drivers/mmc/host/sdhci-pci.h4
-rw-r--r--drivers/mmc/host/sdhci-pic32.c11
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c16
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c28
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c72
-rw-r--r--drivers/mmc/host/sdhci-s3c.c39
-rw-r--r--drivers/mmc/host/sdhci-spear.c19
-rw-r--r--drivers/mmc/host/sdhci-sprd.c46
-rw-r--r--drivers/mmc/host/sdhci-st.c14
-rw-r--r--drivers/mmc/host/sdhci-tegra.c25
-rw-r--r--drivers/mmc/host/sdhci-uhs2.c1251
-rw-r--r--drivers/mmc/host/sdhci-uhs2.h188
-rw-r--r--drivers/mmc/host/sdhci-xenon.c39
-rw-r--r--drivers/mmc/host/sdhci.c316
-rw-r--r--drivers/mmc/host/sdhci.h105
-rw-r--r--drivers/mmc/host/sdhci_am654.c104
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c15
-rw-r--r--drivers/mmc/host/sdricoh_cs.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c37
-rw-r--r--drivers/mmc/host/sunplus-mmc.c4
-rw-r--r--drivers/mmc/host/sunxi-mmc.c41
-rw-r--r--drivers/mmc/host/tifm_sd.c15
-rw-r--r--drivers/mmc/host/tmio_mmc.h27
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c64
-rw-r--r--drivers/mmc/host/toshsd.c12
-rw-r--r--drivers/mmc/host/uniphier-sd.c10
-rw-r--r--drivers/mmc/host/usdhi6rol0.c36
-rw-r--r--drivers/mmc/host/ushc.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c23
-rw-r--r--drivers/mmc/host/vub300.c30
-rw-r--r--drivers/mmc/host/wbsd.c10
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c26
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/most/most_usb.c6
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c4
-rw-r--r--drivers/mtd/chips/cfi_probe.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c4
-rw-r--r--drivers/mtd/devices/Kconfig13
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c2
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/mchp48l640.c37
-rw-r--r--drivers/mtd/devices/mtd_intel_dg.c830
-rw-r--r--drivers/mtd/devices/phram.c15
-rw-r--r--drivers/mtd/devices/powernv_flash.c2
-rw-r--r--drivers/mtd/devices/spear_smi.c2
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c8
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c24
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c9
-rw-r--r--drivers/mtd/inftlcore.c9
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c10
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c2
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c11
-rw-r--r--drivers/mtd/mtdcore.c75
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdpart.c3
-rw-r--r--drivers/mtd/mtdpstore.c12
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/core.c131
-rw-r--r--drivers/mtd/nand/ecc-mxic.c24
-rw-r--r--drivers/mtd/nand/ecc-realtek.c464
-rw-r--r--drivers/mtd/nand/ecc-sw-bch.c2
-rw-r--r--drivers/mtd/nand/ecc-sw-hamming.c2
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/generic.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c3
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c2
-rw-r--r--drivers/mtd/nand/qpic_common.c779
-rw-r--r--drivers/mtd/nand/raw/Kconfig47
-rw-r--r--drivers/mtd/nand/raw/Makefile3
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c2
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c13
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c37
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c19
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.h2
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c5
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcma_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c319
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c48
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c8
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c197
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c2
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c13
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c10
-rw-r--r--drivers/mtd/nand/raw/gpio.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c82
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/loongson-nand-controller.c1024
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c2
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c2
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c135
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c4
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c1029
-rw-r--r--drivers/mtd/nand/raw/omap2.c45
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c2
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c2
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c7
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c1811
-rw-r--r--drivers/mtd/nand/raw/r852.c7
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c8
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c18
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1230
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c2
-rw-r--r--drivers/mtd/nand/raw/sm_common.c4
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c2
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c49
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c5
-rw-r--r--drivers/mtd/nand/raw/technologic-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c2
-rw-r--r--drivers/mtd/nand/spi/Makefile5
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c20
-rw-r--r--drivers/mtd/nand/spi/ato.c14
-rw-r--r--drivers/mtd/nand/spi/core.c248
-rw-r--r--drivers/mtd/nand/spi/esmt.c108
-rw-r--r--drivers/mtd/nand/spi/fmsh.c74
-rw-r--r--drivers/mtd/nand/spi/foresee.c26
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c183
-rw-r--r--drivers/mtd/nand/spi/macronix.c95
-rw-r--r--drivers/mtd/nand/spi/micron.c169
-rw-r--r--drivers/mtd/nand/spi/otp.c362
-rw-r--r--drivers/mtd/nand/spi/paragon.c20
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c147
-rw-r--r--drivers/mtd/nand/spi/toshiba.c22
-rw-r--r--drivers/mtd/nand/spi/winbond.c372
-rw-r--r--drivers/mtd/nand/spi/xtx.c20
-rw-r--r--drivers/mtd/nftlcore.c43
-rw-r--r--drivers/mtd/rfd_ftl.c4
-rw-r--r--drivers/mtd/sm_ftl.c6
-rw-r--r--drivers/mtd/spi-nor/atmel.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c2
-rw-r--r--drivers/mtd/spi-nor/core.c244
-rw-r--r--drivers/mtd/spi-nor/core.h7
-rw-r--r--drivers/mtd/spi-nor/macronix.c206
-rw-r--r--drivers/mtd/spi-nor/micron-st.c8
-rw-r--r--drivers/mtd/spi-nor/otp.c1
-rw-r--r--drivers/mtd/spi-nor/sfdp.c4
-rw-r--r--drivers/mtd/spi-nor/sfdp.h1
-rw-r--r--drivers/mtd/spi-nor/spansion.c46
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/mtd/spi-nor/swp.c20
-rw-r--r--drivers/mtd/spi-nor/sysfs.c8
-rw-r--r--drivers/mtd/spi-nor/winbond.c89
-rw-r--r--drivers/mtd/tests/oobtest.c2
-rw-r--r--drivers/mtd/tests/pagetest.c2
-rw-r--r--drivers/mtd/tests/subpagetest.c2
-rw-r--r--drivers/mtd/ubi/attach.c12
-rw-r--r--drivers/mtd/ubi/block.c8
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/cdev.c70
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c19
-rw-r--r--drivers/mtd/ubi/kapi.c27
-rw-r--r--drivers/mtd/ubi/nvmem.c2
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/wl.c11
-rw-r--r--drivers/mtd/ubi/wl.h3
-rw-r--r--drivers/mux/Kconfig1
-rw-r--r--drivers/mux/adg792a.c2
-rw-r--r--drivers/mux/adgs1408.c4
-rw-r--r--drivers/mux/core.c9
-rw-r--r--drivers/mux/gpio.c9
-rw-r--r--drivers/mux/mmio.c15
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c42
-rw-r--r--drivers/net/arcnet/arcnet.c4
-rw-r--r--drivers/net/arcnet/com20020-pci.c17
-rw-r--r--drivers/net/bareudp.c52
-rw-r--r--drivers/net/bonding/bond_3ad.c122
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_debugfs.c9
-rw-r--r--drivers/net/bonding/bond_main.c473
-rw-r--r--drivers/net/bonding/bond_netlink.c68
-rw-r--r--drivers/net/bonding/bond_options.c221
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/caif/caif_serial.c16
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/Kconfig3
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/c_can/c_can_main.c33
-rw-r--r--drivers/net/can/c_can/c_can_platform.c56
-rw-r--r--drivers/net/can/cc770/Kconfig2
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c29
-rw-r--r--drivers/net/can/dev/calc_bittiming.c10
-rw-r--r--drivers/net/can/dev/dev.c108
-rw-r--r--drivers/net/can/dev/netlink.c610
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c84
-rw-r--r--drivers/net/can/flexcan/flexcan.h6
-rw-r--r--drivers/net/can/grcan.c19
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c68
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c1885
-rw-r--r--drivers/net/can/kvaser_pciefd/Makefile3
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd.h96
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c1908
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c60
-rw-r--r--drivers/net/can/m_can/m_can.c122
-rw-r--r--drivers/net/can/m_can/m_can.h2
-rw-r--r--drivers/net/can/m_can/m_can_pci.c1
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c100
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h2
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c10
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c309
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c662
-rw-r--r--drivers/net/can/rockchip/Kconfig3
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c20
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c4
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c12
-rw-r--r--drivers/net/can/sja1000/sja1000.c67
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c15
-rw-r--r--drivers/net/can/slcan/slcan-core.c26
-rw-r--r--drivers/net/can/spi/hi311x.c90
-rw-r--r--drivers/net/can/spi/mcp251x.c36
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c46
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c16
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c39
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c2
-rw-r--r--drivers/net/can/sun4i_can.c25
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/Kconfig12
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c58
-rw-r--r--drivers/net/can/usb/esd_usb.c70
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c7
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c8
-rw-r--r--drivers/net/can/usb/f81604.c10
-rw-r--r--drivers/net/can/usb/gs_usb.c38
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h35
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c148
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c87
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c198
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c113
-rw-r--r--drivers/net/can/usb/mcba_usb.c1
-rw-r--r--drivers/net/can/usb/nct6694_canfd.c832
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c14
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/usb/ucan.c43
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c29
-rw-r--r--drivers/net/can/xilinx_can.c34
-rw-r--r--drivers/net/dsa/Kconfig19
-rw-r--r--drivers/net/dsa/Makefile6
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c679
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c1
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c136
-rw-r--r--drivers/net/dsa/b53/b53_priv.h71
-rw-r--r--drivers/net/dsa/b53/b53_regs.h48
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c5
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c19
-rw-r--r--drivers/net/dsa/bcm_sf2.h5
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c22
-rw-r--r--drivers/net/dsa/dsa_loop.c82
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c30
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c24
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h5
-rw-r--r--drivers/net/dsa/ks8995.c857
-rw-r--r--drivers/net/dsa/lantiq/Kconfig7
-rw-r--r--drivers/net/dsa/lantiq/Makefile1
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c (renamed from drivers/net/dsa/lantiq_gswip.c)492
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h276
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz8.c218
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8_reg.h53
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c243
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c18
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c948
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h144
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c241
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c32
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h7
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c115
-rw-r--r--drivers/net/dsa/microchip/lan937x.h2
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c288
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h13
-rw-r--r--drivers/net/dsa/mt7530-mdio.c21
-rw-r--r--drivers/net/dsa/mt7530-mmio.c24
-rw-r--r--drivers/net/dsa/mt7530.c661
-rw-r--r--drivers/net/dsa/mt7530.h80
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig10
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c339
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h29
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c43
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c26
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h17
-rw-r--r--drivers/net/dsa/mv88e6xxx/leds.c848
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c9
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h133
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c87
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h8
-rw-r--r--drivers/net/dsa/ocelot/felix.c24
-rw-r--r--drivers/net/dsa/ocelot/felix.h3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c25
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c4
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c2
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c13
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c7
-rw-r--r--drivers/net/dsa/qca/qca8k.h3
-rw-r--r--drivers/net/dsa/realtek/Kconfig6
-rw-r--r--drivers/net/dsa/realtek/Makefile3
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c8
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c8
-rw-r--r--drivers/net/dsa/realtek/realtek.h3
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c8
-rw-r--r--drivers/net/dsa/realtek/rtl8366-core.c22
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb-leds.c177
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c275
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.h107
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.c16
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c43
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c91
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c34
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c66
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c14
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c8
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c6
-rw-r--r--drivers/net/dummy.c19
-rw-r--r--drivers/net/eql.c4
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c6
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c4
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c4
-rw-r--r--drivers/net/ethernet/Kconfig4
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c9
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c41
-rw-r--r--drivers/net/ethernet/airoha/Kconfig34
-rw-r--r--drivers/net/ethernet/airoha/Makefile9
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c3050
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h643
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c742
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c1480
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c186
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h936
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h76
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c319
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h116
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.h27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c210
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.h21
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c74
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c162
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.c233
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h8
-rw-r--r--drivers/net/ethernet/amd/a2065.c4
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c7
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c4
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c40
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c46
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c17
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h12
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c5
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c13
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c29
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h164
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c136
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c126
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c434
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c357
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c169
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c401
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c119
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c119
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c130
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c206
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c162
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c119
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pps.c74
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c218
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h202
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c6
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c16
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c10
-rw-r--r--drivers/net/ethernet/apple/bmac.c68
-rw-r--r--drivers/net/ethernet/apple/mace.c6
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c73
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c132
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c43
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h32
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c39
-rw-r--r--drivers/net/ethernet/arc/emac_main.c27
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c9
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c50
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c85
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c15
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig19
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c178
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h81
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c83
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c47
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c26
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c69
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h23
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c7
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile12
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h245
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c404
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_db.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.c306
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c508
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h110
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c1185
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c2485
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h454
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c605
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h96
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c499
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h202
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c105
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2123
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h132
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c208
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c117
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c579
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h10720
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c204
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c86
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c96
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c44
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1390
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h84
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c105
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c15
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c267
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c26
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c31
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h205
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c814
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c284
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c106
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c98
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c58
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c34
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h1
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c20
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c9
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c9
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile2
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h25
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h142
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h80
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c67
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c782
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c129
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.c436
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.h8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.c117
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c47
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c5
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c88
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h22
-rw-r--r--drivers/net/ethernet/dlink/sundance.c39
-rw-r--r--drivers/net/ethernet/dnet.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c205
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c13
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c85
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c69
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/Kconfig4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c93
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c84
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c51
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c20
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig57
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile13
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c1019
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h156
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h196
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c1087
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c343
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h97
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c472
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h35
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c426
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h22
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c36
-rw-r--r--drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c445
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c457
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/fec.h28
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c412
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c58
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c36
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c51
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c93
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c36
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c632
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h24
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c95
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.c65
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.h1
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c3
-rw-r--r--drivers/net/ethernet/google/Kconfig2
-rw-r--r--drivers/net/ethernet/google/gve/Makefile5
-rw-r--r--drivers/net/ethernet/google/gve/gve.h222
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c189
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h31
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c344
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c134
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c940
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c139
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c44
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c636
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c91
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c432
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c5
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig18
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile9
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h285
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c168
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c349
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c193
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c500
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c396
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h63
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c144
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c511
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c305
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h298
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c586
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h44
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h30
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c1086
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c168
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c200
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1370
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c202
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c4
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c76
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c236
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h57
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h47
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h264
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c557
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c436
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c194
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c421
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c409
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c860
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h141
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h224
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c496
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c385
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h61
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h93
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c885
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h145
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c551
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h104
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c779
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h147
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h87
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c2
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c51
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c90
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c49
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c49
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c49
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c579
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h86
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c181
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/Kconfig13
-rw-r--r--drivers/net/ethernet/intel/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e100.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c26
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c89
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c99
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c126
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c36
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c122
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c78
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h156
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1229
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c172
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c593
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h57
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h47
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h38
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c202
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h10
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h71
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c62
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h83
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c110
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c83
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c768
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c485
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.h47
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c450
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h68
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h273
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_types.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c412
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile14
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c180
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c993
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.h58
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c551
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c1001
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h150
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h519
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c462
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c1556
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h85
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c397
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c633
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c1539
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c337
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c78
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c472
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c269
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1012
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c221
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c766
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1989
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h180
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h270
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c1506
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h193
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c199
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c187
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c62
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c626
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c473
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c4200
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c186
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c2422
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c189
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h30
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c199
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c2434
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c973
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c719
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c2936
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h140
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h268
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c43
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c77
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c480
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c503
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h19
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c546
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c63
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c1018
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h379
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c214
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1970
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h340
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c59
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1751
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h125
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c672
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h590
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h72
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c378
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c63
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h30
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c21
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h76
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h66
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c150
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c318
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c526
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c54
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c165
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c363
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h57
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c558
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h55
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c4043
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h102
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c302
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1028
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h140
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1032
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c195
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c42
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig15
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c52
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c9
-rw-r--r--drivers/net/ethernet/korina.c7
-rw-r--r--drivers/net/ethernet/lantiq_etop.c27
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c67
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c13
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c173
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c280
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c201
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c80
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c86
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c26
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c70
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c31
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c179
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c123
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h105
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c260
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h141
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c149
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c310
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c69
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c184
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c198
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c62
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c469
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1042
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c252
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c381
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h197
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c50
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c327
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c570
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c308
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c121
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c245
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c879
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h55
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c26
-rw-r--r--drivers/net/ethernet/marvell/skge.c12
-rw-r--r--drivers/net/ethernet/marvell/sky2.c13
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig9
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c2793
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c563
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h101
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c62
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c36
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c296
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c952
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c352
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c720
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c429
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c510
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c673
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c747
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c440
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c191
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c234
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c2651
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h316
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c1226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c487
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h831
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1641
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c1293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c2604
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c995
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c1300
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h361
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c480
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c2146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h834
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c1216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h514
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c780
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c1231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h270
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c493
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h472
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c846
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c1365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c494
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c306
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig2
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile10
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h75
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h362
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c271
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c507
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1844
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c1382
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h218
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c574
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c307
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h40
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c372
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h62
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c143
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c129
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c649
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h43
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c303
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c55
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h39
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1594
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h95
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c8
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c70
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c96
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c56
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h11
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c30
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c357
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c191
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c52
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c80
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c368
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h243
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h4750
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c13
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c181
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h28
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c49
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c58
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.h247
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c45
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c48
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c45
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c17
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c702
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c44
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c48
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c847
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c148
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c54
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c51
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c266
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c39
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c19
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c19
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c44
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c14
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c281
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c137
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h159
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c67
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c56
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c35
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig16
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c24
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c56
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig5
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h10
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c938
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c263
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h28
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c184
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c131
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c12
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c78
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h33
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c2201
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h118
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c2325
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c8
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c40
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c11
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/ether3.c8
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c11
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c49
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c5
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx.c141
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c12
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h7
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c20
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c148
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c89
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c13
-rw-r--r--drivers/net/ethernet/sfc/mae.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c197
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13844
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h64
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/ptp.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c19
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c123
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c16
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c6
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c14
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c33
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h4
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c12
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2159
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig60
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c200
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c110
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c371
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c242
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1281
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c76
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c154
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c144
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c182
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h128
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h83
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c195
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c311
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1263
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c401
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c187
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c94
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c10
-rw-r--r--drivers/net/ethernet/sun/sunhme.c10
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c8
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c9
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h33
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c84
-rw-r--r--drivers/net/ethernet/ti/Kconfig14
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c27
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1046
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h23
-rw-r--r--drivers/net/ethernet/ti/cpsw.c39
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c84
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c20
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h7
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c268
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c471
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c199
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h81
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c850
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h95
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c86
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h36
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/ti/tlan.c8
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2556
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c119
-rw-r--r--drivers/net/ethernet/via/via-rhine.c13
-rw-r--r--drivers/net/ethernet/via/via-velocity.c8
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig43
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c305
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h17
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c703
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h13
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c503
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c419
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h99
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c905
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c925
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h297
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h125
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c280
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c11
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c115
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c16
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h10
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c266
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c386
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c49
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c89
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c273
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c239
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h141
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c319
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h32
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c379
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c63
-rw-r--r--drivers/net/fddi/defza.c12
-rw-r--r--drivers/net/fjes/fjes_ethtool.c64
-rw-r--r--drivers/net/fjes/fjes_main.c7
-rw-r--r--drivers/net/geneve.c128
-rw-r--r--drivers/net/gtp.c100
-rw-r--r--drivers/net/hamradio/6pack.c67
-rw-r--r--drivers/net/hamradio/baycom_epp.c5
-rw-r--r--drivers/net/hamradio/baycom_par.c4
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c27
-rw-r--r--drivers/net/hamradio/scc.c44
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/Kconfig2
-rw-r--r--drivers/net/hyperv/hyperv_net.h20
-rw-r--r--drivers/net/hyperv/netvsc.c78
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c154
-rw-r--r--drivers/net/hyperv/rndis_filter.c57
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c84
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ifb.c18
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c19
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_main.c14
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipa/ipa_sysfs.c6
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c26
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c7
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c22
-rw-r--r--drivers/net/ipvlan/ipvtap.c6
-rw-r--r--drivers/net/loopback.c22
-rw-r--r--drivers/net/macsec.c307
-rw-r--r--drivers/net/macvlan.c50
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig10
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c16
-rw-r--r--drivers/net/mctp/mctp-i3c.c14
-rw-r--r--drivers/net/mctp/mctp-serial.c5
-rw-r--r--drivers/net/mctp/mctp-usb.c388
-rw-r--r--drivers/net/mdio.c172
-rw-r--r--drivers/net/mdio/Kconfig52
-rw-r--r--drivers/net/mdio/Makefile2
-rw-r--r--drivers/net/mdio/fwnode_mdio.c39
-rw-r--r--drivers/net/mdio/mdio-airoha.c276
-rw-r--r--drivers/net/mdio/mdio-aspeed.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c13
-rw-r--r--drivers/net/mdio/mdio-gpio.c2
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c2
-rw-r--r--drivers/net/mdio/mdio-i2c.c90
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c7
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c2
-rw-r--r--drivers/net/mdio/mdio-moxart.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c2
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c5
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c5
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c2
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c2
-rw-r--r--drivers/net/mdio/mdio-octeon.c27
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-sun4i.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c14
-rw-r--r--drivers/net/mdio/mdio-xgene.c2
-rw-r--r--drivers/net/mdio/of_mdio.c5
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netconsole.c896
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c3
-rw-r--r--drivers/net/netdevsim/bus.c29
-rw-r--r--drivers/net/netdevsim/dev.c72
-rw-r--r--drivers/net/netdevsim/ethtool.c56
-rw-r--r--drivers/net/netdevsim/fib.c4
-rw-r--r--drivers/net/netdevsim/health.c6
-rw-r--r--drivers/net/netdevsim/hwstats.c34
-rw-r--r--drivers/net/netdevsim/ipsec.c49
-rw-r--r--drivers/net/netdevsim/macsec.c56
-rw-r--r--drivers/net/netdevsim/netdev.c570
-rw-r--r--drivers/net/netdevsim/netdevsim.h57
-rw-r--r--drivers/net/netdevsim/psp.c225
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c35
-rw-r--r--drivers/net/netkit.c172
-rw-r--r--drivers/net/ntb_netdev.c4
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c465
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c262
-rw-r--r--drivers/net/ovpn/netlink-gen.h47
-rw-r--r--drivers/net/ovpn/netlink.c1293
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c241
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c601
-rw-r--r--drivers/net/ovpn/tcp.h37
-rw-r--r--drivers/net/ovpn/udp.c448
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pcs/Kconfig11
-rw-r--r--drivers/net/pcs/pcs-lynx.c51
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c26
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c343
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c24
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs-wx.c56
-rw-r--r--drivers/net/pcs/pcs-xpcs.c728
-rw-r--r--drivers/net/pcs/pcs-xpcs.h64
-rw-r--r--drivers/net/pfcp.c35
-rw-r--r--drivers/net/phy/Kconfig81
-rw-r--r--drivers/net/phy/Makefile29
-rw-r--r--drivers/net/phy/adin.c8
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/air_en8811h.c142
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia.h53
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c7
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c32
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c19
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c882
-rw-r--r--drivers/net/phy/as21xxx.c1088
-rw-r--r--drivers/net/phy/ax88796b.c7
-rw-r--r--drivers/net/phy/ax88796b_rust.rs2
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c5
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c5
-rw-r--r--drivers/net/phy/bcm54140.c3
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm84881.c12
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/broadcom.c194
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c73
-rw-r--r--drivers/net/phy/dp83822.c496
-rw-r--r--drivers/net/phy/dp83848.c4
-rw-r--r--drivers/net/phy/dp83867.c83
-rw-r--r--drivers/net/phy/dp83869.c29
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c301
-rw-r--r--drivers/net/phy/dp83tg720.c334
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c241
-rw-r--r--drivers/net/phy/icplus.c11
-rw-r--r--drivers/net/phy/intel-xway.c262
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c420
-rw-r--r--drivers/net/phy/marvell-88x2222.c15
-rw-r--r--drivers/net/phy/marvell.c219
-rw-r--r--drivers/net/phy/marvell10g.c45
-rw-r--r--drivers/net/phy/mdio-boardinfo.c80
-rw-r--r--drivers/net/phy/mdio-boardinfo.h23
-rw-r--r--drivers/net/phy/mdio_bus.c475
-rw-r--r--drivers/net/phy/mdio_bus_provider.c447
-rw-r--r--drivers/net/phy/mdio_device.c4
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c1555
-rw-r--r--drivers/net/phy/mediatek-ge.c111
-rw-r--r--drivers/net/phy/mediatek/Kconfig40
-rw-r--r--drivers/net/phy/mediatek/Makefile5
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c413
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c1548
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c142
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c347
-rw-r--r--drivers/net/phy/mediatek/mtk.h104
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c1440
-rw-r--r--drivers/net/phy/microchip.c74
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1306
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c280
-rw-r--r--drivers/net/phy/microchip_t1s.c302
-rw-r--r--drivers/net/phy/motorcomm.c117
-rw-r--r--drivers/net/phy/mscc/mscc.h19
-rw-r--r--drivers/net/phy/mscc/mscc_main.c62
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c136
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/mxl-86110.c978
-rw-r--r--drivers/net/phy/mxl-gpy.c248
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c213
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.h1
-rw-r--r--drivers/net/phy/nxp-cbtx.c4
-rw-r--r--drivers/net/phy/nxp-tja11xx.c65
-rw-r--r--drivers/net/phy/phy-c45.c84
-rw-r--r--drivers/net/phy/phy-caps.h63
-rw-r--r--drivers/net/phy/phy-core.c447
-rw-r--r--drivers/net/phy/phy.c388
-rw-r--r--drivers/net/phy/phy_caps.c378
-rw-r--r--drivers/net/phy/phy_device.c883
-rw-r--r--drivers/net/phy/phy_led_triggers.c25
-rw-r--r--drivers/net/phy/phy_link_topology.c2
-rw-r--r--drivers/net/phy/phy_package.c419
-rw-r--r--drivers/net/phy/phylib-internal.h31
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c1587
-rw-r--r--drivers/net/phy/qcom/Kconfig3
-rw-r--r--drivers/net/phy/qcom/at803x.c205
-rw-r--r--drivers/net/phy/qcom/qca807x.c67
-rw-r--r--drivers/net/phy/qcom/qca808x.c27
-rw-r--r--drivers/net/phy/qcom/qca83xx.c8
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c100
-rw-r--r--drivers/net/phy/qcom/qcom.h28
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/qt2025.rs4
-rw-r--r--drivers/net/phy/realtek.c1511
-rw-r--r--drivers/net/phy/realtek/Kconfig15
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c74
-rw-r--r--drivers/net/phy/realtek/realtek_main.c1990
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/sfp-bus.c107
-rw-r--r--drivers/net/phy/sfp.c205
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/phy/smsc.c65
-rw-r--r--drivers/net/phy/spi_ks8995.c506
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/teranetics.c5
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c7
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c290
-rw-r--r--drivers/net/ppp/ppp_mppe.c108
-rw-r--r--drivers/net/ppp/ppp_synctty.c5
-rw-r--r--drivers/net/ppp/pppoe.c136
-rw-r--r--drivers/net/ppp/pptp.c19
-rw-r--r--drivers/net/pse-pd/Kconfig11
-rw-r--r--drivers/net/pse-pd/Makefile1
-rw-r--r--drivers/net/pse-pd/pd692x0.c500
-rw-r--r--drivers/net/pse-pd/pse_core.c1217
-rw-r--r--drivers/net/pse-pd/pse_regulator.c23
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c842
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/slip/slip.c18
-rw-r--r--drivers/net/tap.c192
-rw-r--r--drivers/net/team/team_core.c140
-rw-r--r--drivers/net/team/team_mode_activebackup.c3
-rw-r--r--drivers/net/team/team_mode_loadbalance.c13
-rw-r--r--drivers/net/thunderbolt/main.c21
-rw-r--r--drivers/net/tun.c314
-rw-r--r--drivers/net/tun_vnet.h269
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c64
-rw-r--r--drivers/net/usb/ax88172a.c12
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c30
-rw-r--r--drivers/net/usb/ch9200.c7
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/ipheth.c69
-rw-r--r--drivers/net/usb/lan78xx.c2092
-rw-r--r--drivers/net/usb/qmi_wwan.c16
-rw-r--r--drivers/net/usb/r8152.c107
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/usb/rtl8150.c24
-rw-r--r--drivers/net/usb/sierra_net.c8
-rw-r--r--drivers/net/usb/smsc95xx.c72
-rw-r--r--drivers/net/usb/sr9700.c10
-rw-r--r--drivers/net/usb/usbnet.c90
-rw-r--r--drivers/net/veth.c100
-rw-r--r--drivers/net/virtio_net.c1215
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c53
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c74
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c16
-rw-r--r--drivers/net/vrf.c73
-rw-r--r--drivers/net/vxlan/vxlan_core.c986
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c4
-rw-r--r--drivers/net/vxlan/vxlan_private.h17
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c50
-rw-r--r--drivers/net/wan/framer/framer-core.c23
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c30
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c4
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wireguard/allowedips.c102
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c4
-rw-r--r--drivers/net/wireguard/device.c19
-rw-r--r--drivers/net/wireguard/netlink.c47
-rw-r--r--drivers/net/wireguard/noise.c4
-rw-r--r--drivers/net/wireguard/peer.h2
-rw-r--r--drivers/net/wireguard/queueing.h13
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c49
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireguard/timers.c25
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c63
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h44
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c207
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c51
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c493
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h52
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c188
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c188
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c56
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h21
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c485
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c250
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c82
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h66
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c72
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c45
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig20
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c202
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1156
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c109
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c1201
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h621
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h81
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1418
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h115
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c4324
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h1397
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c384
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h224
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2758
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c1305
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h75
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c609
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c201
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h86
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h25
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c136
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h534
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c586
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h64
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c8249
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h129
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c14
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c20
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c304
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h9
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c244
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h59
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c746
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h48
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c616
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h28
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h100
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c3552
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h963
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c90
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c63
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c28
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c19
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/testmode_i.h66
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c18
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c27
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c26
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c159
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c314
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c109
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c443
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Makefile7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h116
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto.c246
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_ccmp.c438
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_tkip.c728
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_wep.c247
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c38
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c102
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_spy.c233
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c43
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c40
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c15
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c21
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c52
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c397
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c168
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c297
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c254
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h305
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h318
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c381
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h545
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h311
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c374
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c274
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c509
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h399
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2061
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c554
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c690
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c905
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c336
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2665
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c775
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h592
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c719
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c259
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2147
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c521
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c503
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c739
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c1064
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c160
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c287
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c880
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c496
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c291
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h298
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c439
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c253
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c233
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c307
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c384
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c583
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c618
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2454
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h1172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c2488
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c654
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c)1341
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c)88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c2689
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h1151
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2401
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c551
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c109
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/main.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c8
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c14
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c143
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h10
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c105
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c81
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c67
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c182
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c62
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c257
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h29
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c361
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h279
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c155
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c158
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c138
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c300
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c653
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c73
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c219
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c520
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c790
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c1284
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c1709
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c1551
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c301
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h375
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h95
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c14
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c127
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c31
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c108
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c17
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c449
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h53
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c37
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c17
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c41
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c14
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c110
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c153
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c78
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig63
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile26
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c84
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c65
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c17
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c170
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h126
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c297
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h263
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c155
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c71
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1125
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.c2812
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2270
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1226
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.c2350
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c127
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c114
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c112
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c1989
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.h175
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c88
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h64
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c345
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig34
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1131
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h232
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c351
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h53
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c1355
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h130
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c4476
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h31
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c2824
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h1383
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2410
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2764
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h713
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c1286
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h233
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1239
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c101
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c719
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h237
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c98
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2550
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h108
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c319
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h157
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c888
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c227
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c315
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c42
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c95
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c153
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c104
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c90
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c140
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c14
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c306
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c118
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c755
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h41
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1042
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h65
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c220
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c319
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c17
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c54
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c31
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c29
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h7
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c31
-rw-r--r--drivers/net/wireless/st/cw1200/queue.h1
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c16
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c26
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c30
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c356
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c14
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c58
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c11
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c6
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c85
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c51
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c26
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h5
-rw-r--r--drivers/net/wwan/wwan_core.c26
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c32
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c9
-rw-r--r--drivers/nfc/nfcmrvl/uart.c9
-rw-r--r--drivers/nfc/pn533/pn533.c18
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig3
-rw-r--r--drivers/nfc/s3fwrn5/core.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c19
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.h2
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c4
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.h4
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h2
-rw-r--r--drivers/nfc/st-nci/ndlc.c16
-rw-r--r--drivers/nfc/st-nci/se.c17
-rw-r--r--drivers/nfc/st21nfca/core.c4
-rw-r--r--drivers/nfc/st21nfca/dep.c18
-rw-r--r--drivers/nfc/st21nfca/i2c.c1
-rw-r--r--drivers/nfc/st21nfca/se.c13
-rw-r--r--drivers/nfc/trf7970a.c91
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c19
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c118
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c31
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c3
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c2
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/ntb/ntb_transport.c9
-rw-r--r--drivers/ntb/test/ntb_perf.c4
-rw-r--r--drivers/ntb/test/ntb_pingpong.c3
-rw-r--r--drivers/nvdimm/badrange.c5
-rw-r--r--drivers/nvdimm/btt.c6
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c74
-rw-r--r--drivers/nvdimm/claim.c27
-rw-r--r--drivers/nvdimm/core.c17
-rw-r--r--drivers/nvdimm/dax_devs.c14
-rw-r--r--drivers/nvdimm/dimm.c5
-rw-r--r--drivers/nvdimm/dimm_devs.c48
-rw-r--r--drivers/nvdimm/e820.c2
-rw-r--r--drivers/nvdimm/label.c3
-rw-r--r--drivers/nvdimm/namespace_devs.c113
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/nd.h12
-rw-r--r--drivers/nvdimm/nd_virtio.c2
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/nvdimm/pfn_devs.c72
-rw-r--r--drivers/nvdimm/pmem.c14
-rw-r--r--drivers/nvdimm/pmem.h4
-rw-r--r--drivers/nvdimm/region.c16
-rw-r--r--drivers/nvdimm/region_devs.c161
-rw-r--r--drivers/nvdimm/security.c10
-rw-r--r--drivers/nvdimm/virtio_pmem.c24
-rw-r--r--drivers/nvme/common/Kconfig1
-rw-r--r--drivers/nvme/common/auth.c398
-rw-r--r--drivers/nvme/common/keyring.c65
-rw-r--r--drivers/nvme/host/Kconfig22
-rw-r--r--drivers/nvme/host/apple.c269
-rw-r--r--drivers/nvme/host/auth.c146
-rw-r--r--drivers/nvme/host/constants.c4
-rw-r--r--drivers/nvme/host/core.c668
-rw-r--r--drivers/nvme/host/fabrics.c36
-rw-r--r--drivers/nvme/host/fabrics.h9
-rw-r--r--drivers/nvme/host/fc.c114
-rw-r--r--drivers/nvme/host/ioctl.c127
-rw-r--r--drivers/nvme/host/multipath.c382
-rw-r--r--drivers/nvme/host/nvme.h116
-rw-r--r--drivers/nvme/host/pci.c1163
-rw-r--r--drivers/nvme/host/pr.c124
-rw-r--r--drivers/nvme/host/rdma.c21
-rw-r--r--drivers/nvme/host/sysfs.c61
-rw-r--r--drivers/nvme/host/tcp.c462
-rw-r--r--drivers/nvme/host/trace.c58
-rw-r--r--drivers/nvme/host/zns.c12
-rw-r--r--drivers/nvme/target/Kconfig14
-rw-r--r--drivers/nvme/target/Makefile4
-rw-r--r--drivers/nvme/target/admin-cmd.c689
-rw-r--r--drivers/nvme/target/auth.c93
-rw-r--r--drivers/nvme/target/configfs.c99
-rw-r--r--drivers/nvme/target/core.c510
-rw-r--r--drivers/nvme/target/debugfs.c29
-rw-r--r--drivers/nvme/target/discovery.c19
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c74
-rw-r--r--drivers/nvme/target/fabrics-cmd.c135
-rw-r--r--drivers/nvme/target/fc.c213
-rw-r--r--drivers/nvme/target/fcloop.c499
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c22
-rw-r--r--drivers/nvme/target/loop.c31
-rw-r--r--drivers/nvme/target/nvmet.h239
-rw-r--r--drivers/nvme/target/passthru.c28
-rw-r--r--drivers/nvme/target/pci-epf.c2649
-rw-r--r--drivers/nvme/target/pr.c1155
-rw-r--r--drivers/nvme/target/rdma.c49
-rw-r--r--drivers/nvme/target/tcp.c156
-rw-r--r--drivers/nvme/target/trace.c108
-rw-r--r--drivers/nvme/target/zns.c22
-rw-r--r--drivers/nvmem/Kconfig59
-rw-r--r--drivers/nvmem/Makefile10
-rw-r--r--drivers/nvmem/an8855-efuse.c68
-rw-r--r--drivers/nvmem/apple-spmi-nvmem.c62
-rw-r--r--drivers/nvmem/brcm_nvram.c4
-rw-r--r--drivers/nvmem/core.c167
-rw-r--r--drivers/nvmem/imx-iim.c10
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c41
-rw-r--r--drivers/nvmem/imx-ocotp.c5
-rw-r--r--drivers/nvmem/layouts.c13
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c10
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c2
-rw-r--r--drivers/nvmem/lpc18xx_otp.c2
-rw-r--r--drivers/nvmem/max77759-nvmem.c145
-rw-r--r--drivers/nvmem/microchip-otpc.c2
-rw-r--r--drivers/nvmem/mtk-efuse.c2
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c1
-rw-r--r--drivers/nvmem/qfprom.c26
-rw-r--r--drivers/nvmem/rcar-efuse.c142
-rw-r--r--drivers/nvmem/rmem.c95
-rw-r--r--drivers/nvmem/rockchip-otp.c17
-rw-r--r--drivers/nvmem/s32g-ocotp-nvmem.c100
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c1
-rw-r--r--drivers/of/.kunitconfig1
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/of/address.c110
-rw-r--r--drivers/of/base.c156
-rw-r--r--drivers/of/cpu.c2
-rw-r--r--drivers/of/device.c42
-rw-r--r--drivers/of/dynamic.c57
-rw-r--r--drivers/of/empty_root.dts9
-rw-r--r--drivers/of/fdt.c96
-rw-r--r--drivers/of/fdt_address.c25
-rw-r--r--drivers/of/irq.c132
-rw-r--r--drivers/of/kexec.c44
-rw-r--r--drivers/of/kobj.c10
-rw-r--r--drivers/of/module.c4
-rw-r--r--drivers/of/of_numa.c8
-rw-r--r--drivers/of/of_private.h46
-rw-r--r--drivers/of/of_reserved_mem.c335
-rw-r--r--drivers/of/of_test.c119
-rw-r--r--drivers/of/overlay.c31
-rw-r--r--drivers/of/overlay_test.c2
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c31
-rw-r--r--drivers/of/property.c179
-rw-r--r--drivers/of/resolver.c53
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi2
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi13
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi28
-rw-r--r--drivers/of/unittest.c156
-rw-r--r--drivers/opp/core.c807
-rw-r--r--drivers/opp/cpu.c30
-rw-r--r--drivers/opp/debugfs.c10
-rw-r--r--drivers/opp/of.c248
-rw-r--r--drivers/opp/opp.h9
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/led.c6
-rw-r--r--drivers/parisc/power.c20
-rw-r--r--drivers/parport/ieee1284.c4
-rw-r--r--drivers/parport/parport_amiga.c2
-rw-r--r--drivers/parport/parport_serial.c12
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/pci/Kconfig26
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/ats.c35
-rw-r--r--drivers/pci/bus.c105
-rw-r--r--drivers/pci/controller/Kconfig22
-rw-r--r--drivers/pci/controller/cadence/Kconfig26
-rw-r--r--drivers/pci/controller/cadence/Makefile1
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c112
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c89
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c126
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c34
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h92
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c134
-rw-r--r--drivers/pci/controller/dwc/Kconfig59
-rw-r--r--drivers/pci/controller/dwc/Makefile5
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c36
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c70
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c789
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c36
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c10
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c526
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c11
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c927
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c430
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c437
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c254
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h201
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c246
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c59
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c481
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c35
-rw-r--r--drivers/pci/controller/dwc/pcie-sophgo.c257
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c364
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.c358
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.h16
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c83
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c4
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig1
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c12
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c49
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h3
-rw-r--r--drivers/pci/controller/pci-aardvark.c71
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-host-common.c37
-rw-r--r--drivers/pci/controller/pci-host-common.h22
-rw-r--r--drivers/pci/controller/pci-host-generic.c4
-rw-r--r--drivers/pci/controller/pci-hyperv-intf.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c250
-rw-r--r--drivers/pci/controller/pci-mvebu.c56
-rw-r--r--drivers/pci/controller/pci-tegra.c176
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c4
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c5
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c479
-rw-r--r--drivers/pci/controller/pci-xgene.c33
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c47
-rw-r--r--drivers/pci/controller/pcie-altera.c266
-rw-r--r--drivers/pci/controller/pcie-apple.c408
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c284
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c2
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c46
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c2
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c340
-rw-r--r--drivers/pci/controller/pcie-mediatek.c69
-rw-r--r--drivers/pci/controller/pcie-mt7621.c17
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c12
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c126
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c454
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c76
-rw-r--r--drivers/pci/controller/pcie-rockchip.c240
-rw-r--r--drivers/pci/controller/pcie-rockchip.h118
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c113
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c57
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c62
-rw-r--r--drivers/pci/controller/pcie-xilinx.c63
-rw-r--r--drivers/pci/controller/plda/Kconfig1
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c223
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c77
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h7
-rw-r--r--drivers/pci/controller/plda/pcie-starfive.c14
-rw-r--r--drivers/pci/controller/vmd.c283
-rw-r--r--drivers/pci/devres.c356
-rw-r--r--drivers/pci/doe.c261
-rw-r--r--drivers/pci/ecam.c2
-rw-r--r--drivers/pci/endpoint/Kconfig10
-rw-r--r--drivers/pci/endpoint/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c8
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c661
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c170
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c100
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c304
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c9
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c67
-rw-r--r--drivers/pci/hotplug/Kconfig12
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/TODO4
-rw-r--r--drivers/pci/hotplug/acpiphp_ampere_altra.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c4
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h3
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c17
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c55
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c1
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/hotplug/octep_hp.c427
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c223
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c26
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c5
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c91
-rw-r--r--drivers/pci/hotplug/pnv_php.c248
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c3
-rw-r--r--drivers/pci/hotplug/shpchp.h18
-rw-r--r--drivers/pci/hotplug/shpchp_core.c13
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c6
-rw-r--r--drivers/pci/iomap.c45
-rw-r--r--drivers/pci/iov.c222
-rw-r--r--drivers/pci/msi/api.c10
-rw-r--r--drivers/pci/msi/irqdomain.c92
-rw-r--r--drivers/pci/msi/msi.c237
-rw-r--r--drivers/pci/msi/msi.h4
-rw-r--r--drivers/pci/of.c220
-rw-r--r--drivers/pci/of_property.c143
-rw-r--r--drivers/pci/p2pdma.c64
-rw-r--r--drivers/pci/pci-acpi.c13
-rw-r--r--drivers/pci/pci-driver.c40
-rw-r--r--drivers/pci/pci-sysfs.c288
-rw-r--r--drivers/pci/pci.c631
-rw-r--r--drivers/pci/pci.h405
-rw-r--r--drivers/pci/pcie/Makefile4
-rw-r--r--drivers/pci/pcie/aer.c595
-rw-r--r--drivers/pci/pcie/aspm.c197
-rw-r--r--drivers/pci/pcie/bwctrl.c329
-rw-r--r--drivers/pci/pcie/dpc.c109
-rw-r--r--drivers/pci/pcie/err.c41
-rw-r--r--drivers/pci/pcie/portdrv.c23
-rw-r--r--drivers/pci/pcie/portdrv.h6
-rw-r--r--drivers/pci/pcie/ptm.c302
-rw-r--r--drivers/pci/pcie/tlp.c137
-rw-r--r--drivers/pci/probe.c367
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/pwrctl/Kconfig12
-rw-r--r--drivers/pci/pwrctl/Makefile6
-rw-r--r--drivers/pci/pwrctl/core.c157
-rw-r--r--drivers/pci/pwrctl/pci-pwrctl-pwrseq.c139
-rw-r--r--drivers/pci/pwrctrl/Kconfig33
-rw-r--r--drivers/pci/pwrctrl/Makefile9
-rw-r--r--drivers/pci/pwrctrl/core.c150
-rw-r--r--drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c139
-rw-r--r--drivers/pci/pwrctrl/slot.c95
-rw-r--r--drivers/pci/quirks.c137
-rw-r--r--drivers/pci/remove.c43
-rw-r--r--drivers/pci/setup-bus.c1393
-rw-r--r--drivers/pci/setup-res.c108
-rw-r--r--drivers/pci/slot.c68
-rw-r--r--drivers/pci/switch/switchtec.c51
-rw-r--r--drivers/pci/tph.c506
-rw-r--r--drivers/pci/vgaarb.c31
-rw-r--r--drivers/pci/vpd.c14
-rw-r--r--drivers/pcmcia/Kconfig3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c4
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c4
-rw-r--r--drivers/pcmcia/cs.c17
-rw-r--r--drivers/pcmcia/cs_internal.h1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c2
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/pcmcia/electra_cf.c4
-rw-r--r--drivers/pcmcia/i82365.c2
-rw-r--r--drivers/pcmcia/omap_cf.c14
-rw-r--r--drivers/pcmcia/pd6729.c3
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c168
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c4
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/soc_common.c18
-rw-r--r--drivers/pcmcia/socket_sysfs.c5
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pcmcia/xxs1500_ss.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c3
-rw-r--r--drivers/peci/controller/peci-aspeed.c2
-rw-r--r--drivers/peci/controller/peci-npcm.c5
-rw-r--r--drivers/peci/core.c2
-rw-r--r--drivers/peci/cpu.c12
-rw-r--r--drivers/peci/device.c4
-rw-r--r--drivers/peci/request.c30
-rw-r--r--drivers/perf/Kconfig29
-rw-r--r--drivers/perf/Makefile3
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c2
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/amlogic/meson_g12_ddr_pmu.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c110
-rw-r--r--drivers/perf/arm-cci.c2
-rw-r--r--drivers/perf/arm-ccn.c9
-rw-r--r--drivers/perf/arm-cmn.c62
-rw-r--r--drivers/perf/arm-ni.c174
-rw-r--r--drivers/perf/arm_brbe.c805
-rw-r--r--drivers/perf/arm_brbe.h47
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c32
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c83
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h57
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c97
-rw-r--r--drivers/perf/arm_dmc620_pmu.c2
-rw-r--r--drivers/perf/arm_dsu_pmu.c2
-rw-r--r--drivers/perf/arm_pmu.c24
-rw-r--r--drivers/perf/arm_pmuv3.c182
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c21
-rw-r--r--drivers/perf/arm_spe_pmu.c160
-rw-r--r--drivers/perf/arm_v6_pmu.c3
-rw-r--r--drivers/perf/arm_v7_pmu.c53
-rw-r--r--drivers/perf/arm_xscale_pmu.c6
-rw-r--r--drivers/perf/cxl_pmu.c21
-rw-r--r--drivers/perf/dwc_pcie_pmu.c287
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c2
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c54
-rw-r--r--drivers/perf/fujitsu_uncore_pmu.c613
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c44
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c413
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c56
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c589
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_mn_pmu.c411
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_noc_pmu.c443
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c57
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c174
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h57
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c253
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c45
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c536
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c68
-rw-r--r--drivers/perf/marvell_pem_pmu.c425
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c203
-rw-r--r--drivers/perf/thunderx2_pmu.c7
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/phy/Kconfig20
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c49
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c14
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c11
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c35
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c54
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c3
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c79
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c433
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h1
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c17
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c3
-rw-r--r--drivers/phy/cadence/cdns-dphy.c156
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c204
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c292
-rw-r--r--drivers/phy/freescale/Kconfig1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c46
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c324
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c22
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c652
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c11
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c8
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c2
-rw-r--r--drivers/phy/marvell/Kconfig4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c16
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c1
-rw-r--r--drivers/phy/mediatek/Kconfig1
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c44
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h3
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c28
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c109
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c85
-rw-r--r--drivers/phy/microchip/Kconfig1
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c195
-rw-r--r--drivers/phy/microchip/sparx5_serdes.h44
-rw-r--r--drivers/phy/microchip/sparx5_serdes_regs.h746
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c6
-rw-r--r--drivers/phy/phy-airoha-pcie-regs.h6
-rw-r--r--drivers/phy/phy-airoha-pcie.c8
-rw-r--r--drivers/phy/phy-can-transceiver.c23
-rw-r--r--drivers/phy/phy-core.c80
-rw-r--r--drivers/phy/phy-lgm-usb.c2
-rw-r--r--drivers/phy/phy-nxp-ptn3222.c123
-rw-r--r--drivers/phy/phy-snps-eusb2.c633
-rw-r--r--drivers/phy/qualcomm/Kconfig38
-rw-r--r--drivers/phy/qualcomm/Makefile3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c74
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c106
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c324
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c514
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c938
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h64
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h67
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c473
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c77
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c34
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c442
-rw-r--r--drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c331
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c4
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c4
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c8
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c297
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c8
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c99
-rw-r--r--drivers/phy/rockchip/Kconfig14
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c69
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c285
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c957
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c213
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c1714
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c1317
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c51
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c134
-rw-r--r--drivers/phy/samsung/Kconfig4
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c52
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c863
-rw-r--r--drivers/phy/samsung/phy-exynosautov920-ufs.c168
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c15
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h4
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/sophgo/Kconfig19
-rw-r--r--drivers/phy/sophgo/Makefile2
-rw-r--r--drivers/phy/sophgo/phy-cv1800-usb2.c170
-rw-r--r--drivers/phy/st/Kconfig11
-rw-r--r--drivers/phy/st/Makefile1
-rw-r--r--drivers/phy/st/phy-stih407-usb.c26
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c605
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c6
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c23
-rw-r--r--drivers/phy/tegra/Kconfig5
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c132
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c6
-rw-r--r--drivers/phy/tegra/xusb.c14
-rw-r--r--drivers/phy/tegra/xusb.h1
-rw-r--r--drivers/phy/ti/Kconfig2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c3
-rw-r--r--drivers/phy/ti/phy-da8xx-usb.c4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c65
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c7
-rw-r--r--drivers/phy/ti/phy-omap-control.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c16
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c16
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c3
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c72
-rw-r--r--drivers/pinctrl/Kconfig111
-rw-r--r--drivers/pinctrl/Makefile10
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c6
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c18
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/Kconfig12
-rw-r--r--drivers/pinctrl/bcm/Kconfig.stb10
-rw-r--r--drivers/pinctrl/bcm/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c849
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c25
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm4908.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c747
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.c442
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.h93
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c8
-rw-r--r--drivers/pinctrl/berlin/berlin.c10
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c21
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c26
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c18
-rw-r--r--drivers/pinctrl/core.c108
-rw-r--r--drivers/pinctrl/core.h3
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/freescale/Kconfig104
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c57
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c228
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c350
-rw-r--r--drivers/pinctrl/intel/Kconfig2
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c19
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c40
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel-platform.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c60
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c11
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorpoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-moorefield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c2
-rw-r--r--drivers/pinctrl/mediatek/Kconfig58
-rw-r--r--drivers/pinctrl/mediatek/Makefile5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c320
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h28
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c2945
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c29
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6893.c879
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c1546
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c1700
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c1860
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c97
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c27
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h2283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h2452
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h3085
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c31
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h7
-rw-r--r--drivers/pinctrl/meson/Kconfig33
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c1109
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c30
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c10
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c12
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c49
-rw-r--r--drivers/pinctrl/nomadik/Kconfig6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c23
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c44
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c123
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35d1.c1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c233
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c202
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c66
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32g2.c52
-rw-r--r--drivers/pinctrl/pinconf-generic.c144
-rw-r--r--drivers/pinctrl/pinconf.h21
-rw-r--r--drivers/pinctrl/pinctrl-amd.c146
-rw-r--r--drivers/pinctrl/pinctrl-amd.h11
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.c233
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.h95
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c35
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c21
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c22
-rw-r--r--drivers/pinctrl/pinctrl-at91.c17
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c93
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c33
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c2
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c388
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c12
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c6
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c704
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c39
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h2
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c11
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c322
-rw-r--r--drivers/pinctrl/pinctrl-k210.c21
-rw-r--r--drivers/pinctrl/pinctrl-k230.c648
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c47
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-max7360.c215
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c9
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c55
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c12
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c240
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c10
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c20
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c28
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c370
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h4
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c1913
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c3
-rw-r--r--drivers/pinctrl/pinctrl-single.c20
-rw-r--r--drivers/pinctrl/pinctrl-st.c9
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c21
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c35
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c6
-rw-r--r--drivers/pinctrl/pinctrl-th1520.c918
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c35
-rw-r--r--drivers/pinctrl/pinctrl-upboard.c1070
-rw-r--r--drivers/pinctrl/pinctrl-xway.c18
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c98
-rw-r--r--drivers/pinctrl/pinmux.c264
-rw-r--r--drivers/pinctrl/pinmux.h19
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c8
-rw-r--r--drivers/pinctrl/qcom/Kconfig11
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm73
-rw-r--r--drivers/pinctrl/qcom/Makefile10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-glymur.c1777
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c809
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c1339
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c126
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1625
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c82
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c1106
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c1245
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c61
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c1504
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c160
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c84
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c1730
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c25
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c3
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c709
-rw-r--r--drivers/pinctrl/renesas/Kconfig265
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/gpio.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c16
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c15
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c836
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c6
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzt2h.c813
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c10
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c944
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c403
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h89
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c49
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h26
-rw-r--r--drivers/pinctrl/sophgo/Kconfig48
-rw-r--r--drivers/pinctrl/sophgo/Makefile8
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1800b.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1812h.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.c602
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.h66
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2000.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2002.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c296
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.c655
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.h49
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2044.c718
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo-common.c451
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo.h136
-rw-r--r--drivers/pinctrl/spacemit/Kconfig18
-rw-r--r--drivers/pinctrl/spacemit/Makefile3
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c1065
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.h40
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c9
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c9
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c8
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c5
-rw-r--r--drivers/pinctrl/stm32/Kconfig20
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c726
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c232
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h22
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c15
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c8
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c12
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c373
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c78
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h47
-rw-r--r--drivers/pinctrl/tegra/Kconfig4
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c73
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h34
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra186.c1979
-rw-r--r--drivers/pinctrl/uniphier/Kconfig2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/arm64/Kconfig43
-rw-r--r--drivers/platform/arm64/Makefile2
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c10
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c827
-rw-r--r--drivers/platform/arm64/lenovo-thinkpad-t14s.c616
-rw-r--r--drivers/platform/arm64/lenovo-yoga-c630.c40
-rw-r--r--drivers/platform/chrome/Kconfig25
-rw-r--r--drivers/platform/chrome/Makefile8
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c9
-rw-r--r--drivers/platform/chrome/chromeos_of_hw_prober.c187
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c7
-rw-r--r--drivers/platform/chrome/cros_ec.c96
-rw-r--r--drivers/platform/chrome/cros_ec.h3
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c74
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c54
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c10
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c6
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c237
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c108
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test_util.h5
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c23
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c9
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c73
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c10
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c172
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h2
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c6
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c10
-rw-r--r--drivers/platform/chrome/cros_hps_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c103
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.c373
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.h51
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c2
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c7
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c4
-rw-r--r--drivers/platform/cznic/Kconfig18
-rw-r--r--drivers/platform/cznic/Makefile3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c7
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c56
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-keyctl.c162
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c17
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h205
-rw-r--r--drivers/platform/cznic/turris-signing-key.c193
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c5
-rw-r--r--drivers/platform/loongarch/Kconfig2
-rw-r--r--drivers/platform/loongarch/loongson-laptop.c87
-rw-r--r--drivers/platform/mellanox/Kconfig26
-rw-r--r--drivers/platform/mellanox/Makefile2
-rw-r--r--drivers/platform/mellanox/mlx-platform.c (renamed from drivers/platform/x86/mlx-platform.c)1567
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c40
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h5
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c295
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c9
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c613
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c12
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c4
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c14
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c116
-rw-r--r--drivers/platform/surface/Kconfig2
-rw-r--r--drivers/platform/surface/surface3-wmi.c2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c24
-rw-r--r--drivers/platform/surface/surface_dtx.c2
-rw-r--r--drivers/platform/surface/surface_gpe.c2
-rw-r--r--drivers/platform/surface/surface_hotplug.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c44
-rw-r--r--drivers/platform/x86/Kconfig324
-rw-r--r--drivers/platform/x86/Makefile34
-rw-r--r--drivers/platform/x86/acer-wmi.c514
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/adv_swbutton.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig30
-rw-r--r--drivers/platform/x86/amd/Makefile7
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c419
-rw-r--r--drivers/platform/x86/amd/hfi/Kconfig18
-rw-r--r--drivers/platform/x86/amd/hfi/Makefile7
-rw-r--r--drivers/platform/x86/amd/hfi/hfi.c557
-rw-r--r--drivers/platform/x86/amd/hsmp.c988
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig49
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile13
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c648
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c466
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h74
-rw-r--r--drivers/platform/x86/amd/hsmp/hwmon.c121
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c350
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile6
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c332
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c109
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c530
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h106
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig3
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile8
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c147
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c4
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c8
-rw-r--r--drivers/platform/x86/amd/pmf/core.c53
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c66
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h112
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c140
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c74
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c145
-rw-r--r--drivers/platform/x86/amd/x3d_vcache.c176
-rw-r--r--drivers/platform/x86/amilo-rfkill.c6
-rw-r--r--drivers/platform/x86/asus-laptop.c13
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c41
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c327
-rw-r--r--drivers/platform/x86/asus-wmi.h6
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c114
-rw-r--r--drivers/platform/x86/classmate-laptop.c7
-rw-r--r--drivers/platform/x86/compal-laptop.c62
-rw-r--r--drivers/platform/x86/dasharo-acpi.c360
-rw-r--r--drivers/platform/x86/dell/Kconfig36
-rw-r--r--drivers/platform/x86/dell/Makefile44
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-base.c491
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-legacy.c95
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c1710
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c842
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.h117
-rw-r--r--drivers/platform/x86/dell/dcdbas.c21
-rw-r--r--drivers/platform/x86/dell/dcdbas.h8
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c60
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c259
-rw-r--r--drivers/platform/x86/dell/dell-pc.c105
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c20
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c3
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h2
-rw-r--r--drivers/platform/x86/dell/dell-smo8800-ids.h27
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c18
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c13
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c6
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c314
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c7
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c25
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c42
-rw-r--r--drivers/platform/x86/eeepc-laptop.c13
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c42
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h5
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c49
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c4
-rw-r--r--drivers/platform/x86/hp/Kconfig1
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c29
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c11
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c459
-rw-r--r--drivers/platform/x86/hp/hp_accel.c6
-rw-r--r--drivers/platform/x86/hp/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/huawei-wmi.c2
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c43
-rw-r--r--drivers/platform/x86/intel/Kconfig3
-rw-r--r--drivers/platform/x86/intel/Makefile68
-rw-r--r--drivers/platform/x86/intel/bxtwc_tmu.c24
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c81
-rw-r--r--drivers/platform/x86/intel/chtdc_ti_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c2
-rw-r--r--drivers/platform/x86/intel/hid.c38
-rw-r--r--drivers/platform/x86/intel/ifs/Makefile2
-rw-r--r--drivers/platform/x86/intel/ifs/core.c6
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h9
-rw-r--r--drivers/platform/x86/intel/ifs/load.c21
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c17
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c25
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c2
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile3
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c173
-rw-r--r--drivers/platform/x86/intel/int3472/common.c11
-rw-r--r--drivers/platform/x86/intel/int3472/common.h131
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c214
-rw-r--r--drivers/platform/x86/intel/int3472/discrete_quirks.c21
-rw-r--r--drivers/platform/x86/intel/int3472/led.c3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c6
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c128
-rw-r--r--drivers/platform/x86/intel/intel_plr_tpmi.c354
-rw-r--r--drivers/platform/x86/intel/mrfld_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c355
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile8
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c56
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c154
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c83
-rw-r--r--drivers/platform/x86/intel/pmc/core.c500
-rw-r--r--drivers/platform/x86/intel/pmc/core.h247
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c328
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c24
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c88
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c124
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c581
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c45
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c208
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.h24
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c63
-rw-r--r--drivers/platform/x86/intel/pmc/wcl.c486
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig28
-rw-r--r--drivers/platform/x86/intel/pmt/Makefile4
-rw-r--r--drivers/platform/x86/intel/pmt/class.c72
-rw-r--r--drivers/platform/x86/intel/pmt/class.h14
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c461
-rw-r--r--drivers/platform/x86/intel/pmt/discovery-kunit.c116
-rw-r--r--drivers/platform/x86/intel/pmt/discovery.c635
-rw-r--r--drivers/platform/x86/intel/pmt/features.c205
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c112
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c33
-rw-r--r--drivers/platform/x86/intel/sdsi.c34
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c43
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c15
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c131
-rw-r--r--drivers/platform/x86/intel/telemetry/core.c177
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c233
-rw-r--r--drivers/platform/x86/intel/tpmi.c857
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c53
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h1
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c5
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c42
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h20
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c148
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c26
-rw-r--r--drivers/platform/x86/intel/vbtn.c2
-rw-r--r--drivers/platform/x86/intel/vsec.c398
-rw-r--r--drivers/platform/x86/intel/vsec_tpmi.c861
-rw-r--r--drivers/platform/x86/intel_ips.c40
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c140
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c127
-rw-r--r--drivers/platform/x86/lenovo-ymc.c165
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c338
-rw-r--r--drivers/platform/x86/lenovo-yogabook.c573
-rw-r--r--drivers/platform/x86/lenovo/Kconfig276
-rw-r--r--drivers/platform/x86/lenovo/Makefile28
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.c (renamed from drivers/platform/x86/ideapad-laptop.c)222
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.h (renamed from drivers/platform/x86/ideapad-laptop.h)0
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.c (renamed from drivers/platform/x86/think-lmi.c)415
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.h (renamed from drivers/platform/x86/think-lmi.h)23
-rw-r--r--drivers/platform/x86/lenovo/thinkpad_acpi.c (renamed from drivers/platform/x86/thinkpad_acpi.c)375
-rw-r--r--drivers/platform/x86/lenovo/wmi-camera.c146
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.c302
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.h25
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.c196
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.c407
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.c74
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-hotkey-utilities.c224
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.c665
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.h16
-rw-r--r--drivers/platform/x86/lenovo/ymc.c165
-rw-r--r--drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c333
-rw-r--r--drivers/platform/x86/lenovo/yogabook.c573
-rw-r--r--drivers/platform/x86/lg-laptop.c34
-rw-r--r--drivers/platform/x86/meraki-mx100.c404
-rw-r--r--drivers/platform/x86/msi-laptop.c6
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c99
-rw-r--r--drivers/platform/x86/oxpec.c1086
-rw-r--r--drivers/platform/x86/p2sb.c78
-rw-r--r--drivers/platform/x86/panasonic-laptop.c18
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c192
-rw-r--r--drivers/platform/x86/portwell-ec.c460
-rw-r--r--drivers/platform/x86/quickstart.c11
-rw-r--r--drivers/platform/x86/redmi-wmi.c130
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1426
-rw-r--r--drivers/platform/x86/samsung-laptop.c112
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sel3350-platform.c2
-rw-r--r--drivers/platform/x86/serdev_helpers.h60
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c14
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c2
-rw-r--r--drivers/platform/x86/silicom-platform.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c177
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c26
-rw-r--r--drivers/platform/x86/tuxedo/Kconfig8
-rw-r--r--drivers/platform/x86/tuxedo/Makefile8
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Kconfig17
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Makefile10
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_ab.c923
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.c91
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.h109
-rw-r--r--drivers/platform/x86/wmi-bmof.c75
-rw-r--r--drivers/platform/x86/wmi.c274
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig5
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile4
-rw-r--r--drivers/platform/x86/x86-android-tablets/acer.c247
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c131
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c202
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c34
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c297
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c505
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c110
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.h9
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c261
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h42
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c10
-rw-r--r--drivers/platform/x86/xo1-rfkill.c2
-rw-r--r--drivers/platform/x86/xo15-ebook.c10
-rw-r--r--drivers/pmdomain/Kconfig2
-rw-r--r--drivers/pmdomain/Makefile2
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c78
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c109
-rw-r--r--drivers/pmdomain/apple/Kconfig1
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c1
-rw-r--r--drivers/pmdomain/arm/Kconfig6
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c29
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c17
-rw-r--r--drivers/pmdomain/core.c571
-rw-r--r--drivers/pmdomain/governor.c20
-rw-r--r--drivers/pmdomain/imx/gpc.c9
-rw-r--r--drivers/pmdomain/imx/gpcv2.c16
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c13
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c5
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c29
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c2
-rw-r--r--drivers/pmdomain/marvell/Kconfig18
-rw-r--r--drivers/pmdomain/marvell/Makefile3
-rw-r--r--drivers/pmdomain/marvell/pxa1908-power-controller.c274
-rw-r--r--drivers/pmdomain/mediatek/Kconfig12
-rw-r--r--drivers/pmdomain/mediatek/Makefile8
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c144
-rw-r--r--drivers/pmdomain/mediatek/mt6735-pm-domains.h96
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt6893-pm-domains.h585
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8365-pm-domains.h14
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c415
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h78
-rw-r--r--drivers/pmdomain/qcom/cpr.c2
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c150
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c114
-rw-r--r--drivers/pmdomain/renesas/Kconfig124
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c8
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c27
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c6
-rw-r--r--drivers/pmdomain/rockchip/Kconfig3
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c282
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c9
-rw-r--r--drivers/pmdomain/sunxi/Kconfig29
-rw-r--r--drivers/pmdomain/sunxi/Makefile2
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c32
-rw-r--r--drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c208
-rw-r--r--drivers/pmdomain/sunxi/sun55i-pck600.c234
-rw-r--r--drivers/pmdomain/thead/Kconfig13
-rw-r--r--drivers/pmdomain/thead/Makefile2
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c285
-rw-r--r--drivers/pmdomain/ti/Kconfig2
-rw-r--r--drivers/pmdomain/ti/omap_prm.c10
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c130
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c18
-rw-r--r--drivers/pnp/base.h4
-rw-r--r--drivers/pnp/card.c32
-rw-r--r--drivers/pnp/core.c16
-rw-r--r--drivers/pnp/isapnp/core.c4
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/Kconfig35
-rw-r--r--drivers/power/reset/Makefile3
-rw-r--r--drivers/power/reset/as3722-poweroff.c2
-rw-r--r--drivers/power/reset/at91-poweroff.c2
-rw-r--r--drivers/power/reset/at91-reset.c7
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c6
-rw-r--r--drivers/power/reset/gpio-poweroff.c8
-rw-r--r--drivers/power/reset/keystone-reset.c20
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c10
-rw-r--r--drivers/power/reset/macsmc-reboot.c290
-rw-r--r--drivers/power/reset/qcom-pon.c30
-rw-r--r--drivers/power/reset/qnap-poweroff.c2
-rw-r--r--drivers/power/reset/reboot-mode.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c101
-rw-r--r--drivers/power/reset/tdx-ec-poweroff.c150
-rw-r--r--drivers/power/reset/th1520-aon-reboot.c98
-rw-r--r--drivers/power/sequencing/Kconfig9
-rw-r--r--drivers/power/sequencing/Makefile1
-rw-r--r--drivers/power/sequencing/core.c6
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c138
-rw-r--r--drivers/power/sequencing/pwrseq-thead-gpu.c249
-rw-r--r--drivers/power/supply/88pm860x_battery.c8
-rw-r--r--drivers/power/supply/88pm860x_charger.c8
-rw-r--r--drivers/power/supply/Kconfig91
-rw-r--r--drivers/power/supply/Makefile12
-rw-r--r--drivers/power/supply/ab8500_bmdata.c4
-rw-r--r--drivers/power/supply/ab8500_btemp.c12
-rw-r--r--drivers/power/supply/ab8500_chargalg.c16
-rw-r--r--drivers/power/supply/ab8500_charger.c11
-rw-r--r--drivers/power/supply/ab8500_fg.c35
-rw-r--r--drivers/power/supply/acer_a500_battery.c12
-rw-r--r--drivers/power/supply/act8945a_charger.c4
-rw-r--r--drivers/power/supply/adc-battery-helper.c327
-rw-r--r--drivers/power/supply/adc-battery-helper.h62
-rw-r--r--drivers/power/supply/adp5061.c2
-rw-r--r--drivers/power/supply/apm_power.c6
-rw-r--r--drivers/power/supply/axp20x_ac_power.c2
-rw-r--r--drivers/power/supply/axp20x_battery.c87
-rw-r--r--drivers/power/supply/axp20x_usb_power.c37
-rw-r--r--drivers/power/supply/bd99954-charger.c4
-rw-r--r--drivers/power/supply/bq2415x_charger.c42
-rw-r--r--drivers/power/supply/bq24190_charger.c75
-rw-r--r--drivers/power/supply/bq24257_charger.c10
-rw-r--r--drivers/power/supply/bq24735-charger.c2
-rw-r--r--drivers/power/supply/bq2515x_charger.c6
-rw-r--r--drivers/power/supply/bq256xx_charger.c8
-rw-r--r--drivers/power/supply/bq257xx_charger.c755
-rw-r--r--drivers/power/supply/bq25890_charger.c2
-rw-r--r--drivers/power/supply/bq25980_charger.c14
-rw-r--r--drivers/power/supply/bq27xxx_battery.c142
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c13
-rw-r--r--drivers/power/supply/chagall-battery.c291
-rw-r--r--drivers/power/supply/charger-manager.c10
-rw-r--r--drivers/power/supply/collie_battery.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/cpcap-charger.c12
-rw-r--r--drivers/power/supply/cros_charge-control.c241
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c4
-rw-r--r--drivers/power/supply/cw2015_battery.c8
-rw-r--r--drivers/power/supply/da9030_battery.c12
-rw-r--r--drivers/power/supply/da9052-battery.c2
-rw-r--r--drivers/power/supply/da9150-charger.c2
-rw-r--r--drivers/power/supply/da9150-fg.c4
-rw-r--r--drivers/power/supply/ds2760_battery.c58
-rw-r--r--drivers/power/supply/ds2780_battery.c14
-rw-r--r--drivers/power/supply/ds2781_battery.c14
-rw-r--r--drivers/power/supply/ds2782_battery.c89
-rw-r--r--drivers/power/supply/generic-adc-battery.c6
-rw-r--r--drivers/power/supply/gpio-charger.c34
-rw-r--r--drivers/power/supply/huawei-gaokun-battery.c645
-rw-r--r--drivers/power/supply/ingenic-battery.c2
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c389
-rw-r--r--drivers/power/supply/ip5xxx_power.c571
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c5
-rw-r--r--drivers/power/supply/isp1704_charger.c2
-rw-r--r--drivers/power/supply/lego_ev3_battery.c3
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c14
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/lt3651-charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c444
-rw-r--r--drivers/power/supply/max14577_charger.c6
-rw-r--r--drivers/power/supply/max17040_battery.c5
-rw-r--r--drivers/power/supply/max17042_battery.c205
-rw-r--r--drivers/power/supply/max1720x_battery.c128
-rw-r--r--drivers/power/supply/max77650-charger.c4
-rw-r--r--drivers/power/supply/max77693_charger.c4
-rw-r--r--drivers/power/supply/max77705_charger.c665
-rw-r--r--drivers/power/supply/max77976_charger.c15
-rw-r--r--drivers/power/supply/max8903_charger.c2
-rw-r--r--drivers/power/supply/max8925_power.c4
-rw-r--r--drivers/power/supply/max8971_charger.c752
-rw-r--r--drivers/power/supply/mm8013.c4
-rw-r--r--drivers/power/supply/mt6360_charger.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c21
-rw-r--r--drivers/power/supply/olpc_battery.c11
-rw-r--r--drivers/power/supply/pcf50633-charger.c466
-rw-r--r--drivers/power/supply/pm8916_bms_vm.c2
-rw-r--r--drivers/power/supply/pm8916_lbc.c2
-rw-r--r--drivers/power/supply/pmu_battery.c1
-rw-r--r--drivers/power/supply/power_supply.h53
-rw-r--r--drivers/power/supply/power_supply_core.c616
-rw-r--r--drivers/power/supply/power_supply_hwmon.c51
-rw-r--r--drivers/power/supply/power_supply_sysfs.c238
-rw-r--r--drivers/power/supply/qcom_battmgr.c356
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c1050
-rw-r--r--drivers/power/supply/qcom_smbb.c10
-rw-r--r--drivers/power/supply/qcom_smbx.c1052
-rw-r--r--drivers/power/supply/rk817_charger.c120
-rw-r--r--drivers/power/supply/rt5033_battery.c2
-rw-r--r--drivers/power/supply/rt5033_charger.c3
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/rt9467-charger.c49
-rw-r--r--drivers/power/supply/rt9471.c66
-rw-r--r--drivers/power/supply/rx51_battery.c2
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c10
-rw-r--r--drivers/power/supply/sbs-battery.c7
-rw-r--r--drivers/power/supply/sbs-charger.c18
-rw-r--r--drivers/power/supply/sbs-manager.c4
-rw-r--r--drivers/power/supply/sc2731_charger.c4
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c20
-rw-r--r--drivers/power/supply/smb347-charger.c4
-rw-r--r--drivers/power/supply/stc3117_fuel_gauge.c612
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/test_power.c138
-rw-r--r--drivers/power/supply/tps65090-charger.c4
-rw-r--r--drivers/power/supply/tps65217_charger.c4
-rw-r--r--drivers/power/supply/twl4030_charger.c3
-rw-r--r--drivers/power/supply/twl6030_charger.c581
-rw-r--r--drivers/power/supply/ucs1002_power.c4
-rw-r--r--drivers/power/supply/ug3105_battery.c401
-rw-r--r--drivers/power/supply/wm831x_power.c22
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c2
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/dtpm_cpu.c2
-rw-r--r--drivers/powercap/idle_inject.c24
-rw-r--r--drivers/powercap/intel_rapl_common.c26
-rw-r--r--drivers/powercap/intel_rapl_msr.c10
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c11
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/pps/Makefile3
-rw-r--r--drivers/pps/clients/pps-gpio.c21
-rw-r--r--drivers/pps/clients/pps-ktimer.c6
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/pps/clients/pps_parport.c4
-rw-r--r--drivers/pps/generators/Kconfig38
-rw-r--r--drivers/pps/generators/Makefile5
-rw-r--r--drivers/pps/generators/pps_gen-dummy.c96
-rw-r--r--drivers/pps/generators/pps_gen.c344
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/pps/generators/pps_gen_tio.c272
-rw-r--r--drivers/pps/generators/sysfs.c75
-rw-r--r--drivers/pps/kapi.c15
-rw-r--r--drivers/pps/kc.c10
-rw-r--r--drivers/pps/pps.c141
-rw-r--r--drivers/ps3/ps3-lpm.c2
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ps3/ps3stor_lib.c3
-rw-r--r--drivers/ps3/sys-manager-core.c2
-rw-r--r--drivers/ptp/Kconfig45
-rw-r--r--drivers/ptp/Makefile7
-rw-r--r--drivers/ptp/ptp_chardev.c766
-rw-r--r--drivers/ptp/ptp_clock.c168
-rw-r--r--drivers/ptp/ptp_clockmatrix.c18
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_fc3.c8
-rw-r--r--drivers/ptp/ptp_idt82p33.c17
-rw-r--r--drivers/ptp/ptp_ines.c2
-rw-r--r--drivers/ptp/ptp_kvm_x86.c6
-rw-r--r--drivers/ptp/ptp_mock.c2
-rw-r--r--drivers/ptp/ptp_netc.c1043
-rw-r--r--drivers/ptp/ptp_ocp.c105
-rw-r--r--drivers/ptp/ptp_pch.c6
-rw-r--r--drivers/ptp/ptp_private.h20
-rw-r--r--drivers/ptp/ptp_qoriq.c26
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/ptp/ptp_s390.c129
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/ptp/ptp_vclock.c9
-rw-r--r--drivers/ptp/ptp_vmclock.c610
-rw-r--r--drivers/pwm/Kconfig152
-rw-r--r--drivers/pwm/Makefile14
-rw-r--r--drivers/pwm/core.c1416
-rw-r--r--drivers/pwm/pwm-adp5585.c79
-rw-r--r--drivers/pwm/pwm-argon-fan-hat.c109
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c4
-rw-r--r--drivers/pwm/pwm-atmel.c12
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c208
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-clps711x.c12
-rw-r--r--drivers/pwm/pwm-cros-ec.c10
-rw-r--r--drivers/pwm/pwm-dwc-core.c2
-rw-r--r--drivers/pwm/pwm-dwc.c14
-rw-r--r--drivers/pwm/pwm-dwc.h2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c69
-rw-r--r--drivers/pwm/pwm-gpio.c5
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c13
-rw-r--r--drivers/pwm/pwm-imx27.c161
-rw-r--r--drivers/pwm/pwm-loongson.c290
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c14
-rw-r--r--drivers/pwm/pwm-lpss-pci.c11
-rw-r--r--drivers/pwm/pwm-lpss-platform.c2
-rw-r--r--drivers/pwm/pwm-lpss.c5
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-max7360.c209
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c407
-rw-r--r--drivers/pwm/pwm-mediatek.c386
-rw-r--r--drivers/pwm/pwm-meson.c123
-rw-r--r--drivers/pwm/pwm-microchip-core.c19
-rw-r--r--drivers/pwm/pwm-pca9685.c522
-rw-r--r--drivers/pwm/pwm-pxa.c24
-rw-r--r--drivers/pwm/pwm-rcar.c24
-rw-r--r--drivers/pwm/pwm-rockchip.c33
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c447
-rw-r--r--drivers/pwm/pwm-sifive.c52
-rw-r--r--drivers/pwm/pwm-sophgo-sg2042.c301
-rw-r--r--drivers/pwm/pwm-sti.c23
-rw-r--r--drivers/pwm/pwm-stm32-lp.c227
-rw-r--r--drivers/pwm/pwm-stm32.c654
-rw-r--r--drivers/pwm/pwm-stmpe.c25
-rw-r--r--drivers/pwm/pwm-sun4i.c10
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c154
-rw-r--r--drivers/pwm/pwm-twl-led.c49
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c23
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/rapidio/rio-sysfs.c8
-rw-r--r--drivers/rapidio/rio.c103
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c9
-rw-r--r--drivers/ras/amd/atl/Kconfig1
-rw-r--r--drivers/ras/amd/atl/access.c8
-rw-r--r--drivers/ras/amd/atl/internal.h6
-rw-r--r--drivers/ras/amd/atl/umc.c19
-rw-r--r--drivers/ras/amd/fmpm.c9
-rw-r--r--drivers/ras/ras.c1
-rw-r--r--drivers/regulator/Kconfig120
-rw-r--r--drivers/regulator/Makefile11
-rw-r--r--drivers/regulator/act8865-regulator.c2
-rw-r--r--drivers/regulator/ad5398.c30
-rw-r--r--drivers/regulator/adp5055-regulator.c424
-rw-r--r--drivers/regulator/arizona-ldo1.c12
-rw-r--r--drivers/regulator/axp20x-regulator.c37
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1289
-rw-r--r--drivers/regulator/bd718x7-regulator.c29
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c2
-rw-r--r--drivers/regulator/bd96801-regulator.c567
-rw-r--r--drivers/regulator/bq257xx-regulator.c186
-rw-r--r--drivers/regulator/core.c364
-rw-r--r--drivers/regulator/cros-ec-regulator.c4
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/devres.c61
-rw-r--r--drivers/regulator/dummy.c37
-rw-r--r--drivers/regulator/fan53555.c14
-rw-r--r--drivers/regulator/gpio-regulator.c18
-rw-r--r--drivers/regulator/internal.h18
-rw-r--r--drivers/regulator/irq_helpers.c16
-rw-r--r--drivers/regulator/isl6271a-regulator.c4
-rw-r--r--drivers/regulator/max14577-regulator.c5
-rw-r--r--drivers/regulator/max20086-regulator.c17
-rw-r--r--drivers/regulator/max5970-regulator.c21
-rw-r--r--drivers/regulator/max77838-regulator.c221
-rw-r--r--drivers/regulator/mp886x.c3
-rw-r--r--drivers/regulator/mt6370-regulator.c4
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c38
-rw-r--r--drivers/regulator/of_regulator.c91
-rw-r--r--drivers/regulator/pca9450-regulator.c357
-rw-r--r--drivers/regulator/pcf50633-regulator.c124
-rw-r--r--drivers/regulator/pf0900-regulator.c975
-rw-r--r--drivers/regulator/pf530x-regulator.c375
-rw-r--r--drivers/regulator/pf9453-regulator.c880
-rw-r--r--drivers/regulator/qcom-pm8008-regulator.c2
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c1
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c141
-rw-r--r--drivers/regulator/qcom_smd-regulator.c2
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c69
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c7
-rw-r--r--drivers/regulator/rk808-regulator.c47
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c81
-rw-r--r--drivers/regulator/rpi-panel-v2-regulator.c125
-rw-r--r--drivers/regulator/rt5133-regulator.c642
-rw-r--r--drivers/regulator/rt5739.c9
-rw-r--r--drivers/regulator/rt6160-regulator.c19
-rw-r--r--drivers/regulator/rtq2208-regulator.c218
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/regulator/s2dos05-regulator.c165
-rw-r--r--drivers/regulator/s2mps11.c92
-rw-r--r--drivers/regulator/s5m8767.c146
-rw-r--r--drivers/regulator/scmi-regulator.c3
-rw-r--r--drivers/regulator/spacemit-p1.c157
-rw-r--r--drivers/regulator/stm32-vrefbuf.c8
-rw-r--r--drivers/regulator/sy7636a-regulator.c7
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/sy8827n.c3
-rw-r--r--drivers/regulator/tps6286x-regulator.c9
-rw-r--r--drivers/regulator/tps6287x-regulator.c64
-rw-r--r--drivers/regulator/tps65219-regulator.c279
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c273
-rw-r--r--drivers/regulator/uniphier-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c2
-rw-r--r--drivers/regulator/virtual.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c6
-rw-r--r--drivers/remoteproc/Kconfig23
-rw-r--r--drivers/remoteproc/Makefile6
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c86
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c171
-rw-r--r--drivers/remoteproc/imx_rproc.c496
-rw-r--r--drivers/remoteproc/imx_rproc.h9
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c112
-rw-r--r--drivers/remoteproc/meson_mx_ao_arc.c2
-rw-r--r--drivers/remoteproc/mtk_scp.c14
-rw-r--r--drivers/remoteproc/omap_remoteproc.c27
-rw-r--r--drivers/remoteproc/pru_rproc.c9
-rw-r--r--drivers/remoteproc/qcom_q6v5.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c32
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c251
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c709
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c116
-rw-r--r--drivers/remoteproc/qcom_sysmon.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c36
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c7
-rw-r--r--drivers/remoteproc/rcar_rproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c28
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c4
-rw-r--r--drivers/remoteproc/st_remoteproc.c56
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c10
-rw-r--r--drivers/remoteproc/ti_k3_common.c548
-rw-r--r--drivers/remoteproc/ti_k3_common.h118
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c622
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c585
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c1107
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c69
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c108
-rw-r--r--drivers/reset/Kconfig79
-rw-r--r--drivers/reset/Makefile9
-rw-r--r--drivers/reset/amlogic/Kconfig27
-rw-r--r--drivers/reset/amlogic/Makefile4
-rw-r--r--drivers/reset/amlogic/reset-meson-audio-arb.c (renamed from drivers/reset/reset-meson-audio-arb.c)2
-rw-r--r--drivers/reset/amlogic/reset-meson-aux.c81
-rw-r--r--drivers/reset/amlogic/reset-meson-common.c142
-rw-r--r--drivers/reset/amlogic/reset-meson.c105
-rw-r--r--drivers/reset/amlogic/reset-meson.h28
-rw-r--r--drivers/reset/core.c119
-rw-r--r--drivers/reset/reset-aspeed.c253
-rw-r--r--drivers/reset/reset-bcm6345.c1
-rw-r--r--drivers/reset/reset-eyeq.c11
-rw-r--r--drivers/reset/reset-imx-scu.c101
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c78
-rw-r--r--drivers/reset/reset-intel-gw.c1
-rw-r--r--drivers/reset/reset-k230.c371
-rw-r--r--drivers/reset/reset-meson.c159
-rw-r--r--drivers/reset/reset-microchip-sparx5.c47
-rw-r--r--drivers/reset/reset-mpfs.c60
-rw-r--r--drivers/reset/reset-npcm.c78
-rw-r--r--drivers/reset/reset-qcom-pdc.c1
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c3
-rw-r--r--drivers/reset/reset-rzv2h-usb2phy.c236
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/reset/reset-spacemit.c304
-rw-r--r--drivers/reset/reset-th1520.c172
-rw-r--r--drivers/reset/reset-ti-sci.c2
-rw-r--r--drivers/reset/reset-uniphier-glue.c24
-rw-r--r--drivers/rpmsg/qcom_glink_native.c15
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c16
-rw-r--r--drivers/rpmsg/rpmsg_char.c3
-rw-r--r--drivers/rpmsg/rpmsg_core.c72
-rw-r--r--drivers/rpmsg/rpmsg_internal.h6
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c26
-rw-r--r--drivers/rtc/Kconfig128
-rw-r--r--drivers/rtc/Makefile12
-rw-r--r--drivers/rtc/class.c5
-rw-r--r--drivers/rtc/dev.c4
-rw-r--r--drivers/rtc/interface.c36
-rw-r--r--drivers/rtc/lib.c36
-rw-r--r--drivers/rtc/lib_test.c101
-rw-r--r--drivers/rtc/rtc-88pm80x.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c4
-rw-r--r--drivers/rtc/rtc-88pm886.c97
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c35
-rw-r--r--drivers/rtc/rtc-ab8500.c11
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-ac100.c2
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c465
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c2
-rw-r--r--drivers/rtc/rtc-aspeed.c16
-rw-r--r--drivers/rtc/rtc-at91rm9200.c5
-rw-r--r--drivers/rtc/rtc-at91sam9.c4
-rw-r--r--drivers/rtc/rtc-bd70528.c5
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c3
-rw-r--r--drivers/rtc/rtc-cadence.c4
-rw-r--r--drivers/rtc/rtc-cmos.c50
-rw-r--r--drivers/rtc/rtc-cpcap.c4
-rw-r--r--drivers/rtc/rtc-cros-ec.c34
-rw-r--r--drivers/rtc/rtc-cv1800.c218
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-da9063.c31
-rw-r--r--drivers/rtc/rtc-ds1307.c34
-rw-r--r--drivers/rtc/rtc-ds1343.c8
-rw-r--r--drivers/rtc/rtc-ds1685.c6
-rw-r--r--drivers/rtc/rtc-ds2404.c14
-rw-r--r--drivers/rtc/rtc-ds3232.c26
-rw-r--r--drivers/rtc/rtc-efi.c76
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c19
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c4
-rw-r--r--drivers/rtc/rtc-hym8563.c15
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c270
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c3
-rw-r--r--drivers/rtc/rtc-loongson.c27
-rw-r--r--drivers/rtc/rtc-lp8788.c2
-rw-r--r--drivers/rtc/rtc-lpc24xx.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c103
-rw-r--r--drivers/rtc/rtc-m48t59.c26
-rw-r--r--drivers/rtc/rtc-m48t86.c14
-rw-r--r--drivers/rtc/rtc-max31335.c177
-rw-r--r--drivers/rtc/rtc-max77686.c41
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-max8997.c2
-rw-r--r--drivers/rtc/rtc-mc13xxx.c15
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c6
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c14
-rw-r--r--drivers/rtc/rtc-meson.c17
-rw-r--r--drivers/rtc/rtc-mpc5121.c4
-rw-r--r--drivers/rtc/rtc-mpfs.c10
-rw-r--r--drivers/rtc/rtc-mt6397.c32
-rw-r--r--drivers/rtc/rtc-mt7622.c2
-rw-r--r--drivers/rtc/rtc-mv.c6
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-mxc_v2.c4
-rw-r--r--drivers/rtc/rtc-nct3018y.c15
-rw-r--r--drivers/rtc/rtc-nct6694.c297
-rw-r--r--drivers/rtc/rtc-nxp-bbnsm.c9
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-optee.c465
-rw-r--r--drivers/rtc/rtc-palmas.c4
-rw-r--r--drivers/rtc/rtc-pcf2127.c108
-rw-r--r--drivers/rtc/rtc-pcf50633.c284
-rw-r--r--drivers/rtc/rtc-pcf85063.c293
-rw-r--r--drivers/rtc/rtc-pcf8563.c227
-rw-r--r--drivers/rtc/rtc-pic32.c4
-rw-r--r--drivers/rtc/rtc-pl030.c16
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c228
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rc5t583.c4
-rw-r--r--drivers/rtc/rtc-rc5t619.c2
-rw-r--r--drivers/rtc/rtc-renesas-rtca3.c897
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-rtd119x.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c21
-rw-r--r--drivers/rtc/rtc-rv3032.c29
-rw-r--r--drivers/rtc/rtc-rx8581.c85
-rw-r--r--drivers/rtc/rtc-rzn1.c269
-rw-r--r--drivers/rtc/rtc-s32g.c385
-rw-r--r--drivers/rtc/rtc-s35390a.c22
-rw-r--r--drivers/rtc/rtc-s3c.c56
-rw-r--r--drivers/rtc/rtc-s3c.h19
-rw-r--r--drivers/rtc/rtc-s5m.c227
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c4
-rw-r--r--drivers/rtc/rtc-sd2405al.c20
-rw-r--r--drivers/rtc/rtc-sd3078.c71
-rw-r--r--drivers/rtc/rtc-sh.c295
-rw-r--r--drivers/rtc/rtc-spacemit-p1.c167
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-st-lpc.c5
-rw-r--r--drivers/rtc/rtc-stm32.c37
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-sunplus.c6
-rw-r--r--drivers/rtc/rtc-tegra.c4
-rw-r--r--drivers/rtc/rtc-test.c8
-rw-r--r--drivers/rtc/rtc-tps6586x.c5
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tps6594.c2
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c4
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/rtc/rtc-xgene.c6
-rw-r--r--drivers/rtc/rtc-zynqmp.c29
-rw-r--r--drivers/rtc/sysfs.c64
-rw-r--r--drivers/rtc/test_rtc_lib.c106
-rw-r--r--drivers/s390/block/Kconfig5
-rw-r--r--drivers/s390/block/dasd.c42
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_diag.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c5
-rw-r--r--drivers/s390/block/dasd_eer.c1
-rw-r--r--drivers/s390/block/dasd_erp.c1
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_ioctl.c1
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c82
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/con3215.c6
-rw-r--r--drivers/s390/char/con3270.c48
-rw-r--r--drivers/s390/char/diag_ftp.c4
-rw-r--r--drivers/s390/char/hmcdrv_dev.c19
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c6
-rw-r--r--drivers/s390/char/keyboard.c1
-rw-r--r--drivers/s390/char/monreader.c3
-rw-r--r--drivers/s390/char/monwriter.c3
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.c38
-rw-r--r--drivers/s390/char/sclp.h45
-rw-r--r--drivers/s390/char/sclp_cmd.c478
-rw-r--r--drivers/s390/char/sclp_con.c19
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c8
-rw-r--r--drivers/s390/char/sclp_early.c10
-rw-r--r--drivers/s390/char/sclp_early_core.c15
-rw-r--r--drivers/s390/char/sclp_mem.c399
-rw-r--r--drivers/s390/char/sclp_ocf.c5
-rw-r--r--drivers/s390/char/sclp_pci.c19
-rw-r--r--drivers/s390/char/sclp_sd.c3
-rw-r--r--drivers/s390/char/sclp_tty.c16
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c3
-rw-r--r--drivers/s390/char/tape_class.c1
-rw-r--r--drivers/s390/char/tape_core.c21
-rw-r--r--drivers/s390/char/tape_std.c6
-rw-r--r--drivers/s390/char/uvdevice.c153
-rw-r--r--drivers/s390/char/vmcp.c6
-rw-r--r--drivers/s390/char/vmlogrdr.c19
-rw-r--r--drivers/s390/char/vmur.c5
-rw-r--r--drivers/s390/cio/airq.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/chp.c58
-rw-r--r--drivers/s390/cio/chp.h1
-rw-r--r--drivers/s390/cio/chsc.c32
-rw-r--r--drivers/s390/cio/chsc.h16
-rw-r--r--drivers/s390/cio/cio.c7
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/cio/cmf.c17
-rw-r--r--drivers/s390/cio/crw.c5
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/device.c77
-rw-r--r--drivers/s390/cio/device_fsm.c5
-rw-r--r--drivers/s390/cio/device_id.c3
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c5
-rw-r--r--drivers/s390/cio/fcx.c1
-rw-r--r--drivers/s390/cio/ioasm.c122
-rw-r--r--drivers/s390/cio/isc.c1
-rw-r--r--drivers/s390/cio/itcw.c1
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_debug.c1
-rw-r--r--drivers/s390/cio/qdio_main.c30
-rw-r--r--drivers/s390/cio/qdio_setup.c21
-rw-r--r--drivers/s390/cio/scm.c3
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h2
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/ap_bus.c85
-rw-r--r--drivers/s390/crypto/ap_bus.h32
-rw-r--r--drivers/s390/crypto/ap_queue.c3
-rw-r--r--drivers/s390/crypto/pkey_api.c53
-rw-r--r--drivers/s390/crypto/pkey_base.c49
-rw-r--r--drivers/s390/crypto/pkey_base.h73
-rw-r--r--drivers/s390/crypto/pkey_cca.c141
-rw-r--r--drivers/s390/crypto/pkey_ep11.c118
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c248
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c125
-rw-r--r--drivers/s390/crypto/pkey_uv.c318
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c219
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c168
-rw-r--r--drivers/s390/crypto/zcrypt_api.h16
-rw-r--r--drivers/s390/crypto/zcrypt_card.c1
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c487
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h50
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c39
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c457
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h27
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c36
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c109
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c1
-rw-r--r--drivers/s390/net/Kconfig26
-rw-r--r--drivers/s390/net/Makefile2
-rw-r--r--drivers/s390/net/ctcm_mpc.c3
-rw-r--r--drivers/s390/net/fsm.c7
-rw-r--r--drivers/s390/net/ism.h53
-rw-r--r--drivers/s390/net/ism_drv.c572
-rw-r--r--drivers/s390/net/lcs.c2385
-rw-r--r--drivers/s390/net/lcs.h342
-rw-r--r--drivers/s390/net/netiucv.c2083
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/s390/net/smsgiucv.c4
-rw-r--r--drivers/s390/net/smsgiucv_app.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c16
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c10
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c5
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c15
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c86
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c20
-rw-r--r--drivers/sbus/char/bbc_i2c.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c2
-rw-r--r--drivers/sbus/char/uctrl.c2
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/3w-sas.c19
-rw-r--r--drivers/scsi/3w-xxxx.c12
-rw-r--r--drivers/scsi/53c700.c19
-rw-r--r--drivers/scsi/BusLogic.c17
-rw-r--r--drivers/scsi/BusLogic.h5
-rw-r--r--drivers/scsi/Kconfig8
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/a3000.c6
-rw-r--r--drivers/scsi/a4000t.c6
-rw-r--r--drivers/scsi/aacraid/aachba.c4
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c131
-rw-r--r--drivers/scsi/aacraid/linit.c16
-rw-r--r--drivers/scsi/advansys.c27
-rw-r--r--drivers/scsi/aha152x.c7
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c15
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c12
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c10
-rw-r--r--drivers/scsi/am53c974.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c44
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/atari_scsi.c2
-rw-r--r--drivers/scsi/atp870u.c4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c10
-rw-r--r--drivers/scsi/bfa/bfa.h10
-rw-r--r--drivers/scsi/bfa/bfa_core.c36
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h22
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c482
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h72
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c9
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h12
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c142
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c36
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c21
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h2
-rw-r--r--drivers/scsi/bfa/bfa_modules.h1
-rw-r--r--drivers/scsi/bfa/bfa_svc.c72
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c38
-rw-r--r--drivers/scsi/bfa/bfad_drv.h1
-rw-r--r--drivers/scsi/bfa/bfad_im.c27
-rw-r--r--drivers/scsi/bfa/bfi.h2
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c8
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c7
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c8
-rw-r--r--drivers/scsi/bvme6000_scsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c8
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_mb.c4
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c20
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/cxlflash/Kconfig13
-rw-r--r--drivers/scsi/cxlflash/Makefile5
-rw-r--r--drivers/scsi/cxlflash/backend.h48
-rw-r--r--drivers/scsi/cxlflash/common.h340
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c177
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c278
-rw-r--r--drivers/scsi/cxlflash/main.c3968
-rw-r--r--drivers/scsi/cxlflash/main.h129
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1399
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h72
-rw-r--r--drivers/scsi/cxlflash/sislite.h560
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2218
-rw-r--r--drivers/scsi/cxlflash/superpipe.h150
-rw-r--r--drivers/scsi/cxlflash/vlun.c1336
-rw-r--r--drivers/scsi/cxlflash/vlun.h82
-rw-r--r--drivers/scsi/dc395x.c725
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c6
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c5
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c2
-rw-r--r--drivers/scsi/elx/efct/efct_xport.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_node.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c6
-rw-r--r--drivers/scsi/esas2r/esas2r.h16
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c28
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c17
-rw-r--r--drivers/scsi/esp_scsi.c14
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c26
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c6
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c2
-rw-r--r--drivers/scsi/fdomain.c4
-rw-r--r--drivers/scsi/fdomain_pci.c2
-rw-r--r--drivers/scsi/fnic/Makefile5
-rw-r--r--drivers/scsi/fnic/fdls_disc.c5093
-rw-r--r--drivers/scsi/fnic/fdls_fc.h253
-rw-r--r--drivers/scsi/fnic/fip.c1005
-rw-r--r--drivers/scsi/fnic/fip.h159
-rw-r--r--drivers/scsi/fnic/fnic.h288
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c12
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c11
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c1744
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h435
-rw-r--r--drivers/scsi/fnic/fnic_fip.h48
-rw-r--r--drivers/scsi/fnic/fnic_io.h14
-rw-r--r--drivers/scsi/fnic/fnic_isr.c28
-rw-r--r--drivers/scsi/fnic/fnic_main.c760
-rw-r--r--drivers/scsi/fnic/fnic_pci_subsys_devid.c131
-rw-r--r--drivers/scsi/fnic/fnic_res.c77
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1161
-rw-r--r--drivers/scsi/fnic/fnic_stats.h49
-rw-r--r--drivers/scsi/fnic/fnic_trace.c97
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h61
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c181
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c30
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c76
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c492
-rw-r--r--drivers/scsi/hosts.c18
-rw-r--r--drivers/scsi/hpsa.c92
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c38
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c17
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c6
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c6
-rw-r--r--drivers/scsi/imm.c3
-rw-r--r--drivers/scsi/initio.c6
-rw-r--r--drivers/scsi/ipr.c74
-rw-r--r--drivers/scsi/ips.c16
-rw-r--r--drivers/scsi/ips.h5
-rw-r--r--drivers/scsi/isci/host.c18
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/isci/isci.h15
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/port.c2
-rw-r--r--drivers/scsi/isci/port_config.c4
-rw-r--r--drivers/scsi/isci/remote_device.c61
-rw-r--r--drivers/scsi/isci/remote_device.h34
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c66
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_encode.h2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c14
-rw-r--r--drivers/scsi/libiscsi.c14
-rw-r--r--drivers/scsi/libiscsi_tcp.c91
-rw-r--r--drivers/scsi/libsas/sas_ata.c14
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c7
-rw-r--r--drivers/scsi/libsas/sas_internal.h78
-rw-r--r--drivers/scsi/libsas/sas_phy.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c13
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c23
-rw-r--r--drivers/scsi/lpfc/lpfc.h78
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c150
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c58
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c670
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h71
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c567
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c398
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h111
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c243
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c409
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c109
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c249
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c34
-rw-r--r--drivers/scsi/mac_esp.c2
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid.c16
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c80
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h42
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h11
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h21
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_tool.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h20
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h54
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c183
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c318
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c244
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c19
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h54
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c43
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h14
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c79
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c277
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c57
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c19
-rw-r--r--drivers/scsi/mvme16x_scsi.c2
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h4
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/mvsas/mv_init.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.c20
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/mvumi.c7
-rw-r--r--drivers/scsi/myrb.c27
-rw-r--r--drivers/scsi/myrs.c21
-rw-r--r--drivers/scsi/ncr53c8xx.c11
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c26
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c16
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c15
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c126
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h20
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c132
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h4
-rw-r--r--drivers/scsi/pmcraid.c36
-rw-r--r--drivers/scsi/ppa.c3
-rw-r--r--drivers/scsi/ps3rom.c5
-rw-r--r--drivers/scsi/qedf/qedf_attr.c6
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c13
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c22
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h12
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c19
-rw-r--r--drivers/scsi/qla1280.c16
-rw-r--r--drivers/scsi/qla1280.h12
-rw-r--r--drivers/scsi/qla2xxx/Kconfig6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c124
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c144
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c62
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c129
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c19
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/qlogicpti.c7
-rw-r--r--drivers/scsi/scsi.c75
-rw-r--r--drivers/scsi/scsi_debug.c1309
-rw-r--r--drivers/scsi/scsi_devinfo.c40
-rw-r--r--drivers/scsi/scsi_error.c48
-rw-r--r--drivers/scsi/scsi_ioctl.c37
-rw-r--r--drivers/scsi/scsi_lib.c74
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c59
-rw-r--r--drivers/scsi/scsi_sysctl.c6
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_transport_fc.c74
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c61
-rw-r--r--drivers/scsi/scsi_transport_sas.c70
-rw-r--r--drivers/scsi/scsi_transport_spi.c3
-rw-r--r--drivers/scsi/scsi_transport_srp.c2
-rw-r--r--drivers/scsi/scsicam.c16
-rw-r--r--drivers/scsi/sd.c118
-rw-r--r--drivers/scsi/sd_dif.c3
-rw-r--r--drivers/scsi/sd_zbc.c11
-rw-r--r--drivers/scsi/sg.c19
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c200
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/snic/snic_main.c14
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/st.c122
-rw-r--r--drivers/scsi/st.h7
-rw-r--r--drivers/scsi/stex.c8
-rw-r--r--drivers/scsi/storvsc_drv.c57
-rw-r--r--drivers/scsi/sun3_scsi.c10
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/sun_esp.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c21
-rw-r--r--drivers/scsi/virtio_scsi.c6
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/scsi/xen-scsifront.c11
-rw-r--r--drivers/sh/clk/cpg.c25
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/irqdomain.c5
-rw-r--r--drivers/sh/intc/virq-debugfs.c1
-rw-r--r--drivers/siox/siox-bus-gpio.c3
-rw-r--r--drivers/slimbus/Kconfig7
-rw-r--r--drivers/slimbus/Makefile3
-rw-r--r--drivers/slimbus/core.c17
-rw-r--r--drivers/slimbus/messaging.c11
-rw-r--r--drivers/slimbus/qcom-ctrl.c735
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c4
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c461
-rw-r--r--drivers/soc/apple/Kconfig3
-rw-r--r--drivers/soc/apple/mailbox.c19
-rw-r--r--drivers/soc/apple/rtkit-internal.h1
-rw-r--r--drivers/soc/apple/rtkit.c115
-rw-r--r--drivers/soc/apple/sart.c60
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c16
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c229
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c16
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c4
-rw-r--r--drivers/soc/aspeed/aspeed-uart-routing.c2
-rw-r--r--drivers/soc/atmel/soc.c7
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h2
-rw-r--r--drivers/soc/dove/pmu.c7
-rw-r--r--drivers/soc/fsl/Kconfig2
-rw-r--r--drivers/soc/fsl/dpaa2-console.c2
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c8
-rw-r--r--drivers/soc/fsl/qe/gpio.c149
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c24
-rw-r--r--drivers/soc/fsl/qe/qmc.c50
-rw-r--r--drivers/soc/fsl/qe/tsa.c30
-rw-r--r--drivers/soc/fsl/rcpm.c1
-rw-r--r--drivers/soc/fujitsu/a64fx-diag.c2
-rw-r--r--drivers/soc/hisilicon/Kconfig7
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c532
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h35
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx8m.c293
-rw-r--r--drivers/soc/imx/soc-imx9.c128
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c2
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c23
-rw-r--r--drivers/soc/loongson/loongson2_guts.c7
-rw-r--r--drivers/soc/mediatek/Kconfig11
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt8167-mmsys.h31
-rw-r--r--drivers/soc/mediatek/mt8173-mmsys.h99
-rw-r--r--drivers/soc/mediatek/mt8183-mmsys.h50
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h88
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h266
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h71
-rw-r--r--drivers/soc/mediatek/mt8195-mmsys.h632
-rw-r--r--drivers/soc/mediatek/mt8365-mmsys.h84
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c244
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c21
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c578
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c2
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h14
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c115
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c4
-rw-r--r--drivers/soc/mediatek/mtk-regulator-coupler.c1
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c24
-rw-r--r--drivers/soc/mediatek/mtk-svs.c27
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c2
-rw-r--r--drivers/soc/pxa/ssp.c2
-rw-r--r--drivers/soc/qcom/Kconfig10
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/icc-bwmon.c5
-rw-r--r--drivers/soc/qcom/ice.c407
-rw-r--r--drivers/soc/qcom/llcc-qcom.c3824
-rw-r--r--drivers/soc/qcom/mdt_loader.c85
-rw-r--r--drivers/soc/qcom/ocmem.c2
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/pdr_internal.h1
-rw-r--r--drivers/soc/qcom/pmic_glink.c82
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c41
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c522
-rw-r--r--drivers/soc/qcom/qcom-pbs.c22
-rw-r--r--drivers/soc/qcom/qcom_aoss.c5
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c2
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c27
-rw-r--r--drivers/soc/qcom/qcom_pdr_msg.c3
-rw-r--r--drivers/soc/qcom/qcom_stats.c135
-rw-r--r--drivers/soc/qcom/qmi_encdec.c52
-rw-r--r--drivers/soc/qcom/qmi_interface.c8
-rw-r--r--drivers/soc/qcom/ramp_controller.c5
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c4
-rw-r--r--drivers/soc/qcom/rpm-proc.c2
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c18
-rw-r--r--drivers/soc/qcom/smem.c22
-rw-r--r--drivers/soc/qcom/smem_state.c15
-rw-r--r--drivers/soc/qcom/smp2p.c17
-rw-r--r--drivers/soc/qcom/smsm.c8
-rw-r--r--drivers/soc/qcom/socinfo.c32
-rw-r--r--drivers/soc/qcom/ubwc_config.c299
-rw-r--r--drivers/soc/renesas/Kconfig388
-rw-r--r--drivers/soc/renesas/Makefile5
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c6
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c24
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c68
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c75
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c68
-rw-r--r--drivers/soc/renesas/renesas-soc.c45
-rw-r--r--drivers/soc/renesas/rz-sysc.c168
-rw-r--r--drivers/soc/renesas/rz-sysc.h49
-rw-r--r--drivers/soc/rockchip/grf.c35
-rw-r--r--drivers/soc/rockchip/io-domain.c8
-rw-r--r--drivers/soc/samsung/exynos-asv.c1
-rw-r--r--drivers/soc/samsung/exynos-chipid.c12
-rw-r--r--drivers/soc/samsung/exynos-pmu.c313
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/samsung/exynos-usi.c108
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c1
-rw-r--r--drivers/soc/sophgo/Kconfig34
-rw-r--r--drivers/soc/sophgo/Makefile4
-rw-r--r--drivers/soc/sophgo/cv1800-rtcsys.c63
-rw-r--r--drivers/soc/sophgo/sg2044-topsys.c45
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c14
-rw-r--r--drivers/soc/tegra/Kconfig18
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c20
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c36
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c760
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c139
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c1
-rw-r--r--drivers/soc/tegra/pmc.c205
-rw-r--r--drivers/soc/ti/k3-ringacc.c4
-rw-r--r--drivers/soc/ti/k3-socinfo.c25
-rw-r--r--drivers/soc/ti/knav_dma.c4
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c11
-rw-r--r--drivers/soc/ti/pm33xx.c4
-rw-r--r--drivers/soc/ti/pruss.c6
-rw-r--r--drivers/soc/ti/smartreflex.c6
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c10
-rw-r--r--drivers/soc/vt8500/Kconfig19
-rw-r--r--drivers/soc/vt8500/Makefile2
-rw-r--r--drivers/soc/vt8500/wmt-socinfo.c125
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c6
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c2
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/amd_init.c19
-rw-r--r--drivers/soundwire/amd_manager.c267
-rw-r--r--drivers/soundwire/amd_manager.h42
-rw-r--r--drivers/soundwire/bus.c173
-rw-r--r--drivers/soundwire/bus.h21
-rw-r--r--drivers/soundwire/bus_type.c16
-rw-r--r--drivers/soundwire/cadence_master.c698
-rw-r--r--drivers/soundwire/cadence_master.h21
-rw-r--r--drivers/soundwire/debugfs.c89
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c372
-rw-r--r--drivers/soundwire/intel.c2
-rw-r--r--drivers/soundwire/intel.h25
-rw-r--r--drivers/soundwire/intel_ace2x.c324
-rw-r--r--drivers/soundwire/intel_ace2x_debugfs.c6
-rw-r--r--drivers/soundwire/intel_auxdevice.c94
-rw-r--r--drivers/soundwire/intel_bus_common.c6
-rw-r--r--drivers/soundwire/intel_init.c24
-rw-r--r--drivers/soundwire/irq.c16
-rw-r--r--drivers/soundwire/irq.h5
-rw-r--r--drivers/soundwire/mipi_disco.c188
-rw-r--r--drivers/soundwire/qcom.c17
-rw-r--r--drivers/soundwire/slave.c21
-rw-r--r--drivers/soundwire/stream.c216
-rw-r--r--drivers/soundwire/sysfs_slave.c2
-rw-r--r--drivers/spi/Kconfig134
-rw-r--r--drivers/spi/Makefile17
-rw-r--r--drivers/spi/atmel-quadspi.c1103
-rw-r--r--drivers/spi/spi-airoha-snfi.c154
-rw-r--r--drivers/spi/spi-altera-platform.c1
-rw-r--r--drivers/spi/spi-amd-pci.c69
-rw-r--r--drivers/spi/spi-amd.c516
-rw-r--r--drivers/spi/spi-amd.h44
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c7
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c1222
-rw-r--r--drivers/spi/spi-amlogic-spisg.c888
-rw-r--r--drivers/spi/spi-apple.c531
-rw-r--r--drivers/spi/spi-ar934x.c2
-rw-r--r--drivers/spi/spi-aspeed-smc.c19
-rw-r--r--drivers/spi/spi-at91-usart.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c80
-rw-r--r--drivers/spi/spi-au1550.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c434
-rw-r--r--drivers/spi/spi-bcm2835.c22
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c2
-rw-r--r--drivers/spi/spi-brcmstb-qspi.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c196
-rw-r--r--drivers/spi/spi-cadence-xspi.c2
-rw-r--r--drivers/spi/spi-cadence.c3
-rw-r--r--drivers/spi/spi-cavium-octeon.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c4
-rw-r--r--drivers/spi/spi-ch341.c2
-rw-r--r--drivers/spi/spi-coldfire-qspi.c2
-rw-r--r--drivers/spi/spi-cs42l43.c52
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dln2.c2
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c26
-rw-r--r--drivers/spi/spi-dw-dma.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c4
-rw-r--r--drivers/spi/spi-dw-pci.c11
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-fsi.c13
-rw-r--r--drivers/spi/spi-fsl-dspi.c619
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c96
-rw-r--r--drivers/spi/spi-fsl-qspi.c122
-rw-r--r--drivers/spi/spi-fsl-spi.c6
-rw-r--r--drivers/spi/spi-geni-qcom.c14
-rw-r--r--drivers/spi/spi-gpio.c63
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c2
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c125
-rw-r--r--drivers/spi/spi-intel-pci.c11
-rw-r--r--drivers/spi/spi-intel-platform.c10
-rw-r--r--drivers/spi/spi-intel.c84
-rw-r--r--drivers/spi/spi-intel.h4
-rw-r--r--drivers/spi/spi-iproc-qspi.c2
-rw-r--r--drivers/spi/spi-kspi2.c431
-rw-r--r--drivers/spi/spi-lantiq-ssc.c4
-rw-r--r--drivers/spi/spi-ljca.c4
-rw-r--r--drivers/spi/spi-loongson-core.c5
-rw-r--r--drivers/spi/spi-loongson-pci.c7
-rw-r--r--drivers/spi/spi-loongson-plat.c2
-rw-r--r--drivers/spi/spi-loopback-test.c22
-rw-r--r--drivers/spi/spi-mem.c103
-rw-r--r--drivers/spi/spi-meson-spicc.c243
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c237
-rw-r--r--drivers/spi/spi-microchip-core.c46
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c3
-rw-r--r--drivers/spi/spi-mt65xx.c67
-rw-r--r--drivers/spi/spi-mtk-nor.c3
-rw-r--r--drivers/spi/spi-mtk-snfi.c10
-rw-r--r--drivers/spi/spi-mux.c4
-rw-r--r--drivers/spi/spi-mxic.c32
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-npcm-fiu.c19
-rw-r--r--drivers/spi/spi-npcm-pspi.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c309
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c62
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c169
-rw-r--r--drivers/spi/spi-offload.c465
-rw-r--r--drivers/spi/spi-omap-uwire.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c41
-rw-r--r--drivers/spi/spi-orion.c2
-rw-r--r--drivers/spi/spi-pci1xxxx.c341
-rw-r--r--drivers/spi/spi-pic32-sqi.c4
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-pl022.c15
-rw-r--r--drivers/spi/spi-ppc4xx.c2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c10
-rw-r--r--drivers/spi/spi-pxa2xx-platform.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c94
-rw-r--r--drivers/spi/spi-qcom-qspi.c4
-rw-r--r--drivers/spi/spi-qpic-snand.c1653
-rw-r--r--drivers/spi/spi-qup.c2
-rw-r--r--drivers/spi/spi-rb4xx.c36
-rw-r--r--drivers/spi/spi-realtek-rtl-snand.c418
-rw-r--r--drivers/spi/spi-rockchip-sfc.c257
-rw-r--r--drivers/spi/spi-rockchip.c75
-rw-r--r--drivers/spi/spi-rpc-if.c30
-rw-r--r--drivers/spi/spi-rspi.c11
-rw-r--r--drivers/spi/spi-rzv2h-rspi.c466
-rw-r--r--drivers/spi/spi-rzv2m-csi.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c30
-rw-r--r--drivers/spi/spi-sc18is602.c34
-rw-r--r--drivers/spi/spi-sg2044-nor.c511
-rw-r--r--drivers/spi/spi-sh-hspi.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c410
-rw-r--r--drivers/spi/spi-sh-sci.c2
-rw-r--r--drivers/spi/spi-sh.c2
-rw-r--r--drivers/spi/spi-sifive.c2
-rw-r--r--drivers/spi/spi-slave-mt27xx.c10
-rw-r--r--drivers/spi/spi-sn-f-ospi.c13
-rw-r--r--drivers/spi/spi-sprd.c5
-rw-r--r--drivers/spi/spi-st-ssc4.c16
-rw-r--r--drivers/spi/spi-stm32-ospi.c1068
-rw-r--r--drivers/spi/spi-stm32-qspi.c14
-rw-r--r--drivers/spi/spi-stm32.c321
-rw-r--r--drivers/spi/spi-sun4i.c8
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c8
-rw-r--r--drivers/spi/spi-synquacer.c2
-rw-r--r--drivers/spi/spi-tegra114.c8
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c4
-rw-r--r--drivers/spi/spi-tegra210-quad.c300
-rw-r--r--drivers/spi/spi-ti-qspi.c26
-rw-r--r--drivers/spi/spi-topcliff-pch.c2
-rw-r--r--drivers/spi/spi-uniphier.c2
-rw-r--r--drivers/spi/spi-virtio.c431
-rw-r--r--drivers/spi/spi-xcomm.c6
-rw-r--r--drivers/spi/spi-xilinx.c7
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/spi/spi-zynq-qspi.c32
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c191
-rw-r--r--drivers/spi/spi.c291
-rw-r--r--drivers/spi/spidev.c34
-rw-r--r--drivers/spmi/Kconfig9
-rw-r--r--drivers/spmi/Makefile1
-rw-r--r--drivers/spmi/hisi-spmi-controller.c3
-rw-r--r--drivers/spmi/spmi-apple-controller.c168
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c2
-rw-r--r--drivers/spmi/spmi-pmic-arb.c7
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/ssb/driver_gpio.c20
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/Makefile9
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c334
-rw-r--r--drivers/staging/fbtft/Kconfig36
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c3
-rw-r--r--drivers/staging/fbtft/fbtft-core.c42
-rw-r--r--drivers/staging/fbtft/fbtft.h2
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev31
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev62
-rw-r--r--drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt71
-rw-r--r--drivers/staging/fieldbus/Documentation/fieldbus_dev.txt66
-rw-r--r--drivers/staging/fieldbus/Kconfig19
-rw-r--r--drivers/staging/fieldbus/Makefile7
-rw-r--r--drivers/staging/fieldbus/TODO5
-rw-r--r--drivers/staging/fieldbus/anybuss/Kconfig41
-rw-r--r--drivers/staging/fieldbus/anybuss/Makefile10
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h95
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-controller.h47
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c379
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c224
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c1452
-rw-r--r--drivers/staging/fieldbus/dev_core.c344
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h114
-rw-r--r--drivers/staging/gdm724x/Kconfig16
-rw-r--r--drivers/staging/gdm724x/Makefile8
-rw-r--r--drivers/staging/gdm724x/TODO16
-rw-r--r--drivers/staging/gdm724x/gdm_endian.c37
-rw-r--r--drivers/staging/gdm724x/gdm_endian.h30
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c937
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h71
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c668
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h85
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c316
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h60
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c1012
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h99
-rw-r--r--drivers/staging/gdm724x/hci.h45
-rw-r--r--drivers/staging/gdm724x/hci_packet.h82
-rw-r--r--drivers/staging/gdm724x/netlink_k.c128
-rw-r--r--drivers/staging/gdm724x/netlink_k.h16
-rw-r--r--drivers/staging/gpib/Kconfig255
-rw-r--r--drivers/staging/gpib/Makefile20
-rw-r--r--drivers/staging/gpib/TODO24
-rw-r--r--drivers/staging/gpib/agilent_82350b/Makefile2
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c894
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.h157
-rw-r--r--drivers/staging/gpib/agilent_82357a/Makefile4
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.c1691
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.h182
-rw-r--r--drivers/staging/gpib/cb7210/Makefile3
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.c1598
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.h203
-rw-r--r--drivers/staging/gpib/cec/Makefile3
-rw-r--r--drivers/staging/gpib/cec/cec.h20
-rw-r--r--drivers/staging/gpib/cec/cec_gpib.c393
-rw-r--r--drivers/staging/gpib/common/Makefile6
-rw-r--r--drivers/staging/gpib/common/gpib_os.c2271
-rw-r--r--drivers/staging/gpib/common/iblib.c717
-rw-r--r--drivers/staging/gpib/common/ibsys.h34
-rw-r--r--drivers/staging/gpib/eastwood/Makefile3
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.c1180
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.h146
-rw-r--r--drivers/staging/gpib/fmh_gpib/Makefile2
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c1749
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.h177
-rw-r--r--drivers/staging/gpib/gpio/Makefile4
-rw-r--r--drivers/staging/gpib/gpio/gpib_bitbang.c1469
-rw-r--r--drivers/staging/gpib/hp_82335/Makefile4
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.c371
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.h52
-rw-r--r--drivers/staging/gpib/hp_82341/Makefile2
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.c907
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.h165
-rw-r--r--drivers/staging/gpib/include/amcc5920.h49
-rw-r--r--drivers/staging/gpib/include/amccs5933.h59
-rw-r--r--drivers/staging/gpib/include/gpibP.h41
-rw-r--r--drivers/staging/gpib/include/gpib_cmd.h112
-rw-r--r--drivers/staging/gpib/include/gpib_pci_ids.h23
-rw-r--r--drivers/staging/gpib/include/gpib_proto.h49
-rw-r--r--drivers/staging/gpib/include/gpib_state_machines.h23
-rw-r--r--drivers/staging/gpib/include/gpib_types.h381
-rw-r--r--drivers/staging/gpib/include/nec7210.h141
-rw-r--r--drivers/staging/gpib/include/nec7210_registers.h218
-rw-r--r--drivers/staging/gpib/include/plx9050.h72
-rw-r--r--drivers/staging/gpib/include/quancom_pci.h22
-rw-r--r--drivers/staging/gpib/include/tms9914.h280
-rw-r--r--drivers/staging/gpib/include/tnt4882_registers.h192
-rw-r--r--drivers/staging/gpib/ines/Makefile3
-rw-r--r--drivers/staging/gpib/ines/ines.h165
-rw-r--r--drivers/staging/gpib/ines/ines_gpib.c1500
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/Makefile3
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c2025
-rw-r--r--drivers/staging/gpib/nec7210/Makefile4
-rw-r--r--drivers/staging/gpib/nec7210/board.h19
-rw-r--r--drivers/staging/gpib/nec7210/nec7210.c1121
-rw-r--r--drivers/staging/gpib/ni_usb/Makefile4
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c2671
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.h226
-rw-r--r--drivers/staging/gpib/pc2/Makefile5
-rw-r--r--drivers/staging/gpib/pc2/pc2_gpib.c684
-rw-r--r--drivers/staging/gpib/tms9914/Makefile6
-rw-r--r--drivers/staging/gpib/tms9914/tms9914.c914
-rw-r--r--drivers/staging/gpib/tnt4882/Makefile6
-rw-r--r--drivers/staging/gpib/tnt4882/mite.c133
-rw-r--r--drivers/staging/gpib/tnt4882/mite.h234
-rw-r--r--drivers/staging/gpib/tnt4882/tnt4882_gpib.c1838
-rw-r--r--drivers/staging/gpib/uapi/gpib.h104
-rw-r--r--drivers/staging/gpib/uapi/gpib_ioctl.h167
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c28
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/camera.c19
-rw-r--r--drivers/staging/greybus/fw-management.c48
-rw-r--r--drivers/staging/greybus/gbphy.c6
-rw-r--r--drivers/staging/greybus/gpio.c51
-rw-r--r--drivers/staging/greybus/power_supply.c14
-rw-r--r--drivers/staging/greybus/uart.c13
-rw-r--r--drivers/staging/iio/TODO5
-rw-r--r--drivers/staging/iio/accel/Kconfig12
-rw-r--r--drivers/staging/iio/accel/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16203.c4
-rw-r--r--drivers/staging/iio/accel/adis16240.c443
-rw-r--r--drivers/staging/iio/adc/ad7816.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c150
-rw-r--r--drivers/staging/iio/frequency/ad9832.h1
-rw-r--r--drivers/staging/iio/frequency/ad9834.c28
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c50
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/atomisp/Kconfig1
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/TODO2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig30
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c722
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c12
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c211
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1618
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h29
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1779
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h27
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm.h11
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_bo.h15
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_common.h11
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h11
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h9
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h15
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h28
-rw-r--r--drivers/staging/media/atomisp/include/mmu/isp_mmu.h11
-rw-r--r--drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_common.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h255
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c233
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c166
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c214
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tables.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_trace_event.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h11
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c9
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c9
-rw-r--r--drivers/staging/media/atomisp/pci/bits.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/src/util.c9
-rw-r--r--drivers/staging/media/atomisp/pci/cell_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_trace.h9
-rw-r--r--drivers/staging/media/atomisp/pci/dma_v2_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/gdc_v2_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/gp_timer_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/gpio_block_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c11
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h11
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h8
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h14
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c102
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c20
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_3a.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_buffer.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_control.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.c9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_dvs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_err.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frac.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_format.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_host_data.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_input_port.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.c9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.c9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.c9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_metadata.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_morph.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h11
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_prbs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_properties.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_shading.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_format.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_timer.h8
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version_data.h9
-rw-r--r--drivers/staging/media/atomisp/pci/if_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/input_selector_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/input_switch_2400_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/irq_controller_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/irq_types_hrt.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h13
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h15
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c13
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c15
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h11
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h11
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h165
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp_acquisition_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/isp_capture_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/mamoiada_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/isp_mmu.c11
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c11
-rw-r--r--drivers/staging/media/atomisp/pci/mmu_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c13
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c13
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/src/event.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c40
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c20
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h10
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c9
-rw-r--r--drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c39
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h21
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h15
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_host_data.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h17
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_legacy.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c22
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h11
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mmu.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h31
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c23
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params_internal.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_properties.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.c9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_struct.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_uds.h9
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_version.c9
-rw-r--r--drivers/staging/media/atomisp/pci/str2mem_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.c9
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/timed_controller_defs.h9
-rw-r--r--drivers/staging/media/atomisp/pci/version.h9
-rw-r--r--drivers/staging/media/av7110/av7110.h4
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c25
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c2
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c2
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c2
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c4
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c30
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c14
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-of.c8
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c54
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c2
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c6
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c7
-rw-r--r--drivers/staging/media/ipu7/Kconfig19
-rw-r--r--drivers/staging/media/ipu7/Makefile23
-rw-r--r--drivers/staging/media/ipu7/TODO28
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h163
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h175
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h412
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h465
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h24
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h49
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.c430
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.h25
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.c158
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.h69
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress-regs.h461
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.c1192
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.h77
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.c276
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.c477
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.h46
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.c301
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.h39
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c1034
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h1197
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c543
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.h64
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c828
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.h72
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c333
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h52
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c1083
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.h117
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.c1166
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.h140
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.c853
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.h414
-rw-r--r--drivers/staging/media/ipu7/ipu7-platform-regs.h82
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.c78
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.h35
-rw-r--r--drivers/staging/media/ipu7/ipu7.c2778
-rw-r--r--drivers/staging/media/ipu7/ipu7.h242
-rw-r--r--drivers/staging/media/max96712/max96712.c60
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c33
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h5
-rw-r--r--drivers/staging/media/omap4iss/Kconfig12
-rw-r--r--drivers/staging/media/omap4iss/Makefile9
-rw-r--r--drivers/staging/media/omap4iss/TODO3
-rw-r--r--drivers/staging/media/omap4iss/iss.c1354
-rw-r--r--drivers/staging/media/omap4iss/iss.h247
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c1379
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.h155
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c277
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.h47
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c579
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.h63
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c844
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.h89
-rw-r--r--drivers/staging/media/omap4iss/iss_regs.h899
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c884
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.h72
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c1274
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h203
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-video.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c10
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c19
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c25
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c2
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c18
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c8
-rw-r--r--drivers/staging/media/tegra-video/csi.c2
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c4
-rw-r--r--drivers/staging/media/tegra-video/vi.c4
-rw-r--r--drivers/staging/media/tegra-video/vip.c2
-rw-r--r--drivers/staging/most/TODO7
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/most/i2c/i2c.c4
-rw-r--r--drivers/staging/most/video/video.c19
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/nvec/nvec_kbd.c2
-rw-r--r--drivers/staging/nvec/nvec_power.c4
-rw-r--r--drivers/staging/nvec/nvec_ps2.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c49
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/octeon/octeon-stubs.h134
-rw-r--r--drivers/staging/olpc_dcon/Kconfig17
-rw-r--r--drivers/staging/olpc_dcon/Makefile5
-rw-r--r--drivers/staging/olpc_dcon/TODO15
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c807
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h112
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c201
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c204
-rw-r--r--drivers/staging/rtl8192e/Kconfig61
-rw-r--r--drivers/staging/rtl8192e/Makefile19
-rw-r--r--drivers/staging/rtl8192e/TODO18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h266
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c198
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c1915
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h34
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c189
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h244
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c1110
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h55
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h773
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c123
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h25
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2016
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h402
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c1856
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h155
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c84
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c89
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c230
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h31
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c867
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h13
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.c543
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.h27
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h60
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c544
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h223
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c699
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h43
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h50
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c450
-rw-r--r--drivers/staging/rtl8192e/rtllib.h1799
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c411
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c706
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c242
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c179
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2564
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2309
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c534
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c901
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c752
-rw-r--r--drivers/staging/rtl8712/Kconfig21
-rw-r--r--drivers/staging/rtl8712/Makefile35
-rw-r--r--drivers/staging/rtl8712/TODO13
-rw-r--r--drivers/staging/rtl8712/basic_types.h28
-rw-r--r--drivers/staging/rtl8712/drv_types.h175
-rw-r--r--drivers/staging/rtl8712/ethernet.h21
-rw-r--r--drivers/staging/rtl8712/hal_init.c401
-rw-r--r--drivers/staging/rtl8712/ieee80211.c415
-rw-r--r--drivers/staging/rtl8712/ieee80211.h165
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c160
-rw-r--r--drivers/staging/rtl8712/mlme_osdep.h31
-rw-r--r--drivers/staging/rtl8712/mp_custom_oid.h287
-rw-r--r--drivers/staging/rtl8712/os_intfs.c482
-rw-r--r--drivers/staging/rtl8712/osdep_intf.h32
-rw-r--r--drivers/staging/rtl8712/osdep_service.h60
-rw-r--r--drivers/staging/rtl8712/recv_linux.c139
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h39
-rw-r--r--drivers/staging/rtl8712/rtl8712_bitdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c409
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h231
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h95
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h41
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h32
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h65
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h24
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c563
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h44
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h86
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h131
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h61
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_bitdef.h68
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_regdef.h29
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h142
-rw-r--r--drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h44
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c99
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c1830
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h31
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h20
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_bitdef.h39
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_regdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h36
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h43
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c1075
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h145
-rw-r--r--drivers/staging/rtl8712/rtl8712_regdef.h32
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h34
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h121
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h163
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h42
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h49
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_regdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h49
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_regdef.h36
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c732
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.h108
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c750
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h750
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h130
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.c220
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.h88
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h109
-rw-r--r--drivers/staging/rtl8712/rtl871x_ht.h33
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c147
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h236
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h94
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2275
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c519
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.h109
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c354
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.h45
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h118
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c1710
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h205
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c724
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h275
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c883
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h328
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h1034
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c234
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h113
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c671
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h208
-rw-r--r--drivers/staging/rtl8712/rtl871x_rf.h55
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1386
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h218
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c263
-rw-r--r--drivers/staging/rtl8712/rtl871x_wlan_sme.h35
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c1056
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h287
-rw-r--r--drivers/staging/rtl8712/sta_info.h132
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c307
-rw-r--r--drivers/staging/rtl8712/usb_intf.c638
-rw-r--r--drivers/staging/rtl8712/usb_ops.c195
-rw-r--r--drivers/staging/rtl8712/usb_ops.h38
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c508
-rw-r--r--drivers/staging/rtl8712/usb_osintf.h35
-rw-r--r--drivers/staging/rtl8712/wifi.h196
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h223
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c181
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h52
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/Makefile3
-rw-r--r--drivers/staging/rtl8723bs/TODO4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c115
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c22
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c280
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c409
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c174
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c30
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c206
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c122
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c137
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c30
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c68
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c112
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c174
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_pwr_seq.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c176
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h6
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c27
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c1095
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c15
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c39
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c73
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c2
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h43
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h13
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h11
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h112
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_cfg.h5
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h2
-rw-r--r--drivers/staging/rtl8723bs/include/ioctl_cfg80211.h1
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h19
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h32
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h5
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h5
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h20
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h92
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h7
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h345
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h22
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_hal.h2
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c53
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1299
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c179
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c65
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c228
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c21
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c16
-rw-r--r--drivers/staging/rts5208/Kconfig9
-rw-r--r--drivers/staging/rts5208/Makefile5
-rw-r--r--drivers/staging/rts5208/TODO7
-rw-r--r--drivers/staging/rts5208/general.c25
-rw-r--r--drivers/staging/rts5208/general.h19
-rw-r--r--drivers/staging/rts5208/ms.c4311
-rw-r--r--drivers/staging/rts5208/ms.h214
-rw-r--r--drivers/staging/rts5208/rtsx.c987
-rw-r--r--drivers/staging/rts5208/rtsx.h164
-rw-r--r--drivers/staging/rts5208/rtsx_card.c1151
-rw-r--r--drivers/staging/rts5208/rtsx_card.h1087
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c2161
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h987
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3279
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h131
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h36
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c768
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h57
-rw-r--r--drivers/staging/rts5208/sd.c4717
-rw-r--r--drivers/staging/rts5208/sd.h289
-rw-r--r--drivers/staging/rts5208/spi.c906
-rw-r--r--drivers/staging/rts5208/spi.h52
-rw-r--r--drivers/staging/rts5208/xd.c2145
-rw-r--r--drivers/staging/rts5208/xd.h176
-rw-r--r--drivers/staging/sm750fb/Makefile3
-rw-r--r--drivers/staging/sm750fb/TODO8
-rw-r--r--drivers/staging/sm750fb/ddk750.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c62
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h57
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c247
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.h12
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h8
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c408
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h174
-rw-r--r--drivers/staging/sm750fb/sm750.c58
-rw-r--r--drivers/staging/sm750fb/sm750.h40
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c8
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h12
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c42
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c14
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h7
-rw-r--r--drivers/staging/vc04_services/interface/TESTING4
-rw-r--r--drivers/staging/vc04_services/interface/TODO13
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c559
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c754
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h55
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c58
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h21
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c9
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h2
-rw-r--r--drivers/staging/vme_user/vme.c6
-rw-r--r--drivers/staging/vme_user/vme_bridge.h56
-rw-r--r--drivers/staging/vme_user/vme_fake.c2
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c3
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h2
-rw-r--r--drivers/staging/vt6655/Kconfig6
-rw-r--r--drivers/staging/vt6655/Makefile15
-rw-r--r--drivers/staging/vt6655/TODO21
-rw-r--r--drivers/staging/vt6655/baseband.c2257
-rw-r--r--drivers/staging/vt6655/baseband.h72
-rw-r--r--drivers/staging/vt6655/card.c836
-rw-r--r--drivers/staging/vt6655/card.h62
-rw-r--r--drivers/staging/vt6655/channel.c135
-rw-r--r--drivers/staging/vt6655/channel.h17
-rw-r--r--drivers/staging/vt6655/desc.h249
-rw-r--r--drivers/staging/vt6655/device.h292
-rw-r--r--drivers/staging/vt6655/device_cfg.h44
-rw-r--r--drivers/staging/vt6655/device_main.c1868
-rw-r--r--drivers/staging/vt6655/dpc.c145
-rw-r--r--drivers/staging/vt6655/dpc.h21
-rw-r--r--drivers/staging/vt6655/key.c143
-rw-r--r--drivers/staging/vt6655/key.h51
-rw-r--r--drivers/staging/vt6655/mac.c851
-rw-r--r--drivers/staging/vt6655/mac.h580
-rw-r--r--drivers/staging/vt6655/power.c144
-rw-r--r--drivers/staging/vt6655/power.h29
-rw-r--r--drivers/staging/vt6655/rf.c535
-rw-r--r--drivers/staging/vt6655/rf.h71
-rw-r--r--drivers/staging/vt6655/rxtx.c1462
-rw-r--r--drivers/staging/vt6655/rxtx.h184
-rw-r--r--drivers/staging/vt6655/srom.c139
-rw-r--r--drivers/staging/vt6655/srom.h85
-rw-r--r--drivers/staging/vt6655/test9
-rw-r--r--drivers/staging/vt6656/Kconfig7
-rw-r--r--drivers/staging/vt6656/Makefile15
-rw-r--r--drivers/staging/vt6656/TODO18
-rw-r--r--drivers/staging/vt6656/baseband.c455
-rw-r--r--drivers/staging/vt6656/baseband.h75
-rw-r--r--drivers/staging/vt6656/card.c456
-rw-r--r--drivers/staging/vt6656/card.h44
-rw-r--r--drivers/staging/vt6656/channel.c77
-rw-r--r--drivers/staging/vt6656/channel.h21
-rw-r--r--drivers/staging/vt6656/desc.h91
-rw-r--r--drivers/staging/vt6656/device.h386
-rw-r--r--drivers/staging/vt6656/key.c142
-rw-r--r--drivers/staging/vt6656/key.h40
-rw-r--r--drivers/staging/vt6656/mac.c183
-rw-r--r--drivers/staging/vt6656/mac.h373
-rw-r--r--drivers/staging/vt6656/main_usb.c1121
-rw-r--r--drivers/staging/vt6656/power.c112
-rw-r--r--drivers/staging/vt6656/power.h23
-rw-r--r--drivers/staging/vt6656/rf.c443
-rw-r--r--drivers/staging/vt6656/rf.h46
-rw-r--r--drivers/staging/vt6656/rxtx.c730
-rw-r--r--drivers/staging/vt6656/rxtx.h178
-rw-r--r--drivers/staging/vt6656/usbpipe.c506
-rw-r--r--drivers/staging/vt6656/usbpipe.h67
-rw-r--r--drivers/staging/vt6656/wcmd.c185
-rw-r--r--drivers/staging/vt6656/wcmd.h48
-rw-r--r--drivers/target/iscsi/Kconfig4
-rw-r--r--drivers/target/iscsi/iscsi_target.c172
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c50
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c69
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c5
-rw-r--r--drivers/target/target_core_configfs.c28
-rw-r--r--drivers/target/target_core_device.c97
-rw-r--r--drivers/target/target_core_fabric_lib.c63
-rw-r--r--drivers/target/target_core_iblock.c45
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c28
-rw-r--r--drivers/target/target_core_pscsi.c10
-rw-r--r--drivers/target/target_core_spc.c170
-rw-r--r--drivers/target/target_core_stat.c73
-rw-r--r--drivers/target/target_core_transport.c119
-rw-r--r--drivers/target/target_core_user.c16
-rw-r--r--drivers/tc/tc.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/amdtee/core.c16
-rw-r--r--drivers/tee/optee/Kconfig5
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c9
-rw-r--r--drivers/tee/optee/ffa_abi.c189
-rw-r--r--drivers/tee/optee/optee_ffa.h27
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h17
-rw-r--r--drivers/tee/optee/optee_smc.h37
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/smc_abi.c151
-rw-r--r--drivers/tee/optee/supp.c35
-rw-r--r--drivers/tee/qcomtee/Kconfig12
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c353
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c179
-rw-r--r--drivers/thermal/Kconfig20
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/airoha_thermal.c489
-rw-r--r--drivers/thermal/amlogic_thermal.c18
-rw-r--r--drivers/thermal/armada_thermal.c4
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c4
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c11
-rw-r--r--drivers/thermal/broadcom/ns-thermal.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/thermal/da9062-thermal.c8
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/gov_bang_bang.c70
-rw-r--r--drivers/thermal/gov_fair_share.c20
-rw-r--r--drivers/thermal/gov_power_allocator.c117
-rw-r--r--drivers/thermal/gov_step_wise.c47
-rw-r--r--drivers/thermal/gov_user_space.c12
-rw-r--r--drivers/thermal/hisi_thermal.c8
-rw-r--r--drivers/thermal/imx8mm_thermal.c2
-rw-r--r--drivers/thermal/imx_thermal.c4
-rw-r--r--drivers/thermal/intel/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig5
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile2
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c25
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3402_thermal.c5
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3406_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c313
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c42
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h11
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c17
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c12
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c36
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c10
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c2
-rw-r--r--drivers/thermal/intel/intel_hfi.c14
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c6
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c2
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c2
-rw-r--r--drivers/thermal/intel/intel_tcc.c12
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c7
-rw-r--r--drivers/thermal/intel/therm_throt.c10
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c4
-rw-r--r--drivers/thermal/k3_bandgap.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c12
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/loongson2_thermal.c15
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c203
-rw-r--r--drivers/thermal/pcie_cooling.c80
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c13
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c7
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c597
-rw-r--r--drivers/thermal/qcom/tsens-v1.c81
-rw-r--r--drivers/thermal/qcom/tsens-v2.c178
-rw-r--r--drivers/thermal/qcom/tsens.c36
-rw-r--r--drivers/thermal/qcom/tsens.h9
-rw-r--r--drivers/thermal/qoriq_thermal.c47
-rw-r--r--drivers/thermal/renesas/Kconfig21
-rw-r--r--drivers/thermal/renesas/Makefile2
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c172
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c4
-rw-r--r--drivers/thermal/renesas/rzg2l_thermal.c2
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c304
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/sprd_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c2
-rw-r--r--drivers/thermal/st/stm_thermal.c2
-rw-r--r--drivers/thermal/sun8i_thermal.c11
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c33
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c2
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/testing/command.c30
-rw-r--r--drivers/thermal/testing/zone.c60
-rw-r--r--drivers/thermal/thermal-generic-adc.c55
-rw-r--r--drivers/thermal/thermal_core.c924
-rw-r--r--drivers/thermal/thermal_core.h44
-rw-r--r--drivers/thermal/thermal_debugfs.c54
-rw-r--r--drivers/thermal/thermal_helpers.c46
-rw-r--r--drivers/thermal/thermal_hwmon.c9
-rw-r--r--drivers/thermal/thermal_netlink.c253
-rw-r--r--drivers/thermal/thermal_netlink.h34
-rw-r--r--drivers/thermal/thermal_of.c127
-rw-r--r--drivers/thermal/thermal_sysfs.c141
-rw-r--r--drivers/thermal/thermal_thresholds.c244
-rw-r--r--drivers/thermal/thermal_thresholds.h19
-rw-r--r--drivers/thermal/thermal_trip.c48
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h8
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/uniphier_thermal.c2
-rw-r--r--drivers/thunderbolt/Kconfig4
-rw-r--r--drivers/thunderbolt/acpi.c28
-rw-r--r--drivers/thunderbolt/cap.c49
-rw-r--r--drivers/thunderbolt/clx.c12
-rw-r--r--drivers/thunderbolt/ctl.c51
-rw-r--r--drivers/thunderbolt/ctl.h2
-rw-r--r--drivers/thunderbolt/debugfs.c574
-rw-r--r--drivers/thunderbolt/dma_port.c21
-rw-r--r--drivers/thunderbolt/domain.c77
-rw-r--r--drivers/thunderbolt/eeprom.c84
-rw-r--r--drivers/thunderbolt/icm.c36
-rw-r--r--drivers/thunderbolt/lc.c58
-rw-r--r--drivers/thunderbolt/nhi.c40
-rw-r--r--drivers/thunderbolt/nhi.h4
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/nvm.c44
-rw-r--r--drivers/thunderbolt/path.c18
-rw-r--r--drivers/thunderbolt/property.c38
-rw-r--r--drivers/thunderbolt/retimer.c43
-rw-r--r--drivers/thunderbolt/sb_regs.h32
-rw-r--r--drivers/thunderbolt/switch.c153
-rw-r--r--drivers/thunderbolt/tb.c355
-rw-r--r--drivers/thunderbolt/tb.h82
-rw-r--r--drivers/thunderbolt/tb_msgs.h1
-rw-r--r--drivers/thunderbolt/test.c90
-rw-r--r--drivers/thunderbolt/tmu.c16
-rw-r--r--drivers/thunderbolt/tunnel.c583
-rw-r--r--drivers/thunderbolt/tunnel.h87
-rw-r--r--drivers/thunderbolt/usb4.c392
-rw-r--r--drivers/thunderbolt/usb4_port.c63
-rw-r--r--drivers/thunderbolt/xdomain.c55
-rw-r--r--drivers/tty/Kconfig25
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/hvc/hvc_console.c8
-rw-r--r--drivers/tty/hvc/hvc_iucv.c7
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/ipwireless/hardware.c6
-rw-r--r--drivers/tty/mips_ejtag_fdc.c11
-rw-r--r--drivers/tty/moxa.c253
-rw-r--r--drivers/tty/mxser.c263
-rw-r--r--drivers/tty/n_gsm.c86
-rw-r--r--drivers/tty/n_tty.c212
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serdev/core.c30
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c9
-rw-r--r--drivers/tty/serial/8250/8250.h25
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c7
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c7
-rw-r--r--drivers/tty/serial/8250/8250_ce4100.c93
-rw-r--r--drivers/tty/serial/8250/8250_core.c397
-rw-r--r--drivers/tty/serial/8250/8250_dma.c16
-rw-r--r--drivers/tty/serial/8250/8250_dw.c114
-rw-r--r--drivers/tty/serial/8250/8250_early.c6
-rw-r--r--drivers/tty/serial/8250/8250_em.c6
-rw-r--r--drivers/tty/serial/8250/8250_exar.c136
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c16
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c10
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c10
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c8
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c4
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_ni.c450
-rw-r--r--drivers/tty/serial/8250/8250_of.c18
-rw-r--r--drivers/tty/serial/8250/8250_omap.c278
-rw-r--r--drivers/tty/serial/8250/8250_pci.c170
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c72
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c15
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c98
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c1131
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c2
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c118
-rw-r--r--drivers/tty/serial/8250/8250_rt288x.c4
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c2
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c6
-rw-r--r--drivers/tty/serial/8250/Kconfig18
-rw-r--r--drivers/tty/serial/8250/Makefile4
-rw-r--r--drivers/tty/serial/Kconfig65
-rw-r--r--drivers/tty/serial/Makefile3
-rw-r--r--drivers/tty/serial/altera_jtaguart.c16
-rw-r--r--drivers/tty/serial/altera_uart.c17
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c293
-rw-r--r--drivers/tty/serial/ar933x_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c42
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart.c4
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/earlycon.c23
-rw-r--r--drivers/tty/serial/esp32_acm.c2
-rw-r--r--drivers/tty/serial/esp32_uart.c2
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c540
-rw-r--r--drivers/tty/serial/icom.c9
-rw-r--r--drivers/tty/serial/imx.c170
-rw-r--r--drivers/tty/serial/ip22zilog.c352
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c1
-rw-r--r--drivers/tty/serial/kgdb_nmi.c381
-rw-r--r--drivers/tty/serial/kgdboc.c8
-rw-r--r--drivers/tty/serial/lantiq.c6
-rw-r--r--drivers/tty/serial/liteuart.c8
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c2
-rw-r--r--drivers/tty/serial/ma35d1_serial.c4
-rw-r--r--drivers/tty/serial/max3100.c11
-rw-r--r--drivers/tty/serial/max310x.c33
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c2
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/milbeaut_usio.c7
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/msm_serial.c10
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c403
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/rp2.c12
-rw-r--r--drivers/tty/serial/rsci.c480
-rw-r--r--drivers/tty/serial/rsci.h10
-rw-r--r--drivers/tty/serial/sa1100.c8
-rw-r--r--drivers/tty/serial/samsung_tty.c21
-rw-r--r--drivers/tty/serial/sc16is7xx.c62
-rw-r--r--drivers/tty/serial/sc16is7xx_i2c.c2
-rw-r--r--drivers/tty/serial/sc16is7xx_spi.c2
-rw-r--r--drivers/tty/serial/sccnxp.c6
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_base_bus.c4
-rw-r--r--drivers/tty/serial/serial_core.c601
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c62
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h27
-rw-r--r--drivers/tty/serial/serial_port.c12
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci-common.h175
-rw-r--r--drivers/tty/serial/sh-sci.c936
-rw-r--r--drivers/tty/serial/sh-sci.h2
-rw-r--r--drivers/tty/serial/sifive.c96
-rw-r--r--drivers/tty/serial/sprd_serial.c43
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c12
-rw-r--r--drivers/tty/serial/sunhv.c2
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c180
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/tty/serial/tegra-utc.c625
-rw-r--r--drivers/tty/serial/timbuart.c2
-rw-r--r--drivers/tty/serial/uartlite.c27
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c28
-rw-r--r--drivers/tty/synclink_gt.c12
-rw-r--r--drivers/tty/sysrq.c65
-rw-r--r--drivers/tty/tty_audit.c10
-rw-r--r--drivers/tty/tty_buffer.c3
-rw-r--r--drivers/tty/tty_io.c111
-rw-r--r--drivers/tty/tty_ioctl.c50
-rw-r--r--drivers/tty/tty_ldsem.c17
-rw-r--r--drivers/tty/tty_port.c195
-rw-r--r--drivers/tty/vcc.c10
-rw-r--r--drivers/tty/vt/.gitignore3
-rw-r--r--drivers/tty/vt/Makefile34
-rw-r--r--drivers/tty/vt/consolemap.c118
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped112
-rwxr-xr-xdrivers/tty/vt/gen_ucs_fallback_table.py360
-rwxr-xr-xdrivers/tty/vt/gen_ucs_recompose_table.py257
-rwxr-xr-xdrivers/tty/vt/gen_ucs_width_table.py307
-rw-r--r--drivers/tty/vt/keyboard.c41
-rw-r--r--drivers/tty/vt/selection.c64
-rw-r--r--drivers/tty/vt/ucs.c251
-rw-r--r--drivers/tty/vt/ucs_fallback_table.h_shipped3346
-rw-r--r--drivers/tty/vt/ucs_recompose_table.h_shipped102
-rw-r--r--drivers/tty/vt/ucs_width_table.h_shipped453
-rw-r--r--drivers/tty/vt/vc_screen.c74
-rw-r--r--drivers/tty/vt/vt.c498
-rw-r--r--drivers/tty/vt/vt_ioctl.c212
-rw-r--r--drivers/ufs/core/ufs-mcq.c63
-rw-r--r--drivers/ufs/core/ufs-sysfs.c409
-rw-r--r--drivers/ufs/core/ufs-sysfs.h1
-rw-r--r--drivers/ufs/core/ufs_bsg.c10
-rw-r--r--drivers/ufs/core/ufs_trace.h136
-rw-r--r--drivers/ufs/core/ufs_trace_types.h24
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c33
-rw-r--r--drivers/ufs/core/ufshcd-priv.h29
-rw-r--r--drivers/ufs/core/ufshcd.c1644
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c6
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c8
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pltfrm.c7
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c2
-rw-r--r--drivers/ufs/host/ufs-exynos.c251
-rw-r--r--drivers/ufs/host/ufs-exynos.h10
-rw-r--r--drivers/ufs/host/ufs-hisi.c12
-rw-r--r--drivers/ufs/host/ufs-mediatek.c710
-rw-r--r--drivers/ufs/host/ufs-mediatek.h33
-rw-r--r--drivers/ufs/host/ufs-qcom.c951
-rw-r--r--drivers/ufs/host/ufs-qcom.h102
-rw-r--r--drivers/ufs/host/ufs-renesas.c738
-rw-r--r--drivers/ufs/host/ufs-rockchip.c354
-rw-r--r--drivers/ufs/host/ufs-rockchip.h90
-rw-r--r--drivers/ufs/host/ufs-sprd.c13
-rw-r--r--drivers/ufs/host/ufshcd-pci.c46
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c75
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h2
-rw-r--r--drivers/uio/Kconfig2
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c30
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c2
-rw-r--r--drivers/uio/uio_hv_generic.c171
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/uio/uio_pdrv_genirq.c29
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/atm/cxacru.c125
-rw-r--r--drivers/usb/atm/speedtch.c18
-rw-r--r--drivers/usb/atm/ueagle-atm.c6
-rw-r--r--drivers/usb/atm/usbatm.c8
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c6
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c2
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c4
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c4
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c109
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h61
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h5
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c18
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c75
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h16
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c41
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c10
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h25
-rw-r--r--drivers/usb/cdns3/core.c9
-rw-r--r--drivers/usb/cdns3/core.h5
-rw-r--r--drivers/usb/cdns3/host.c11
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c117
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c2
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/host.c13
-rw-r--r--drivers/usb/chipidea/otg_fsm.c3
-rw-r--r--drivers/usb/chipidea/udc.c190
-rw-r--r--drivers/usb/chipidea/udc.h2
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c228
-rw-r--r--drivers/usb/class/cdc-acm.c46
-rw-r--r--drivers/usb/class/cdc-wdm.c44
-rw-r--r--drivers/usb/class/usblp.c45
-rw-r--r--drivers/usb/class/usbtmc.c80
-rw-r--r--drivers/usb/common/common.c17
-rw-r--r--drivers/usb/common/usb-conn-gpio.c32
-rw-r--r--drivers/usb/core/Makefile1
-rw-r--r--drivers/usb/core/config.c84
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/driver.c93
-rw-r--r--drivers/usb/core/endpoint.c11
-rw-r--r--drivers/usb/core/generic.c14
-rw-r--r--drivers/usb/core/hcd-pci.c17
-rw-r--r--drivers/usb/core/hcd.c79
-rw-r--r--drivers/usb/core/hub.c200
-rw-r--r--drivers/usb/core/hub.h1
-rw-r--r--drivers/usb/core/ledtrig-usbport.c3
-rw-r--r--drivers/usb/core/offload.c136
-rw-r--r--drivers/usb/core/port.c21
-rw-r--r--drivers/usb/core/quirks.c31
-rw-r--r--drivers/usb/core/sysfs.c14
-rw-r--r--drivers/usb/core/urb.c47
-rw-r--r--drivers/usb/core/usb-acpi.c13
-rw-r--r--drivers/usb/core/usb.c145
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/Kconfig2
-rw-r--r--drivers/usb/dwc2/core.c1
-rw-r--r--drivers/usb/dwc2/core.h23
-rw-r--r--drivers/usb/dwc2/gadget.c163
-rw-r--r--drivers/usb/dwc2/hcd.c122
-rw-r--r--drivers/usb/dwc2/hcd_queue.c9
-rw-r--r--drivers/usb/dwc2/params.c29
-rw-r--r--drivers/usb/dwc2/platform.c43
-rw-r--r--drivers/usb/dwc3/Kconfig13
-rw-r--r--drivers/usb/dwc3/Makefile2
-rw-r--r--drivers/usb/dwc3/core.c389
-rw-r--r--drivers/usb/dwc3/core.h55
-rw-r--r--drivers/usb/dwc3/debug.h18
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/drd.c5
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c96
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c29
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c166
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c39
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c2
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c5
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c17
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c12
-rw-r--r--drivers/usb/dwc3/dwc3-qcom-legacy.c935
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c425
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c2
-rw-r--r--drivers/usb/dwc3/dwc3-st.c10
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c46
-rw-r--r--drivers/usb/dwc3/ep0.c24
-rw-r--r--drivers/usb/dwc3/gadget.c502
-rw-r--r--drivers/usb/dwc3/glue.h36
-rw-r--r--drivers/usb/dwc3/host.c5
-rw-r--r--drivers/usb/dwc3/trace.h17
-rw-r--r--drivers/usb/early/xhci-dbc.c4
-rw-r--r--drivers/usb/fotg210/fotg210-core.c7
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c3
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/composite.c90
-rw-r--r--drivers/usb/gadget/config.c53
-rw-r--r--drivers/usb/gadget/configfs.c14
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/function/Makefile4
-rw-r--r--drivers/usb/gadget/function/f_acm.c42
-rw-r--r--drivers/usb/gadget/function/f_ecm.c59
-rw-r--r--drivers/usb/gadget/function/f_fs.c35
-rw-r--r--drivers/usb/gadget/function/f_hid.c161
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c12
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h2
-rw-r--r--drivers/usb/gadget/function/f_midi.c27
-rw-r--r--drivers/usb/gadget/function/f_midi2.c23
-rw-r--r--drivers/usb/gadget/function/f_ncm.c87
-rw-r--r--drivers/usb/gadget/function/f_rndis.c85
-rw-r--r--drivers/usb/gadget/function/f_serial.c7
-rw-r--r--drivers/usb/gadget/function/f_tcm.c723
-rw-r--r--drivers/usb/gadget/function/f_uac1.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c3
-rw-r--r--drivers/usb/gadget/function/f_uvc.c4
-rw-r--r--drivers/usb/gadget/function/storage_common.h2
-rw-r--r--drivers/usb/gadget/function/tcm.h28
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c68
-rw-r--r--drivers/usb/gadget/function/uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c358
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h20
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c28
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_trace.c11
-rw-r--r--drivers/usb/gadget/function/uvc_trace.h60
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c74
-rw-r--r--drivers/usb/gadget/function/uvc_video.c268
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/hid.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c14
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c4
-rw-r--r--drivers/usb/gadget/legacy/zero.c4
-rw-r--r--drivers/usb/gadget/udc/Kconfig44
-rw-r--r--drivers/usb/gadget/udc/Makefile5
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c7
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c13
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c3
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h69
-rw-r--r--drivers/usb/gadget/udc/core.c8
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c28
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c8
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c5
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c1516
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h675
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c10
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h317
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2062
-rw-r--r--drivers/usb/gadget/udc/mv_udc.h309
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2426
-rw-r--r--drivers/usb/gadget/udc/net2272.c2723
-rw-r--r--drivers/usb/gadget/udc/net2272.h584
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c13
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c17
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c12
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c20
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c4
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c18
-rw-r--r--drivers/usb/gadget/udc/trace.h5
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c8
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/host/Kconfig13
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/bcma-hcd.c1
-rw-r--r--drivers/usb/host/ehci-atmel.c2
-rw-r--r--drivers/usb/host/ehci-brcm.c2
-rw-r--r--drivers/usb/host/ehci-exynos.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c27
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-platform.c7
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-sh.c11
-rw-r--r--drivers/usb/host/ehci-spear.c9
-rw-r--r--drivers/usb/host/ehci-st.c2
-rw-r--r--drivers/usb/host/ehci-sysfs.c18
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fhci-sched.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c5
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c4
-rw-r--r--drivers/usb/host/max3421-hcd.c25
-rw-r--r--drivers/usb/host/octeon-hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c4
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-pci.c23
-rw-r--r--drivers/usb/host/ohci-platform.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c10
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/ohci-spear.c5
-rw-r--r--drivers/usb/host/ohci-st.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c19
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/r8a66597-hcd.c15
-rw-r--r--drivers/usb/host/sl811-hcd.c9
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-platform.c4
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/host/xen-hcd.c6
-rw-r--r--drivers/usb/host/xhci-caps.h12
-rw-r--r--drivers/usb/host/xhci-dbgcap.c130
-rw-r--r--drivers/usb/host/xhci-dbgcap.h3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c99
-rw-r--r--drivers/usb/host/xhci-debugfs.c143
-rw-r--r--drivers/usb/host/xhci-histb.c4
-rw-r--r--drivers/usb/host/xhci-hub.c49
-rw-r--r--drivers/usb/host/xhci-mem.c594
-rw-r--r--drivers/usb/host/xhci-mtk.c6
-rw-r--r--drivers/usb/host/xhci-mvebu.c10
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c9
-rw-r--r--drivers/usb/host/xhci-pci.c147
-rw-r--r--drivers/usb/host/xhci-plat.c85
-rw-r--r--drivers/usb/host/xhci-plat.h3
-rw-r--r--drivers/usb/host/xhci-rcar-regs.h49
-rw-r--r--drivers/usb/host/xhci-rcar.c102
-rw-r--r--drivers/usb/host/xhci-ring.c854
-rw-r--r--drivers/usb/host/xhci-rzg3e-regs.h12
-rw-r--r--drivers/usb/host/xhci-sideband.c493
-rw-r--r--drivers/usb/host/xhci-tegra.c106
-rw-r--r--drivers/usb/host/xhci-trace.h113
-rw-r--r--drivers/usb/host/xhci.c456
-rw-r--r--drivers/usb/host/xhci.h207
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/Kconfig2
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c6
-rw-r--r--drivers/usb/misc/Kconfig19
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c24
-rw-r--r--drivers/usb/misc/chaoskey.c35
-rw-r--r--drivers/usb/misc/iowarrior.c50
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c131
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h36
-rw-r--r--drivers/usb/misc/qcom_eud.c38
-rw-r--r--drivers/usb/misc/usb-ljca.c28
-rw-r--r--drivers/usb/misc/usb251xb.c114
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/misc/usbio.c749
-rw-r--r--drivers/usb/misc/usbtest.c9
-rw-r--r--drivers/usb/misc/yurex.c5
-rw-r--r--drivers/usb/mon/mon_bin.c16
-rw-r--r--drivers/usb/mtu3/Kconfig2
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c43
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c3
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c3
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c4
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/da8xx.c14
-rw-r--r--drivers/usb/musb/jz4740.c6
-rw-r--r--drivers/usb/musb/mediatek.c4
-rw-r--r--drivers/usb/musb/mpfs.c11
-rw-r--r--drivers/usb/musb/musb_core.c27
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/musb/musb_dsps.c17
-rw-r--r--drivers/usb/musb/musb_gadget.c18
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/musb/omap2430.c39
-rw-r--r--drivers/usb/musb/sunxi.c8
-rw-r--r--drivers/usb/musb/tusb6010.c12
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/phy/Kconfig12
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c5
-rw-r--r--drivers/usb/phy/phy-generic.c4
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c2
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c880
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c10
-rw-r--r--drivers/usb/phy/phy-tahvo.c5
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c91
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c5
-rw-r--r--drivers/usb/phy/phy-ulpi.c23
-rw-r--r--drivers/usb/phy/phy.c28
-rw-r--r--drivers/usb/renesas_usbhs/common.c76
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/roles/class.c5
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/bus.c6
-rw-r--r--drivers/usb/serial/ch341.c35
-rw-r--r--drivers/usb/serial/cp210x.c9
-rw-r--r--drivers/usb/serial/ftdi_sio.c34
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h21
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/io_edgeport.c10
-rw-r--r--drivers/usb/serial/mos7840.c17
-rw-r--r--drivers/usb/serial/option.c133
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/serial/pl2303.c40
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/sierra.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c5
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/usb-serial.c11
-rw-r--r--drivers/usb/storage/Kconfig3
-rw-r--r--drivers/usb/storage/Makefile2
-rw-r--r--drivers/usb/storage/alauda.c10
-rw-r--r--drivers/usb/storage/cypress_atacb.c2
-rw-r--r--drivers/usb/storage/datafab.c16
-rw-r--r--drivers/usb/storage/debug.c4
-rw-r--r--drivers/usb/storage/ene_ub6250.c10
-rw-r--r--drivers/usb/storage/freecom.c2
-rw-r--r--drivers/usb/storage/initializers.c2
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/storage/jumpshot.c12
-rw-r--r--drivers/usb/storage/karma.c2
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c25
-rw-r--r--drivers/usb/storage/scsiglue.c15
-rw-r--r--drivers/usb/storage/sddr09.c20
-rw-r--r--drivers/usb/storage/sddr55.c14
-rw-r--r--drivers/usb/storage/shuttle_usbat.c8
-rw-r--r--drivers/usb/storage/transport.c12
-rw-r--r--drivers/usb/storage/uas.c12
-rw-r--r--drivers/usb/storage/unusual_devs.h36
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/storage/usb.c20
-rw-r--r--drivers/usb/typec/altmodes/Kconfig9
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c39
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c2
-rw-r--r--drivers/usb/typec/altmodes/thunderbolt.c388
-rw-r--r--drivers/usb/typec/anx7411.c66
-rw-r--r--drivers/usb/typec/bus.c8
-rw-r--r--drivers/usb/typec/class.c282
-rw-r--r--drivers/usb/typec/class.h4
-rw-r--r--drivers/usb/typec/hd3ss3220.c207
-rw-r--r--drivers/usb/typec/mux.c4
-rw-r--r--drivers/usb/typec/mux/Kconfig19
-rw-r--r--drivers/usb/typec/mux/Makefile2
-rw-r--r--drivers/usb/typec/mux/fsa4480.c5
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c6
-rw-r--r--drivers/usb/typec/mux/ps883x.c466
-rw-r--r--drivers/usb/typec/mux/tusb1046.c196
-rw-r--r--drivers/usb/typec/port-mapper.c23
-rw-r--r--drivers/usb/typec/stusb160x.c7
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c32
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c62
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c12
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c13
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c80
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c59
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c467
-rw-r--r--drivers/usb/typec/tcpm/wcove.c6
-rw-r--r--drivers/usb/typec/tipd/core.c559
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h7
-rw-r--r--drivers/usb/typec/tipd/trace.h41
-rw-r--r--drivers/usb/typec/ucsi/Kconfig26
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c342
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c42
-rw-r--r--drivers/usb/typec/ucsi/displayport.c21
-rw-r--r--drivers/usb/typec/ucsi/psy.c30
-rw-r--r--drivers/usb/typec/ucsi/trace.c19
-rw-r--r--drivers/usb/typec/ucsi/trace.h29
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c261
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h282
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c97
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c111
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c27
-rw-r--r--drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c526
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c8
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c179
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c37
-rw-r--r--drivers/usb/usbip/vhci_rx.c6
-rw-r--r--drivers/usb/usbip/vudc_main.c2
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c6
-rw-r--r--drivers/usb/usbip/vudc_transfer.c2
-rw-r--r--drivers/usb/usbip/vudc_tx.c2
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c5
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c5
-rw-r--r--drivers/vdpa/mlx5/core/mr.c26
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c55
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa.h32
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_hw.c38
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c122
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c5
-rw-r--r--drivers/vdpa/solidrun/snet_main.c71
-rw-r--r--drivers/vdpa/vdpa.c5
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c6
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c134
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c81
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c24
-rw-r--r--drivers/vfio/cdx/Makefile6
-rw-r--r--drivers/vfio/cdx/main.c2
-rw-r--r--drivers/vfio/cdx/private.h14
-rw-r--r--drivers/vfio/debugfs.c19
-rw-r--r--drivers/vfio/device_cdev.c98
-rw-r--r--drivers/vfio/fsl-mc/Kconfig5
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c2
-rw-r--r--drivers/vfio/group.c29
-rw-r--r--drivers/vfio/iommufd.c68
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/pci/Kconfig6
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c374
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h33
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c416
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h35
-rw-r--r--drivers/vfio/pci/mlx5/main.c125
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c177
-rw-r--r--drivers/vfio/pci/pds/dirty.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c3
-rw-r--r--drivers/vfio/pci/pds/pci_drv.c2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c2
-rw-r--r--drivers/vfio/pci/qat/main.c9
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c32
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c96
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c5
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c21
-rw-r--r--drivers/vfio/pci/vfio_pci_priv.h6
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c38
-rw-r--r--drivers/vfio/pci/virtio/Kconfig42
-rw-r--r--drivers/vfio/pci/virtio/Makefile3
-rw-r--r--drivers/vfio/pci/virtio/common.h127
-rw-r--r--drivers/vfio/pci/virtio/legacy_io.c420
-rw-r--r--drivers/vfio/pci/virtio/main.c484
-rw-r--r--drivers/vfio/pci/virtio/migrate.c1336
-rw-r--r--drivers/vfio/platform/Kconfig5
-rw-r--r--drivers/vfio/platform/reset/Kconfig6
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c2
-rw-r--r--drivers/vfio/platform/vfio_platform.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c10
-rw-r--r--drivers/vfio/vfio_iommu_type1.c305
-rw-r--r--drivers/vfio/vfio_main.c27
-rw-r--r--drivers/vfio/virqfd.c16
-rw-r--r--drivers/vhost/Kconfig19
-rw-r--r--drivers/vhost/net.c223
-rw-r--r--drivers/vhost/scsi.c815
-rw-r--r--drivers/vhost/vdpa.c16
-rw-r--r--drivers/vhost/vhost.c408
-rw-r--r--drivers/vhost/vhost.h34
-rw-r--r--drivers/vhost/vringh.c151
-rw-r--r--drivers/vhost/vsock.c15
-rw-r--r--drivers/video/Kconfig19
-rw-r--r--drivers/video/backlight/88pm860x_bl.c6
-rw-r--r--drivers/video/backlight/Kconfig20
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp5520_bl.c3
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c124
-rw-r--r--drivers/video/backlight/as3711_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c136
-rw-r--r--drivers/video/backlight/bd6107.c1
-rw-r--r--drivers/video/backlight/corgi_lcd.c17
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/hp680_bl.c3
-rw-r--r--drivers/video/backlight/hx8357.c2
-rw-r--r--drivers/video/backlight/ili922x.c7
-rw-r--r--drivers/video/backlight/ili9320.c15
-rw-r--r--drivers/video/backlight/jornada720_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_lcd.c10
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c3
-rw-r--r--drivers/video/backlight/ktz8866.c1
-rw-r--r--drivers/video/backlight/l4f00242t03.c32
-rw-r--r--drivers/video/backlight/lcd.c88
-rw-r--r--drivers/video/backlight/led_bl.c12
-rw-r--r--drivers/video/backlight/lm3533_bl.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c2
-rw-r--r--drivers/video/backlight/lms501kf03.c24
-rw-r--r--drivers/video/backlight/locomolcd.c1
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c2
-rw-r--r--drivers/video/backlight/ltv350qv.c15
-rw-r--r--drivers/video/backlight/lv5207lp.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mp3309c.c14
-rw-r--r--drivers/video/backlight/mt6370-backlight.c2
-rw-r--r--drivers/video/backlight/otm3225a.c2
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c154
-rw-r--r--drivers/video/backlight/platform_lcd.c20
-rw-r--r--drivers/video/backlight/pwm_bl.c2
-rw-r--r--drivers/video/backlight/qcom-wled.c8
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c3
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/video/backlight/tdo24m.c19
-rw-r--r--drivers/video/backlight/tps65217_bl.c1
-rw-r--r--drivers/video/backlight/vgg2432a4.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/console/Kconfig9
-rw-r--r--drivers/video/console/dummycon.c18
-rw-r--r--drivers/video/fbdev/Kconfig35
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c4
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c7
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c2
-rw-r--r--drivers/video/fbdev/au1100fb.c4
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c1
-rw-r--r--drivers/video/fbdev/c2p_planar.c1
-rw-r--r--drivers/video/fbdev/carminefb.c8
-rw-r--r--drivers/video/fbdev/carminefb.h2
-rw-r--r--drivers/video/fbdev/chipsfb.c2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c29
-rw-r--r--drivers/video/fbdev/core/Kconfig30
-rw-r--r--drivers/video/fbdev/core/bitblit.c22
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c428
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c364
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c359
-rw-r--r--drivers/video/fbdev/core/cfbmem.h43
-rw-r--r--drivers/video/fbdev/core/fb_backlight.c12
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c2
-rw-r--r--drivers/video/fbdev/core/fb_copyarea.h405
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c44
-rw-r--r--drivers/video/fbdev/core/fb_draw.h274
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h279
-rw-r--r--drivers/video/fbdev/core/fb_imageblit.h495
-rw-r--r--drivers/video/fbdev/core/fb_info.c1
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c1
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c199
-rw-r--r--drivers/video/fbdev/core/fbcon.h38
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c5
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c5
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c5
-rw-r--r--drivers/video/fbdev/core/fbcvt.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c107
-rw-r--r--drivers/video/fbdev/core/fbmon.c12
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c77
-rw-r--r--drivers/video/fbdev/core/modedb.c1
-rw-r--r--drivers/video/fbdev/core/svgalib.c96
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c371
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c326
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c335
-rw-r--r--drivers/video/fbdev/core/sysmem.h39
-rw-r--r--drivers/video/fbdev/core/tileblit.c45
-rw-r--r--drivers/video/fbdev/cyber2000fb.c36
-rw-r--r--drivers/video/fbdev/cyber2000fb.h2
-rw-r--r--drivers/video/fbdev/efifb.c4
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c1
-rw-r--r--drivers/video/fbdev/geode/display_gx.c1
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c3
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c23
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c10
-rw-r--r--drivers/video/fbdev/geode/video_gx.c16
-rw-r--r--drivers/video/fbdev/hyperv_fb.c54
-rw-r--r--drivers/video/fbdev/imxfb.c41
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c24
-rw-r--r--drivers/video/fbdev/macmodes.c3
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c26
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c47
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c62
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c21
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c6
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c5
-rw-r--r--drivers/video/fbdev/omap/hwa742.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c8
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c5
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c53
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c68
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c17
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c1
-rw-r--r--drivers/video/fbdev/pxafb.c43
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s3fb.c177
-rw-r--r--drivers/video/fbdev/sbuslib.c1
-rw-r--r--drivers/video/fbdev/sh7760fb.c3
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c39
-rw-r--r--drivers/video/fbdev/simplefb.c48
-rw-r--r--drivers/video/fbdev/sis/sis.h2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c25
-rw-r--r--drivers/video/fbdev/sm501fb.c12
-rw-r--r--drivers/video/fbdev/udlfb.c4
-rw-r--r--drivers/video/fbdev/vga16fb.c7
-rw-r--r--drivers/video/fbdev/via/via-core.c1
-rw-r--r--drivers/video/fbdev/via/via-gpio.c9
-rw-r--r--drivers/video/fbdev/via/via_i2c.c1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c31
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/video/hdmi.c28
-rw-r--r--drivers/video/screen_info_generic.c91
-rw-r--r--drivers/video/screen_info_pci.c79
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/acrn/ioreq.c4
-rw-r--r--drivers/virt/acrn/irqfd.c15
-rw-r--r--drivers/virt/acrn/mm.c8
-rw-r--r--drivers/virt/coco/Kconfig8
-rw-r--r--drivers/virt/coco/Makefile3
-rw-r--r--drivers/virt/coco/arm-cca-guest/Kconfig10
-rw-r--r--drivers/virt/coco/arm-cca-guest/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c232
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig2
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c49
-rw-r--r--drivers/virt/coco/guest/Kconfig17
-rw-r--r--drivers/virt/coco/guest/Makefile4
-rw-r--r--drivers/virt/coco/guest/report.c539
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c251
-rw-r--r--drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c6
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig3
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c760
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c263
-rw-r--r--drivers/virt/coco/tsm.c512
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c4
-rw-r--r--drivers/virtio/Kconfig76
-rw-r--r--drivers/virtio/Makefile5
-rw-r--r--drivers/virtio/virtio.c198
-rw-r--r--drivers/virtio/virtio_balloon.c18
-rw-r--r--drivers/virtio/virtio_debug.c27
-rw-r--r--drivers/virtio/virtio_dma_buf.c4
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/virtio/virtio_mem.c109
-rw-r--r--drivers/virtio/virtio_mmio.c54
-rw-r--r--drivers/virtio/virtio_pci_common.c65
-rw-r--r--drivers/virtio/virtio_pci_common.h20
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c490
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c73
-rw-r--r--drivers/virtio/virtio_ring.c1030
-rw-r--r--drivers/virtio/virtio_rtc_arm.c23
-rw-r--r--drivers/virtio/virtio_rtc_class.c262
-rw-r--r--drivers/virtio/virtio_rtc_driver.c1407
-rw-r--r--drivers/virtio/virtio_rtc_internal.h122
-rw-r--r--drivers/virtio/virtio_rtc_ptp.c347
-rw-r--r--drivers/virtio/virtio_vdpa.c78
-rw-r--r--drivers/w1/masters/amd_axi_w1.c2
-rw-r--r--drivers/w1/masters/ds2482.c26
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/masters/sgi_w1.c2
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/w1/masters/w1-uart.c4
-rw-r--r--drivers/w1/slaves/w1_ds2406.c18
-rw-r--r--drivers/w1/slaves/w1_ds2408.c40
-rw-r--r--drivers/w1/slaves/w1_ds2413.c12
-rw-r--r--drivers/w1/slaves/w1_ds2430.c8
-rw-r--r--drivers/w1/slaves/w1_ds2431.c8
-rw-r--r--drivers/w1/slaves/w1_ds2433.c12
-rw-r--r--drivers/w1/slaves/w1_ds2438.c32
-rw-r--r--drivers/w1/slaves/w1_ds2780.c6
-rw-r--r--drivers/w1/slaves/w1_ds2781.c6
-rw-r--r--drivers/w1/slaves/w1_ds2805.c6
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c16
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c4
-rw-r--r--drivers/w1/slaves/w1_therm.c12
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/w1/w1_netlink.c42
-rw-r--r--drivers/watchdog/Kconfig90
-rw-r--r--drivers/watchdog/Makefile7
-rw-r--r--drivers/watchdog/acquirewdt.c2
-rw-r--r--drivers/watchdog/advantechwdt.c2
-rw-r--r--drivers/watchdog/airoha_wdt.c216
-rw-r--r--drivers/watchdog/alim7101_wdt.c4
-rw-r--r--drivers/watchdog/apple_wdt.c15
-rw-r--r--drivers/watchdog/arm_smc_wdt.c17
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c10
-rw-r--r--drivers/watchdog/aspeed_wdt.c81
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c8
-rw-r--r--drivers/watchdog/ath79_wdt.c2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c6
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c2
-rw-r--r--drivers/watchdog/cgbc_wdt.c211
-rw-r--r--drivers/watchdog/cpu5wdt.c284
-rw-r--r--drivers/watchdog/cpwd.c6
-rw-r--r--drivers/watchdog/cros_ec_wdt.c40
-rw-r--r--drivers/watchdog/da9052_wdt.c40
-rw-r--r--drivers/watchdog/da9055_wdt.c7
-rw-r--r--drivers/watchdog/da9063_wdt.c19
-rw-r--r--drivers/watchdog/diag288_wdt.c60
-rw-r--r--drivers/watchdog/dw_wdt.c4
-rw-r--r--drivers/watchdog/exar_wdt.c2
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/gxp-wdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c56
-rw-r--r--drivers/watchdog/ib700wdt.c2
-rw-r--r--drivers/watchdog/ie6xx_wdt.c2
-rw-r--r--drivers/watchdog/intel_oc_wdt.c233
-rw-r--r--drivers/watchdog/it87_wdt.c47
-rw-r--r--drivers/watchdog/lenovo_se30_wdt.c396
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c9
-rw-r--r--drivers/watchdog/machzwd.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c2
-rw-r--r--drivers/watchdog/mixcomwd.c4
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c12
-rw-r--r--drivers/watchdog/mtx-1_wdt.c2
-rw-r--r--drivers/watchdog/nct6694_wdt.c307
-rw-r--r--drivers/watchdog/nic7018_wdt.c11
-rw-r--r--drivers/watchdog/npcm_wdt.c9
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/octeon-wdt-main.c4
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pretimeout_noop.c2
-rw-r--r--drivers/watchdog/pretimeout_panic.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c10
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rti_wdt.c25
-rw-r--r--drivers/watchdog/rza_wdt.c7
-rw-r--r--drivers/watchdog/rzg2l_wdt.c24
-rw-r--r--drivers/watchdog/rzn1_wdt.c2
-rw-r--r--drivers/watchdog/rzv2h_wdt.c158
-rw-r--r--drivers/watchdog/s32g_wdt.c315
-rw-r--r--drivers/watchdog/s3c2410_wdt.c126
-rw-r--r--drivers/watchdog/sa1100_wdt.c4
-rw-r--r--drivers/watchdog/sbc60xxwdt.c4
-rw-r--r--drivers/watchdog/sbsa_gwdt.c50
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c4
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c8
-rw-r--r--drivers/watchdog/sp805_wdt.c3
-rw-r--r--drivers/watchdog/st_lpc_wdt.c2
-rw-r--r--drivers/watchdog/starfive-wdt.c4
-rw-r--r--drivers/watchdog/stm32_iwdg.c95
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c11
-rw-r--r--drivers/watchdog/txx9wdt.c2
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--drivers/watchdog/w83877f_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c32
-rw-r--r--drivers/watchdog/watchdog_core.h8
-rw-r--r--drivers/watchdog/watchdog_dev.c6
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c4
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c2
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/xilinx_wwdt.c75
-rw-r--r--drivers/watchdog/ziirave_wdt.c5
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/balloon.c47
-rw-r--r--drivers/xen/events/events_base.c45
-rw-r--r--drivers/xen/gntdev-common.h4
-rw-r--r--drivers/xen/gntdev-dmabuf.c37
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c109
-rw-r--r--drivers/xen/grant-dma-iommu.c2
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/manage.c20
-rw-r--r--drivers/xen/pci.c32
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/platform-pci.c4
-rw-r--r--drivers/xen/privcmd.c42
-rw-r--r--drivers/xen/pvcalls-front.c14
-rw-r--r--drivers/xen/pvcalls-front.h2
-rw-r--r--drivers/xen/swiotlb-xen.c43
-rw-r--r--drivers/xen/time.c8
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c32
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenbus/xenbus.h2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c9
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c22
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c58
-rw-r--r--drivers/xen/xenfs/xensyms.c4
-rw-r--r--drivers/zorro/names.c12
-rw-r--r--drivers/zorro/zorro-sysfs.c6
18698 files changed, 1695069 insertions, 801467 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7bdad836fc62..4915a63866b0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -10,6 +10,12 @@ source "drivers/cxl/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/rapidio/Kconfig"
+config PC104
+ bool "PC/104 support" if EXPERT
+ help
+ Expose PC/104 form factor device drivers and options available for
+ selection and configuration. Enable this option if your target
+ machine has a PC/104 bus.
source "drivers/base/Kconfig"
@@ -21,6 +27,8 @@ source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
+source "drivers/fwctl/Kconfig"
+
source "drivers/gnss/Kconfig"
source "drivers/mtd/Kconfig"
@@ -75,6 +83,8 @@ source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
+source "drivers/dpll/Kconfig"
+
source "drivers/pinctrl/Kconfig"
source "drivers/gpio/Kconfig"
@@ -207,8 +217,6 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
-source "drivers/gpu/trace/Kconfig"
-
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
@@ -243,6 +251,4 @@ source "drivers/hte/Kconfig"
source "drivers/cdx/Kconfig"
-source "drivers/dpll/Kconfig"
-
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 45d1c3e630f7..8e1ffa4358d5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -135,6 +135,7 @@ obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
+obj-$(CONFIG_FWCTL) += fwctl/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-y += clocksource/
@@ -160,7 +161,7 @@ obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
+obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
@@ -194,4 +195,5 @@ obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-$(CONFIG_DIBS) += dibs/
obj-$(CONFIG_S390) += s390/
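The hv/ change above drops the $(subst m,y,...) wrapper, which rewrote a
CONFIG_HYPERV value of "m" to "y" so the directory was always entered via
obj-y, i.e. treated as built-in even for a modular config. A minimal
standalone GNU make sketch of the idiom (CONFIG_HYPERV is hardcoded here
purely for illustration, not read from a real .config):

    # $(subst m,y,m) expands to y, so this appends to obj-y, not obj-m.
    CONFIG_HYPERV := m
    obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
    $(info obj-y is '$(obj-y)')   # prints: obj-y is 'hv/'
    all: ;

With the wrapper removed, CONFIG_HYPERV=m descends into hv/ through the
usual obj-m path instead.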
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 64065fb8922b..bb01cebc42bf 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -24,8 +24,10 @@ menuconfig DRM_ACCEL
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"
endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index ab3df932937f..ffc3fa588666 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Kconfig b/drivers/accel/amdxdna/Kconfig
new file mode 100644
index 000000000000..f39d7a87296c
--- /dev/null
+++ b/drivers/accel/amdxdna/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_AMDXDNA
+ tristate "AMD AI Engine"
+ depends on AMD_IOMMU
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on X86_64
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ select FW_LOADER
+ select HMM_MIRROR
+ help
+ Choose this option to enable support for the NPU integrated into
+ AMD client CPUs like the AMD Ryzen AI 300 Series. The AMD NPU can
+ be used to accelerate machine learning applications.
+
+ If "M" is selected, the driver module will be amdxdna.
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
new file mode 100644
index 000000000000..6797dac65efa
--- /dev/null
+++ b/drivers/accel/amdxdna/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+amdxdna-y := \
+ aie2_ctx.o \
+ aie2_error.o \
+ aie2_message.o \
+ aie2_pci.o \
+ aie2_pm.o \
+ aie2_psp.o \
+ aie2_smu.o \
+ aie2_solver.o \
+ amdxdna_ctx.o \
+ amdxdna_gem.o \
+ amdxdna_mailbox.o \
+ amdxdna_mailbox_helper.o \
+ amdxdna_pci_drv.o \
+ amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
+ npu1_regs.o \
+ npu2_regs.o \
+ npu4_regs.o \
+ npu5_regs.o \
+ npu6_regs.o
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
new file mode 100644
index 000000000000..ad8ac6e315b6
--- /dev/null
+++ b/drivers/accel/amdxdna/TODO
@@ -0,0 +1,2 @@
+- Add debugfs support
+- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
new file mode 100644
index 000000000000..e9f9b1fa5dc1
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static bool force_cmdlist;
+module_param(force_cmdlist, bool, 0600);
+MODULE_PARM_DESC(force_cmdlist, "Force the use of command lists (default false)");
+
+#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
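+/*
+ * kref release callback: clean up the finished job, wake any waiter on
+ * the hwctx job-free queue, drop the output fence and free the job.
+ */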
+static void aie2_job_release(struct kref *ref)
+{
+ struct amdxdna_sched_job *job;
+
+ job = container_of(ref, struct amdxdna_sched_job, refcnt);
+ amdxdna_sched_job_cleanup(job);
+ atomic64_inc(&job->hwctx->job_free_cnt);
+ wake_up(&job->hwctx->priv->job_free_wq);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+ kfree(job);
+}
+
+static void aie2_job_put(struct amdxdna_sched_job *job)
+{
+ kref_put(&job->refcnt, aie2_job_release);
+}
+
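+/* Preserve and restore the hwctx status across a stop/restart cycle. */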
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
+/* bad_job is only used by aie2_sched_job_timedout(); otherwise, pass NULL */
+static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
+ struct drm_sched_job *bad_job)
+{
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+}
+
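+/*
+ * Recreate the FW context, re-map the host heap buffer and, if the hwctx
+ * is in the ready state, re-configure its CUs before restarting the
+ * scheduler.
+ */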
+static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_gem_obj *heap = hwctx->priv->heap;
+ int ret;
+
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
+ goto out;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
+ goto out;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
+ goto out;
+ }
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
+ goto out;
+ }
+
+out:
+ drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+}
+
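+/*
+ * The hwctx syncobj carries a dma_fence_chain indexed by job sequence
+ * number; walk the chain to find the completion fence for @seq.
+ */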
+static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *fence, *out_fence = NULL;
+ int ret;
+
+ fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
+ if (!fence)
+ return NULL;
+
+ ret = dma_fence_chain_find_seqno(&fence, seq);
+ if (ret)
+ goto out;
+
+ out_fence = dma_fence_get(dma_fence_chain_contained(fence));
+
+out:
+ dma_fence_put(fence);
+ return out_fence;
+}
+
+static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
+{
+ struct dma_fence *fence;
+
+ fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
+ if (!fence)
+ return;
+
+ /* Wait up to 2 seconds for fw to finish all pending requests */
+ dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
+ dma_fence_put(fence);
+}
+
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
+ /*
+ * A command timeout is unlikely, but if it happens it doesn't
+ * break the system. aie2_hwctx_stop() will destroy the mailbox
+ * and abort all commands.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
+}
+
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
+ /*
+ * The resume path cannot guarantee that the mailbox channel is
+ * regenerated. If that happens, submitting a message to this
+ * mailbox channel will return an error.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
+}
+
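+/* Common completion path: signal the job fence and drop its references. */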
+static void
+aie2_sched_notify(struct amdxdna_sched_job *job)
+{
+ struct dma_fence *fence = job->fence;
+
+ trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+ job->hwctx->priv->completed++;
+ dma_fence_signal(fence);
+
+ up(&job->hwctx->priv->job_sem);
+ job->job_done = true;
+ dma_fence_put(fence);
+ mmput_async(job->mm);
+ aie2_job_put(job);
+}
+
+static int
+aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ int ret = 0;
+ u32 status;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = readl(data);
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
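+/* Like aie2_sched_resp_handler(), but for jobs without a command BO. */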
+static int
+aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ int ret = 0;
+ u32 status;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = readl(data);
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
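+/*
+ * A command-list response carries three u32 words: the overall status,
+ * the index of the first failed command and that command's own status.
+ */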
+static int
+aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ struct amdxdna_dev *xdna;
+ u32 fail_cmd_status;
+ u32 fail_cmd_idx;
+ u32 cmd_status;
+ int ret = 0;
+
+ cmd_abo = job->cmd_bo;
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
+ xdna = job->hwctx->client->xdna;
+ XDNA_DBG(xdna, "Status 0x%x", cmd_status);
+ if (cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ goto out;
+ }
+
+ /* Slow path to handle error, read from ringbuf on BAR */
+ fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
+ fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
+ XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+ amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+ struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+
+ cc->error_index = fail_cmd_idx;
+ if (cc->error_index >= cc->command_count)
+ cc->error_index = 0;
+ }
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
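+
+/*
+ * Worked example of the decode above (values are illustrative): for a
+ * chain of three commands where the second fails with
+ * AIE2_STATUS_AIE_DMA_ERROR, firmware reports a non-success status,
+ * fail_cmd_idx = 1 and fail_cmd_status = AIE2_STATUS_AIE_DMA_ERROR.
+ * The handler sets the command state to that status and, for an
+ * ERT_CMD_CHAIN command, stores error_index = 1 in the chain payload
+ * so user space can tell which sub-command failed.
+ */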
+
+static struct dma_fence *
+aie2_sched_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct dma_fence *fence;
+ int ret;
+
+ if (!mmget_not_zero(job->mm))
+ return ERR_PTR(-ESRCH);
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+ if (unlikely(!cmd_abo)) {
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ goto out;
+ }
+
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
+ ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else if (force_cmdlist)
+ ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else
+ ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
+
+out:
+ if (ret) {
+ dma_fence_put(job->fence);
+ aie2_job_put(job);
+ mmput(job->mm);
+ fence = ERR_PTR(ret);
+ }
+ trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
+
+ return fence;
+}
+
+static void aie2_sched_job_free(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+
+ trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
+ if (!job->job_done)
+ up(&hwctx->priv->job_sem);
+
+ drm_sched_job_cleanup(sched_job);
+ aie2_job_put(job);
+}
+
+static enum drm_gpu_sched_stat
+aie2_sched_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct amdxdna_dev *xdna;
+
+ xdna = hwctx->client->xdna;
+ trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ mutex_lock(&xdna->dev_lock);
+ aie2_hwctx_stop(xdna, hwctx, sched_job);
+
+ aie2_hwctx_restart(xdna, hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = aie2_sched_job_run,
+ .free_job = aie2_sched_job_free,
+ .timedout_job = aie2_sched_job_timedout,
+};
+
+static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int start, end, first, last;
+ u32 width = 1, entries = 0;
+ int i;
+
+ if (!hwctx->num_tiles) {
+ XDNA_ERR(xdna, "Number of tiles is zero");
+ return -EINVAL;
+ }
+
+ ndev = xdna->dev_handle;
+ if (unlikely(!ndev->metadata.core.row_count)) {
+ XDNA_WARN(xdna, "Core tile row count is zero");
+ return -EINVAL;
+ }
+
+ hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
+ if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
+ XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
+ return -EINVAL;
+ }
+
+ if (ndev->priv->col_align == COL_ALIGN_NATURE)
+ width = hwctx->num_col;
+
+ /*
+ * Within the range [start, end], find the columns that are a
+ * multiple of 'width'.
+ * 'first' is the first such column,
+ * 'last' is the last such column,
+ * 'entries' is the total number of such columns.
+ */
+ start = xdna->dev_info->first_col;
+ end = ndev->total_col - hwctx->num_col;
+ if (start > 0 && end == 0) {
+ XDNA_DBG(xdna, "Force start from col 0");
+ start = 0;
+ }
+ first = start + (width - start % width) % width;
+ last = end - end % width;
+ if (last >= first)
+ entries = (last - first) / width + 1;
+ XDNA_DBG(xdna, "start %d end %d first %d last %d",
+ start, end, first, last);
+
+ if (unlikely(!entries)) {
+ XDNA_ERR(xdna, "Start %d end %d width %d",
+ start, end, width);
+ return -EINVAL;
+ }
+
+ hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
+ if (!hwctx->col_list)
+ return -ENOMEM;
+
+ hwctx->col_list_len = entries;
+ hwctx->col_list[0] = first;
+ for (i = 1; i < entries; i++)
+ hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
+
+ print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
+ entries * sizeof(*hwctx->col_list), false);
+ return 0;
+}
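+
+/*
+ * Worked example for aie2_hwctx_col_list() (illustrative numbers): with
+ * start = 0, end = 4 and width = 4 (COL_ALIGN_NATURE, 4-column context),
+ * first = 0, last = 4 and entries = 2, so col_list = { 0, 4 }. With
+ * width = 1 (no alignment constraint), first = start, last = end and
+ * every column in [start, end] is a candidate start column.
+ */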
+
+static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct alloc_requests *xrs_req;
+ int ret;
+
+ xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
+ if (!xrs_req)
+ return -ENOMEM;
+
+ xrs_req->cdo.start_cols = hwctx->col_list;
+ xrs_req->cdo.cols_len = hwctx->col_list_len;
+ xrs_req->cdo.ncols = hwctx->num_col;
+ xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
+
+ xrs_req->rqos.gops = hwctx->qos.gops;
+ xrs_req->rqos.fps = hwctx->qos.fps;
+ xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
+ xrs_req->rqos.latency = hwctx->qos.latency;
+ xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
+ xrs_req->rqos.priority = hwctx->qos.priority;
+
+ xrs_req->rid = (uintptr_t)hwctx;
+
+ ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
+
+ kfree(xrs_req);
+ return ret;
+}
+
+static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ int ret;
+
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
+}
+
+static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct drm_file *filp = hwctx->client->filp;
+ struct drm_syncobj *syncobj;
+ u32 hdl;
+ int ret;
+
+ hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
+
+ ret = drm_syncobj_create(&syncobj, 0, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
+ return ret;
+ }
+ ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
+ if (ret) {
+ drm_syncobj_put(syncobj);
+ XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
+ return ret;
+ }
+ hwctx->priv->syncobj = syncobj;
+ hwctx->syncobj_hdl = hdl;
+
+ return 0;
+}
+
+static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
+{
+ /*
+ * The syncobj_hdl is owned by user space and will be cleaned up
+ * separately.
+ */
+ drm_syncobj_put(hwctx->priv->syncobj);
+}
+
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = HWCTX_MAX_CMDS,
+ .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ .name = hwctx->name,
+ .dev = xdna->ddev.dev,
+ };
+ struct drm_gpu_scheduler *sched;
+ struct amdxdna_hwctx_priv *priv;
+ struct amdxdna_gem_obj *heap;
+ struct amdxdna_dev_hdl *ndev;
+ int i, ret;
+
+ priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hwctx->priv = priv;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ XDNA_ERR(xdna, "The client dev heap object does not exist");
+ mutex_unlock(&client->mm_lock);
+ ret = -ENOENT;
+ goto free_priv;
+ }
+ drm_gem_object_get(to_gobj(heap));
+ mutex_unlock(&client->mm_lock);
+ priv->heap = heap;
+ sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
+
+ ret = amdxdna_gem_pin(heap);
+ if (ret) {
+ XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
+ goto put_heap;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ struct amdxdna_gem_obj *abo;
+ struct amdxdna_drm_create_bo args = {
+ .flags = 0,
+ .type = AMDXDNA_BO_DEV,
+ .vaddr = 0,
+ .size = MAX_CHAIN_CMDBUF_SIZE,
+ };
+
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto free_cmd_bufs;
+ }
+
+ XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
+ i, abo->mem.dev_addr, abo->mem.size);
+ priv->cmd_buf[i] = abo;
+ }
+
+ sched = &priv->sched;
+ mutex_init(&priv->io_lock);
+
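+ /*
+ * Prime lockdep with a fs_reclaim -> io_lock dependency so that any
+ * reclaim-capable allocation attempted while holding io_lock is
+ * reported as a potential deadlock right away.
+ */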
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->io_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ ret = drm_sched_init(sched, &args);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
+ goto free_cmd_bufs;
+ }
+
+ ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to initial sched entiry. ret %d", ret);
+ goto free_sched;
+ }
+
+ ret = aie2_hwctx_col_list(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
+ goto free_entity;
+ }
+
+ ret = aie2_alloc_resource(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
+ goto free_col_list;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ ret = aie2_ctx_syncobj_create(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ hwctx->status = HWCTX_STAT_INIT;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num++;
+ init_waitqueue_head(&priv->job_free_wq);
+
+ XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
+
+ return 0;
+
+release_resource:
+ aie2_release_resource(hwctx);
+free_col_list:
+ kfree(hwctx->col_list);
+free_entity:
+ drm_sched_entity_destroy(&priv->entity);
+free_sched:
+ drm_sched_fini(&priv->sched);
+free_cmd_bufs:
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ if (!priv->cmd_buf[i])
+ continue;
+ drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
+ }
+ amdxdna_gem_unpin(heap);
+put_heap:
+ drm_gem_object_put(to_gobj(heap));
+free_priv:
+ kfree(priv);
+ return ret;
+}
+
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+ int idx;
+
+ xdna = hwctx->client->xdna;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num--;
+
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+
+ /* Request firmware to destroy the hwctx and cancel the remaining pending requests */
+ aie2_release_resource(hwctx);
+
+ /* Wait for all submitted jobs to be completed or canceled */
+ wait_event(hwctx->priv->job_free_wq,
+ atomic64_read(&hwctx->job_submit_cnt) ==
+ atomic64_read(&hwctx->job_free_cnt));
+
+ drm_sched_fini(&hwctx->priv->sched);
+ aie2_ctx_syncobj_destroy(hwctx);
+
+ for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
+ drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
+ amdxdna_gem_unpin(hwctx->priv->heap);
+ drm_gem_object_put(to_gobj(hwctx->priv->heap));
+
+ mutex_destroy(&hwctx->priv->io_lock);
+ kfree(hwctx->col_list);
+ kfree(hwctx->priv);
+ kfree(hwctx->cus);
+}
+
+static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
+{
+ struct amdxdna_hwctx_param_config_cu *config = buf;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 total_size;
+ int ret;
+
+ XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
+ if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
+ return -EINVAL;
+
+ if (hwctx->status != HWCTX_STAT_INIT) {
+ XDNA_ERR(xdna, "Not support re-config CU");
+ return -EINVAL;
+ }
+
+ if (!config->num_cus) {
+ XDNA_ERR(xdna, "Number of CU is zero");
+ return -EINVAL;
+ }
+
+ total_size = struct_size(config, cu_configs, config->num_cus);
+ if (total_size > size) {
+ XDNA_ERR(xdna, "CU config larger than size");
+ return -EINVAL;
+ }
+
+ hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
+ goto free_cus;
+ }
+
+ wmb(); /* Publish the CU config before the status so command submit can check status without locking */
+ hwctx->status = HWCTX_STAT_READY;
+
+ return 0;
+
+free_cus:
+ kfree(hwctx->cus);
+ hwctx->cus = NULL;
+ return ret;
+}
+
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ switch (type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ return aie2_hwctx_cu_config(hwctx, buf, size);
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ return -EOPNOTSUPP;
+ default:
+ XDNA_DBG(xdna, "Not supported type %d", type);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aie2_populate_range(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+ unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
+ return -EFAULT;
+ }
+
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(&mapp->range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto put_mm;
+ }
+
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
+ goto again;
+ }
+
+ goto put_mm;
+ }
+
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
+ }
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
+
+put_mm:
+ amdxdna_umap_put(mapp);
+ mmput(mm);
+ return ret;
+}
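+
+/*
+ * The loop above follows the usual mmu_interval_notifier pattern: take
+ * the sequence count with mmu_interval_read_begin(), fault the pages in
+ * with hmm_range_fault() without holding the notifier lock, then
+ * validate with mmu_interval_read_retry() under notifier_lock. If an
+ * invalidation raced with the fault, or hmm_range_fault() returned
+ * -EBUSY, restart until the HMM_RANGE_DEFAULT_TIMEOUT deadline.
+ */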
+
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct ww_acquire_ctx acquire_ctx;
+ struct dma_fence_chain *chain;
+ struct amdxdna_gem_obj *abo;
+ unsigned long timeout = 0;
+ int ret, i;
+
+ ret = down_interruptible(&hwctx->priv->job_sem);
+ if (ret) {
+ XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
+ return ret;
+ }
+
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
+ XDNA_ERR(xdna, "Alloc fence chain failed");
+ ret = -ENOMEM;
+ goto up_sem;
+ }
+
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
+ hwctx->client->filp->client_id);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
+ goto free_chain;
+ }
+
+retry:
+ ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
+ goto cleanup_job;
+ }
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ goto cleanup_job;
+ }
+ }
+
+ down_read(&xdna->notifier_lock);
+ for (i = 0; i < job->bo_cnt; i++) {
+ abo = to_xdna_obj(job->bos[i]);
+ if (abo->mem.map_invalid) {
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (!timeout) {
+ timeout = jiffies +
+ msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ } else if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto cleanup_job;
+ }
+
+ ret = aie2_populate_range(abo);
+ if (ret)
+ goto cleanup_job;
+ goto retry;
+ }
+ }
+
+ mutex_lock(&hwctx->priv->io_lock);
+ drm_sched_job_arm(&job->base);
+ job->out_fence = dma_fence_get(&job->base.s_fence->finished);
+ for (i = 0; i < job->bo_cnt; i++)
+ dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
+ job->seq = hwctx->priv->seq++;
+ kref_get(&job->refcnt);
+ drm_sched_entity_push_job(&job->base);
+
+ *seq = job->seq;
+ drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
+ mutex_unlock(&hwctx->priv->io_lock);
+
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+
+ aie2_job_put(job);
+ atomic64_inc(&hwctx->job_submit_cnt);
+
+ return 0;
+
+cleanup_job:
+ drm_sched_job_cleanup(&job->base);
+free_chain:
+ dma_fence_chain_free(chain);
+up_sem:
+ up(&hwctx->priv->job_sem);
+ job->job_done = true;
+ return ret;
+}
+
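+/*
+ * Presumably invoked from the GEM MMU interval notifier when a userptr
+ * range is invalidated (the notifier side lives outside this file):
+ * waiting on DMA_RESV_USAGE_BOOKKEEP ensures every job that may still
+ * touch this BO has signaled before its pages are torn down.
+ */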
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+ unsigned long cur_seq)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct drm_gem_object *gobj = to_gobj(abo);
+ long ret;
+
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (!ret || ret == -ERESTARTSYS)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
new file mode 100644
index 000000000000..5ee905632a39
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+struct async_event {
+ struct amdxdna_dev_hdl *ndev;
+ struct async_event_msg_resp resp;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+};
+
+struct async_events {
+ struct workqueue_struct *wq;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+ u32 event_cnt;
+ struct async_event event[] __counted_by(event_cnt);
+};
+
+/*
+ * The enum, struct and lookup tables below are ported from the XAIE util
+ * header file.
+ *
+ * This data is defined by the AIE device and is used to decode error
+ * messages coming from the device.
+ */
+
+enum aie_module_type {
+ AIE_MEM_MOD = 0,
+ AIE_CORE_MOD,
+ AIE_PL_MOD,
+};
+
+enum aie_error_category {
+ AIE_ERROR_SATURATION = 0,
+ AIE_ERROR_FP,
+ AIE_ERROR_STREAM,
+ AIE_ERROR_ACCESS,
+ AIE_ERROR_BUS,
+ AIE_ERROR_INSTRUCTION,
+ AIE_ERROR_ECC,
+ AIE_ERROR_LOCK,
+ AIE_ERROR_DMA,
+ AIE_ERROR_MEM_PARITY,
+ /* Unknown is not from XAIE; added for better categorization */
+ AIE_ERROR_UNKNOWN,
+};
+
+/* Don't pack, unless the XAIE side changes */
+struct aie_error {
+ __u8 row;
+ __u8 col;
+ __u32 mod_type;
+ __u8 event_id;
+};
+
+struct aie_err_info {
+ u32 err_cnt;
+ u32 ret_code;
+ u32 rsvd;
+ struct aie_error payload[] __counted_by(err_cnt);
+};
+
+struct aie_event_category {
+ u8 event_id;
+ enum aie_error_category category;
+};
+
+#define EVENT_CATEGORY(id, cat) { id, cat }
+static const struct aie_event_category aie_ml_mem_event_cat[] = {
+ EVENT_CATEGORY(88U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(90U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(91U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(92U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(93U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(94U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(95U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(96U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(97U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(98U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(99U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(100U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(101U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_core_event_cat[] = {
+ EVENT_CATEGORY(55U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(56U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(57U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(58U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(59U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(60U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(62U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(64U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(65U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(66U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(67U, AIE_ERROR_LOCK),
+ EVENT_CATEGORY(70U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(71U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(72U, AIE_ERROR_BUS),
+};
+
+static const struct aie_event_category aie_ml_mem_tile_event_cat[] = {
+ EVENT_CATEGORY(130U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(132U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(133U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(134U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(135U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(136U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(137U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(138U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(139U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
+ EVENT_CATEGORY(64U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(65U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(66U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(67U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(68U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(69U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(70U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(71U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(72U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(73U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
+};
+
+static enum aie_error_category
+aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
+{
+ const struct aie_event_category *lut;
+ int num_entry;
+ int i;
+
+ switch (mod_type) {
+ case AIE_PL_MOD:
+ lut = aie_ml_shim_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_shim_tile_event_cat);
+ break;
+ case AIE_CORE_MOD:
+ lut = aie_ml_core_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_core_event_cat);
+ break;
+ case AIE_MEM_MOD:
+ if (row == 1) {
+ lut = aie_ml_mem_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_tile_event_cat);
+ } else {
+ lut = aie_ml_mem_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_event_cat);
+ }
+ break;
+ default:
+ return AIE_ERROR_UNKNOWN;
+ }
+
+ for (i = 0; i < num_entry; i++) {
+ if (event_id != lut[i].event_id)
+ continue;
+
+ return lut[i].category;
+ }
+
+ return AIE_ERROR_UNKNOWN;
+}
+
+static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ u32 err_col = 0; /* assume the AIE has at most 32 columns */
+ int i;
+
+ /* Get err column bitmap */
+ for (i = 0; i < num_err; i++) {
+ struct aie_error *err = &errs[i];
+ enum aie_error_category cat;
+
+ cat = aie_get_error_category(err->row, err->event_id, err->mod_type);
+ XDNA_ERR(ndev->xdna, "Row: %d, Col: %d, module %d, event ID %d, category %d",
+ err->row, err->col, err->mod_type,
+ err->event_id, cat);
+
+ if (err->col >= 32) {
+ XDNA_WARN(ndev->xdna, "Invalid column number");
+ break;
+ }
+
+ err_col |= (1 << err->col);
+ }
+
+ return err_col;
+}
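+
+/*
+ * For example, errors reported on columns 1 and 3 yield
+ * err_col = BIT(1) | BIT(3) = 0xa; a zero bitmap tells the caller that
+ * no valid error column was found.
+ */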
+
+static int aie2_error_async_cb(void *handle, void __iomem *data, size_t size)
+{
+ struct async_event *e = handle;
+
+ if (data) {
+ e->resp.type = readl(data + offsetof(struct async_event_msg_resp, type));
+ wmb(); /* Write status last so the worker can check it without taking a lock */
+ e->resp.status = readl(data + offsetof(struct async_event_msg_resp, status));
+ }
+ queue_work(e->wq, &e->work);
+ return 0;
+}
+
+static int aie2_error_event_send(struct async_event *e)
+{
+ drm_clflush_virt_range(e->buf, e->size); /* device can access */
+ return aie2_register_asyn_event_msg(e->ndev, e->addr, e->size, e,
+ aie2_error_async_cb);
+}
+
+static void aie2_error_worker(struct work_struct *err_work)
+{
+ struct aie_err_info *info;
+ struct amdxdna_dev *xdna;
+ struct async_event *e;
+ u32 max_err;
+ u32 err_col;
+
+ e = container_of(err_work, struct async_event, work);
+
+ xdna = e->ndev->xdna;
+
+ if (e->resp.status == MAX_AIE2_STATUS_CODE)
+ return;
+
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+
+ print_hex_dump_debug("AIE error: ", DUMP_PREFIX_OFFSET, 16, 4,
+ e->buf, 0x100, false);
+
+ info = (struct aie_err_info *)e->buf;
+ XDNA_DBG(xdna, "Error count %d return code %d", info->err_cnt, info->ret_code);
+
+ max_err = (e->size - sizeof(*info)) / sizeof(struct aie_error);
+ if (unlikely(info->err_cnt > max_err)) {
+ WARN_ONCE(1, "Error count too large %d\n", info->err_cnt);
+ return;
+ }
+ err_col = aie2_error_backtrack(e->ndev, info->payload, info->err_cnt);
+ if (!err_col) {
+ XDNA_WARN(xdna, "Did not get error column");
+ return;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ /* Re-send this event to firmware */
+ if (aie2_error_event_send(e))
+ XDNA_WARN(xdna, "Unable to register async event");
+ mutex_unlock(&xdna->dev_lock);
+}
+
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_event *e;
+ int i, ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ for (i = 0; i < ndev->async_events->event_cnt; i++) {
+ e = &ndev->async_events->event[i];
+ ret = aie2_error_event_send(e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_events *events;
+
+ events = ndev->async_events;
+
+ mutex_unlock(&xdna->dev_lock);
+ destroy_workqueue(events->wq);
+ mutex_lock(&xdna->dev_lock);
+
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+ kfree(events);
+}
+
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 total_col = ndev->total_col;
+ u32 total_size = ASYNC_BUF_SIZE * total_col;
+ struct async_events *events;
+ int i, ret;
+
+ events = kzalloc(struct_size(events, event, total_col), GFP_KERNEL);
+ if (!events)
+ return -ENOMEM;
+
+ events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!events->buf) {
+ ret = -ENOMEM;
+ goto free_events;
+ }
+ events->size = total_size;
+ events->event_cnt = total_col;
+
+ events->wq = alloc_ordered_workqueue("async_wq", 0);
+ if (!events->wq) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ for (i = 0; i < events->event_cnt; i++) {
+ struct async_event *e = &events->event[i];
+ u32 offset = i * ASYNC_BUF_SIZE;
+
+ e->ndev = ndev;
+ e->wq = events->wq;
+ e->buf = &events->buf[offset];
+ e->addr = events->addr + offset;
+ e->size = ASYNC_BUF_SIZE;
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+ INIT_WORK(&e->work, aie2_error_worker);
+ }
+
+ ndev->async_events = events;
+
+ XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x",
+ events->event_cnt, events->size);
+ return 0;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+free_events:
+ kfree(events);
+ return ret;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
new file mode 100644
index 000000000000..9caad083543d
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+#define DECLARE_AIE2_MSG(name, op) \
+ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
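+
+/*
+ * Judging by its use below, DECLARE_XDNA_MSG_COMMON() (from
+ * amdxdna_mailbox_helper.h) is expected to declare the local 'req',
+ * 'resp' and 'msg' variables, with msg.handle pointing at a xdna_notify
+ * whose data buffer receives the response.
+ */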
+
+static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ struct xdna_mailbox_msg *msg)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ if (!ndev->mgmt_chann)
+ return -ENODEV;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
+ if (ret == -ETIME) {
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ }
+
+ if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+ msg->opcode, *hdl->data);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
+{
+ DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ req.value = value;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
+{
+ DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
+ return ret;
+ }
+
+ *value = resp.value;
+ return 0;
+}
+
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
+{
+ DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
+
+ req.pasid = pasid;
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
+{
+ DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
+ resp.major, resp.minor);
+
+ version->major = resp.major;
+ version->minor = resp.minor;
+
+ return 0;
+}
+
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
+{
+ DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ metadata->size = resp.info.size;
+ metadata->cols = resp.info.cols;
+ metadata->rows = resp.info.rows;
+
+ metadata->version.major = resp.info.major;
+ metadata->version.minor = resp.info.minor;
+
+ metadata->core.row_count = resp.info.core_rows;
+ metadata->core.row_start = resp.info.core_row_start;
+ metadata->core.dma_channel_count = resp.info.core_dma_channels;
+ metadata->core.lock_count = resp.info.core_locks;
+ metadata->core.event_reg_count = resp.info.core_events;
+
+ metadata->mem.row_count = resp.info.mem_rows;
+ metadata->mem.row_start = resp.info.mem_row_start;
+ metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
+ metadata->mem.lock_count = resp.info.mem_locks;
+ metadata->mem.event_reg_count = resp.info.mem_events;
+
+ metadata->shim.row_count = resp.info.shim_rows;
+ metadata->shim.row_start = resp.info.shim_row_start;
+ metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
+ metadata->shim.lock_count = resp.info.shim_locks;
+ metadata->shim.event_reg_count = resp.info.shim_events;
+
+ return 0;
+}
+
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver)
+{
+ DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ fw_ver->major = resp.major;
+ fw_ver->minor = resp.minor;
+ fw_ver->sub = resp.sub;
+ fw_ver->build = resp.build;
+
+ return 0;
+}
+
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_mailbox_chann_res x2i;
+ struct xdna_mailbox_chann_res i2x;
+ struct cq_pair *cq_pair;
+ u32 intr_reg;
+ int ret;
+
+ req.aie_type = 1;
+ req.start_col = hwctx->start_col;
+ req.num_col = hwctx->num_col;
+ req.num_cq_pairs_requested = 1;
+ req.pasid = hwctx->client->pasid;
+ req.context_priority = 2;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ hwctx->fw_ctx_id = resp.context_id;
+ WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+
+ cq_pair = &resp.cq_pair[0];
+ x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
+ x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
+ x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
+ x2i.rb_size = cq_pair->x2i_q.buf_size;
+
+ i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
+ i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
+ i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
+ i2x.rb_size = cq_pair->i2x_q.buf_size;
+
+ ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
+ if (ret == -EINVAL) {
+ XDNA_ERR(xdna, "not able to create channel");
+ goto out_destroy_context;
+ }
+
+ intr_reg = i2x.mb_head_ptr_reg + 4;
+ hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
+ intr_reg, ret);
+ if (!hwctx->priv->mbox_chann) {
+ XDNA_ERR(xdna, "not able to create channel");
+ ret = -EINVAL;
+ goto out_destroy_context;
+ }
+
+ XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
+ hwctx->name, ret, resp.msix_id);
+ XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
+ hwctx->fw_ctx_id, hwctx->client->pasid);
+
+ return 0;
+
+out_destroy_context:
+ aie2_destroy_context(ndev, hwctx);
+ return ret;
+}
+
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ if (hwctx->fw_ctx_id == -1)
+ return 0;
+
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+
+ req.context_id = hwctx->fw_ctx_id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);
+
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+ XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
+ hwctx->fw_ctx_id);
+ hwctx->priv->mbox_chann = NULL;
+ hwctx->fw_ctx_id = -1;
+
+ return ret;
+}
+
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
+{
+ DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ req.context_id = context_id;
+ req.buf_addr = addr;
+ req.buf_size = size;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
+ context_id, addr, size);
+
+ return 0;
+}
+
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
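+
+/*
+ * For example, a context with start_col = 2 and num_col = 3 contributes
+ * GENMASK(4, 2) = 0x1c to the bitmap, i.e. columns 2, 3 and 4.
+ */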
+
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
+ u32 size, u32 *cols_filled)
+{
+ DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct amdxdna_client *client;
+ dma_addr_t dma_addr;
+ u32 aie_bitmap = 0;
+ u8 *buff_addr;
+ int ret;
+
+ buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!buff_addr)
+ return -ENOMEM;
+
+ /* Go through each hardware context and mark the AIE columns that are active */
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
+
+ *cols_filled = 0;
+ req.dump_buff_addr = dma_addr;
+ req.dump_buff_size = size;
+ req.num_cols = hweight32(aie_bitmap);
+ req.aie_bitmap = aie_bitmap;
+
+ drm_clflush_virt_range(buff_addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
+ goto fail;
+ }
+
+ if (resp.status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ XDNA_DBG(xdna, "Query NPU status completed");
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto fail;
+ }
+
+ if (copy_to_user(buf, buff_addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy NPU status to user space");
+ goto fail;
+ }
+
+ *cols_filled = aie_bitmap;
+
+fail:
+ dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, void __iomem *, size_t))
+{
+ struct async_event_msg_req req = { 0 };
+ struct xdna_mailbox_msg msg = {
+ .send_data = (u8 *)&req,
+ .send_size = sizeof(req),
+ .handle = handle,
+ .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
+ .notify_cb = cb,
+ };
+
+ req.buf_addr = addr;
+ req.buf_size = size;
+
+ XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
+ return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
+}
+
+int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 shift = xdna->dev_info->dev_mem_buf_shift;
+ DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct drm_gem_object *gobj;
+ struct amdxdna_gem_obj *abo;
+ int ret, i;
+
+ if (!chann)
+ return -ENODEV;
+
+ if (hwctx->cus->num_cus > MAX_NUM_CUS) {
+ XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwctx->cus->num_cus; i++) {
+ struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
+
+ if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -EINVAL;
+ }
+ abo = to_xdna_obj(gobj);
+
+ if (abo->type != AMDXDNA_BO_DEV) {
+ drm_gem_object_put(gobj);
+ XDNA_ERR(xdna, "Invalid BO type");
+ return -EINVAL;
+ }
+
+ req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
+ abo->mem.dev_addr >> shift);
+ req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
+ XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
+ abo->mem.dev_addr, req.cfgs[i]);
+ drm_gem_object_put(gobj);
+ }
+ req.num_cus = hwctx->cus->num_cus;
+
+ ret = xdna_send_msg_wait(xdna, chann, &msg);
+ if (ret == -ETIME)
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+
+ if (resp.status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
+ return 0;
+ }
+
+ XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
+ msg.opcode, resp.status, ret);
+ return ret;
+}
+
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ union {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu;
+ } req;
+ struct xdna_mailbox_msg msg;
+ u32 payload_len;
+ void *payload;
+ int cu_idx;
+ int ret;
+ u32 op;
+
+ if (!chann)
+ return -ENODEV;
+
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (!payload) {
+ XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ return -EINVAL;
+ }
+
+ cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
+ if (cu_idx < 0) {
+ XDNA_DBG(xdna, "Invalid cu idx");
+ return -EINVAL;
+ }
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ switch (op) {
+ case ERT_START_CU:
+ if (unlikely(payload_len > sizeof(req.ebuf.payload)))
+ XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
+ req.ebuf.cu_idx = cu_idx;
+ memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
+ msg.send_size = sizeof(req.ebuf);
+ msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
+ break;
+ case ERT_START_NPU: {
+ struct amdxdna_cmd_start_npu *sn = payload;
+
+ if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
+ XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
+ req.dpu.inst_buf_addr = sn->buffer;
+ req.dpu.inst_size = sn->buffer_size;
+ req.dpu.inst_prop_cnt = sn->prop_count;
+ req.dpu.cu_idx = cu_idx;
+ memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
+ msg.send_size = sizeof(req.dpu);
+ msg.opcode = MSG_OP_EXEC_DPU;
+ break;
+ }
+ default:
+ XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+ return -EINVAL;
+ }
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ u32 payload_len;
+ void *payload;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+
+ if (!slot_has_space(*buf, offset, payload_len))
+ return -ENOSPC;
+
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = payload_len / sizeof(u32);
+ memcpy(buf->args, payload, payload_len);
+ /* Report the exact slot size so firmware copies only what is needed */
+ *size = sizeof(*buf) + payload_len;
+ return 0;
+}
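+
+/*
+ * For example, a command with three u32 arguments (payload_len = 12)
+ * fills the 8-byte slot header (cu_idx + arg_cnt) plus 12 bytes of
+ * args, so *size = 20 and the next slot starts 20 bytes further into
+ * the chain command buffer.
+ */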
+
+static int
+aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ struct amdxdna_cmd_start_npu *sn;
+ u32 payload_len;
+ void *payload;
+ u32 arg_sz;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+ sn = payload;
+ arg_sz = payload_len - sizeof(*sn);
+ if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (!slot_has_space(*buf, offset, arg_sz))
+ return -ENOSPC;
+
+ buf->inst_buf_addr = sn->buffer;
+ buf->inst_size = sn->buffer_size;
+ buf->inst_prop_cnt = sn->prop_count;
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(buf->args, sn->prop_args, arg_sz);
+
+ /* Report the exact slot size so firmware copies only what is needed */
+ *size = sizeof(*buf) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ u32 this_op = amdxdna_cmd_get_op(abo);
+ void *cmd_buf = cmdbuf_abo->mem.kva;
+ int ret;
+
+ if (this_op != op) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ break;
+ case ERT_START_NPU:
+ ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+done:
+ if (ret) {
+ XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
+ op, ret);
+ }
+ return ret;
+}
+
+static inline struct amdxdna_gem_obj *
+aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+{
+ int idx = get_job_idx(job->seq);
+
+ return job->hwctx->priv->cmd_buf[idx];
+}
+
+static void
+aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
+ struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+{
+ req->buf_addr = cmdbuf_abo->mem.dev_addr;
+ req->buf_size = size;
+ req->count = cnt;
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+ XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
+ req->buf_addr, size, cnt);
+}
+
+static inline u32
+aie2_cmd_op_to_msg_op(u32 op)
+{
+ switch (op) {
+ case ERT_START_CU:
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
+ return MSG_OP_MAX_OPCODE;
+ }
+}
+
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_cmd_chain *payload;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 payload_len;
+ u32 offset = 0;
+ u32 size;
+ int ret;
+ u32 op;
+ u32 i;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (op != ERT_CMD_CHAIN || !payload ||
+ payload_len < struct_size(payload, data, payload->command_count))
+ return -EINVAL;
+
+ for (i = 0; i < payload->command_count; i++) {
+ u32 boh = (u32)(payload->data[i]);
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
+ if (!abo) {
+ XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ return -ENOENT;
+ }
+
+ /* All sub-commands must share the same op; use the first one's. */
+ if (i == 0)
+ op = amdxdna_cmd_get_op(abo);
+
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ amdxdna_gem_put_obj(abo);
+ if (ret)
+ return -EINVAL;
+
+ offset += size;
+ }
+
+ /* The offset is the accumulated total size of the cmd buffer */
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 size;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ if (ret)
+ return ret;
+
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct xdna_mailbox_msg msg;
+ struct sync_bo_req req;
+ int ret = 0;
+
+ req.src_addr = 0;
+ req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.size = abo->mem.size;
+
+ /* Device to Host */
+ req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
+ FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);
+
+ XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
+ req.size, req.src_addr, req.dst_addr);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_SYNC_BO;
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
new file mode 100644
index 000000000000..6df9065b13f6
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MSG_PRIV_H_
+#define _AIE2_MSG_PRIV_H_
+
+enum aie2_msg_opcode {
+ MSG_OP_CREATE_CONTEXT = 0x2,
+ MSG_OP_DESTROY_CONTEXT = 0x3,
+ MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_EXECUTE_BUFFER_CF = 0xC,
+ MSG_OP_QUERY_COL_STATUS = 0xD,
+ MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
+ MSG_OP_QUERY_AIE_VERSION = 0xF,
+ MSG_OP_EXEC_DPU = 0x10,
+ MSG_OP_CONFIG_CU = 0x11,
+ MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
+ MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_MAX_XRT_OPCODE,
+ MSG_OP_SUSPEND = 0x101,
+ MSG_OP_RESUME = 0x102,
+ MSG_OP_ASSIGN_MGMT_PASID = 0x103,
+ MSG_OP_INVOKE_SELF_TEST = 0x104,
+ MSG_OP_MAP_HOST_BUFFER = 0x106,
+ MSG_OP_GET_FIRMWARE_VERSION = 0x108,
+ MSG_OP_SET_RUNTIME_CONFIG = 0x10A,
+ MSG_OP_GET_RUNTIME_CONFIG = 0x10B,
+ MSG_OP_REGISTER_ASYNC_EVENT_MSG = 0x10C,
+ MSG_OP_MAX_DRV_OPCODE,
+ MSG_OP_GET_PROTOCOL_VERSION = 0x301,
+ MSG_OP_MAX_OPCODE
+};
+
+enum aie2_msg_status {
+ AIE2_STATUS_SUCCESS = 0x0,
+ /* AIE Error codes */
+ AIE2_STATUS_AIE_SATURATION_ERROR = 0x1000001,
+ AIE2_STATUS_AIE_FP_ERROR = 0x1000002,
+ AIE2_STATUS_AIE_STREAM_ERROR = 0x1000003,
+ AIE2_STATUS_AIE_ACCESS_ERROR = 0x1000004,
+ AIE2_STATUS_AIE_BUS_ERROR = 0x1000005,
+ AIE2_STATUS_AIE_INSTRUCTION_ERROR = 0x1000006,
+ AIE2_STATUS_AIE_ECC_ERROR = 0x1000007,
+ AIE2_STATUS_AIE_LOCK_ERROR = 0x1000008,
+ AIE2_STATUS_AIE_DMA_ERROR = 0x1000009,
+ AIE2_STATUS_AIE_MEM_PARITY_ERROR = 0x100000a,
+ AIE2_STATUS_AIE_PWR_CFG_ERROR = 0x100000b,
+ AIE2_STATUS_AIE_BACKTRACK_ERROR = 0x100000c,
+ AIE2_STATUS_MAX_AIE_STATUS_CODE,
+ /* MGMT ERT Error codes */
+ AIE2_STATUS_MGMT_ERT_SELF_TEST_FAILURE = 0x2000001,
+ AIE2_STATUS_MGMT_ERT_HASH_MISMATCH,
+ AIE2_STATUS_MGMT_ERT_NOAVAIL,
+ AIE2_STATUS_MGMT_ERT_INVALID_PARAM,
+ AIE2_STATUS_MGMT_ERT_ENTER_SUSPEND_FAILURE,
+ AIE2_STATUS_MGMT_ERT_BUSY,
+ AIE2_STATUS_MGMT_ERT_APPLICATION_ACTIVE,
+ MAX_MGMT_ERT_STATUS_CODE,
+ /* APP ERT Error codes */
+ AIE2_STATUS_APP_ERT_FIRST_ERROR = 0x3000001,
+ AIE2_STATUS_APP_INVALID_INSTR,
+ AIE2_STATUS_APP_LOAD_PDI_FAIL,
+ MAX_APP_ERT_STATUS_CODE,
+ /* NPU RTOS Error Codes */
+ AIE2_STATUS_INVALID_INPUT_BUFFER = 0x4000001,
+ AIE2_STATUS_INVALID_COMMAND,
+ AIE2_STATUS_INVALID_PARAM,
+ AIE2_STATUS_INVALID_OPERATION = 0x4000006,
+ AIE2_STATUS_ASYNC_EVENT_MSGS_FULL,
+ AIE2_STATUS_MAX_RTOS_STATUS_CODE,
+ MAX_AIE2_STATUS_CODE
+};
+
+struct assign_mgmt_pasid_req {
+ __u16 pasid;
+ __u16 reserved;
+} __packed;
+
+struct assign_mgmt_pasid_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct map_host_buffer_req {
+ __u32 context_id;
+ __u64 buf_addr;
+ __u64 buf_size;
+} __packed;
+
+struct map_host_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define MAX_CQ_PAIRS 2
+struct cq_info {
+ __u32 head_addr;
+ __u32 tail_addr;
+ __u32 buf_addr;
+ __u32 buf_size;
+};
+
+struct cq_pair {
+ struct cq_info x2i_q;
+ struct cq_info i2x_q;
+};
+
+struct create_ctx_req {
+ __u32 aie_type;
+ __u8 start_col;
+ __u8 num_col;
+ __u16 reserved;
+ __u8 num_cq_pairs_requested;
+ __u8 reserved1;
+ __u16 pasid;
+ __u32 pad[2];
+ __u32 sec_comm_target_type;
+ __u32 context_priority;
+} __packed;
+
+struct create_ctx_resp {
+ enum aie2_msg_status status;
+ __u32 context_id;
+ __u16 msix_id;
+ __u8 num_cq_pairs_allocated;
+ __u8 reserved;
+ struct cq_pair cq_pair[MAX_CQ_PAIRS];
+} __packed;
+
+struct destroy_ctx_req {
+ __u32 context_id;
+} __packed;
+
+struct destroy_ctx_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct execute_buffer_req {
+ __u32 cu_idx;
+ __u32 payload[19];
+} __packed;
+
+struct exec_dpu_req {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 payload[35];
+} __packed;
+
+struct execute_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct aie_tile_info {
+ __u32 size;
+ __u16 major;
+ __u16 minor;
+ __u16 cols;
+ __u16 rows;
+ __u16 core_rows;
+ __u16 mem_rows;
+ __u16 shim_rows;
+ __u16 core_row_start;
+ __u16 mem_row_start;
+ __u16 shim_row_start;
+ __u16 core_dma_channels;
+ __u16 mem_dma_channels;
+ __u16 shim_dma_channels;
+ __u16 core_locks;
+ __u16 mem_locks;
+ __u16 shim_locks;
+ __u16 core_events;
+ __u16 mem_events;
+ __u16 shim_events;
+ __u16 reserved;
+};
+
+struct aie_tile_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_tile_info_resp {
+ enum aie2_msg_status status;
+ struct aie_tile_info info;
+} __packed;
+
+struct aie_version_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_version_info_resp {
+ enum aie2_msg_status status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct aie_column_info_req {
+ __u64 dump_buff_addr;
+ __u32 dump_buff_size;
+ __u32 num_cols;
+ __u32 aie_bitmap;
+} __packed;
+
+struct aie_column_info_resp {
+ enum aie2_msg_status status;
+ __u32 size;
+} __packed;
+
+struct suspend_req {
+ __u32 place_holder;
+} __packed;
+
+struct suspend_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct resume_req {
+ __u32 place_holder;
+} __packed;
+
+struct resume_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct check_header_hash_req {
+ __u64 hash_high;
+ __u64 hash_low;
+} __packed;
+
+struct check_header_hash_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct query_error_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct query_error_resp {
+ enum aie2_msg_status status;
+ __u32 num_err;
+ __u32 has_next_err;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct protocol_version_req {
+ __u32 reserved;
+} __packed;
+
+struct protocol_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+} __packed;
+
+struct firmware_version_req {
+ __u32 reserved;
+} __packed;
+
+struct firmware_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+ __u32 sub;
+ __u32 build;
+} __packed;
+
+#define MAX_NUM_CUS 32
+#define AIE2_MSG_CFG_CU_PDI_ADDR GENMASK(16, 0)
+#define AIE2_MSG_CFG_CU_FUNC GENMASK(24, 17)
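+/*
+ * Each cfgs[] word packs one CU: bits 16:0 carry the PDI device address
+ * right-shifted by dev_mem_buf_shift and bits 24:17 carry the CU
+ * function (see aie2_config_cu()). Illustrative encoding: a PDI at
+ * device address 0x4000 with a shift of 12 and cu_func 1 becomes
+ * FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR, 0x4) |
+ * FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, 1) = 0x20004.
+ */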
+struct config_cu_req {
+ __u32 num_cus;
+ __u32 cfgs[MAX_NUM_CUS];
+} __packed;
+
+struct config_cu_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct set_runtime_cfg_req {
+ __u32 type;
+ __u64 value;
+} __packed;
+
+struct set_runtime_cfg_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct get_runtime_cfg_req {
+ __u32 type;
+} __packed;
+
+struct get_runtime_cfg_resp {
+ enum aie2_msg_status status;
+ __u64 value;
+} __packed;
+
+enum async_event_type {
+ ASYNC_EVENT_TYPE_AIE_ERROR,
+ ASYNC_EVENT_TYPE_EXCEPTION,
+ MAX_ASYNC_EVENT_TYPE
+};
+
+#define ASYNC_BUF_SIZE SZ_8K
+struct async_event_msg_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct async_event_msg_resp {
+ enum aie2_msg_status status;
+ enum async_event_type type;
+} __packed;
+
+#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
+#define slot_has_space(slot, offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
+ sizeof(typeof(slot)))
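+
+/*
+ * For example, appending a cmd_chain_slot_execbuf_cf with a 64-byte
+ * payload at offset 0xfc0 needs 0xfc0 + 64 + 8 = 0x1008 bytes and is
+ * rejected because it would overrun the 4K chain command buffer.
+ */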
+
+struct cmd_chain_slot_execbuf_cf {
+ __u32 cu_idx;
+ __u32 arg_cnt;
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+struct cmd_chain_slot_dpu {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 arg_cnt;
+#define MAX_DPU_ARGS_SIZE (34 * sizeof(__u32))
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+struct cmd_chain_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 count;
+} __packed;
+
+struct cmd_chain_resp {
+ enum aie2_msg_status status;
+ __u32 fail_cmd_idx;
+ enum aie2_msg_status fail_cmd_status;
+} __packed;
+
+#define AIE2_MSG_SYNC_BO_SRC_TYPE GENMASK(3, 0)
+#define AIE2_MSG_SYNC_BO_DST_TYPE GENMASK(7, 4)
+struct sync_bo_req {
+ __u64 src_addr;
+ __u64 dst_addr;
+ __u32 size;
+#define SYNC_BO_DEV_MEM 0
+#define SYNC_BO_HOST_MEM 2
+ __u32 type;
+} __packed;
+
+struct sync_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
new file mode 100644
index 000000000000..87c425e3d2b9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static int aie2_max_col = XRS_MAX_COL;
+module_param(aie2_max_col, uint, 0600);
+MODULE_PARM_DESC(aie2_max_col, "Maximum column that can be used");
+
+/*
+ * The management mailbox channel is allocated by the firmware. The
+ * related register and ring buffer information is in the SRAM BAR.
+ * This struct describes the register layout.
+ */
+#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
+struct mgmt_mbox_chann_info {
+ __u32 x2i_tail;
+ __u32 x2i_head;
+ __u32 x2i_buf;
+ __u32 x2i_buf_sz;
+ __u32 i2x_tail;
+ __u32 i2x_head;
+ __u32 i2x_buf;
+ __u32 i2x_buf_sz;
+ __u32 magic;
+ __u32 msi_id;
+ __u32 prot_major;
+ __u32 prot_minor;
+ __u32 rsvd[4];
+};
+
+static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+	/*
+	 * The mailbox behavior supported by the driver is defined by
+	 * ndev->priv->protocol_major and protocol_minor.
+	 *
+	 * When protocol_major and fw_major differ, the driver and the
+	 * firmware are incompatible.
+	 */
+ if (ndev->priv->protocol_major != fw_major) {
+ XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
+ fw_major, fw_minor);
+ return -EINVAL;
+ }
+
+	/*
+	 * When protocol_minor is greater than fw_minor, the driver relies
+	 * on operations that the installed firmware does not support.
+	 */
+ if (ndev->priv->protocol_minor > fw_minor) {
+ XDNA_ERR(xdna, "Firmware minor version smaller than supported");
+ return -EINVAL;
+ }
+ return 0;
+}
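+
+/*
+ * Example with hypothetical version numbers: a driver built for protocol
+ * 6.1 accepts firmware 6.1 or 6.3 (same major, equal or newer minor) but
+ * rejects 5.2 (major mismatch) and 6.0 (firmware minor too old).
+ */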
+
+static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
+ XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
+ XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
+ XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
+ XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
+ XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
+ XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
+}
+
+static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
+{
+ struct mgmt_mbox_chann_info info_regs;
+ struct xdna_mailbox_chann_res *i2x;
+ struct xdna_mailbox_chann_res *x2i;
+ u32 addr, off;
+ u32 *reg;
+ int ret;
+ int i;
+
+	/*
+	 * Once the firmware is alive, it writes the management channel
+	 * information into the SRAM BAR and stores the address of that
+	 * information at the FW_ALIVE_OFF offset in the SRAM BAR.
+	 *
+	 * Reading a non-zero value from FW_ALIVE_OFF implies that the
+	 * firmware is alive.
+	 */
+ ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
+ addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret || !addr)
+ return -ETIME;
+
+ off = AIE2_SRAM_OFF(ndev, addr);
+ reg = (u32 *)&info_regs;
+ for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
+ reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
+
+ if (info_regs.magic != MGMT_MBOX_MAGIC) {
+ XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i2x = &ndev->mgmt_i2x;
+ x2i = &ndev->mgmt_x2i;
+
+ i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
+ i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
+ i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
+ i2x->rb_size = info_regs.i2x_buf_sz;
+
+ x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
+ x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
+ x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
+ x2i->rb_size = info_regs.x2i_buf_sz;
+
+ ndev->mgmt_chan_idx = info_regs.msi_id;
+ ndev->mgmt_prot_major = info_regs.prot_major;
+ ndev->mgmt_prot_minor = info_regs.prot_minor;
+
+ ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
+
+done:
+ aie2_dump_chann_info_debug(ndev);
+
+ /* Must clear address at FW_ALIVE_OFF */
+ writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
+
+ return ret;
+}
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val)
+{
+ const struct rt_config *cfg;
+ u32 value;
+ int ret;
+
+ for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
+ if (cfg->category != category)
+ continue;
+
+ value = val ? *val : cfg->value;
+ ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
+ cfg->type, value);
+ return ret;
+ }
+ }
+
+ return 0;
+}
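+
+/*
+ * Illustrative usage sketch: pass NULL to apply each matching entry's
+ * table default, or override the value for a category, as done for
+ * clock gating:
+ *
+ *   ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ *   u32 gating = 1;
+ *   ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &gating);
+ */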
+
+static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_suspend_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Suspend firmware failed");
+ return ret;
+ }
+
+ ret = aie2_resume_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Resume firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Runtime config failed");
+ return ret;
+ }
+
+ ret = aie2_assign_mgmt_pasid(ndev, 0);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Can not assign PASID");
+ return ret;
+ }
+
+ ret = aie2_xdna_reset(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Reset firmware failed");
+ return ret;
+ }
+
+ if (!ndev->async_events)
+ return 0;
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Send async events failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+		XDNA_ERR(ndev->xdna, "Query firmware version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_version(ndev, &ndev->version);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
+{
+ if (aie2_suspend_fw(ndev))
+ XDNA_ERR(ndev->xdna, "Suspend_fw failed");
+ XDNA_DBG(ndev->xdna, "Firmware suspended");
+}
+
+static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ hwctx->start_col = action->part.start_col;
+ hwctx->num_col = action->part.ncols;
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "create context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_unload(void *cb_arg)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_dev_hdl *ndev;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ ndev = xdna->dev_handle;
+ ndev->dft_dpm_level = dpm_level;
+ if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
+ return 0;
+
+ return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+}
+
+static struct xrs_action_ops aie2_xrs_actions = {
+ .load = aie2_xrs_load,
+ .unload = aie2_xrs_unload,
+ .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
+};
+
+static void aie2_hw_stop(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+ aie2_smu_fini(ndev);
+ pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
+}
+
+static int aie2_hw_start(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ struct xdna_mailbox_res mbox_res;
+ u32 xdna_mailbox_intr_reg;
+ int mgmt_mb_irq, ret;
+
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ ret = aie2_smu_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
+ goto disable_dev;
+ }
+
+ ret = aie2_psp_start(ndev->psp_hdl);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
+ goto fini_smu;
+ }
+
+ ret = aie2_get_mgmt_chann_info(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "firmware is not alive");
+ goto stop_psp;
+ }
+
+ mbox_res.ringbuf_base = ndev->sram_base;
+ mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
+ mbox_res.mbox_base = ndev->mbox_base;
+ mbox_res.mbox_size = MBOX_SIZE(ndev);
+ mbox_res.name = "xdna_mailbox";
+ ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
+ if (!ndev->mbox) {
+ XDNA_ERR(xdna, "failed to create mailbox device");
+ ret = -ENODEV;
+ goto stop_psp;
+ }
+
+ mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
+ if (mgmt_mb_irq < 0) {
+ ret = mgmt_mb_irq;
+ XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
+ goto stop_psp;
+ }
+
+ xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
+ ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
+ &ndev->mgmt_x2i,
+ &ndev->mgmt_i2x,
+ xdna_mailbox_intr_reg,
+ mgmt_mb_irq);
+ if (!ndev->mgmt_chann) {
+ XDNA_ERR(xdna, "failed to create management mailbox channel");
+ ret = -EINVAL;
+ goto stop_psp;
+ }
+
+ ret = aie2_pm_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+		XDNA_ERR(xdna, "Initialize mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ndev->dev_status = AIE2_DEV_START;
+
+ return 0;
+
+destroy_mgmt_chann:
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+fini_smu:
+ aie2_smu_fini(ndev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ guard(mutex)(&xdna->dev_lock);
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int aie2_init(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
+ struct init_config xrs_cfg = { 0 };
+ struct amdxdna_dev_hdl *ndev;
+ struct psp_config psp_conf;
+ const struct firmware *fw;
+ unsigned long bars = 0;
+ int i, nvec, ret;
+
+ ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->priv = xdna->dev_info->dev_priv;
+ ndev->xdna = xdna;
+
+ ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
+ ndev->priv->fw_path, ret);
+ return ret;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ set_bit(PSP_REG_BAR(ndev, i), &bars);
+
+ set_bit(xdna->dev_info->sram_bar, &bars);
+ set_bit(xdna->dev_info->smu_bar, &bars);
+ set_bit(xdna->dev_info->mbox_bar, &bars);
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!test_bit(i, &bars))
+ continue;
+ tbl[i] = pcim_iomap(pdev, i, 0);
+ if (!tbl[i]) {
+ XDNA_ERR(xdna, "map bar %d failed", i);
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ }
+
+ ndev->sram_base = tbl[xdna->dev_info->sram_bar];
+ ndev->smu_base = tbl[xdna->dev_info->smu_bar];
+ ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
+ goto release_fw;
+ }
+
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+		XDNA_ERR(xdna, "failed to get number of interrupt vectors");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
+ goto release_fw;
+ }
+
+ psp_conf.fw_size = fw->size;
+ psp_conf.fw_buf = fw->data;
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
+ ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
+ if (!ndev->psp_hdl) {
+ XDNA_ERR(xdna, "failed to create psp");
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ xdna->dev_handle = ndev;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "start npu failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
+ goto stop_hw;
+ }
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
+ xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
+ for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
+ xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
+ xrs_cfg.sys_eff_factor = 1;
+ xrs_cfg.ddev = &xdna->ddev;
+ xrs_cfg.actions = &aie2_xrs_actions;
+ xrs_cfg.total_col = ndev->total_col;
+
+ xdna->xrs_hdl = xrsm_init(&xrs_cfg);
+ if (!xdna->xrs_hdl) {
+ XDNA_ERR(xdna, "Initialize resolver failed");
+ ret = -EINVAL;
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
+ goto async_event_free;
+ }
+
+ /* Issue a command to make sure firmware handled async events */
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(xdna, "Re-query firmware version failed");
+ goto async_event_free;
+ }
+
+ release_firmware(fw);
+ return 0;
+
+async_event_free:
+ aie2_error_async_events_free(ndev);
+stop_hw:
+ aie2_hw_stop(xdna);
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void aie2_fini(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ aie2_hw_stop(xdna);
+ aie2_error_async_events_free(ndev);
+}
+
+static int aie2_get_aie_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_status status;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret;
+
+ ndev = xdna->dev_handle;
+ if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
+ return -EFAULT;
+ }
+
+ if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
+ status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
+ return -EINVAL;
+ }
+
+ ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
+ status.buffer_size, &status.cols_filled);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_aie_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_metadata *meta;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->col_size = ndev->metadata.size;
+ meta->cols = ndev->metadata.cols;
+ meta->rows = ndev->metadata.rows;
+
+ meta->version.major = ndev->metadata.version.major;
+ meta->version.minor = ndev->metadata.version.minor;
+
+ meta->core.row_count = ndev->metadata.core.row_count;
+ meta->core.row_start = ndev->metadata.core.row_start;
+ meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
+ meta->core.lock_count = ndev->metadata.core.lock_count;
+ meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
+
+ meta->mem.row_count = ndev->metadata.mem.row_count;
+ meta->mem.row_start = ndev->metadata.mem.row_start;
+ meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
+ meta->mem.lock_count = ndev->metadata.mem.lock_count;
+ meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
+
+ meta->shim.row_count = ndev->metadata.shim.row_count;
+ meta->shim.row_start = ndev->metadata.shim.row_start;
+ meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
+ meta->shim.lock_count = ndev->metadata.shim.lock_count;
+ meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
+ ret = -EFAULT;
+
+ kfree(meta);
+ return ret;
+}
+
+static int aie2_get_aie_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ version.major = ndev->version.major;
+ version.minor = ndev->version.minor;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_firmware_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_firmware_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ version.major = xdna->fw_ver.major;
+ version.minor = xdna->fw_ver.minor;
+ version.patch = xdna->fw_ver.sub;
+ version.build = xdna->fw_ver.build;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_power_mode mode = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ mode.power_mode = ndev->pw_mode;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_clock_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_clock_metadata *clock;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return -ENOMEM;
+
+ snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
+ "MP-NPU Clock");
+ clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
+ snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
+ clock->h_clock.freq_mhz = ndev->hclk_freq;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
+ ret = -EFAULT;
+
+ kfree(clock);
+ return ret;
+}
+
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
+static int aie2_get_hwctx_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
+ return ret;
+}
+
+static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_QUERY_AIE_STATUS:
+ ret = aie2_get_aie_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_METADATA:
+ ret = aie2_get_aie_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_VERSION:
+ ret = aie2_get_aie_version(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
+ ret = aie2_get_clock_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
+ ret = aie2_get_hwctx_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
+ ret = aie2_get_firmware_version(client, args);
+ break;
+ case DRM_AMDXDNA_GET_POWER_MODE:
+ ret = aie2_get_power_mode(client, args);
+ break;
+ default:
+		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return ret;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ default:
+		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_set_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_drm_set_power_mode power_state;
+ enum amdxdna_power_mode_type power_mode;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
+ sizeof(power_state))) {
+ XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
+ return -EFAULT;
+ }
+
+ if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
+ return -EINVAL;
+
+ power_mode = power_state.power_mode;
+ if (power_mode > POWER_MODE_TURBO) {
+ XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
+ return -EINVAL;
+ }
+
+ return aie2_pm_set_mode(xdna->dev_handle, power_mode);
+}
+
+static int aie2_set_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_SET_POWER_MODE:
+ ret = aie2_set_power_mode(client, args);
+ break;
+ default:
+		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+const struct amdxdna_dev_ops aie2_ops = {
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
+ .hmm_invalidate = aie2_hmm_invalidate,
+ .get_array = aie2_get_array,
+};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
new file mode 100644
index 000000000000..91a8e948f82a
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_PCI_H_
+#define _AIE2_PCI_H_
+
+#include <drm/amdxdna_accel.h>
+#include <linux/semaphore.h>
+
+#include "amdxdna_mailbox.h"
+
+#define AIE2_INTERVAL 20000 /* us */
+#define AIE2_TIMEOUT 1000000 /* us */
+
+/* Firmware determines device memory base address and size */
+#define AIE2_DEVM_BASE 0x4000000
+#define AIE2_DEVM_SIZE SZ_64M
+
+#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))
+
+#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
+#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
+
+#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
+#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
+#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)
+
+#define SMU_REG(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
+})
+#define SRAM_GET_ADDR(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
+})
+
+#define CHAN_SLOT_SZ SZ_8K
+#define MBOX_SIZE(ndev) \
+({ \
+ typeof(ndev) _ndev = (ndev); \
+ ((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
+ pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
+})
+
+enum aie2_smu_reg_idx {
+ SMU_CMD_REG = 0,
+ SMU_ARG_REG,
+ SMU_INTR_REG,
+ SMU_RESP_REG,
+ SMU_OUT_REG,
+ SMU_MAX_REGS /* Keep this at the end */
+};
+
+enum aie2_sram_reg_idx {
+ MBOX_CHANN_OFF = 0,
+ FW_ALIVE_OFF,
+ SRAM_MAX_INDEX /* Keep this at the end */
+};
+
+enum psp_reg_idx {
+ PSP_CMD_REG = 0,
+ PSP_ARG0_REG,
+ PSP_ARG1_REG,
+ PSP_ARG2_REG,
+ PSP_NUM_IN_REGS, /* number of input registers */
+ PSP_INTR_REG = PSP_NUM_IN_REGS,
+ PSP_STATUS_REG,
+ PSP_RESP_REG,
+ PSP_MAX_REGS /* Keep this at the end */
+};
+
+struct amdxdna_client;
+struct amdxdna_fw_ver;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+struct psp_config {
+ const void *fw_buf;
+ u32 fw_size;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+struct aie_version {
+ u16 major;
+ u16 minor;
+};
+
+struct aie_tile_metadata {
+ u16 row_count;
+ u16 row_start;
+ u16 dma_channel_count;
+ u16 lock_count;
+ u16 event_reg_count;
+};
+
+struct aie_metadata {
+ u32 size;
+ u16 cols;
+ u16 rows;
+ struct aie_version version;
+ struct aie_tile_metadata core;
+ struct aie_tile_metadata mem;
+ struct aie_tile_metadata shim;
+};
+
+enum rt_config_category {
+ AIE2_RT_CFG_INIT,
+ AIE2_RT_CFG_CLK_GATING,
+};
+
+struct rt_config {
+ u32 type;
+ u32 value;
+ u32 category;
+};
+
+struct dpm_clk_freq {
+ u32 npuclk;
+ u32 hclk;
+};
+
+/*
+ * Define the maximum number of pending commands in a hardware context.
+ * Must be a power of 2!
+ */
+#define HWCTX_MAX_CMDS 4
+#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
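+/*
+ * Example: with HWCTX_MAX_CMDS = 4, sequence numbers map to job slots
+ * 0, 1, 2, 3, 0, 1, ... so get_job_idx(5) == 1. The power-of-2
+ * requirement is what lets the modulo reduce to this bitwise AND.
+ */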
+struct amdxdna_hwctx_priv {
+ struct amdxdna_gem_obj *heap;
+ void *mbox_chann;
+
+ struct drm_gpu_scheduler sched;
+ struct drm_sched_entity entity;
+
+ struct mutex io_lock; /* protect seq and cmd order */
+ struct wait_queue_head job_free_wq;
+ u32 num_pending;
+ u64 seq;
+ struct semaphore job_sem;
+ bool job_done;
+
+ /* Completed job counter */
+ u64 completed;
+
+ struct amdxdna_gem_obj *cmd_buf[HWCTX_MAX_CMDS];
+ struct drm_syncobj *syncobj;
+};
+
+enum aie2_dev_status {
+ AIE2_DEV_UNINIT,
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
+
+struct amdxdna_dev_hdl {
+ struct amdxdna_dev *xdna;
+ const struct amdxdna_dev_priv *priv;
+ void __iomem *sram_base;
+ void __iomem *smu_base;
+ void __iomem *mbox_base;
+ struct psp_device *psp_hdl;
+
+ struct xdna_mailbox_chann_res mgmt_x2i;
+ struct xdna_mailbox_chann_res mgmt_i2x;
+ u32 mgmt_chan_idx;
+ u32 mgmt_prot_major;
+ u32 mgmt_prot_minor;
+
+ u32 total_col;
+ struct aie_version version;
+ struct aie_metadata metadata;
+
+	/* power management and clock */
+ enum amdxdna_power_mode_type pw_mode;
+ u32 dpm_level;
+ u32 dft_dpm_level;
+ u32 max_dpm_level;
+ u32 clk_gating;
+ u32 npuclk_freq;
+ u32 hclk_freq;
+
+ /* Mailbox and the management channel */
+ struct mailbox *mbox;
+ struct mailbox_channel *mgmt_chann;
+ struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
+ u32 hwctx_num;
+};
+
+#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
+ [reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
+
+struct aie2_bar_off_pair {
+ int bar_idx;
+ u32 offset;
+};
+
+struct aie2_hw_ops {
+ int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+};
+
+struct amdxdna_dev_priv {
+ const char *fw_path;
+ u64 protocol_major;
+ u64 protocol_minor;
+ const struct rt_config *rt_config;
+ const struct dpm_clk_freq *dpm_clk_tbl;
+
+#define COL_ALIGN_NONE 0
+#define COL_ALIGN_NATURE 1
+ u32 col_align;
+ u32 mbox_dev_addr;
+ /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
+ u32 mbox_size;
+ u32 sram_dev_addr;
+ struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
+ struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
+ struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
+ struct aie2_hw_ops hw_ops;
+};
+
+extern const struct amdxdna_dev_ops aie2_ops;
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val);
+
+/* aie2 npu hw config */
+extern const struct dpm_clk_freq npu1_dpm_clk_table[];
+extern const struct dpm_clk_freq npu4_dpm_clk_table[];
+extern const struct rt_config npu1_default_rt_cfg[];
+extern const struct rt_config npu4_default_rt_cfg[];
+
+/* aie2_smu.c */
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+
+/* aie2_pm.c */
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
+
+/* aie2_psp.c */
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
+int aie2_psp_start(struct psp_device *psp);
+void aie2_psp_stop(struct psp_device *psp);
+
+/* aie2_error.c */
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_msg_thread(void *data);
+
+/* aie2_message.c */
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver);
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, void __iomem *, size_t));
+int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+
+/* aie2_hwctx.c */
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+
+#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
new file mode 100644
index 000000000000..426c38fce848
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define AIE2_CLK_GATING_ENABLE 1
+#define AIE2_CLK_GATING_DISABLE 0
+
+static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
+ if (ret)
+ return ret;
+
+ ndev->clk_gating = val;
+ return 0;
+}
+
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ if (ndev->dev_status != AIE2_DEV_UNINIT) {
+ /* Resume device */
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
+ ndev->max_dpm_level++;
+ ndev->max_dpm_level--;
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = POWER_MODE_DEFAULT;
+ ndev->dft_dpm_level = ndev->max_dpm_level;
+
+ return 0;
+}
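+
+/*
+ * Illustrative only: for a hypothetical dpm_clk_tbl terminated by a
+ * zeroed entry, e.g. { {400, 600}, {600, 800}, {800, 1000}, {0, 0} },
+ * the walk in aie2_pm_init() leaves max_dpm_level == 2, the last entry
+ * with a non-zero hclk.
+ */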
+
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 clk_gating, dpm_level;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (ndev->pw_mode == target)
+ return 0;
+
+ switch (target) {
+ case POWER_MODE_TURBO:
+ if (ndev->hwctx_num) {
+			XDNA_ERR(xdna, "Cannot set turbo when there are active hwctx");
+ return -EINVAL;
+ }
+
+ clk_gating = AIE2_CLK_GATING_DISABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_HIGH:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_DEFAULT:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->dft_dpm_level;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, clk_gating);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = target;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
new file mode 100644
index 000000000000..f28a060a8810
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+#define PSP_STATUS_READY BIT(31)
+
+/* PSP commands */
+#define PSP_VALIDATE 1
+#define PSP_START 2
+#define PSP_RELEASE_TMR 3
+
+/* PSP special arguments */
+#define PSP_START_COPY_FW 1
+
+/* PSP response error code */
+#define PSP_ERROR_CANCEL 0xFFFF0002
+#define PSP_ERROR_BAD_STATE 0xFFFF0007
+
+#define PSP_FW_ALIGN 0x10000
+#define PSP_POLL_INTERVAL 20000 /* us */
+#define PSP_POLL_TIMEOUT 1000000 /* us */
+
+#define PSP_REG(p, reg) ((p)->psp_regs[reg])
+
+struct psp_device {
+ struct drm_device *ddev;
+ struct psp_config conf;
+ u32 fw_buf_sz;
+ u64 fw_paddr;
+ void *fw_buffer;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+static int psp_exec(struct psp_device *psp, u32 *reg_vals)
+{
+ u32 resp_code;
+ int ret, i;
+ u32 ready;
+
+ /* Write command and argument registers */
+ for (i = 0; i < PSP_NUM_IN_REGS; i++)
+ writel(reg_vals[i], PSP_REG(psp, i));
+
+ /* clear and set PSP INTR register to kick off */
+ writel(0, PSP_REG(psp, PSP_INTR_REG));
+ writel(1, PSP_REG(psp, PSP_INTR_REG));
+
+	/* PSP should be busy. Wait for ready, so we know the task is done. */
+ ret = readx_poll_timeout(readl, PSP_REG(psp, PSP_STATUS_REG), ready,
+ FIELD_GET(PSP_STATUS_READY, ready),
+ PSP_POLL_INTERVAL, PSP_POLL_TIMEOUT);
+ if (ret) {
+		drm_err(psp->ddev, "PSP is not ready, ret %d", ret);
+ return ret;
+ }
+
+ resp_code = readl(PSP_REG(psp, PSP_RESP_REG));
+ if (resp_code) {
+ drm_err(psp->ddev, "fw return error 0x%x", resp_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void aie2_psp_stop(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS] = { PSP_RELEASE_TMR, };
+ int ret;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret)
+ drm_err(psp->ddev, "release tmr failed, ret %d", ret);
+}
+
+int aie2_psp_start(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS];
+ int ret;
+
+ reg_vals[0] = PSP_VALIDATE;
+ reg_vals[1] = lower_32_bits(psp->fw_paddr);
+ reg_vals[2] = upper_32_bits(psp->fw_paddr);
+ reg_vals[3] = psp->fw_buf_sz;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to validate fw, ret %d", ret);
+ return ret;
+ }
+
+ memset(reg_vals, 0, sizeof(reg_vals));
+ reg_vals[0] = PSP_START;
+ reg_vals[1] = PSP_START_COPY_FW;
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to start fw, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
+{
+ struct psp_device *psp;
+ u64 offset;
+
+ psp = drmm_kzalloc(ddev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->ddev = ddev;
+ memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
+
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
+ if (!psp->fw_buffer) {
+ drm_err(ddev, "no memory for fw buffer");
+ return NULL;
+ }
+
+	/*
+	 * The AMD Platform Security Processor (PSP) requires a host
+	 * physical address to load the NPU firmware.
+	 */
+ psp->fw_paddr = virt_to_phys(psp->fw_buffer);
+ offset = ALIGN(psp->fw_paddr, PSP_FW_ALIGN) - psp->fw_paddr;
+ psp->fw_paddr += offset;
+ memcpy(psp->fw_buffer + offset, conf->fw_buf, conf->fw_size);
+
+ return psp;
+}
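+
+/*
+ * Worked example with a hypothetical address: with PSP_FW_ALIGN = 0x10000,
+ * a buffer at physical 0x12345678 gives offset = 0x12350000 - 0x12345678
+ * = 0xa988, so fw_paddr becomes 0x12350000 and the image is copied to
+ * that 64K boundary inside the over-allocated buffer.
+ */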
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
new file mode 100644
index 000000000000..d303701b0ded
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define SMU_RESULT_OK 1
+
+/* SMU commands */
+#define AIE2_SMU_POWER_ON 0x3
+#define AIE2_SMU_POWER_OFF 0x4
+#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
+#define AIE2_SMU_SET_HCLK_FREQ 0x6
+#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
+#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+
+static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
+ u32 reg_arg, u32 *out)
+{
+ u32 resp;
+ int ret;
+
+ writel(0, SMU_REG(ndev, SMU_RESP_REG));
+ writel(reg_arg, SMU_REG(ndev, SMU_ARG_REG));
+ writel(reg_cmd, SMU_REG(ndev, SMU_CMD_REG));
+
+ /* Clear and set SMU_INTR_REG to kick off */
+ writel(0, SMU_REG(ndev, SMU_INTR_REG));
+ writel(1, SMU_REG(ndev, SMU_INTR_REG));
+
+ ret = readx_poll_timeout(readl, SMU_REG(ndev, SMU_RESP_REG), resp,
+ resp, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d timed out", reg_cmd);
+ return ret;
+ }
+
+ if (out)
+ *out = readl(SMU_REG(ndev, SMU_OUT_REG));
+
+ if (resp != SMU_RESULT_OK) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ u32 freq;
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ return ret;
+ }
+ ndev->npuclk_freq = freq;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ return ret;
+ }
+ ndev->hclk_freq = freq;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+		XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
+ ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ndev->priv->hw_ops.set_dpm(ndev, 0);
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret)
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.c b/drivers/accel/amdxdna/aie2_solver.c
new file mode 100644
index 000000000000..2013d1f13aae
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include "aie2_solver.h"
+
+struct partition_node {
+ struct list_head list;
+ u32 nshared; /* # shared requests */
+ u32 start_col; /* start column */
+ u32 ncols; /* # columns */
+ bool exclusive; /* can not be shared if set */
+};
+
+struct solver_node {
+ struct list_head list;
+ u64 rid; /* Request ID from consumer */
+
+ struct partition_node *pt_node;
+ void *cb_arg;
+ u32 dpm_level;
+ u32 cols_len;
+ u32 start_cols[] __counted_by(cols_len);
+};
+
+struct solver_rgroup {
+ u32 rgid;
+ u32 nnode;
+ u32 npartition_node;
+
+ DECLARE_BITMAP(resbit, XRS_MAX_COL);
+ struct list_head node_list;
+ struct list_head pt_node_list;
+};
+
+struct solver_state {
+ struct solver_rgroup rgp;
+ struct init_config cfg;
+ struct xrs_action_ops *actions;
+};
+
+static u32 calculate_gops(struct aie_qos *rqos)
+{
+ u32 service_rate = 0;
+
+ if (rqos->latency)
+ service_rate = (1000 / rqos->latency);
+
+ if (rqos->fps > service_rate)
+ return rqos->fps * rqos->gops;
+
+ return service_rate * rqos->gops;
+}
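+
+/*
+ * Worked example with hypothetical QoS values: latency = 10 ms gives a
+ * service rate of 1000 / 10 = 100 frames/s; with fps = 30 and gops = 2
+ * the demand is max(100, 30) * 2 = 200 GOPs.
+ */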
+
+/*
+ * qos_meet() - Check whether the QoS request can be met.
+ */
+static int qos_meet(struct solver_state *xrs, struct aie_qos *rqos, u32 cgops)
+{
+ u32 request_gops = calculate_gops(rqos) * xrs->cfg.sys_eff_factor;
+
+ if (request_gops <= cgops)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * sanity_check() - Do a basic sanity check on the allocation request.
+ */
+static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 cu_clk_freq;
+
+ if (cdop->ncols > xrs->cfg.total_col)
+ return -EINVAL;
+
+	/*
+	 * Check that we can find at least one CDO group that meets the
+	 * GOPs requirement.
+	 */
+ cu_clk_freq = xrs->cfg.clk_list.cu_clk_list[xrs->cfg.clk_list.num_levels - 1];
+
+ if (qos_meet(xrs, rqos, cdop->qos_cap.opc * cu_clk_freq / 1000))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
+{
+	/*
+	 * gops is retrieved from the xmodel, so it is always set.
+	 * fps and latency are the configurable params from the application.
+	 */
+ if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
+ return true;
+
+ return false;
+}
+
+static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
+{
+ struct solver_rgroup *rgp = &xrs->rgp;
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 freq, max_dpm_level, level;
+ struct solver_node *node;
+
+ max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
+ /* If no QoS parameters are passed, set it to the max DPM level */
+ if (!is_valid_qos_dpm_params(rqos)) {
+ level = max_dpm_level;
+ goto set_dpm;
+ }
+
+	/* Find one CDO group that meets the GOPs requirement. */
+ for (level = 0; level < max_dpm_level; level++) {
+ freq = xrs->cfg.clk_list.cu_clk_list[level];
+ if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
+ break;
+ }
+
+ /* set the dpm level which fits all the sessions */
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->dpm_level > level)
+ level = node->dpm_level;
+ }
+
+set_dpm:
+ *dpm_level = level;
+ return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
+}
+
+static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
+{
+ struct solver_node *node;
+
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->rid == rid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void remove_partition_node(struct solver_rgroup *rgp,
+ struct partition_node *pt_node)
+{
+ pt_node->nshared--;
+ if (pt_node->nshared > 0)
+ return;
+
+ list_del(&pt_node->list);
+ rgp->npartition_node--;
+
+ bitmap_clear(rgp->resbit, pt_node->start_col, pt_node->ncols);
+ kfree(pt_node);
+}
+
+static void remove_solver_node(struct solver_rgroup *rgp,
+ struct solver_node *node)
+{
+ list_del(&node->list);
+ rgp->nnode--;
+
+ if (node->pt_node)
+ remove_partition_node(rgp, node->pt_node);
+
+ kfree(node);
+}
+
+static int get_free_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node;
+ u32 ncols = req->cdo.ncols;
+ u32 col, i;
+
+ for (i = 0; i < snode->cols_len; i++) {
+ col = snode->start_cols[i];
+ if (find_next_bit(xrs->rgp.resbit, XRS_MAX_COL, col) >= col + ncols)
+ break;
+ }
+
+ if (i == snode->cols_len)
+ return -ENODEV;
+
+ pt_node = kzalloc(sizeof(*pt_node), GFP_KERNEL);
+ if (!pt_node)
+ return -ENOMEM;
+
+ pt_node->nshared = 1;
+ pt_node->start_col = col;
+ pt_node->ncols = ncols;
+
+	/* Always set exclusive to false for now */
+ pt_node->exclusive = false;
+
+ list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
+ xrs->rgp.npartition_node++;
+ bitmap_set(xrs->rgp.resbit, pt_node->start_col, pt_node->ncols);
+
+ snode->pt_node = pt_node;
+
+ return 0;
+}
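+
+/*
+ * The find_next_bit() test in get_free_partition() succeeds only when no
+ * bit of resbit is set in [col, col + ncols): the first allocated column
+ * at or after col lies beyond the requested range, so the partition is
+ * free.
+ */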
+
+static int allocate_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node, *rpt_node = NULL;
+ int idx, ret;
+
+ ret = get_free_partition(xrs, snode, req);
+ if (!ret)
+ return ret;
+
+	/* try to get a shareable partition */
+ list_for_each_entry(pt_node, &xrs->rgp.pt_node_list, list) {
+ if (pt_node->exclusive)
+ continue;
+
+ if (rpt_node && pt_node->nshared >= rpt_node->nshared)
+ continue;
+
+ for (idx = 0; idx < snode->cols_len; idx++) {
+ if (snode->start_cols[idx] != pt_node->start_col)
+ continue;
+
+ if (req->cdo.ncols != pt_node->ncols)
+ continue;
+
+ rpt_node = pt_node;
+ break;
+ }
+ }
+
+ if (!rpt_node)
+ return -ENODEV;
+
+ rpt_node->nshared++;
+ snode->pt_node = rpt_node;
+
+ return 0;
+}
+
+static struct solver_node *create_solver_node(struct solver_state *xrs,
+ struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct solver_node *node;
+ int ret;
+
+ node = kzalloc(struct_size(node, start_cols, cdop->cols_len), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->rid = req->rid;
+ node->cols_len = cdop->cols_len;
+ memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
+
+ ret = allocate_partition(xrs, node, req);
+ if (ret)
+ goto free_node;
+
+ list_add_tail(&node->list, &xrs->rgp.node_list);
+ xrs->rgp.nnode++;
+ return node;
+
+free_node:
+ kfree(node);
+ return ERR_PTR(ret);
+}
+
+static void fill_load_action(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct xrs_action_load *action)
+{
+ action->rid = snode->rid;
+ action->part.start_col = snode->pt_node->start_col;
+ action->part.ncols = snode->pt_node->ncols;
+}
+
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
+{
+ struct xrs_action_load load_act;
+ struct solver_node *snode;
+ struct solver_state *xrs;
+ u32 dpm_level;
+ int ret;
+
+ xrs = (struct solver_state *)hdl;
+
+ ret = sanity_check(xrs, req);
+ if (ret) {
+ drm_err(xrs->cfg.ddev, "invalid request");
+ return ret;
+ }
+
+ if (rg_search_node(&xrs->rgp, req->rid)) {
+		drm_err(xrs->cfg.ddev, "rid %lld is in use", req->rid);
+ return -EEXIST;
+ }
+
+ snode = create_solver_node(xrs, req);
+ if (IS_ERR(snode))
+ return PTR_ERR(snode);
+
+ fill_load_action(xrs, snode, &load_act);
+ ret = xrs->cfg.actions->load(cb_arg, &load_act);
+ if (ret)
+ goto free_node;
+
+ ret = set_dpm_level(xrs, req, &dpm_level);
+ if (ret)
+ goto free_node;
+
+ snode->dpm_level = dpm_level;
+ snode->cb_arg = cb_arg;
+
+ drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",
+ snode->pt_node->start_col, snode->pt_node->ncols);
+
+ return 0;
+
+free_node:
+ remove_solver_node(&xrs->rgp, snode);
+
+ return ret;
+}
+
+int xrs_release_resource(void *hdl, u64 rid)
+{
+ struct solver_state *xrs = hdl;
+ struct solver_node *node;
+
+ node = rg_search_node(&xrs->rgp, rid);
+ if (!node) {
+		drm_err(xrs->cfg.ddev, "node does not exist");
+ return -ENODEV;
+ }
+
+ xrs->cfg.actions->unload(node->cb_arg);
+ remove_solver_node(&xrs->rgp, node);
+
+ return 0;
+}
+
+void *xrsm_init(struct init_config *cfg)
+{
+ struct solver_rgroup *rgp;
+ struct solver_state *xrs;
+
+ xrs = drmm_kzalloc(cfg->ddev, sizeof(*xrs), GFP_KERNEL);
+ if (!xrs)
+ return NULL;
+
+ memcpy(&xrs->cfg, cfg, sizeof(*cfg));
+
+ rgp = &xrs->rgp;
+ INIT_LIST_HEAD(&rgp->node_list);
+ INIT_LIST_HEAD(&rgp->pt_node_list);
+
+ return xrs;
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.h b/drivers/accel/amdxdna/aie2_solver.h
new file mode 100644
index 000000000000..a2e3c52229e9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_SOLVER_H
+#define _AIE2_SOLVER_H
+
+#define XRS_MAX_COL 128
+
+/*
+ * Structure used to describe a partition. A partition is a column-based
+ * allocation unit described by its start column and number of columns.
+ */
+struct aie_part {
+ u32 start_col;
+ u32 ncols;
+};
+
+/*
+ * The QoS capabilities of a given AIE partition.
+ */
+struct aie_qos_cap {
+ u32 opc; /* operations per cycle */
+ u32 dma_bw; /* DMA bandwidth */
+};
+
+/*
+ * QoS requirement of a resource allocation.
+ */
+struct aie_qos {
+ u32 gops; /* Giga operations */
+ u32 fps; /* Frames per second */
+ u32 dma_bw; /* DMA bandwidth */
+ u32 latency; /* Frame response latency */
+ u32 exec_time; /* Frame execution time */
+ u32 priority; /* Request priority */
+};
+
+/*
+ * Structure used to describe a relocatable CDO (Configuration Data Object).
+ */
+struct cdo_parts {
+ u32 *start_cols; /* Start column array */
+ u32 cols_len; /* Length of start column array */
+ u32 ncols; /* # of column */
+ struct aie_qos_cap qos_cap; /* CDO QoS capabilities */
+};
+
+/*
+ * Structure used to describe a request to allocate.
+ */
+struct alloc_requests {
+ u64 rid;
+ struct cdo_parts cdo;
+ struct aie_qos rqos; /* Requested QoS */
+};
+
+/*
+ * Load callback argument
+ */
+struct xrs_action_load {
+ u32 rid;
+ struct aie_part part;
+};
+
+/*
+ * Define the power level available
+ *
+ * POWER_LEVEL_MIN:
+ * Lowest power level. Usually set when all actions are unloaded.
+ *
+ * POWER_LEVEL_n:
+ * Power levels 0 - n; each step is an increase in system frequencies.
+ */
+enum power_level {
+ POWER_LEVEL_MIN = 0x0,
+ POWER_LEVEL_0 = 0x1,
+ POWER_LEVEL_1 = 0x2,
+ POWER_LEVEL_2 = 0x3,
+ POWER_LEVEL_3 = 0x4,
+ POWER_LEVEL_4 = 0x5,
+ POWER_LEVEL_5 = 0x6,
+ POWER_LEVEL_6 = 0x7,
+ POWER_LEVEL_7 = 0x8,
+ POWER_LEVEL_NUM,
+};
+
+/*
+ * Structure used to describe the frequency table.
+ * Resource solver chooses the frequency from the table
+ * to meet the QOS requirements.
+ */
+struct clk_list_info {
+ u32 num_levels; /* available power levels */
+	u32 cu_clk_list[POWER_LEVEL_NUM]; /* available AIE clock frequencies in MHz */
+};
+
+struct xrs_action_ops {
+ int (*load)(void *cb_arg, struct xrs_action_load *action);
+ int (*unload)(void *cb_arg);
+ int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
+};
+
+/*
+ * Structure used to describe information for solver during initialization.
+ */
+struct init_config {
+ u32 total_col;
+ u32 sys_eff_factor; /* system efficiency factor */
+ u32 latency_adj; /* latency adjustment in ms */
+ struct clk_list_info clk_list; /* List of frequencies available in system */
+ struct drm_device *ddev;
+ struct xrs_action_ops *actions;
+};
+
+/*
+ * xrsm_init() - Register the resource solver. A resource solver client
+ * needs to call this function to register itself.
+ *
+ * @cfg: The system metrics for resource solver to use
+ *
+ * Return: A resource solver handle
+ *
+ * Note: We should only create one handle per AIE array to be managed.
+ */
+void *xrsm_init(struct init_config *cfg);
+
+/*
+ * xrs_allocate_resource() - Request to allocate resources for a given context
+ * and its partition metadata (see struct alloc_requests).
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @req: Input to the Resource solver including request id
+ * and partition metadata.
+ * @cb_arg: callback argument pointer
+ *
+ * Return: 0 on success, or a standard error number on failure.
+ *
+ * Note:
+ * There is no locking inside the resource solver, so it is the
+ * caller's responsibility to lock down XCLBINs and grab the
+ * necessary locks.
+ */
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg);
+
+/*
+ * xrs_release_resource() - Request to free resources for a given context.
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @rid: The Request ID to identify the requesting context
+ */
+int xrs_release_resource(void *hdl, u64 rid);
+#endif /* _AIE2_SOLVER_H */
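
For orientation, a minimal sketch of how a client of this header could drive the solver. The column counts, request ID, and NULL cb_arg are illustrative only; in the driver, the real caller wires the xrs_action_ops callbacks to hardware context load/unload.

/* Hypothetical solver client, using only the aie2_solver.h API above. */
static int example_solver_usage(struct drm_device *ddev,
				struct xrs_action_ops *ops)
{
	struct init_config cfg = {
		.total_col = 4,			/* assumed array width */
		.ddev = ddev,
		.actions = ops,
	};
	u32 start_cols[] = { 0, 1, 2 };		/* candidate start columns */
	struct alloc_requests req = {
		.rid = 1,			/* caller-chosen request ID */
	};
	void *hdl;
	int ret;

	/* One handle per managed AIE array */
	hdl = xrsm_init(&cfg);
	if (!hdl)
		return -ENOMEM;

	req.cdo.start_cols = start_cols;
	req.cdo.cols_len = ARRAY_SIZE(start_cols);
	req.cdo.ncols = 2;			/* partition width in columns */

	ret = xrs_allocate_resource(hdl, &req, NULL /* cb_arg */);
	if (ret)
		return ret;

	/* ... the load() callback has received the chosen partition ... */

	return xrs_release_resource(hdl, req.rid);
}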
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
new file mode 100644
index 000000000000..4bfe4ef20550
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define MAX_HWCTX_ID 255
+#define MAX_ARG_COUNT 4095
+
+struct amdxdna_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* for base */
+ struct amdxdna_hwctx *hwctx;
+};
+
+static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
+{
+ return KBUILD_MODNAME;
+}
+
+static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct amdxdna_fence *xdna_fence;
+
+ xdna_fence = container_of(fence, struct amdxdna_fence, base);
+
+ return xdna_fence->hwctx->name;
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = amdxdna_fence_get_driver_name,
+ .get_timeline_name = amdxdna_fence_get_timeline_name,
+};
+
+static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->hwctx = hwctx;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
+ return &fence->base;
+}
+
+static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
+ struct srcu_struct *ss)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ synchronize_srcu(ss);
+
+ /* At this point, the user can no longer submit new commands */
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+
+ kfree(hwctx->name);
+ kfree(hwctx);
+}
+
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, count;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ num_masks = 0;
+ else
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+ if (unlikely(count <= num_masks)) {
+ *size = 0;
+ return NULL;
+ }
+ *size = (count - num_masks) * sizeof(u32);
+ }
+ return &cmd->data[num_masks];
+}
+
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, i;
+ u32 *cu_mask;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ return -1;
+
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+ cu_mask = cmd->data;
+ for (i = 0; i < num_masks; i++) {
+ if (cu_mask[i])
+ return ffs(cu_mask[i]) - 1;
+ }
+
+ return -1;
+}
+
+/*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * This guarantees that the hwctx and its resources are released even if the
+ * user never calls amdxdna_drm_destroy_hwctx_ioctl().
+ */
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ XDNA_DBG(client->xdna, "PID %d close HW context %d",
+ client->pid, hwctx->id);
+ xa_erase(&client->hwctx_xa, hwctx->id);
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+ }
+}
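
The lookup/teardown split here is the classic SRCU pattern: readers find the hwctx in the xarray under srcu_read_lock(), while teardown first erases the entry so no new reader can find it, and only then waits for existing readers. A generic sketch of that ordering (illustrative, not driver code):

/* Generic SRCU teardown ordering; names here are illustrative. */
static void example_srcu_teardown(struct xarray *xa, struct srcu_struct *ss,
				  unsigned long id)
{
	void *obj = xa_erase(xa, id);	/* new readers can no longer find it */

	if (!obj)
		return;

	synchronize_srcu(ss);		/* wait out readers already inside */
	kfree(obj);			/* now nobody can hold a reference */
}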
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_create_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
+ if (!hwctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
+ XDNA_ERR(xdna, "Access QoS info failed");
+ ret = -EFAULT;
+ goto free_hwctx;
+ }
+
+ hwctx->client = client;
+ hwctx->fw_ctx_id = -1;
+ hwctx->num_tiles = args->num_tiles;
+ hwctx->mem_size = args->mem_size;
+ hwctx->max_opc = args->max_opc;
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+ goto free_hwctx;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ if (!hwctx->name) {
+ ret = -ENOMEM;
+ goto rm_id;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ mutex_unlock(&xdna->dev_lock);
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto free_name;
+ }
+ args->handle = hwctx->id;
+ args->syncobj_handle = hwctx->syncobj_hdl;
+ mutex_unlock(&xdna->dev_lock);
+
+ atomic64_set(&hwctx->job_submit_cnt, 0);
+ atomic64_set(&hwctx->job_free_cnt, 0);
+ XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
+ drm_dev_exit(idx);
+ return 0;
+
+free_name:
+ kfree(hwctx->name);
+rm_id:
+ xa_erase(&client->hwctx_xa, hwctx->id);
+free_hwctx:
+ kfree(hwctx);
+exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_destroy_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret = 0, idx;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ mutex_lock(&xdna->dev_lock);
+ hwctx = xa_erase(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ ret = -EINVAL;
+ XDNA_DBG(xdna, "PID %d HW context %d not exist",
+ client->pid, args->handle);
+ goto out;
+ }
+
+ /*
+ * The pushed jobs are handled by the DRM scheduler during destroy.
+ * Use SRCU to synchronize with the exec command ioctls.
+ */
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+
+ XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
+out:
+ mutex_unlock(&xdna->dev_lock);
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+ u32 buf_size;
+ void *buf;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!xdna->dev_info->ops->hwctx_config)
+ return -EOPNOTSUPP;
+
+ val = args->param_val;
+ buf_size = args->param_val_size;
+
+ switch (args->param_type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ /* For the types where param_val is a pointer */
+ if (buf_size > PAGE_SIZE) {
+ XDNA_ERR(xdna, "Config CU param buffer too large");
+ return -E2BIG;
+ }
+
+ /* The hwctx needs to keep this buffer */
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ break;
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ /* For the types where param_val is a plain value */
+ buf = NULL;
+ buf_size = 0;
+ break;
+ default:
+ XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ mutex_unlock(&xdna->dev_lock);
+ kfree(buf);
+ return ret;
+}
+
+static void
+amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ if (!job->bos[i])
+ break;
+ drm_gem_object_put(job->bos[i]);
+ }
+}
+
+static int
+amdxdna_arg_bos_lookup(struct amdxdna_client *client,
+ struct amdxdna_sched_job *job,
+ u32 *bo_hdls, u32 bo_cnt)
+{
+ struct drm_gem_object *gobj;
+ int i, ret;
+
+ job->bo_cnt = bo_cnt;
+ for (i = 0; i < job->bo_cnt; i++) {
+ struct amdxdna_gem_obj *abo;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
+ if (!gobj) {
+ ret = -ENOENT;
+ goto put_shmem_bo;
+ }
+ abo = to_xdna_obj(gobj);
+
+ mutex_lock(&abo->lock);
+ if (abo->pinned) {
+ mutex_unlock(&abo->lock);
+ job->bos[i] = gobj;
+ continue;
+ }
+
+ ret = amdxdna_gem_pin_nolock(abo);
+ if (ret) {
+ mutex_unlock(&abo->lock);
+ drm_gem_object_put(gobj);
+ goto put_shmem_bo;
+ }
+ abo->pinned = true;
+ mutex_unlock(&abo->lock);
+
+ job->bos[i] = gobj;
+ }
+
+ return 0;
+
+put_shmem_bo:
+ amdxdna_arg_bos_put(job);
+ return ret;
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
+{
+ trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_arg_bos_put(job);
+ amdxdna_gem_put_obj(job->cmd_bo);
+}
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_sched_job *job;
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
+ job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
+ job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
+ if (!job->cmd_bo) {
+ XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
+ ret = -EINVAL;
+ goto free_job;
+ }
+ } else {
+ job->cmd_bo = NULL;
+ }
+
+ ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
+ if (ret) {
+ XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
+ goto cmd_put;
+ }
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
+ client->pid, hwctx_hdl);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_ERR(xdna, "HW Context is not ready");
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ job->hwctx = hwctx;
+ job->mm = current->mm;
+
+ job->fence = amdxdna_fence_create(hwctx);
+ if (!job->fence) {
+ XDNA_ERR(xdna, "Failed to create fence");
+ ret = -ENOMEM;
+ goto unlock_srcu;
+ }
+ kref_init(&job->refcnt);
+
+ ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
+ if (ret)
+ goto put_fence;
+
+ /*
+ * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
+ * resources only after synchronize_srcu(). The submitted jobs are
+ * handled by the queue, e.g. the DRM scheduler, in the device layer,
+ * so it is safe to unlock SRCU here.
+ */
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
+
+ return 0;
+
+put_fence:
+ dma_fence_put(job->fence);
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_arg_bos_put(job);
+cmd_put:
+ amdxdna_gem_put_obj(job->cmd_bo);
+free_job:
+ kfree(job);
+ return ret;
+}
+
+/*
+ * The submit command ioctl submits a command to firmware. One firmware command
+ * may contain multiple command BOs for processing as a whole.
+ * The command sequence number is returned, which can be used with the wait command ioctl.
+ */
+static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
+ struct amdxdna_drm_exec_cmd *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ u32 *arg_bo_hdls = NULL;
+ u32 cmd_bo_hdl;
+ int ret;
+
+ if (args->arg_count > MAX_ARG_COUNT) {
+ XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
+ return -EINVAL;
+ }
+
+ /* Only support single command for now. */
+ if (args->cmd_count != 1) {
+ XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
+ return -EINVAL;
+ }
+
+ cmd_bo_hdl = (u32)args->cmd_handles;
+ if (args->arg_count) {
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
+ }
+
+ ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ args->arg_count, args->hwctx, &args->seq);
+ if (ret)
+ XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
+
+free_cmd_bo_hdls:
+ kfree(arg_bo_hdls);
+ if (!ret)
+ XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
+ return ret;
+}
+
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_exec_cmd *args = data;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ switch (args->type) {
+ case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
+ return amdxdna_drm_submit_execbuf(client, args);
+ }
+
+ XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
+ return -EINVAL;
+}
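
From userspace, the submit path above is reached through the accel device node. A rough sketch, assuming the drm/amdxdna_accel.h uAPI this file consumes (the helper name and handle values are placeholders; EINTR retry is omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdxdna_accel.h>

/* Hypothetical helper; fd is an open accel device node. */
static int submit_one_cmd(int fd, uint32_t hwctx, uint32_t cmd_bo,
			  uint32_t *arg_bos, uint32_t arg_cnt, uint64_t *seq)
{
	struct amdxdna_drm_exec_cmd exec = {
		.hwctx = hwctx,
		.type = AMDXDNA_CMD_SUBMIT_EXEC_BUF,
		.cmd_handles = cmd_bo,		/* only one command BO for now */
		.args = (uintptr_t)arg_bos,	/* array of argument BO handles */
		.cmd_count = 1,
		.arg_count = arg_cnt,
	};
	int ret = ioctl(fd, DRM_IOCTL_AMDXDNA_EXEC_CMD, &exec);

	if (!ret)
		*seq = exec.seq;		/* pass to the wait-cmd ioctl */
	return ret;
}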
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
new file mode 100644
index 000000000000..7cd7a55936f0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_CTX_H_
+#define _AMDXDNA_CTX_H_
+
+#include <linux/bitfield.h>
+
+#include "amdxdna_gem.h"
+
+struct amdxdna_hwctx_priv;
+
+enum ert_cmd_opcode {
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+};
+
+enum ert_cmd_state {
+ ERT_CMD_STATE_INVALID,
+ ERT_CMD_STATE_NEW,
+ ERT_CMD_STATE_QUEUED,
+ ERT_CMD_STATE_RUNNING,
+ ERT_CMD_STATE_COMPLETED,
+ ERT_CMD_STATE_ERROR,
+ ERT_CMD_STATE_ABORT,
+ ERT_CMD_STATE_SUBMITTED,
+ ERT_CMD_STATE_TIMEOUT,
+ ERT_CMD_STATE_NORESPONSE,
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_start_npu {
+ u64 buffer; /* instruction buffer address */
+ u32 buffer_size; /* size of buffer in bytes */
+ u32 prop_count; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
+ */
+struct amdxdna_cmd_chain {
+ u32 command_count;
+ u32 submit_index;
+ u32 error_index;
+ u32 reserved[3];
+ u64 data[] __counted_by(command_count);
+};
+
+/* Exec buffer command header format */
+#define AMDXDNA_CMD_STATE GENMASK(3, 0)
+#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
+#define AMDXDNA_CMD_COUNT GENMASK(22, 12)
+#define AMDXDNA_CMD_OPCODE GENMASK(27, 23)
+struct amdxdna_cmd {
+ u32 header;
+ u32 data[];
+};
+
+struct amdxdna_hwctx {
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx_priv *priv;
+ char *name;
+
+ u32 id;
+ u32 max_opc;
+ u32 num_tiles;
+ u32 mem_size;
+ u32 fw_ctx_id;
+ u32 col_list_len;
+ u32 *col_list;
+ u32 start_col;
+ u32 num_col;
+#define HWCTX_STAT_INIT 0
+#define HWCTX_STAT_READY 1
+#define HWCTX_STAT_STOP 2
+ u32 status;
+ u32 old_status;
+
+ struct amdxdna_qos_info qos;
+ struct amdxdna_hwctx_param_config_cu *cus;
+ u32 syncobj_hdl;
+
+ atomic64_t job_submit_cnt;
+ atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
+};
+
+#define drm_job_to_xdna_job(j) \
+ container_of(j, struct amdxdna_sched_job, base)
+
+struct amdxdna_sched_job {
+ struct drm_sched_job base;
+ struct kref refcnt;
+ struct amdxdna_hwctx *hwctx;
+ struct mm_struct *mm;
+ /* The fence to notify the DRM scheduler that the job is done by hardware */
+ struct dma_fence *fence;
+ /* user can wait on this fence */
+ struct dma_fence *out_fence;
+ bool job_done;
+ u64 seq;
+ struct amdxdna_gem_obj *cmd_bo;
+ size_t bo_cnt;
+ struct drm_gem_object *bos[] __counted_by(bo_cnt);
+};
+
+static inline u32
+amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
+}
+
+static inline void
+amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ cmd->header &= ~AMDXDNA_CMD_STATE;
+ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
+}
+
+static inline enum ert_cmd_state
+amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq);
+
+int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
+ u64 seq, u32 timeout);
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_CTX_H_ */
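
The AMDXDNA_CMD_* masks pack four fields into the single header word. As a quick illustration of the encode/decode round trip with FIELD_PREP()/FIELD_GET() (the count of 8 words is made up):

/* Illustrative only: build and decode a command header word. */
static u32 example_build_header(void)
{
	/* ERT_START_CU, NEW state, 8 total words, no extra CU masks */
	return FIELD_PREP(AMDXDNA_CMD_OPCODE, ERT_START_CU) |
	       FIELD_PREP(AMDXDNA_CMD_STATE, ERT_CMD_STATE_NEW) |
	       FIELD_PREP(AMDXDNA_CMD_COUNT, 8) |
	       FIELD_PREP(AMDXDNA_CMD_EXTRA_CU_MASK, 0);
}

static u32 example_payload_words(u32 header)
{
	/* Mirrors amdxdna_cmd_get_payload(): count minus the CU mask words */
	u32 num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, header);

	return FIELD_GET(AMDXDNA_CMD_COUNT, header) - num_masks;
}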
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
new file mode 100644
index 000000000000..d407a36eb412
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
+#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+static int
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_client *client = abo->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_mem *mem = &abo->mem;
+ struct amdxdna_gem_obj *heap;
+ u64 offset;
+ u32 align;
+ int ret;
+
+ mutex_lock(&client->mm_lock);
+
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (mem->size == 0 || mem->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+ mem->size, heap->mem.size);
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
+ ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
+ mem->size, align,
+ 0, DRM_MM_INSERT_BEST);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ goto unlock_out;
+ }
+
+ mem->dev_addr = abo->mm_node.start;
+ offset = mem->dev_addr - heap->mem.dev_addr;
+ mem->userptr = heap->mem.userptr + offset;
+ mem->kva = heap->mem.kva + offset;
+
+ drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+ mutex_unlock(&client->mm_lock);
+
+ return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_gem_obj *heap;
+
+ mutex_lock(&abo->client->mm_lock);
+
+ drm_mm_remove_node(&abo->mm_node);
+
+ heap = abo->client->dev_heap;
+ drm_gem_object_put(to_gobj(heap));
+
+ mutex_unlock(&abo->client->mm_lock);
+}
+
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
+
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
+ xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+ .invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
+
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
+
+ amdxdna_umap_put(mapp);
+}
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
+ u32 nr_pages;
+ int ret;
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return 0;
+
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
+
+ nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
+
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
+ current->mm,
+ addr,
+ len,
+ &amdxdna_hmm_ops);
+ if (ret) {
+ XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+ goto free_pfns;
+ }
+
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
+ return ret;
+}
+
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ amdxdna_gem_heap_free(abo);
+ drm_gem_object_release(gobj);
+ amdxdna_gem_destroy_obj(abo);
+}
+
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is backed by memory pages. Fix the flags. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired. */
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ int ret;
+
+ ret = amdxdna_hmm_register(abo, vma);
+ if (ret)
+ return ret;
+
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret)
+ goto put_obj;
+
+ /* The buffer is backed by memory pages. Fix the flags. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret)
+ goto close_vma;
+
+ return 0;
+
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ iosys_map_clear(map);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vmap(obj, map);
+
+ if (!map->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vunmap(obj, map);
+}
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
+}
+
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
+{
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
+}
+
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap(gobj, &map);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
+}
+
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_dev_obj_free,
+};
+
+static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
+ .free = amdxdna_gem_obj_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = amdxdna_gem_obj_vmap,
+ .vunmap = amdxdna_gem_obj_vunmap,
+ .mmap = amdxdna_gem_obj_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
+};
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = kzalloc(sizeof(*abo), GFP_KERNEL);
+ if (!abo)
+ return ERR_PTR(-ENOMEM);
+
+ abo->pinned = false;
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+ mutex_init(&abo->lock);
+
+ abo->mem.userptr = AMDXDNA_INVALID_ADDR;
+ abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
+ abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
+
+ return abo;
+}
+
+/* For drm_driver->gem_create_object callback */
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_obj(dev, size);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;
+
+ return to_gobj(abo);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_alloc_shmem(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ abo->client = client;
+ abo->type = AMDXDNA_BO_SHMEM;
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_dev_heap(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > xdna->dev_info->dev_mem_size) {
+ XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
+ args->size, xdna->dev_info->dev_mem_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&client->mm_lock);
+ if (client->dev_heap) {
+ XDNA_DBG(client->xdna, "dev heap is already created");
+ ret = -EBUSY;
+ goto mm_unlock;
+ }
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto mm_unlock;
+ }
+
+ abo->type = AMDXDNA_BO_DEV_HEAP;
+ abo->client = client;
+ abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
+ drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
+ client->dev_heap = abo;
+ drm_gem_object_get(to_gobj(abo));
+ mutex_unlock(&client->mm_lock);
+
+ return abo;
+
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
+ if (IS_ERR(abo))
+ return abo;
+
+ to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
+ abo->type = AMDXDNA_BO_DEV;
+ abo->client = client;
+
+ ret = amdxdna_gem_heap_alloc(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ amdxdna_gem_destroy_obj(abo);
+ return ERR_PTR(ret);
+ }
+
+ drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+ XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
+ return abo;
+
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
+ return ERR_PTR(ret);
+}
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_create_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
+ args->type, args->vaddr, args->size, args->flags);
+ switch (args->type) {
+ case AMDXDNA_BO_SHMEM:
+ abo = amdxdna_drm_alloc_shmem(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV_HEAP:
+ abo = amdxdna_drm_create_dev_heap(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV:
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
+ break;
+ case AMDXDNA_BO_CMD:
+ abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (IS_ERR(abo))
+ return PTR_ERR(abo);
+
+ /* ready to publish object to userspace */
+ ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
+ if (ret) {
+ XDNA_ERR(xdna, "Create handle failed");
+ goto put_obj;
+ }
+
+ XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
+ args->handle, args->type, abo->mem.userptr,
+ abo->mem.dev_addr, abo->mem.size);
+put_obj:
+ /* Drop the object reference; the handle holds it now. */
+ drm_gem_object_put(to_gobj(abo));
+ return ret;
+}
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ int ret;
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
+ if (is_import_bo(abo))
+ return 0;
+
+ ret = drm_gem_shmem_pin(&abo->base);
+
+ XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
+ return ret;
+}
+
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
+{
+ int ret;
+
+ mutex_lock(&abo->lock);
+ ret = amdxdna_gem_pin_nolock(abo);
+ mutex_unlock(&abo->lock);
+
+ return ret;
+}
+
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
+{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
+ if (is_import_bo(abo))
+ return;
+
+ mutex_lock(&abo->lock);
+ drm_gem_shmem_unpin(&abo->base);
+ mutex_unlock(&abo->lock);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
+ return NULL;
+ }
+
+ abo = to_xdna_obj(gobj);
+ if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
+ return abo;
+
+ drm_gem_object_put(gobj);
+ return NULL;
+}
+
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_drm_get_bo_info *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ if (args->ext || args->ext_flags || args->pad)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
+ return -ENOENT;
+ }
+
+ abo = to_xdna_obj(gobj);
+ args->vaddr = abo->mem.userptr;
+ args->xdna_addr = abo->mem.dev_addr;
+
+ if (abo->type != AMDXDNA_BO_DEV)
+ args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
+ else
+ args->map_offset = AMDXDNA_INVALID_ADDR;
+
+ XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
+ args->handle, args->map_offset, args->vaddr, args->xdna_addr);
+
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+/*
+ * The sync bo ioctl makes sure the CPU cache is in sync with memory.
+ * This is required because the NPU is not a cache-coherent device. CPU cache
+ * flushing/invalidation is expensive, so it is best to handle this outside
+ * of the command submission path. This ioctl allows explicit cache
+ * flushing/invalidation outside of the critical path.
+ */
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_sync_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -ENOENT;
+ }
+ abo = to_xdna_obj(gobj);
+
+ ret = amdxdna_gem_pin(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
+ goto put_obj;
+ }
+
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->mem.kva)
+ drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+ else if (abo->base.pages)
+ drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+ else
+ drm_WARN(&xdna->ddev, 1, "Can not get flush memory");
+
+ amdxdna_gem_unpin(abo);
+
+ XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
+ args->handle, args->offset, args->size);
+
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
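
Tying the BO ioctls above together, a hypothetical userspace round trip: create a shmem BO, query its mmap offset, then flush after CPU writes since the NPU is not cache coherent (the helper name is illustrative; field names come from the same uAPI header; error paths abbreviated):

/* Hypothetical BO round trip against the ioctls above. */
static int example_bo_flow(int fd, uint64_t size)
{
	struct amdxdna_drm_create_bo create = {
		.type = AMDXDNA_BO_SHMEM,
		.size = size,
	};
	struct amdxdna_drm_get_bo_info info = {};
	struct amdxdna_drm_sync_bo sync = {};

	if (ioctl(fd, DRM_IOCTL_AMDXDNA_CREATE_BO, &create))
		return -1;

	info.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_AMDXDNA_GET_BO_INFO, &info))
		return -1;

	/* mmap(NULL, size, ..., fd, info.map_offset) and fill the buffer */

	sync.handle = create.handle;
	sync.offset = 0;
	sync.size = size;		/* flush the whole buffer */
	return ioctl(fd, DRM_IOCTL_AMDXDNA_SYNC_BO, &sync);
}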
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
new file mode 100644
index 000000000000..ae29db94a9d3
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_GEM_H_
+#define _AMDXDNA_GEM_H_
+
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
+
+struct amdxdna_mem {
+ u64 userptr;
+ void *kva;
+ u64 dev_addr;
+ size_t size;
+ struct page **pages;
+ u32 nr_pages;
+ struct list_head umap_list;
+ bool map_invalid;
+};
+
+struct amdxdna_gem_obj {
+ struct drm_gem_shmem_object base;
+ struct amdxdna_client *client;
+ u8 type;
+ bool pinned;
+ struct mutex lock; /* Protects: pinned */
+ struct amdxdna_mem mem;
+
+ /* The members below are initialized only when needed */
+ struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
+ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
+ u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+};
+
+#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
+
+static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
+{
+ return container_of(gobj, struct amdxdna_gem_obj, base.base);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type);
+static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
+{
+ drm_gem_object_put(to_gobj(abo));
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp);
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo);
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_GEM_H_ */
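
to_gobj() and to_xdna_obj() invert each other because base.base is embedded at the start of struct amdxdna_gem_obj, so the container_of() in to_xdna_obj() simply undoes the upcast. A tiny sketch of that invariant (illustrative, not part of the header):

/* Only valid for GEM objects this driver allocated. */
static struct amdxdna_gem_obj *example_roundtrip(struct amdxdna_gem_obj *abo)
{
	struct drm_gem_object *gobj = to_gobj(abo);	/* &abo->base.base */

	return to_xdna_obj(gobj);	/* container_of() recovers abo */
}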
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
new file mode 100644
index 000000000000..da1ac89bb78f
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_mailbox.h"
+
+#define MB_ERR(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_DBG(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_WARN_ONCE(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+
+#define MAGIC_VAL 0x1D000000U
+#define MAGIC_VAL_MASK 0xFF000000
+#define MAX_MSG_ID_ENTRIES 256
+#define MSG_RX_TIMER 200 /* milliseconds */
+#define MAILBOX_NAME "xdna_mailbox"
+
+enum channel_res_type {
+ CHAN_RES_X2I,
+ CHAN_RES_I2X,
+ CHAN_RES_NUM
+};
+
+struct mailbox {
+ struct device *dev;
+ struct xdna_mailbox_res res;
+};
+
+struct mailbox_channel {
+ struct mailbox *mb;
+ struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
+ int msix_irq;
+ u32 iohub_int_addr;
+ struct xarray chan_xa;
+ u32 next_msgid;
+ u32 x2i_tail;
+
+ /* Received msg related fields */
+ struct workqueue_struct *work_q;
+ struct work_struct rx_work;
+ u32 i2x_head;
+ bool bad_state;
+};
+
+#define MSG_BODY_SZ GENMASK(10, 0)
+#define MSG_PROTO_VER GENMASK(23, 16)
+struct xdna_msg_header {
+ __u32 total_size;
+ __u32 sz_ver;
+ __u32 id;
+ __u32 opcode;
+} __packed;
+
+static_assert(sizeof(struct xdna_msg_header) == 16);
+
+struct mailbox_pkg {
+ struct xdna_msg_header header;
+ __u32 payload[];
+};
+
+/* The protocol version. */
+#define MSG_PROTOCOL_VERSION 0x1
+/* The tombstone value. */
+#define TOMBSTONE 0xDEADFACE
+
+struct mailbox_msg {
+ void *handle;
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
+ size_t pkg_size; /* package size in bytes */
+ struct mailbox_pkg pkg;
+};
+
+static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ writel(data, ringbuf_addr);
+}
+
+static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ return readl(ringbuf_addr);
+}
+
+static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+ int ret, value;
+
+ /* Poll until the value is non-zero */
+ ret = readx_poll_timeout(readl, ringbuf_addr, value,
+ value, 1 /* us */, 100);
+ if (ret < 0)
+ return ret;
+
+ *val = value;
+ return 0;
+}
+
+static inline void
+mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
+ mb_chann->i2x_head = headptr_val;
+}
+
+static inline void
+mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
+ mb_chann->x2i_tail = tailptr_val;
+}
+
+static inline u32
+mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
+}
+
+static inline u32
+mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
+}
+
+static inline u32
+mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mb_chann->res[type].rb_size;
+}
+
+static inline int mailbox_validate_msgid(int msg_id)
+{
+ return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
+}
+
+static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ u32 msg_id;
+ int ret;
+
+ ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
+ XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
+ &mb_chann->next_msgid, GFP_NOWAIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Add MAGIC_VAL to the higher bits.
+ */
+ msg_id |= MAGIC_VAL;
+ return msg_id;
+}
+
+static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
+{
+ msg_id &= ~MAGIC_VAL_MASK;
+ xa_erase_irq(&mb_chann->chan_xa, msg_id);
+}
+
+static void mailbox_release_msg(struct mailbox_channel *mb_chann,
+ struct mailbox_msg *mb_msg)
+{
+ MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
+ mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ kfree(mb_msg);
+}
+
+static int
+mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ void __iomem *write_addr;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ u32 tmp_tail;
+
+ head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
+ tail = mb_chann->x2i_tail;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
+ start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
+ tmp_tail = tail + mb_msg->pkg_size;
+
+ if (tail < head && tmp_tail >= head)
+ goto no_space;
+
+ if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
+ mb_msg->pkg_size >= head))
+ goto no_space;
+
+ if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ writel(TOMBSTONE, write_addr);
+
+ /* Tombstone is set. Write from the start of the ring buffer. */
+ tail = 0;
+ }
+
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
+ mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);
+
+ trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
+ mb_msg->pkg.header.opcode,
+ mb_msg->pkg.header.id);
+
+ return 0;
+
+no_space:
+ return -ENOSPC;
+}
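
The free-space check above is easiest to follow with concrete numbers: with rb_size = 1024, head = 64, tail = 1000 and pkg_size = 32, tmp_tail = 1032 exceeds rb_size - 4, but pkg_size < head, so a TOMBSTONE is written at offset 1000 and the message lands at offset 0. A kernel-style model of the same decision (u32/bool from linux/types.h; the function name is illustrative):

/* Illustrative model of the x2i free-space/wrap decision above. */
static bool example_msg_fits(u32 head, u32 tail, u32 rb_size, u32 pkg_size)
{
	u32 tmp_tail = tail + pkg_size;

	if (tail < head && tmp_tail >= head)
		return false;		/* would overwrite unread data */

	if (tail >= head && tmp_tail > rb_size - sizeof(u32) &&
	    pkg_size >= head)
		return false;		/* no room even after wrapping to 0 */

	return true;			/* fits, possibly after a TOMBSTONE wrap */
}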
+
+static int
+mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
+ void __iomem *data)
+{
+ struct mailbox_msg *mb_msg;
+ int msg_id;
+ int ret;
+
+ msg_id = header->id;
+ if (!mailbox_validate_msgid(msg_id)) {
+ MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ msg_id &= ~MAGIC_VAL_MASK;
+ mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
+ if (!mb_msg) {
+ MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+
+ kfree(mb_msg);
+ return ret;
+}
+
+static int mailbox_get_msg(struct mailbox_channel *mb_chann)
+{
+ struct xdna_msg_header header;
+ void __iomem *read_addr;
+ u32 msg_size, rest;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ int ret;
+
+ if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
+ return -EINVAL;
+ head = mb_chann->i2x_head;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
+ start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;
+
+ if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
+ return -EINVAL;
+ }
+
+ /* ringbuf empty */
+ if (head == tail)
+ return -ENOENT;
+
+ if (head == ringbuf_size)
+ head = 0;
+
+ /* Peek size of the message or TOMBSTONE */
+ read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
+ header.total_size = readl(read_addr);
+ /* The size is TOMBSTONE; restart reading from offset 0 */
+ if (header.total_size == TOMBSTONE) {
+ if (head < tail) {
+ MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
+ head, tail);
+ return -EINVAL;
+ }
+ mailbox_set_headptr(mb_chann, 0);
+ return 0;
+ }
+
+ if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
+ return -EINVAL;
+ }
+ msg_size = sizeof(header) + header.total_size;
+
+ if (msg_size > ringbuf_size - head || msg_size > tail - head) {
+ MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
+ msg_size, tail, head);
+ return -EINVAL;
+ }
+
+ rest = sizeof(header) - sizeof(u32);
+ read_addr += sizeof(u32);
+ memcpy_fromio((u32 *)&header + 1, read_addr, rest);
+ read_addr += rest;
+
+ ret = mailbox_get_resp(mb_chann, &header, read_addr);
+
+ mailbox_set_headptr(mb_chann, head + msg_size);
+ /* After the update, head can equal ringbuf_size. This is expected. */
+ trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
+ header.opcode, header.id);
+
+ return ret;
+}
+
+static irqreturn_t mailbox_irq_handler(int irq, void *p)
+{
+ struct mailbox_channel *mb_chann = p;
+
+ trace_mbox_irq_handle(MAILBOX_NAME, irq);
+ /* Schedule rx_work to invoke the callback functions */
+ queue_work(mb_chann->work_q, &mb_chann->rx_work);
+
+ return IRQ_HANDLED;
+}
+
+static void mailbox_rx_worker(struct work_struct *rx_work)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state, work aborted");
+ return;
+ }
+
+again:
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ while (1) {
+ /*
+ * If the return value is 0, keep consuming the next message
+ * until there are no more messages or an error occurs.
+ */
+ ret = mailbox_get_msg(mb_chann);
+ if (ret == -ENOENT)
+ break;
+
+ /* Any other error means the device is in a bad state; disable the irq. */
+ if (unlikely(ret)) {
+ MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
+ WRITE_ONCE(mb_chann->bad_state, true);
+ return;
+ }
+ }
+
+ /*
+ * The hardware will not generate an interrupt if the firmware creates a
+ * new response right after the driver clears the interrupt register.
+ * Check the interrupt register to make sure there are no new responses
+ * before exiting.
+ */
+ if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
+ goto again;
+}
+
+int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout)
+{
+ struct xdna_msg_header *header;
+ struct mailbox_msg *mb_msg;
+ size_t pkg_size;
+ int ret;
+
+ pkg_size = sizeof(*header) + msg->send_size;
+ if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
+ MB_ERR(mb_chann, "Message size larger than ringbuf size");
+ return -EINVAL;
+ }
+
+ if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
+		MB_ERR(mb_chann, "Message must be 4-byte aligned");
+ return -EINVAL;
+ }
+
+	/* The first word in the payload can NOT be TOMBSTONE */
+	if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
+		MB_ERR(mb_chann, "Tombstone in data");
+ return -EINVAL;
+ }
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state");
+ return -EPIPE;
+ }
+
+ mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
+ if (!mb_msg)
+ return -ENOMEM;
+
+ mb_msg->handle = msg->handle;
+ mb_msg->notify_cb = msg->notify_cb;
+ mb_msg->pkg_size = pkg_size;
+
+ header = &mb_msg->pkg.header;
+ /*
+	 * The hardware uses total_size and size to split huge messages.
+	 * We do not support that here, thus the values are the same.
+ */
+ header->total_size = msg->send_size;
+ header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
+ FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
+ header->opcode = msg->opcode;
+ memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);
+
+ ret = mailbox_acquire_msgid(mb_chann, mb_msg);
+ if (unlikely(ret < 0)) {
+ MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
+ goto msg_id_failed;
+ }
+ header->id = ret;
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+
+ ret = mailbox_send_msg(mb_chann, mb_msg);
+ if (ret) {
+ MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
+ goto release_id;
+ }
+
+ return 0;
+
+release_id:
+ mailbox_release_msgid(mb_chann, header->id);
+msg_id_failed:
+ kfree(mb_msg);
+ return ret;
+}
+
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mb,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 iohub_int_addr,
+ int mb_irq)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
+		pr_err("Ring buf size must be a power of 2");
+ return NULL;
+ }
+
+ mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
+ if (!mb_chann)
+ return NULL;
+
+ mb_chann->mb = mb;
+ mb_chann->msix_irq = mb_irq;
+ mb_chann->iohub_int_addr = iohub_int_addr;
+ memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
+ memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
+
+ xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
+ mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
+
+ INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
+ mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
+ if (!mb_chann->work_q) {
+ MB_ERR(mb_chann, "Create workqueue failed");
+ goto free_and_out;
+ }
+
+	/* Everything looks good. Time to enable the irq handler */
+ ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
+ if (ret) {
+ MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
+ goto destroy_wq;
+ }
+
+ mb_chann->bad_state = false;
+
+ MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
+ return mb_chann;
+
+destroy_wq:
+ destroy_workqueue(mb_chann->work_q);
+free_and_out:
+ kfree(mb_chann);
+ return NULL;
+}
+
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
+{
+ struct mailbox_msg *mb_msg;
+ unsigned long msg_id;
+
+	MB_DBG(mb_chann, "Freeing IRQ and destroying workqueue");
+ free_irq(mb_chann->msix_irq, mb_chann);
+ destroy_workqueue(mb_chann->work_q);
+ /* We can clean up and release resources */
+
+ xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
+ mailbox_release_msg(mb_chann, mb_msg);
+
+ xa_destroy(&mb_chann->chan_xa);
+
+ MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
+ kfree(mb_chann);
+ return 0;
+}
+
+void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
+{
+	/* Disable the irq and wait for it to finish. This might sleep. */
+ disable_irq(mb_chann->msix_irq);
+
+ /* Cancel RX work and wait for it to finish */
+ cancel_work_sync(&mb_chann->rx_work);
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+}
+
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res)
+{
+ struct mailbox *mb;
+
+ mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+ mb->dev = ddev->dev;
+
+ /* mailbox and ring buf base and size information */
+ memcpy(&mb->res, res, sizeof(*res));
+
+ return mb;
+}
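
As a reading aid for the consume path above, the sketch below models mailbox_get_msg()'s head/tail arithmetic and TOMBSTONE wrap handling in plain C. It assumes a 16-byte message header and a TOMBSTONE value of 0xDEADFACE; both are assumptions for illustration, and the real driver reads the ring through MMIO rather than a plain array.

/*
 * Illustrative model of mailbox_get_msg()'s ring arithmetic: the hardware
 * tail chases a software head; a TOMBSTONE first word means "wrap to 0".
 * Assumptions: 16-byte header, TOMBSTONE == 0xDEADFACE.
 */
#include <stdint.h>

#define MODEL_TOMBSTONE	0xDEADFACE
#define MODEL_HDR_SIZE	16

/* Returns the new head, or -1 on a protocol error. */
static long model_consume_one(const uint32_t *ring, uint32_t ringbuf_size,
			      uint32_t head, uint32_t tail)
{
	uint32_t total_size, msg_size;

	if (head == tail)
		return head;			/* ring empty, nothing to do */
	if (head == ringbuf_size)
		head = 0;			/* head parked at the end */

	total_size = ring[head / 4];		/* peek first header word */
	if (total_size == MODEL_TOMBSTONE) {
		if (head < tail)
			return -1;		/* tombstone before tail: bogus */
		return 0;			/* producer wrapped; restart at 0 */
	}

	msg_size = MODEL_HDR_SIZE + total_size;
	if (msg_size > ringbuf_size - head || msg_size > tail - head)
		return -1;			/* overruns ring or tail */

	/* ...header and payload would be copied out of the ring here... */
	return head + msg_size;			/* may equal ringbuf_size */
}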
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
new file mode 100644
index 000000000000..ea367f2fb738
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MAILBOX_H_
+#define _AIE2_MAILBOX_H_
+
+struct mailbox;
+struct mailbox_channel;
+
+/*
+ * xdna_mailbox_msg - message struct
+ *
+ * @opcode: opcode for firmware
+ * @handle: handle used for the notify callback
+ * @notify_cb: callback function to notify the sender when there is a response
+ * @send_data: pointer to the data to send
+ * @send_size: size of the data to send
+ *
+ * The mailbox will split the data into multiple firmware messages if it is
+ * too big. This is transparent to the sender, which receives a single
+ * notification.
+ */
+struct xdna_mailbox_msg {
+ u32 opcode;
+ void *handle;
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
+ u8 *send_data;
+ size_t send_size;
+};
+
+/*
+ * xdna_mailbox_res - mailbox hardware resource
+ *
+ * @ringbuf_base: ring buffer base address
+ * @ringbuf_size: ring buffer size
+ * @mbox_base: mailbox base address
+ * @mbox_size: mailbox size
+ * @name: name of the mailbox
+ */
+struct xdna_mailbox_res {
+ void __iomem *ringbuf_base;
+ size_t ringbuf_size;
+ void __iomem *mbox_base;
+ size_t mbox_size;
+ const char *name;
+};
+
+/*
+ * xdna_mailbox_chann_res - resources
+ *
+ * @rb_start_addr: ring buffer start address
+ * @rb_size: ring buffer size
+ * @mb_head_ptr_reg: mailbox head pointer register
+ * @mb_tail_ptr_reg: mailbox tail pointer register
+ */
+struct xdna_mailbox_chann_res {
+ u32 rb_start_addr;
+ u32 rb_size;
+ u32 mb_head_ptr_reg;
+ u32 mb_tail_ptr_reg;
+};
+
+/*
+ * xdnam_mailbox_create() -- create and initialize the mailbox subsystem
+ *
+ * @ddev: device pointer
+ * @res: SRAM and mailbox resources
+ *
+ * Return: On success, a handle to the mailbox subsystem. Otherwise, NULL.
+ */
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res);
+
+/*
+ * xdna_mailbox_create_channel() -- Create a mailbox channel instance
+ *
+ * @mailbox: the handle returned from xdnam_mailbox_create()
+ * @x2i: host to firmware mailbox resources
+ * @i2x: firmware to host mailbox resources
+ * @xdna_mailbox_intr_reg: register addr of MSI-X interrupt
+ * @mb_irq: Linux IRQ number associated with mailbox MSI-X interrupt vector index
+ *
+ * Return: On success, a handle to the mailbox channel. Otherwise, NULL.
+ */
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mailbox,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 xdna_mailbox_intr_reg,
+ int mb_irq);
+
+/*
+ * xdna_mailbox_destroy_channel() -- destroy mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_stop_channel() -- stop mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ */
+void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_send_msg() -- Send a message
+ *
+ * @mailbox_chann: Mailbox channel handle
+ * @msg: message struct for message information
+ * @tx_timeout: the timeout value for sending the message in ms.
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_send_msg(struct mailbox_channel *mailbox_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout);
+
+#endif /* _AIE2_MAILBOX_H_ */
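
A hedged sketch of how a caller might drive the channel API declared in this header, end to end. The register offsets, ring sizes, opcode, and iohub interrupt address below are made-up illustration values, and error handling is trimmed to the essentials.

#include <linux/completion.h>
#include "amdxdna_mailbox.h"

static int example_notify_cb(void *handle, void __iomem *data, size_t size)
{
	/* Runs from the channel's rx worker when the response arrives. */
	complete((struct completion *)handle);
	return 0;
}

static int example_roundtrip(struct mailbox *mb, int irq)
{
	struct xdna_mailbox_chann_res x2i = {
		.rb_start_addr = 0x0, .rb_size = 0x2000,	/* power of 2 */
		.mb_head_ptr_reg = 0x100, .mb_tail_ptr_reg = 0x104,
	};
	struct xdna_mailbox_chann_res i2x = {
		.rb_start_addr = 0x2000, .rb_size = 0x2000,
		.mb_head_ptr_reg = 0x108, .mb_tail_ptr_reg = 0x10c,
	};
	DECLARE_COMPLETION_ONSTACK(done);
	u32 payload = 0x1;		/* 4-byte aligned, not TOMBSTONE */
	struct xdna_mailbox_msg msg = {
		.opcode = 0x42,		/* hypothetical opcode */
		.handle = &done,
		.notify_cb = example_notify_cb,
		.send_data = (u8 *)&payload,
		.send_size = sizeof(payload),
	};
	struct mailbox_channel *chann;
	int ret;

	chann = xdna_mailbox_create_channel(mb, &x2i, &i2x,
					    0x200 /* iohub int reg */, irq);
	if (!chann)
		return -ENODEV;

	ret = xdna_mailbox_send_msg(chann, &msg, 1000 /* ms */);
	if (!ret)
		wait_for_completion(&done);

	xdna_mailbox_stop_channel(chann);
	return xdna_mailbox_destroy_channel(chann);
}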
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
new file mode 100644
index 000000000000..6d0c24513476
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/completion.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size)
+{
+ struct xdna_notify *cb_arg = handle;
+ int ret;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(cb_arg->size != size)) {
+ cb_arg->error = -EINVAL;
+ goto out;
+ }
+
+ memcpy_fromio(cb_arg->data, data, cb_arg->size);
+ print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
+ 16, 4, cb_arg->data, cb_arg->size, true);
+out:
+ ret = cb_arg->error;
+ complete(&cb_arg->comp);
+ return ret;
+}
+
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg)
+{
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ ret = xdna_mailbox_send_msg(chann, msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&hdl->comp,
+ msecs_to_jiffies(RX_TIMEOUT));
+ if (!ret) {
+ XDNA_ERR(xdna, "Wait for completion timeout");
+ return -ETIME;
+ }
+
+ return hdl->error;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
new file mode 100644
index 000000000000..710ff8873d61
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_MAILBOX_HELPER_H
+#define _AMDXDNA_MAILBOX_HELPER_H
+
+#define TX_TIMEOUT 2000 /* milliseconds */
+#define RX_TIMEOUT 5000 /* milliseconds */
+
+struct amdxdna_dev;
+
+struct xdna_notify {
+ struct completion comp;
+ u32 *data;
+ size_t size;
+ int error;
+};
+
+#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+ struct name##_req req = { 0 }; \
+ struct name##_resp resp = { status }; \
+ struct xdna_notify hdl = { \
+ .error = 0, \
+ .data = (u32 *)&resp, \
+ .size = sizeof(resp), \
+ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ }; \
+ struct xdna_mailbox_msg msg = { \
+ .send_data = (u8 *)&req, \
+ .send_size = sizeof(req), \
+ .handle = &hdl, \
+ .opcode = op, \
+ .notify_cb = xdna_msg_cb, \
+ }
+
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size);
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg);
+
+#endif /* _AMDXDNA_MAILBOX_HELPER_H */
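
The DECLARE_XDNA_MSG_COMMON() macro is easiest to see with a concrete example. In the sketch below, ping_req/ping_resp, the opcode, and the status convention are hypothetical stand-ins for the real firmware message definitions in aie2_msg_priv.h; the macro expands to an on-stack req, resp, completion handle, and msg already wired to xdna_msg_cb, so xdna_send_msg_wait() can block until the response lands in resp.

#include "amdxdna_mailbox.h"
#include "amdxdna_mailbox_helper.h"

/* Hypothetical message pair; real ones live in aie2_msg_priv.h. */
struct ping_req { u32 pad; };
struct ping_resp { u32 status; };

static int example_ping(struct amdxdna_dev *xdna, struct mailbox_channel *chann)
{
	/* Declares on-stack req/resp/hdl/msg bound to xdna_msg_cb. */
	DECLARE_XDNA_MSG_COMMON(ping, 0x1 /* hypothetical opcode */, 0);
	int ret;

	ret = xdna_send_msg_wait(xdna, chann, &msg);
	if (ret)
		return ret;

	return resp.status ? -EIO : 0;	/* hypothetical status convention */
}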
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
new file mode 100644
index 000000000000..569cd703729d
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
+/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 1
+
+/*
+ * Bind the driver based on the (vendor_id, device_id) pair, and later use the
+ * (device_id, rev_id) pair as a key to select the device. Devices with the
+ * same device_id have a very similar interface to the host driver.
+ */
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static const struct amdxdna_device_id amdxdna_ids[] = {
+ { 0x1502, 0x0, &dev_npu1_info },
+ { 0x17f0, 0x0, &dev_npu2_info },
+ { 0x17f0, 0x10, &dev_npu4_info },
+ { 0x17f0, 0x11, &dev_npu5_info },
+ { 0x17f0, 0x20, &dev_npu6_info },
+ {0}
+};
+
+static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
+ return ret;
+ }
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
+
+ client->pid = pid_nr(rcu_access_pointer(filp->pid));
+ client->xdna = xdna;
+
+ client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
+ if (IS_ERR(client->sva)) {
+ ret = PTR_ERR(client->sva);
+ XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
+ goto failed;
+ }
+ client->pasid = iommu_sva_get_pasid(client->sva);
+ if (client->pasid == IOMMU_PASID_INVALID) {
+ XDNA_ERR(xdna, "SVA get pasid failed");
+ ret = -ENODEV;
+ goto unbind_sva;
+ }
+ init_srcu_struct(&client->hwctx_srcu);
+ xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
+ mutex_init(&client->mm_lock);
+
+ mutex_lock(&xdna->dev_lock);
+ list_add_tail(&client->node, &xdna->client_list);
+ mutex_unlock(&xdna->dev_lock);
+
+ filp->driver_priv = client;
+ client->filp = filp;
+
+ XDNA_DBG(xdna, "pid %d opened", client->pid);
+ return 0;
+
+unbind_sva:
+ iommu_sva_unbind_device(client->sva);
+failed:
+ kfree(client);
+put_rpm:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return ret;
+}
+
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+
+ XDNA_DBG(xdna, "closing pid %d", client->pid);
+
+ xa_destroy(&client->hwctx_xa);
+ cleanup_srcu_struct(&client->hwctx_srcu);
+ mutex_destroy(&client->mm_lock);
+ if (client->dev_heap)
+ drm_gem_object_put(to_gobj(client->dev_heap));
+
+ iommu_sva_unbind_device(client->sva);
+
+ XDNA_DBG(xdna, "pid %d closed", client->pid);
+ kfree(client);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+}
+
+static int amdxdna_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *filp = f->private_data;
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = client->xdna;
+ int idx;
+
+ XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return 0;
+
+ mutex_lock(&xdna->dev_lock);
+ list_del_init(&client->node);
+ amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_info *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_aie_info)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_aie_info(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
+static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_set_state *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->set_aie_state)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->set_aie_state(client, args);
+ mutex_unlock(&xdna->dev_lock);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
+ /* Context */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
+ /* BO */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
+ /* Execution */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
+ /* AIE hardware */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
+};
+
+static const struct file_operations amdxdna_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .flush = amdxdna_flush,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+const struct drm_driver amdxdna_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .fops = &amdxdna_fops,
+ .name = "amdxdna_accel_driver",
+ .desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
+ .open = amdxdna_drm_open,
+ .postclose = amdxdna_drm_close,
+ .ioctls = amdxdna_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
+
+ .gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
+};
+
+static const struct amdxdna_dev_info *
+amdxdna_get_dev_info(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
+ if (pdev->device == amdxdna_ids[i].device &&
+ pdev->revision == amdxdna_ids[i].revision)
+ return amdxdna_ids[i].dev_info;
+ }
+ return NULL;
+}
+
+static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
+ if (IS_ERR(xdna))
+ return PTR_ERR(xdna);
+
+ xdna->dev_info = amdxdna_get_dev_info(pdev);
+ if (!xdna->dev_info)
+ return -ENODEV;
+
+	ret = drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
+	if (ret)
+		return ret;
+ init_rwsem(&xdna->notifier_lock);
+ INIT_LIST_HEAD(&xdna->client_list);
+ pci_set_drvdata(pdev, xdna);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xdna->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->init(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ if (ret) {
+ XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
+ goto destroy_notifier_wq;
+ }
+
+ ret = amdxdna_sysfs_init(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
+ goto failed_dev_fini;
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+
+ ret = drm_dev_register(&xdna->ddev, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
+ pm_runtime_forbid(dev);
+ goto failed_sysfs_fini;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+failed_sysfs_fini:
+ amdxdna_sysfs_fini(xdna);
+failed_dev_fini:
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
+ return ret;
+}
+
+static void amdxdna_remove(struct pci_dev *pdev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct amdxdna_client *client;
+
+ destroy_workqueue(xdna->notifier_wq);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+
+ drm_dev_unplug(&xdna->ddev);
+ amdxdna_sysfs_fini(xdna);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ while (client) {
+ list_del_init(&client->node);
+ amdxdna_hwctx_remove_all(client);
+
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ }
+
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+}
+
+static int amdxdna_pmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+
+ if (!xdna->dev_info->ops->suspend)
+ return -EOPNOTSUPP;
+
+ return xdna->dev_info->ops->suspend(xdna);
+}
+
+static int amdxdna_pmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+
+ if (!xdna->dev_info->ops->resume)
+ return -EOPNOTSUPP;
+
+ return xdna->dev_info->ops->resume(xdna);
+}
+
+static const struct dev_pm_ops amdxdna_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
+ RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL)
+};
+
+static struct pci_driver amdxdna_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_ids,
+ .probe = amdxdna_probe,
+ .remove = amdxdna_remove,
+ .driver.pm = &amdxdna_pm_ops,
+};
+
+module_pci_driver(amdxdna_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_DESCRIPTION("amdxdna driver");
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
new file mode 100644
index 000000000000..72d6696d49da
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PCI_DRV_H_
+#define _AMDXDNA_PCI_DRV_H_
+
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
+#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
+#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
+
+#define XDNA_MBZ_DBG(xdna, ptr, sz) \
+ ({ \
+ int __i; \
+ int __ret = 0; \
+ u8 *__ptr = (u8 *)(ptr); \
+ for (__i = 0; __i < (sz); __i++) { \
+ if (__ptr[__i]) { \
+ XDNA_DBG(xdna, "MBZ check failed"); \
+ __ret = -EINVAL; \
+ break; \
+ } \
+ } \
+ __ret; \
+ })
+
+#define to_xdna_dev(drm_dev) \
+ ((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
+
+extern const struct drm_driver amdxdna_drm_drv;
+
+struct amdxdna_client;
+struct amdxdna_dev;
+struct amdxdna_drm_get_info;
+struct amdxdna_drm_set_state;
+struct amdxdna_gem_obj;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+/*
+ * struct amdxdna_dev_ops - Device hardware operation callbacks
+ */
+struct amdxdna_dev_ops {
+ int (*init)(struct amdxdna_dev *xdna);
+ void (*fini)(struct amdxdna_dev *xdna);
+ int (*resume)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
+ int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
+ int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+ int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+ int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
+ int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
+};
+
+/*
+ * struct amdxdna_dev_info - Device hardware information
+ * Records static device information, such as the reg, mbox, PSP and SMU BAR indexes
+ */
+struct amdxdna_dev_info {
+ int reg_bar;
+ int mbox_bar;
+ int sram_bar;
+ int psp_bar;
+ int smu_bar;
+ int device_type;
+ int first_col;
+ u32 dev_mem_buf_shift;
+ u64 dev_mem_base;
+ size_t dev_mem_size;
+ char *vbnv;
+ const struct amdxdna_dev_priv *dev_priv;
+ const struct amdxdna_dev_ops *ops;
+};
+
+struct amdxdna_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 sub;
+ u32 build;
+};
+
+struct amdxdna_dev {
+ struct drm_device ddev;
+ struct amdxdna_dev_hdl *dev_handle;
+ const struct amdxdna_dev_info *dev_info;
+ void *xrs_hdl;
+
+ struct mutex dev_lock; /* per device lock */
+ struct list_head client_list;
+ struct amdxdna_fw_ver fw_ver;
+	struct rw_semaphore notifier_lock; /* for mmu notifier */
+ struct workqueue_struct *notifier_wq;
+};
+
+/*
+ * struct amdxdna_device_id - PCI device info
+ */
+struct amdxdna_device_id {
+ unsigned short device;
+ u8 revision;
+ const struct amdxdna_dev_info *dev_info;
+};
+
+/*
+ * struct amdxdna_client - amdxdna client
+ * A per-fd data structure for managing contexts and other per-process state.
+ */
+struct amdxdna_client {
+ struct list_head node;
+ pid_t pid;
+ struct srcu_struct hwctx_srcu;
+ struct xarray hwctx_xa;
+ u32 next_hwctxid;
+ struct amdxdna_dev *xdna;
+ struct drm_file *filp;
+
+ struct mutex mm_lock; /* protect memory related */
+ struct amdxdna_gem_obj *dev_heap;
+
+ struct iommu_sva *sva;
+ int pasid;
+};
+
+#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
+ xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
+
+/* Add device info below */
+extern const struct amdxdna_dev_info dev_npu1_info;
+extern const struct amdxdna_dev_info dev_npu2_info;
+extern const struct amdxdna_dev_info dev_npu4_info;
+extern const struct amdxdna_dev_info dev_npu5_info;
+extern const struct amdxdna_dev_info dev_npu6_info;
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PCI_DRV_H_ */
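
As a usage note for the amdxdna_for_each_hwctx() helper above, the sketch below walks a client's hardware contexts under the hwctx_srcu read lock, mirroring how the driver protects readers against context teardown; the visit() callback is a hypothetical placeholder.

#include "amdxdna_ctx.h"
#include "amdxdna_pci_drv.h"

static void example_visit_hwctxs(struct amdxdna_client *client,
				 void (*visit)(struct amdxdna_hwctx *hwctx))
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	int idx;

	/* SRCU keeps contexts alive while we iterate. */
	idx = srcu_read_lock(&client->hwctx_srcu);
	amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
		visit(hwctx);
	srcu_read_unlock(&client->hwctx_srcu, idx);
}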
diff --git a/drivers/accel/amdxdna/amdxdna_sysfs.c b/drivers/accel/amdxdna/amdxdna_sysfs.c
new file mode 100644
index 000000000000..f27e4ee960a0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_sysfs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/types.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+static ssize_t vbnv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%s\n", xdna->dev_info->vbnv);
+}
+static DEVICE_ATTR_RO(vbnv);
+
+static ssize_t device_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", xdna->dev_info->device_type);
+}
+static DEVICE_ATTR_RO(device_type);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d.%d.%d.%d\n", xdna->fw_ver.major,
+			  xdna->fw_ver.minor, xdna->fw_ver.sub,
+			  xdna->fw_ver.build);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static struct attribute *amdxdna_attrs[] = {
+ &dev_attr_device_type.attr,
+ &dev_attr_vbnv.attr,
+ &dev_attr_fw_version.attr,
+ NULL,
+};
+
+static struct attribute_group amdxdna_attr_group = {
+ .attrs = amdxdna_attrs,
+};
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ ret = sysfs_create_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+ if (ret)
+ XDNA_ERR(xdna, "Create attr group failed");
+
+ return ret;
+}
+
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna)
+{
+ sysfs_remove_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+					ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (ret)
+		goto free_sg;
+
+	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+		ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+		if (ret)
+			goto free_table;
+	}
+
+	return sg;
+
+free_table:
+	sg_free_table(sg);
+free_sg:
+	kfree(sg);
+	return ERR_PTR(ret);
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+ if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+ XDNA_DBG(xdna, "Access va entries failed");
+		ret = -EFAULT;
+ goto free_ent;
+ }
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+		if (ret < 0 || ret != npages) {
+			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+			if (ret > 0)
+				start += ret; /* count partial pins for unpin */
+			ret = -ENOMEM;
+			goto destroy_pages;
+		}
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
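
amdxdna_get_ubuf() accounts pinned pages against RLIMIT_MEMLOCK by optimistically adding to mm->pinned_vm and rolling back if the new total exceeds the limit (unless the caller holds CAP_IPC_LOCK). A minimal stand-alone model of that check, with the limit and capability passed in as plain parameters:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Model of the pinned_vm accounting; the parameters stand in for
 * rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT and capable(CAP_IPC_LOCK).
 */
static int model_account_pinned(atomic64_t *pinned_vm, u64 nr_pages,
				u64 lock_limit_pages, bool cap_ipc_lock)
{
	u64 new_pinned = atomic64_add_return(nr_pages, pinned_vm);

	if (new_pinned > lock_limit_pages && !cap_ipc_lock) {
		atomic64_sub(nr_pages, pinned_vm);	/* roll back */
		return -ENOMEM;
	}
	return 0;
}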
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
new file mode 100644
index 000000000000..e4f6dac7d00f
--- /dev/null
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* Address definition from NPU1 docs */
+#define MPNPU_PUB_SEC_INTR 0x3010090
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010094
+#define MPNPU_PUB_SCRATCH2 0x30100A0
+#define MPNPU_PUB_SCRATCH3 0x30100A4
+#define MPNPU_PUB_SCRATCH4 0x30100A8
+#define MPNPU_PUB_SCRATCH5 0x30100AC
+#define MPNPU_PUB_SCRATCH6 0x30100B0
+#define MPNPU_PUB_SCRATCH7 0x30100B4
+#define MPNPU_PUB_SCRATCH9 0x30100BC
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x30A0000
+#define MPNPU_SRAM_X2I_MAILBOX_1 0x30A2000
+#define MPNPU_SRAM_I2X_MAILBOX_15 0x30BF000
+
+#define MPNPU_APERTURE0_BASE 0x3000000
+#define MPNPU_APERTURE1_BASE 0x3080000
+#define MPNPU_APERTURE2_BASE 0x30C0000
+
+/* PCIe BAR Index for NPU1 */
+#define NPU1_REG_BAR_INDEX 0
+#define NPU1_MBOX_BAR_INDEX 4
+#define NPU1_PSP_BAR_INDEX 0
+#define NPU1_SMU_BAR_INDEX 0
+#define NPU1_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU1_REG_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_MBOX_BAR_BASE MPNPU_APERTURE2_BASE
+#define NPU1_PSP_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
+
+const struct rt_config npu1_default_rt_cfg[] = {
+ { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu1_dpm_clk_table[] = {
+ {400, 800},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {720, 1309},
+ {720, 1309},
+ {847, 1600},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu1_dev_priv = {
+ .fw_path = "amdnpu/1502_00/npu.sbin",
+ .protocol_major = 0x5,
+ .protocol_minor = 0x7,
+ .rt_config = npu1_default_rt_cfg,
+ .dpm_clk_tbl = npu1_dpm_clk_table,
+ .col_align = COL_ALIGN_NONE,
+ .mbox_dev_addr = NPU1_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU1_PSP, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU1_PSP, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU1_PSP, MPNPU_PUB_SEC_INTR),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU1_SMU, MPNPU_PUB_SCRATCH5),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU1_SMU, MPNPU_PUB_PWRMGMT_INTR),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ },
+ .hw_ops = {
+ .set_dpm = npu1_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu1_info = {
+ .reg_bar = NPU1_REG_BAR_INDEX,
+ .mbox_bar = NPU1_MBOX_BAR_INDEX,
+ .sram_bar = NPU1_SRAM_BAR_INDEX,
+ .psp_bar = NPU1_PSP_BAR_INDEX,
+ .smu_bar = NPU1_SMU_BAR_INDEX,
+ .first_col = 1,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu1",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu1_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
new file mode 100644
index 000000000000..a081cac75ee0
--- /dev/null
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU2 */
+#define NPU2_REG_BAR_INDEX 0
+#define NPU2_MBOX_BAR_INDEX 0
+#define NPU2_PSP_BAR_INDEX 4
+#define NPU2_SMU_BAR_INDEX 5
+#define NPU2_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU2_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu2_dev_priv = {
+ .fw_path = "amdnpu/17f0_00/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 0x6,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU2_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU2_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU2_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU2_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU2_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU2_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU2_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu2_info = {
+ .reg_bar = NPU2_REG_BAR_INDEX,
+ .mbox_bar = NPU2_MBOX_BAR_INDEX,
+ .sram_bar = NPU2_SRAM_BAR_INDEX,
+ .psp_bar = NPU2_PSP_BAR_INDEX,
+ .smu_bar = NPU2_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu2",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu2_dev_priv,
+ .ops = &aie2_ops, /* NPU2 can share NPU1's callback */
+};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
new file mode 100644
index 000000000000..9f2e33182ec6
--- /dev/null
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU4 */
+#define NPU4_REG_BAR_INDEX 0
+#define NPU4_MBOX_BAR_INDEX 0
+#define NPU4_PSP_BAR_INDEX 4
+#define NPU4_SMU_BAR_INDEX 5
+#define NPU4_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU4_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+const struct rt_config npu4_default_rt_cfg[] = {
+ { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu4_dpm_clk_table[] = {
+ {396, 792},
+ {600, 1056},
+ {792, 1152},
+ {975, 1267},
+ {975, 1267},
+ {1056, 1408},
+ {1152, 1584},
+ {1267, 1800},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu4_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU4_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU4_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU4_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU4_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU4_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU4_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU4_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu4_info = {
+ .reg_bar = NPU4_REG_BAR_INDEX,
+ .mbox_bar = NPU4_MBOX_BAR_INDEX,
+ .sram_bar = NPU4_SRAM_BAR_INDEX,
+ .psp_bar = NPU4_PSP_BAR_INDEX,
+ .smu_bar = NPU4_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu4",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu4_dev_priv,
+ .ops = &aie2_ops, /* NPU4 can share NPU1's callback */
+};
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
new file mode 100644
index 000000000000..5f1cf83461c4
--- /dev/null
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU5 */
+#define NPU5_REG_BAR_INDEX 0
+#define NPU5_MBOX_BAR_INDEX 0
+#define NPU5_PSP_BAR_INDEX 4
+#define NPU5_SMU_BAR_INDEX 5
+#define NPU5_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU5_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu5_dev_priv = {
+ .fw_path = "amdnpu/17f0_11/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU5_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU5_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU5_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU5_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU5_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU5_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU5_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu5_info = {
+ .reg_bar = NPU5_REG_BAR_INDEX,
+ .mbox_bar = NPU5_MBOX_BAR_INDEX,
+ .sram_bar = NPU5_SRAM_BAR_INDEX,
+ .psp_bar = NPU5_PSP_BAR_INDEX,
+ .smu_bar = NPU5_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu5",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu5_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
new file mode 100644
index 000000000000..94a7005685a7
--- /dev/null
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU6 */
+#define NPU6_REG_BAR_INDEX 0
+#define NPU6_MBOX_BAR_INDEX 0
+#define NPU6_PSP_BAR_INDEX 4
+#define NPU6_SMU_BAR_INDEX 5
+#define NPU6_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu6_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU6_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu6_info = {
+ .reg_bar = NPU6_REG_BAR_INDEX,
+ .mbox_bar = NPU6_MBOX_BAR_INDEX,
+ .sram_bar = NPU6_SRAM_BAR_INDEX,
+ .psp_bar = NPU6_PSP_BAR_INDEX,
+ .smu_bar = NPU6_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu6",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu6_dev_priv,
+ .ops = &aie2_ops,
+};
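The offset tables above store each register as a displacement into its BAR rather than as an absolute aperture address. The DEFINE_BAR_OFFSET macro itself lives in aie2_pci.h and is not shown in this diff; a minimal sketch, assuming it merely subtracts the matching *_BAR_BASE (NPU6_SRAM_OFFSET is a hypothetical name):

/* Hypothetical expansion for the SRAM BAR; the real macro is in aie2_pci.h */
#define NPU6_SRAM_OFFSET(reg)	((reg) - NPU6_SRAM_BAR_BASE)

/* e.g. FW_ALIVE_OFF = 0x361E000 - 0x3600000 = 0x1E000 into the SRAM BAR,
 * and MBOX_CHANN_OFF = 0x3600000 - 0x3600000 = 0, the start of the BAR.
 */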
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index aa826033b0ce..ca3357acd127 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -20,8 +20,6 @@
DEFINE_XARRAY_ALLOC(accel_minors_xa);
-static struct dentry *accel_debugfs_root;
-
static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
@@ -74,17 +72,6 @@ static const struct drm_info_list accel_debugfs_list[] = {
#define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
/**
- * accel_debugfs_init() - Initialize debugfs for device
- * @dev: Pointer to the device instance.
- *
- * This function creates a root directory for the device in debugfs.
- */
-void accel_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_dev_init(dev, accel_debugfs_root);
-}
-
-/**
* accel_debugfs_register() - Register debugfs for device
* @dev: Pointer to the device instance.
*
@@ -194,7 +181,6 @@ static const struct file_operations accel_stub_fops = {
void accel_core_exit(void)
{
unregister_chrdev(ACCEL_MAJOR, "accel");
- debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
WARN_ON(!xa_empty(&accel_minors_xa));
}
@@ -209,8 +195,6 @@ int __init accel_core_init(void)
goto error;
}
- accel_debugfs_root = debugfs_create_dir("accel", NULL);
-
ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops);
if (ret < 0)
DRM_ERROR("Cannot register ACCEL major: %d\n", ret);
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index be85336107f9..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -6,7 +6,7 @@
config DRM_ACCEL_HABANALABS
tristate "HabanaLabs AI accelerators"
depends on DRM_ACCEL
- depends on X86_64
+ depends on X86 && X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+	  If unsure, say N.
+endif # DRM_ACCEL_HABANALABS
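The topology requirement in the help text (NVMe device and accelerator under the same PCI root complex) is what the P2PDMA core enforces at runtime. A minimal sketch of such a check, assuming a hypothetical helper built on the in-tree pci_p2pdma_distance_many() API:

#include <linux/pci-p2pdma.h>

/*
 * Sketch only: true when the P2PDMA core reports a usable path between
 * the NVMe provider and the accelerator client; a negative distance
 * means the path crosses a host bridge not whitelisted for P2P.
 */
static bool hl_p2p_path_usable(struct pci_dev *nvme_pdev, struct device *accel_dev)
{
	struct device *clients[] = { accel_dev };

	return pci_p2pdma_distance_many(nvme_pdev, clients, 1, true) >= 0;
}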
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
+endif \ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 59823e3c3bf7..dee487724918 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -2586,7 +2586,7 @@ int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
cs_seq = args->in.seq;
timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
- ? msecs_to_jiffies(args->in.timeout * 1000)
+ ? secs_to_jiffies(args->in.timeout)
: hpriv->hdev->timeout_jiffies;
switch (cs_type) {
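This and the similar conversions below replace msecs_to_jiffies(t * 1000) with secs_to_jiffies(t). The two agree wherever the millisecond product is representable; the seconds helper just skips the intermediate multiply, which could overflow for very large user-supplied timeouts. A small sketch of the equivalence (cs_timeout is a hypothetical wrapper):

#include <linux/jiffies.h>

/* secs_to_jiffies(30) == msecs_to_jiffies(30 * MSEC_PER_SEC) for in-range input */
static unsigned long cs_timeout(u32 secs)
{
	return secs_to_jiffies(secs);
}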
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index b83141f58319..9f212b17611a 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -199,7 +199,6 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
mutex_init(&ctx->ts_reg_lock);
dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
- get_task_comm(task_comm, current), ctx->asid);
+ current->comm, ctx->asid);
}
return 0;
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index ca7677293a55..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -602,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+		seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+	seq_printf(s, "Last transfer: %zu bytes\n", dev_entry->dio_stats.last_len_read);
+	seq_puts(s, "Note: All parameters must be page-aligned (4KB)\n");
+
+ return 0;
+}
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+		dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+		dev_err(hdev->dev, "Invalid file descriptor: %u\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+		dev_err(hdev->dev,
+			"All parameters must be page-aligned (4KB)\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+		dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+	dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+		dev_err(hdev->dev, "SSD2HL operation failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+	dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\n", len_read);
+
+ return count;
+}
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+	dev_dbg(hdev->dev, "HL2SSD operation not implemented\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+		seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+	seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\n");
+	seq_printf(s, "Total operations: %llu\n", stats->total_ops);
+	seq_printf(s, "Successful ops: %llu\n", stats->successful_ops);
+	seq_printf(s, "Failed ops: %llu\n", stats->failed_ops);
+	seq_printf(s, "Success rate: %llu%%\n", success_rate);
+	seq_printf(s, "Total bytes: %llu\n", stats->bytes_transferred);
+	seq_printf(s, "Avg bytes per op: %llu\n", avg_bytes_per_op);
+	seq_printf(s, "Last transfer: %zu bytes\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "Write '1' to reset DIO statistics\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+		dev_dbg(hdev->dev, "DIO statistics reset\n");
+ } else {
+		dev_err(hdev->dev, "Write '1' to reset statistics\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif
+
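The dio_* nodes above accept a fixed key=value line; the fd is resolved with fget() in the writing process's file table, so the same process must hold the NVMe file open. A hypothetical userspace sequence (sketch; the debugfs path, device index, device VA and file name are assumptions):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The data file must be opened O_DIRECT, per hl_dio_fd_register() */
	int data_fd = open("/mnt/nvme/blob.bin", O_RDONLY | O_DIRECT);
	FILE *node = fopen("/sys/kernel/debug/accel/0/dio_ssd2hl", "w");

	if (data_fd < 0 || !node)
		return 1;

	/* fd, device VA, file offset and length; all must be page-aligned */
	fprintf(node, "fd=%d va=0x20000000 off=0 len=1048576", data_fd);

	fclose(node);
	close(data_fd);
	return 0;
}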
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -788,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+ char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+		{
+			struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+
+			/*
+			 * Copy the entry out under the lock, then drop the
+			 * lock around the print to minimize time under it.
+			 */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+
+ }
+}
+
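check_if_cfg_access_and_log() and hl_debugfs_cfg_access_history_dump() together form a fixed-size ring: the writer advances head modulo HL_DBGFS_CFG_ACCESS_HIST_LEN, and the dump walks backwards from head - 1, consuming valid entries. The open-coded wrap branches are equivalent to a modular decrement; hist_prev below is a hypothetical helper, not part of the patch:

/* Previous slot in a ring of HL_DBGFS_CFG_ACCESS_HIST_LEN entries */
static inline u32 hist_prev(u32 i)
{
	return (i + HL_DBGFS_CFG_ACCESS_HIST_LEN - 1) % HL_DBGFS_CFG_ACCESS_HIST_LEN;
}

With this helper, the "if (i == 0) i = LEN - 1; else i--;" branches above collapse to i = hist_prev(i).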
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -805,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1403,7 +1704,7 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return rc;
if (value)
- hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+ hdev->timeout_jiffies = secs_to_jiffies(value);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
@@ -1525,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1723,6 +2031,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->asic_prop.server_type);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+	hdev->debugfs_cfg_accesses.head = 0; /* already zeroed by allocation; explicit init for clarity */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
@@ -1780,6 +2101,7 @@ void hl_debugfs_device_fini(struct hl_device *hdev)
vfree(entry->state_dump[i]);
kfree(entry->entry_arr);
+
}
void hl_debugfs_add_device(struct hl_device *hdev)
@@ -1792,6 +2114,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
+
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
@@ -1924,3 +2247,4 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
up_write(&dev_entry->state_dump_sem);
}
+
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index e0cf3b4343bb..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
- msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+ secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}
@@ -1066,28 +1066,11 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (device_id == hdev->pdev->device);
}
-static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
- bool is_pq_hb)
-{
- time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
- : hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
- struct tm tm;
-
- if (!seconds)
- return;
-
- time64_to_tm(seconds, 0, &tm);
-
- snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
- tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-}
-
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
if (!prop->cpucp_info.eq_health_check_supported)
return true;
@@ -1095,17 +1078,15 @@ static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
if (!hdev->eq_heartbeat_received) {
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
- stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
- stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
dev_err(hdev->dev,
- "EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
+ "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
hdev->event_queue.ci,
heartbeat_debug_info->heartbeat_event_counter,
- eq_time_str,
+ &hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
hdev->kernel_queues[cpu_q_id].pi,
atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
- pq_time_str);
+ &hdev->heartbeat_debug_info.last_pq_heartbeat_ts);
hl_eq_dump(hdev, &hdev->event_queue);
@@ -1649,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+		dev_err(hdev->dev, "Cannot reset device, CPLD is shut down! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -2091,7 +2077,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
- msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
+ secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));
hdev->reset_info.watchdog_active = 1;
out:
spin_unlock(&hdev->reset_info.lock);
@@ -2595,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+	/* Reset the H/W (if it is accessible). It will be in idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2962,3 +2956,13 @@ void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *eve
mutex_unlock(&clk_throttle->lock);
}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 6f27ce4fa01b..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -702,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -822,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -2274,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2344,7 +2351,6 @@ struct hl_fpriv {
struct mutex ctx_lock;
};
-
/*
* DebugFS
*/
@@ -2372,6 +2378,7 @@ struct hl_debugfs_entry {
struct hl_dbg_device_entry *dev_entry;
};
+
/**
* struct hl_dbg_device_entry - ASIC specific debugfs manager.
* @root: root dentry.
@@ -2403,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2434,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+	time64_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index to add new entry to in cfg_access_list.
+ * @lock: protects @head and the entries in @cfg_access_list.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
/**
@@ -3281,6 +3318,7 @@ struct eq_heartbeat_debug_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3305,6 +3343,7 @@ struct eq_heartbeat_debug_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3357,6 +3396,7 @@ struct eq_heartbeat_debug_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3461,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3496,7 +3537,9 @@ struct hl_device {
struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
-
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3532,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -4089,6 +4133,7 @@ void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4110,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4185,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 708dfd10f39c..0035748f3228 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -101,7 +101,6 @@ static const struct drm_driver hl_driver = {
.major = LINUX_VERSION_MAJOR,
.minor = LINUX_VERSION_PATCHLEVEL,
.patchlevel = LINUX_VERSION_SUBLEVEL,
- .date = "20190505",
.fops = &hl_fops,
.open = hl_device_open,
@@ -362,8 +361,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
- hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
- MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;
@@ -388,7 +386,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
- hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(tmp_timeout);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 1dd6e23172ca..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,8 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/msr.h>
-
/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
@@ -963,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
@@ -1279,13 +1283,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
retcode = -EFAULT;
out_err:
- if (retcode) {
- char task_comm[TASK_COMM_LEN];
-
+ if (retcode)
dev_dbg_ratelimited(dev,
"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
- }
+ task_pid_nr(current), current->comm, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
@@ -1308,11 +1309,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- char task_comm[TASK_COMM_LEN];
-
dev_dbg_ratelimited(hdev->dev_ctrl,
"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ task_pid_nr(current), current->comm, cmd, nr);
return -ENOTTY;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <generated/uapi/linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for the habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (technically it can work with an IOMMU, but then it is almost
+ *    useless).
+ * 2. Only READ operations (can be extended in the future).
+ * 3. No sparse files (can be overcome in the future).
+ * 4. Kernel version >= 6.9.
+ * 5. Requiring page alignment is OK (no solution for partial pages right
+ *    now; how would we read them?).
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ *    Theoretically there is an idea of how this could be solved, but it is
+ *    probably unacceptable for upstream, and may not work in the end.
+ * 7. Either make sure the cards and disks are under the same PCI bridge, or
+ *    compile a custom kernel to hack around this.
+ */
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data I could milk out of the file handle
+ * provided by the user.
+ * @TODO: right now it is retrieved on each IO, but it could be done once
+ * via a dedicated IOCTL (call it, for example, HL_REGISTER_HANDLE).
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+		dev_err(hdev->dev, "file is not opened in O_DIRECT mode\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+	inode = file_inode(f->filp);
+	sb = inode->i_sb;
+	bd = sb->s_bdev;
+	/* Validate the block device before dereferencing it */
+	if (!bd || !bd->bd_disk) {
+		dev_err(hdev->dev, "invalid block device\n");
+		rc = -ENODEV;
+		goto fput;
+	}
+	gd = bd->bd_disk;
+
+	if (inode->i_blocks << sb->s_blocksize_bits < i_size_read(inode)) {
+		dev_err(hdev->dev, "sparse files are not currently supported\n");
+		rc = -EINVAL;
+		goto fput;
+	}
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+		/*
+		 * Re-check after the increment: hl_dio_stop() may have
+		 * cleared io_enabled between the first check and the
+		 * counter bump; back out if so.
+		 */
+ if (!hdev->hldio.io_enabled) {
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+		dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)\n",
+ device_va, rc);
+ return NULL;
+ }
+
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+	/* @TODO: this can be implemented smarter; vmalloc in the iopath is not
+	 * ideal. Maybe some variation of genpool. The number of pages may differ
+	 * greatly, so maybe even use pools of different sizes and choose the
+	 * closest one.
+	 */
+	io->bv = vzalloc(npages * sizeof(struct bio_vec));
+	if (!io->bv) {
+		/* Must release the iopath taken above; vfree(NULL) is a no-op */
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+			dev_err(hdev->dev, "error getting page struct for device va %#llx\n",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
+	iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+ dev_dbg(hdev->dev, "IO ended with %ld\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+	dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#zx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+ if (hdev->hldio.io_enabled) {
+ /* Wait for all the IO to finish */
+ hl_dio_set_io_enabled(hdev, false);
+ hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000, IO_STABILIZE_TIMEOUT);
+ }
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if io is enabled 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
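A minimal usage sketch of the macro above; fw_ready is a hypothetical flag, and the expression evaluates to 0 on success or -ETIMEDOUT:

/* Wait up to one second, sleeping ~1 ms per iteration */
int rc = hl_poll_timeout_condition(hdev, READ_ONCE(hdev->fw_ready),
				   1000, USEC_PER_SEC);

hl_dio_stop() in hldio.c uses the same macro to wait for the in-flight IO counter to drain before tearing down.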
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
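Because the disabled-configuration stubs are static inlines, call sites need no #ifdef of their own; a hypothetical caller (sketch) compiles identically whether or not CONFIG_HL_HLDIO is set:

/* Sketch: treat 'feature compiled out' (-EOPNOTSUPP) as a non-error */
static int hl_try_start_dio(struct hl_device *hdev)
{
	int rc = hl_dio_start(hdev);

	return rc == -EOPNOTSUPP ? 0 : rc;
}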
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 3348ad12c237..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define HL_MMU_DEBUG 0
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1840,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+ * 'ctx' can be still used here to get the file pointer, even after hl_ctx_put() was called,
+ * because releasing the compute device file involves another reference decrement, and it
+ * would be possible only after calling fput().
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -1859,7 +1861,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1879,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1890,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
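The export_dmabuf() rework above leans on the scope-based cleanup helpers from linux/cleanup.h: CLASS(get_unused_fd, fd)(flags) reserves a descriptor that is put back automatically on any early return, and take_fd() transfers ownership out of the guard right before fd_install(), which is why the old err_dma_buf_put unwind label could be dropped. A minimal sketch of the pattern in isolation (install_file_fd is hypothetical):

#include <linux/cleanup.h>
#include <linux/file.h>

static int install_file_fd(struct file *filp, unsigned int flags)
{
	CLASS(get_unused_fd, fd)(flags);	/* auto put_unused_fd() on early return */

	if (fd < 0)
		return fd;			/* nothing reserved, nothing to undo */

	/* any failure between here and take_fd() may simply return an error */

	fd_install(take_fd(fd), filp);		/* ownership moves to the fd table */
	return 0;
}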
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
@@ -2341,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 99cd83139d46..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index e9f8ccc0bbf9..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
@@ -368,7 +375,7 @@ out:
}
static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset,
+ const struct bin_attribute *attr, char *buf, loff_t offset,
size_t max_size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -443,7 +450,7 @@ static DEVICE_ATTR_RO(security_enabled);
static DEVICE_ATTR_RO(module_id);
static DEVICE_ATTR_RO(parent_device);
-static struct bin_attribute bin_attr_eeprom = {
+static const struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
.size = PAGE_SIZE,
.read = eeprom_read_handler
@@ -472,7 +479,7 @@ static struct attribute *hl_dev_attrs[] = {
NULL,
};
-static struct bin_attribute *hl_dev_bin_attrs[] = {
+static const struct bin_attribute *const hl_dev_bin_attrs[] = {
&bin_attr_eeprom,
NULL
};
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index fa893a9b826e..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -4168,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index a38b88baadf2..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -3150,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3162,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6484,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
@@ -6774,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7026,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7052,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -9603,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -10172,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10260,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10296,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -10437,7 +10801,7 @@ end:
(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- kfree(lin_dma_pkts_arr);
+ kvfree(lin_dma_pkts_arr);
return rc;
}
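
The kfree() -> kvfree() change above only holds together if the buffer's
allocation side can return vmalloc-backed memory. A minimal sketch of the
matching allocate/release pairing, assuming a kvmalloc_array() allocation
site (the actual allocation is outside this hunk, and this helper is
hypothetical):

#include <linux/slab.h>

/* Illustrative only: mirrors the kvfree() pairing, not the driver's code. */
static int example_send_packets(size_t cnt, size_t elem_size)
{
	void *arr = kvmalloc_array(cnt, elem_size, GFP_KERNEL | __GFP_ZERO);

	if (!arr)
		return -ENOMEM;

	/* ... build and send lin_dma packets ... */

	kvfree(arr);	/* safe for both kmalloc- and vmalloc-backed memory */
	return 0;
}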
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 05117272cac7..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
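
One caveat worth noting for the GAUDI2_*_TO_STR lookups added above: the
macro argument expands twice (once for the bounds check, once for the array
index), so side-effecting expressions should be avoided. A hedged usage
sketch:

	u32 qid = GAUDI2_QUEUE_ID_SIZE + 1;

	/* Out-of-range IDs degrade to "not found" instead of an OOB read. */
	dev_err(hdev->dev, "queue %s\n", GAUDI2_QUEUE_ID_TO_STR(qid));
	/* Avoid GAUDI2_QUEUE_ID_TO_STR(qid++): the argument expands twice. */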
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 682c53245286..9e055b5ce03d 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU
select FW_LOADER
select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
+ select WANT_DEV_COREDUMP
help
 Choose this option if you have a system with a 14th generation
Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
@@ -15,3 +16,12 @@ config DRM_ACCEL_IVPU
and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
+
+config DRM_ACCEL_IVPU_DEBUG
+ bool "Intel NPU debug mode"
+ depends on DRM_ACCEL_IVPU
+ help
+ Choose this option to enable additional
+ debug features for the Intel NPU driver:
+ - Always print debug messages regardless of dyndbg config,
+ - Enable unsafe module params.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index ebd682a42eb1..1029e0bab061 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -16,8 +16,14 @@ intel_vpu-y := \
ivpu_mmu_context.o \
ivpu_ms.o \
ivpu_pm.o \
- ivpu_sysfs.o
+ ivpu_sysfs.o \
+ ivpu_trace_points.o
intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
+intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
+
+subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG
+
+CFLAGS_ivpu_trace_points.o = -I$(src)
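
The -I$(src) flag above is the usual arrangement for a tracepoint
translation unit, which must expand its own trace header with
CREATE_TRACE_POINTS defined. A sketch of what ivpu_trace_points.c
conventionally contains; the header name is an assumption, since that file
is not part of this diff:

// SPDX-License-Identifier: GPL-2.0-only

/* Assumed header name; the real file is not shown in this diff. */
#define CREATE_TRACE_POINTS
#include "ivpu_trace.h"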
diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c
new file mode 100644
index 000000000000..16ad0c30818c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+
+#include "ivpu_coredump.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "vpu_boot_api.h"
+
+#define CRASH_DUMP_HEADER "Intel NPU crash dump"
+#define CRASH_DUMP_HEADERS_SIZE SZ_4K
+
+void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_print_iterator pi = {};
+ struct drm_printer p;
+ size_t coredump_size;
+ char *coredump;
+
+ coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE +
+ ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb);
+ coredump = vmalloc(coredump_size);
+ if (!coredump)
+ return;
+
+ pi.data = coredump;
+ pi.remain = coredump_size;
+ p = drm_coredump_printer(&pi);
+
+ drm_printf(&p, "%s\n", CRASH_DUMP_HEADER);
+ drm_printf(&p, "FW version: %s\n", vdev->fw->version);
+ ivpu_fw_log_print(vdev, false, &p);
+
+ dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL);
+}
diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h
new file mode 100644
index 000000000000..8efb09d02441
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_COREDUMP_H__
+#define __IVPU_COREDUMP_H__
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_fw_log.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+void ivpu_dev_coredump(struct ivpu_device *vdev);
+#else
+static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+ ivpu_fw_log_print(vdev, false, &p);
+}
+#endif
+
+#endif /* __IVPU_COREDUMP_H__ */
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 6f86f8df30db..cd24ccd20ba6 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -45,6 +46,14 @@ static int fw_name_show(struct seq_file *s, void *v)
return 0;
}
+static int fw_version_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%s\n", vdev->fw->version);
+ return 0;
+}
+
static int fw_trace_capability_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
@@ -108,42 +117,43 @@ static int reset_pending_show(struct seq_file *s, void *v)
return 0;
}
+static int firewall_irq_counter_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
+ return 0;
+}
+
static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
+ {"fw_version", fw_version_show, 0},
{"fw_trace_capability", fw_trace_capability_show, 0},
{"fw_trace_config", fw_trace_config_show, 0},
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
+ {"firewall_irq_counter", firewall_irq_counter_show, 0},
};
-static ssize_t
-dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int dvfs_mode_get(void *data, u64 *dvfs_mode)
{
- struct ivpu_device *vdev = file->private_data;
- struct ivpu_fw_info *fw = vdev->fw;
- u32 dvfs_mode;
- int ret;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
- if (ret < 0)
- return ret;
-
- fw->dvfs_mode = dvfs_mode;
+ *dvfs_mode = vdev->fw->dvfs_mode;
+ return 0;
+}
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret)
- return ret;
+static int dvfs_mode_set(void *data, u64 dvfs_mode)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- return size;
+ vdev->fw->dvfs_mode = (u32)dvfs_mode;
+ return pci_try_reset_function(to_pci_dev(vdev->drm.dev));
}
-static const struct file_operations dvfs_mode_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = dvfs_mode_fops_write,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(dvfs_mode_fops, dvfs_mode_get, dvfs_mode_set, "%llu\n");
static ssize_t
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
@@ -192,7 +202,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, l
if (!size)
return -EINVAL;
- ivpu_fw_log_clear(vdev);
+ ivpu_fw_log_mark_read(vdev);
return size;
}
@@ -322,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -337,49 +347,23 @@ static const struct file_operations ivpu_force_recovery_fops = {
.write = ivpu_force_recovery_fn,
};
-static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_reset_engine_fn(void *data, u64 val)
{
- struct ivpu_device *vdev = file->private_data;
-
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- return size;
+ return ivpu_jsm_reset_engine(vdev, (u32)val);
}
-static const struct file_operations ivpu_reset_engine_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = ivpu_reset_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
-static ssize_t
-ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_resume_engine_fn(void *data, u64 val)
{
- struct ivpu_device *vdev = file->private_data;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
-
- return size;
+ return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
}
-static const struct file_operations ivpu_resume_engine_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = ivpu_resume_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
static int dct_active_get(void *data, u64 *active_percent)
{
@@ -399,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
if (active_percent)
@@ -414,6 +398,88 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static int priority_bands_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = s->private;
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ switch (band) {
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
+ seq_puts(s, "Idle: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
+ seq_puts(s, "Normal: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
+ seq_puts(s, "Focus: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
+ seq_puts(s, "Realtime: ");
+ break;
+ }
+
+ seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ hw->hws.grace_period[band], hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+ }
+
+ return 0;
+}
+
+static int priority_bands_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, priority_bands_show, inode->i_private);
+}
+
+static ssize_t
+priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct ivpu_device *vdev = s->private;
+ char buf[64];
+ u32 grace_period;
+ u32 process_grace_period;
+ u32 process_quantum;
+ u32 band;
+ int ret;
+
+ if (size >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
+ &process_quantum);
+ if (ret != 4)
+ return -EINVAL;
+
+ if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
+ return -EINVAL;
+
+ vdev->hw->hws.grace_period[band] = grace_period;
+ vdev->hw->hws.process_grace_period[band] = process_grace_period;
+ vdev->hw->hws.process_quantum[band] = process_quantum;
+
+ return size;
+}
+
+static const struct file_operations ivpu_hws_priority_bands_fops = {
+ .owner = THIS_MODULE,
+ .open = priority_bands_fops_open,
+ .write = priority_bands_fops_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
@@ -423,7 +489,7 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
&ivpu_force_recovery_fops);
- debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+ debugfs_create_file("dvfs_mode", 0644, debugfs_root, vdev,
&dvfs_mode_fops);
debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
@@ -436,6 +502,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
&fw_trace_level_fops);
+ debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
+ &ivpu_hws_priority_bands_fops);
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
@@ -447,4 +515,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_root, vdev, &fw_profiling_freq_fops);
debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
}
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_hw", debugfs_root, &ivpu_hw_failure);
+#endif
}
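
The conversions above (dvfs_mode, reset_engine, resume_engine) all follow
the same DEFINE_DEBUGFS_ATTRIBUTE() pattern, which replaces a hand-rolled
struct file_operations with a u64 get/set pair. A minimal sketch of the
pattern; the attribute name here is hypothetical:

#include <linux/debugfs.h>

static u64 example_val;	/* hypothetical attribute backing store */

static int example_get(void *data, u64 *val)
{
	*val = example_val;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_val = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

/* Readable and writable, hence mode 0644 as in the dvfs_mode hunk above:
 * debugfs_create_file("example", 0644, parent, NULL, &example_fops);
 */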
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index c91400ecf926..3289751b4757 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -14,7 +16,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
@@ -29,21 +31,21 @@
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
+#include "vpu_boot_api.h"
#ifndef DRIVER_VERSION_STR
-#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
- __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
+#endif
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -53,9 +55,9 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
-int ivpu_sched_mode;
+int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
-MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -85,7 +87,7 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
ivpu_cmdq_release_all_locked(file_priv);
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ ivpu_mmu_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
}
@@ -103,6 +105,8 @@ static void file_priv_release(struct kref *ref)
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
+ xa_destroy(&file_priv->cmdq_xa);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
@@ -116,8 +120,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
struct ivpu_file_priv *file_priv = *link;
struct ivpu_device *vdev = file_priv->vdev;
- drm_WARN_ON(&vdev->drm, !file_priv);
-
ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
@@ -125,20 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
- switch (args->index) {
+ switch (capability) {
case DRM_IVPU_CAP_METRIC_STREAMER:
- args->value = 1;
- break;
+ return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 1;
- break;
+ return true;
+ case DRM_IVPU_CAP_MANAGE_CMDQ:
+ return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
- return -EINVAL;
+ return false;
}
-
- return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -164,7 +164,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
+ args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -198,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
- ret = ivpu_get_capabilities(vdev, args);
+ args->value = ivpu_is_capable(vdev, args->index);
break;
default:
ret = -EINVAL;
@@ -255,9 +255,14 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_unlock;
}
- ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
- if (ret)
- goto err_xa_erase;
+ ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
+
+ file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
+ xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
+ file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
+ file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);
@@ -269,8 +274,6 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
return 0;
-err_xa_erase:
- xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
mutex_destroy(&file_priv->ms_lock);
@@ -304,6 +307,9 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -346,7 +352,7 @@ static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
int ret = 0;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_setup_priority_bands(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
@@ -368,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
+ drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
@@ -380,10 +389,7 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_mmu_evtq_dump(vdev);
- ivpu_fw_log_dump(vdev);
- return ret;
+ goto err_diagnose_failure;
}
ivpu_hw_irq_clear(vdev);
@@ -394,18 +400,33 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- return ret;
+ goto err_disable_ipc;
- return ivpu_hw_sched_init(vdev);
+ ret = ivpu_hw_sched_init(vdev);
+ if (ret)
+ goto err_disable_ipc;
}
return 0;
+
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
+err_diagnose_failure:
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ return ret;
}
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
+ flush_work(&vdev->irq_ipc_work);
+ flush_work(&vdev->irq_dct_work);
+ flush_work(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -438,7 +459,7 @@ static const struct drm_driver driver = {
.postclose = ivpu_postclose,
.gem_create_object = ivpu_gem_create_object,
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_prime_import = ivpu_gem_prime_import,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -446,58 +467,9 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRM_IVPU_DRIVER_MAJOR,
- .minor = DRM_IVPU_DRIVER_MINOR,
-};
-
-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
- mutex_lock(&vdev->context_list_lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- if (!file_priv->has_mmu_faults || file_priv->aborted)
- continue;
-
- mutex_lock(&file_priv->lock);
- ivpu_context_abort_locked(file_priv);
- file_priv->aborted = true;
- mutex_unlock(&file_priv->lock);
- }
-
- mutex_unlock(&vdev->context_list_lock);
-}
-
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
- struct ivpu_device *vdev = arg;
- u8 irq_src;
-
- if (kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_NONE;
-
- while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
- switch (irq_src) {
- case IVPU_HW_IRQ_SRC_IPC:
- ivpu_ipc_irq_thread_handler(vdev);
- break;
- case IVPU_HW_IRQ_SRC_MMU_EVTQ:
- ivpu_context_abort_invalid(vdev);
- break;
- case IVPU_HW_IRQ_SRC_DCT:
- ivpu_pm_dct_irq_thread_handler(vdev);
- break;
- default:
- ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
- break;
- }
- }
-
- return IRQ_HANDLED;
-}
+ .major = 1,
+};
static int ivpu_irq_init(struct ivpu_device *vdev)
{
@@ -510,12 +482,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
+ INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+ INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+ INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
ivpu_irq_handlers_init(vdev);
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
- ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -600,16 +576,23 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
+ atomic_set(&vdev->job_timeout_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
+ vdev->db_limit.min = IVPU_MIN_DB;
+ vdev->db_limit.max = IVPU_MAX_DB;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
@@ -632,9 +615,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_shutdown;
- ret = ivpu_mmu_global_context_init(vdev);
- if (ret)
- goto err_shutdown;
+ ivpu_mmu_global_context_init(vdev);
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -696,7 +677,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_jobs_abort_all(vdev);
- ivpu_pm_cancel_recovery(vdev);
+ ivpu_pm_disable_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
@@ -722,6 +703,8 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
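
The IRQ rework above drops the threaded handler in favour of a hard handler
plus dedicated work items (irq_ipc_work, irq_dct_work, context_abort_work).
A hedged sketch of that split; the handler body is illustrative, not the
driver's actual ivpu_hw_irq_handler():

static irqreturn_t example_hw_irq_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	/* Acknowledge the hardware in hard-IRQ context... */

	/* ...and defer the slow path to process context. */
	schedule_work(&vdev->irq_ipc_work);

	return IRQ_HANDLED;
}

This split is also why ivpu_prepare_for_reset() gains flush_work() calls:
any queued work must drain before the device goes down.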
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 63f13b697eed..62ab1c654e63 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -21,11 +21,12 @@
#define DRIVER_NAME "intel_vpu"
#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
-#define DRIVER_DATE "20230117"
-#define PCI_DEVICE_ID_MTL 0x7d1d
-#define PCI_DEVICE_ID_ARL 0xad1d
-#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
+#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_PTL_P 0xb03e
+#define PCI_DEVICE_ID_WCL 0xfd3e
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -46,17 +47,23 @@
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255
-#define IVPU_NUM_ENGINES 2
+#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
+#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
#define IVPU_NUM_PRIORITIES 4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
+#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
-#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
+#define IVPU_CMDQ_MIN_ID 1
+#define IVPU_CMDQ_MAX_ID 255
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_HSLE 4
#define IVPU_PLATFORM_INVALID 8
+#define IVPU_SCHED_MODE_AUTO -1
+
#define IVPU_DBG_REG BIT(0)
#define IVPU_DBG_IRQ BIT(1)
#define IVPU_DBG_MMU BIT(2)
@@ -105,6 +112,7 @@ struct ivpu_wa_table {
bool disable_clock_relinquish;
bool disable_d0i3_msg;
bool wp0_during_power_up;
+ bool disable_d0i2;
};
struct ivpu_hw_info;
@@ -134,12 +142,20 @@ struct ivpu_device {
struct xa_limit context_xa_limit;
struct xarray db_xa;
+ struct xa_limit db_limit;
+ u32 db_next;
+
+ struct work_struct irq_ipc_work;
+ struct work_struct irq_dct_work;
+ struct work_struct context_abort_work;
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
+ struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
+ atomic_t job_timeout_counter;
atomic64_t unique_id_counter;
@@ -150,8 +166,10 @@ struct ivpu_device {
int boot;
int jsm;
int tdr;
+ int inference;
int autosuspend;
int d0i3_entry_msg;
+ int state_dump_msg;
} timeout;
};
@@ -163,11 +181,15 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
- struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
+ struct xarray cmdq_xa;
struct ivpu_mmu_context ctx;
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
+ struct xa_limit job_limit;
+ u32 job_id_next;
+ struct xa_limit cmdq_limit;
+ u32 cmdq_id_next;
bool has_mmu_faults;
bool bound;
bool aborted;
@@ -185,9 +207,13 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
-#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
-#define IVPU_TEST_MODE_HWS_EXTRA_EVENTS BIT(7)
+#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
+#define IVPU_TEST_MODE_TURBO_ENABLE BIT(9)
+#define IVPU_TEST_MODE_TURBO_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(11)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(12)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(13)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -196,6 +222,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
@@ -215,6 +242,9 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
return IVPU_HW_IP_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_IP_40XX;
+ case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
+ return IVPU_HW_IP_50XX;
default:
dump_stack();
ivpu_err(vdev, "Unknown NPU IP generation\n");
@@ -229,6 +259,8 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_ARL:
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
+ case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
@@ -267,7 +299,8 @@ static inline bool ivpu_is_simics(struct ivpu_device *vdev)
static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
{
- return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA ||
+ ivpu_get_platform(vdev) == IVPU_PLATFORM_HSLE;
}
static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
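
The new IVPU_JOB_ID_* masks split a 32-bit job ID into a context part and a
per-context sequence part, which is how file_priv->job_limit is seeded in
ivpu_open() above. A small sketch of composing and decomposing such an ID;
the helper names are illustrative:

#include <linux/bitfield.h>

static u32 example_job_id(u32 ctx_id, u32 seq)
{
	return FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, ctx_id - 1) |
	       FIELD_PREP(IVPU_JOB_ID_JOB_MASK, seq);
}

static u32 example_job_id_to_ctx(u32 job_id)
{
	return FIELD_GET(IVPU_JOB_ID_CONTEXT_MASK, job_id) + 1;
}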
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index ede6165e09d9..9db741695401 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -25,7 +25,6 @@
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
-#define FW_VERSION_HEADER_SIZE SZ_4K
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
#define WATCHDOG_MSS_REDIRECT 32
@@ -47,22 +46,27 @@
#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
static char *ivpu_firmware;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
+#endif
static struct {
int gen;
const char *name;
} fw_names[] = {
- { IVPU_HW_IP_37XX, "vpu_37xx.bin" },
+ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
- { IVPU_HW_IP_40XX, "vpu_40xx.bin" },
+ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
};
/* Production fw_names from the table above */
-MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
-MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
static int ivpu_fw_request(struct ivpu_device *vdev)
{
@@ -135,6 +139,18 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
return true;
}
+static u32
+ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
+ return ivpu_sched_mode;
+
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, JSM, 3, 24))
+ return VPU_SCHEDULING_MODE_OS;
+
+ return VPU_SCHEDULING_MODE_HW;
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -191,8 +207,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
- ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
- (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
+ if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
+ ivpu_warn(vdev, "Missing firmware version\n");
+
+ ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
@@ -208,14 +226,26 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
- fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+ fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
fw->dvfs_mode = 0;
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+ fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
+ ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
+ fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
@@ -311,7 +341,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
goto err_free_fw_mem;
}
- if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+ if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
@@ -514,6 +544,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
+ boot_params->power_profile);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -544,7 +576,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
- boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
@@ -567,8 +598,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
- boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
- boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ }
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@@ -604,14 +637,16 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
- boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
if (!IVPU_WA(disable_d0i3_msg))
boot_params->d0i3_delayed_entry = 1;
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
+ if (IVPU_WA(disable_d0i2))
+ boot_params->power_profile |= BIT(1);
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 40d9d17be3f5..7081913fb0dd 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,11 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_FW_H__
#define __IVPU_FW_H__
+#include "vpu_jsm_api.h"
+
+#define FW_VERSION_HEADER_SIZE SZ_4K
+#define FW_VERSION_STR_SIZE SZ_256
+
struct ivpu_device;
struct ivpu_bo;
struct vpu_boot_params;
@@ -13,6 +18,7 @@ struct vpu_boot_params;
struct ivpu_fw_info {
const struct firmware *file;
const char *name;
+ char version[FW_VERSION_STR_SIZE];
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
@@ -32,12 +38,14 @@ struct ivpu_fw_info {
u32 secondary_preempt_buf_size;
u64 read_only_addr;
u32 read_only_size;
+ u32 sched_mode;
+ u64 last_heartbeat;
};
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index ef0adb5e0fbe..337c906b0210 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/ctype.h>
@@ -15,19 +15,19 @@
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
-#define IVPU_FW_LOG_LINE_LENGTH 256
+#define IVPU_FW_LOG_LINE_LENGTH 256
-unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
-module_param(ivpu_log_level, uint, 0444);
-MODULE_PARM_DESC(ivpu_log_level,
- "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
+module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
+MODULE_PARM_DESC(fw_log_level,
+ "NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));
-static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
- struct vpu_tracing_buffer_header **log_header)
+static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ struct vpu_tracing_buffer_header **out_log)
{
struct vpu_tracing_buffer_header *log;
@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return -EINVAL;
}
- *log_header = log;
+ *out_log = log;
*offset += log->size;
ivpu_dbg(vdev, FW_BOOT,
@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return 0;
}
-static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
}
line[index] = 0;
if (index != 0)
- drm_printf(p, "%s\n", line);
+ drm_printf(p, "%s", line);
}
-static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
- const char *prefix, bool only_new_msgs, struct drm_printer *p)
+static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
+ bool only_new_msgs, struct drm_printer *p)
{
- char *log_buffer = (void *)log + log->header_size;
- u32 log_size = log->size - log->header_size;
- u32 log_start = log->read_index;
- u32 log_end = log->write_index;
-
- if (!(log->write_index || log->wrap_count) ||
- (log->write_index == log->read_index && only_new_msgs)) {
- drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
- return;
+ char *log_data = (void *)log + log->header_size;
+ u32 data_size = log->size - log->header_size;
+ u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
+ u32 log_end = READ_ONCE(log->write_index);
+
+ if (log->wrap_count == log->read_wrap_count) {
+ if (log_end <= log_start) {
+ drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+ return;
+ }
+ } else if (log->wrap_count == log->read_wrap_count + 1) {
+ if (log_end > log_start)
+ log_start = log_end;
+ } else {
+ log_start = log_end;
}
drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
- if (log->write_index > log->read_index) {
- buffer_print(log_buffer + log_start, log_end - log_start, p);
+ if (log_end > log_start) {
+ fw_log_print_lines(log_data + log_start, log_end - log_start, p);
} else {
- buffer_print(log_buffer + log_end, log_size - log_end, p);
- buffer_print(log_buffer, log_end, p);
+ fw_log_print_lines(log_data + log_start, data_size - log_start, p);
+ fw_log_print_lines(log_data, log_end, p);
}
- drm_printf(p, "\x1b[0m");
+ drm_printf(p, "\n\x1b[0m"); /* add new line and clear formatting */
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+static void
+fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
+ struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
{
- struct vpu_tracing_buffer_header *log_header;
+ struct vpu_tracing_buffer_header *log;
u32 next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
+ fw_log_print_buffer(log, name, only_new_msgs, p);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+ fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
+ fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
+}
+
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
+{
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
+
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
}
-void ivpu_fw_log_clear(struct ivpu_device *vdev)
+void ivpu_fw_log_reset(struct ivpu_device *vdev)
{
- struct vpu_tracing_buffer_header *log_header;
- u32 next = 0;
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
}
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
index 0b2573f6f315..8bb528a73cb7 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.h
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_FW_LOG_H__
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/drm_print.h>
-
#include "ivpu_drv.h"
#define IVPU_FW_LOG_DEFAULT 0
@@ -19,20 +17,15 @@
#define IVPU_FW_LOG_ERROR 4
#define IVPU_FW_LOG_FATAL 5
-extern unsigned int ivpu_log_level;
-
#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
-void ivpu_fw_log_clear(struct ivpu_device *vdev);
+extern unsigned int ivpu_fw_log_level;
-static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
-{
- struct drm_printer p = drm_info_printer(vdev->drm.dev);
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
+void ivpu_fw_log_reset(struct ivpu_device *vdev);
- ivpu_fw_log_print(vdev, false, &p);
-}
#endif /* __IVPU_FW_LOG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1b409dbd332d..59cfcf3eaded 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -20,15 +20,27 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)bo->base.base.import_attach);
+ (bool)drm_gem_is_imported(&bo->base.base));
+}
+
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+ return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+ dma_resv_unlock(bo->base.base.resv);
}
/*
@@ -41,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
int ret = 0;
- mutex_lock(&bo->lock);
-
ivpu_dbg_bo(vdev, bo, "pin");
- drm_WARN_ON(&vdev->drm, !bo->ctx);
- if (!bo->mmu_mapped) {
- struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+ return ret;
+ }
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
- goto unlock;
- }
+ ivpu_bo_lock(bo);
+ if (!bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
@@ -67,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
}
unlock:
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
return ret;
}
@@ -82,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
if (!drm_dev_enter(&vdev->drm, &idx))
return -ENODEV;
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
@@ -92,9 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
- ivpu_dbg_bo(vdev, bo, "alloc");
-
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_dev_exit(idx);
@@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+ lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -120,17 +130,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
return;
- dma_resv_lock(bo->base.base.resv, NULL);
if (bo->base.sgt) {
dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(bo->base.sgt);
kfree(bo->base.sgt);
bo->base.sgt = NULL;
}
- dma_resv_unlock(bo->base.base.resv);
}
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -142,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
if (bo->ctx == ctx) {
ivpu_dbg_bo(vdev, bo, "unbind");
ivpu_bo_unbind_locked(bo);
}
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
mutex_unlock(&vdev->bo_list_lock);
}
@@ -167,12 +175,52 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
INIT_LIST_HEAD(&bo->bo_list_node);
- mutex_init(&bo->lock);
return &bo->base.base;
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct device *attach_dev = dev->dev;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ attach = dma_buf_attach(dma_buf, attach_dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -190,6 +238,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
+ bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -197,6 +246,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
+ ivpu_dbg_bo(vdev, bo, "alloc");
+
return bo;
}
@@ -234,12 +285,16 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
list_del(&bo->bo_list_node);
mutex_unlock(&vdev->bo_list_lock);
- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+ !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+ drm_WARN_ON(&vdev->drm, bo->base.vaddr);
ivpu_bo_unbind_locked(bo);
- mutex_destroy(&bo->lock);
+ drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+ drm_WARN_ON(&vdev->drm, bo->ctx);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
drm_gem_shmem_free(&bo->base);
}
@@ -271,7 +326,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_alloc(vdev, size, args->flags);
+ bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
@@ -279,7 +334,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (!ret)
+ if (ret)
+ ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ else
args->vpu_addr = bo->vpu_addr;
drm_gem_object_put(&bo->base.base);
@@ -302,7 +360,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags);
+ bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, range->start, size, flags);
@@ -318,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
goto err_put;
if (flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
if (ret)
goto err_put;
@@ -343,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
}
drm_gem_object_put(&bo->base.base);
@@ -364,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
bo = to_ivpu_bo(obj);
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_gem_object_put(obj);
return ret;
@@ -384,6 +442,9 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+	/* Add 1 jiffy to ensure the wait function never times out before the intended timeout_ns */
+ timeout += 1;
+
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -EINVAL;
@@ -403,10 +464,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
- bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
@@ -415,12 +476,12 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
drm_printf(p, " imported");
drm_printf(p, "\n");
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
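
The new ivpu_gem_prime_import() open-codes the dma-buf import path (attach, take a buffer reference, map the attachment, wrap the sg table in a shmem GEM object) and unwinds in strict reverse order when any step fails. A stubbed sketch of that goto-unwind shape (the resource names are illustrative stand-ins, not the DRM API):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for dma_buf_attach()/map_attachment()/object creation. */
static int acquire(const char *what, int fail)
{
	if (fail)
		return -ENOMEM;
	printf("acquire %s\n", what);
	return 0;
}

static void release(const char *what) { printf("release %s\n", what); }

/* Acquire A, then B, then C; on failure release only what was taken,
 * in reverse order - the same shape as the fail_unmap/fail_detach labels. */
static int import(int fail_at)
{
	int ret;

	ret = acquire("attachment", fail_at == 1);
	if (ret)
		return ret;

	ret = acquire("sg table", fail_at == 2);
	if (ret)
		goto fail_detach;

	ret = acquire("gem object", fail_at == 3);
	if (ret)
		goto fail_unmap;

	return 0;

fail_unmap:
	release("sg table");
fail_detach:
	release("attachment");
	return ret;
}

int main(void)
{
	for (int i = 0; i <= 3; i++)
		printf("fail_at=%d -> %d\n", i, import(i));
	return 0;
}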
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index d975000abd78..aa8ff14f7aae 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -17,10 +17,10 @@ struct ivpu_bo {
struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
u32 flags;
u32 job_status; /* Valid only for command buffer */
+ u32 ctx_id;
bool mmu_mapped;
};
@@ -28,6 +28,7 @@ int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 27f0fe4d54e0..08dcc31b56f4 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -9,6 +9,16 @@
#include "ivpu_hw_ip.h"
#include <linux/dmi.h>
+#include <linux/fault-inject.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(ivpu_hw_failure);
+
+static char *ivpu_fail_hw;
+module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
+MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
+#endif
static char *platform_to_str(u32 platform)
{
@@ -19,43 +29,36 @@ static char *platform_to_str(u32 platform)
return "SIMICS";
case IVPU_PLATFORM_FPGA:
return "FPGA";
+ case IVPU_PLATFORM_HSLE:
+ return "HSLE";
default:
return "Invalid platform";
}
}
-static const struct dmi_system_id dmi_platform_simulation[] = {
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
- DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
- },
- },
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Simics"),
- },
- },
- { }
-};
-
static void platform_init(struct ivpu_device *vdev)
{
- if (dmi_check_system(dmi_platform_simulation))
- vdev->platform = IVPU_PLATFORM_SIMICS;
- else
- vdev->platform = IVPU_PLATFORM_SILICON;
+ int platform = ivpu_hw_btrs_platform_read(vdev);
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);
+
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ case IVPU_PLATFORM_SIMICS:
+ case IVPU_PLATFORM_FPGA:
+ case IVPU_PLATFORM_HSLE:
+ vdev->platform = platform;
+ break;
- ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
- platform_to_str(vdev->platform), vdev->platform);
+ default:
+ ivpu_err(vdev, "Invalid platform type: %d\n", platform);
+ break;
+ }
}
static void wa_init(struct ivpu_device *vdev)
{
- vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
@@ -65,14 +68,24 @@ static void wa_init(struct ivpu_device *vdev)
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
vdev->wa.disable_clock_relinquish = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
+ vdev->wa.disable_clock_relinquish = false;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
+ vdev->wa.disable_clock_relinquish = true;
+
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->wa.wp0_during_power_up = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
+ vdev->wa.disable_d0i2 = true;
+
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
IVPU_PRINT_WA(interrupt_clear_with_0);
IVPU_PRINT_WA(disable_clock_relinquish);
IVPU_PRINT_WA(wp0_during_power_up);
+ IVPU_PRINT_WA(disable_d0i2);
}
static void timeouts_init(struct ivpu_device *vdev)
@@ -81,44 +94,71 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = -1;
vdev->timeout.jsm = -1;
vdev->timeout.tdr = -1;
+ vdev->timeout.inference = -1;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
- vdev->timeout.boot = 100000;
- vdev->timeout.jsm = 50000;
- vdev->timeout.tdr = 2000000;
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 15000;
+ vdev->timeout.tdr = 30000;
+ vdev->timeout.inference = 900000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
+ vdev->timeout.state_dump_msg = 10000;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
- vdev->timeout.autosuspend = -1;
+ vdev->timeout.inference = 300000;
+ vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 100;
+ vdev->timeout.state_dump_msg = 10;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
+ vdev->timeout.inference = 60000;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->timeout.autosuspend = 10;
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
+ vdev->timeout.state_dump_msg = 100;
}
}
+static void priority_bands_init(struct ivpu_device *vdev)
+{
+ /* Idle */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
+ /* Normal */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
+ /* Focus */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
+ /* Realtime */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+ ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+ ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
}
@@ -245,10 +285,17 @@ int ivpu_hw_init(struct ivpu_device *vdev)
{
ivpu_hw_btrs_info_init(vdev);
ivpu_hw_btrs_freq_ratios_init(vdev);
+ priority_bands_init(vdev);
memory_ranges_init(vdev);
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
+ atomic_set(&vdev->hw->firewall_irq_counter, 0);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (ivpu_fail_hw)
+ setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
+#endif
return 0;
}
@@ -281,8 +328,6 @@ void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
- INIT_KFIFO(vdev->hw->irq.fifo);
-
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
else
@@ -296,7 +341,6 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev)
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
- kfifo_reset(&vdev->hw->irq.fifo);
ivpu_hw_ip_irq_enable(vdev);
ivpu_hw_btrs_irq_enable(vdev);
}
@@ -323,9 +367,9 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
ivpu_hw_btrs_global_int_enable(vdev);
- if (!kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_WAKE_THREAD;
- if (ip_handled || btrs_handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
+ if (!ip_handled && !btrs_handled)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ return IRQ_HANDLED;
}
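
platform_init() now trusts a hardware register instead of DMI board matching: ivpu_hw_btrs_platform_read(), added further down in this patch, pulls the platform code out of bits 31:29 of the LNL VPU_STATUS register. A small sketch of the mask-and-shift that GENMASK()/REG_GET_FLD() perform (plain C; __builtin_ctz is a GCC/Clang builtin and the register value is made up):

#include <stdint.h>
#include <stdio.h>

/* GENMASK(31, 29) equivalent: three bits at positions 29..31. */
#define PLATFORM_MASK	(((uint32_t)0x7) << 29)

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	/* FIELD_GET(): mask, then shift down by the mask's lowest set bit. */
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t vpu_status = 0x40000123;	/* illustrative register value */

	/* top three bits are 010 -> platform code 2 */
	printf("platform = %u\n", field_get(PLATFORM_MASK, vpu_status));
	return 0;
}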
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1c0c98e3afb8..d79668fe1609 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -1,23 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
-#include <linux/kfifo.h>
-
#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
-#define IVPU_HW_IRQ_FIFO_LENGTH 1024
-
-#define IVPU_HW_IRQ_SRC_IPC 1
-#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
-#define IVPU_HW_IRQ_SRC_DCT 3
-
struct ivpu_addr_range {
resource_size_t start;
resource_size_t end;
@@ -27,7 +19,6 @@ struct ivpu_hw_info {
struct {
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
- DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
} irq;
struct {
struct ivpu_addr_range global;
@@ -45,13 +36,18 @@ struct ivpu_hw_info {
u8 pn_ratio;
u32 profiling_freq;
} pll;
+ struct {
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ } hws;
u32 tile_fuse;
- u32 sched_mode;
u32 sku;
u16 config;
int dma_bits;
ktime_t d0i3_entry_host_ts;
u64 d0i3_entry_vpu_ts;
+ atomic_t firewall_irq_counter;
};
int ivpu_hw_init(struct ivpu_device *vdev);
@@ -86,19 +82,19 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
return range->end - range->start;
}
-static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
+ return ivpu_hw_btrs_dpu_max_freq_get(vdev);
}
-static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev)
{
- ivpu_hw_ip_irq_clear(vdev);
+ return ivpu_hw_btrs_dpu_freq_get(vdev);
}
-static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_pll_freq_get(vdev);
+ ivpu_hw_ip_irq_clear(vdev);
}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index d0b795b344c7..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -115,6 +115,8 @@
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK GENMASK(15, 8)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK GENMASK(23, 16)
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 745e5248803d..afdb3b2aa72a 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
+#include <linux/units.h>
+
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
@@ -28,17 +30,12 @@
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
-#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
-#define PLL_CONFIG_DEFAULT 0x0
-#define PLL_SIMULATION_FREQ 10000000
-#define PLL_REF_CLK_FREQ 50000000
+#define PLL_REF_CLK_FREQ 50000000ull
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)
@@ -62,6 +59,8 @@
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
+
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
@@ -141,16 +140,10 @@ static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config
}
config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
- if (!tile_disable_check(config)) {
- ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
- return -EIO;
- }
+ if (!tile_disable_check(config))
+ ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);
- if (config)
- ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
- BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
- else
- ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
+ ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);
*tile_fuse_config = config;
return 0;
@@ -162,8 +155,7 @@ static int info_init_mtl(struct ivpu_device *vdev)
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
- hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
- hw->sched_mode = ivpu_sched_mode;
+ hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
return 0;
}
@@ -178,7 +170,6 @@ static int info_init_lnl(struct ivpu_device *vdev)
if (ret)
return ret;
- hw->sched_mode = ivpu_sched_mode;
hw->tile_fuse = tile_fuse_config;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
@@ -311,14 +302,10 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
- wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
+ wp->cfg = 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
-
- /* Simics cannot start without at least one tile */
- if (enable && ivpu_is_simics(vdev))
- wp->cfg = 1;
}
static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
@@ -346,8 +333,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
prepare_wp_request(vdev, &wp, enable);
- ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
- PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
+ pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
ret = wp_request_send(vdev, &wp);
if (ret) {
@@ -465,9 +452,6 @@ int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return 0;
- if (ivpu_is_simics(vdev))
- return 0;
-
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}
@@ -588,6 +572,47 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
+static u32 pll_config_get_mtl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
+}
+
+static u32 pll_config_get_lnl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
+}
+
+static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
+{
+ return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
+}
+
+static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio) / 2;
+}
+
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(ratio);
+ else
+ return pll_ratio_to_dpu_freq_lnl(ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
+{
+ return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
+ else
+ return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
+}
+
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
@@ -597,9 +622,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
if (!status)
return false;
- if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
- REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_mtl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
@@ -645,12 +673,15 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_dct_work);
}
- if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_lnl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
@@ -733,60 +764,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
-static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
-{
- u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
- u32 cpu_clock;
-
- if ((config & 0xff) == MTL_PLL_RATIO_4_3)
- cpu_clock = pll_clock * 2 / 4;
- else
- cpu_clock = pll_clock * 2 / 5;
-
- return cpu_clock;
-}
-
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- struct ivpu_hw_info *hw = vdev->hw;
-
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_ratio_to_freq_mtl(ratio, hw->config);
- else
- return PLL_RATIO_TO_FREQ(ratio);
-}
-
-static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
- pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
-
- if (!ivpu_is_silicon(vdev))
- return PLL_SIMULATION_FREQ;
-
- return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
-}
-
-static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
- pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
-
- return PLL_RATIO_TO_FREQ(pll_curr_ratio);
-}
-
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
-{
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_freq_get_mtl(vdev);
- else
- return pll_freq_get_lnl(vdev);
-}
-
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
@@ -903,3 +880,10 @@ void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
else
return diagnose_failure_lnl(vdev);
}
+
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
+}
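
The replacement buttress helpers report DPU frequency instead of raw PLL frequency: both start from the 50 MHz reference clock times the PLL ratio, then MTL scales by 2/3 and LNL divides by 2. A worked sketch of that arithmetic (the ratio value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PLL_REF_CLK_FREQ 50000000ull	/* 50 MHz reference */
#define HZ_PER_MHZ       1000000ull

static uint32_t dpu_freq_mtl(uint16_t ratio) { return PLL_REF_CLK_FREQ * ratio * 2 / 3; }
static uint32_t dpu_freq_lnl(uint16_t ratio) { return PLL_REF_CLK_FREQ * ratio / 2; }

int main(void)
{
	/* e.g. ratio 0x20 (32): 32 * 50 MHz = 1.6 GHz PLL clock */
	printf("MTL: %llu MHz\n", dpu_freq_mtl(0x20) / HZ_PER_MHZ);	/* 1066 */
	printf("LNL: %llu MHz\n", dpu_freq_lnl(0x20) / HZ_PER_MHZ);	/* 800 */
	return 0;
}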
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 04f14f50fed6..032c384ac3d4 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_H__
@@ -13,9 +13,8 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
-#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
@@ -32,12 +31,12 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
@@ -46,5 +45,6 @@ void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev);
#endif /* __IVPU_HW_BTRS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
index fc51f3098f97..fff2ef2cada6 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -86,6 +86,7 @@
#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_PLATFORM_MASK GENMASK(31, 29)
#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index dfd2f4a5b526..2bf9882ab52e 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -8,15 +8,12 @@
#include "ivpu_hw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
-#define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0
-#define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH 18
-#define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT 3
-#define PWR_ISLAND_STATUS_DLY_FREQ_HIGH 46
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIM_SAFE_ENABLE 0xf1d0dead
@@ -268,20 +265,15 @@ void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
idle_gen_drive_40xx(vdev, false);
}
-static void pwr_island_delay_set_50xx(struct ivpu_device *vdev)
+static void
+pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
- u32 val, post, status;
-
- if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) {
- post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT;
- status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT;
- } else {
- post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH;
- status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH;
- }
+ u32 val;
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
@@ -311,9 +303,6 @@ static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-
- if (enable)
- ndelay(500);
}
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
@@ -326,9 +315,6 @@ static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
-
- if (!enable)
- ndelay(500);
}
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
@@ -347,9 +333,11 @@ static void pwr_island_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
pwr_island_trickle_drive_37xx(vdev, true);
+ ndelay(500);
pwr_island_drive_37xx(vdev, true);
} else {
pwr_island_trickle_drive_40xx(vdev, true);
+ ndelay(500);
pwr_island_drive_40xx(vdev, true);
}
}
@@ -686,13 +674,37 @@ static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
+static void pwr_island_delay_set(struct ivpu_device *vdev)
+{
+ bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
+ u32 post, post1, post2, status;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return;
+
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_WCL:
+ case PCI_DEVICE_ID_PTL_P:
+ post = high ? 18 : 0;
+ post1 = 0;
+ post2 = 0;
+ status = high ? 46 : 3;
+ break;
+
+ default:
+ dump_stack();
+ ivpu_err(vdev, "Unknown device ID\n");
+ return;
+ }
+
+ pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
+}
+
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
- if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX)
- pwr_island_delay_set_50xx(vdev);
-
+ pwr_island_delay_set(vdev);
pwr_island_enable(vdev);
ret = wait_for_pwr_island_status(vdev, 0x1);
@@ -957,14 +969,14 @@ void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
@@ -1062,7 +1074,10 @@ static void irq_wdt_mss_handler(struct ivpu_device *vdev)
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
+ atomic_inc(&vdev->hw->firewall_irq_counter);
+
+ ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
+ atomic_read(&vdev->hw->firewall_irq_counter));
}
/* Handler for IRQs from NPU core */
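
pwr_island_delay_set_50xx() now packs three delays (POST, POST1, POST2, per the new masks added to ivpu_hw_40xx_reg.h above) into a single 32-bit register with a read-modify-write. A sketch of the clear-then-insert step that each REG_SET_FLD_NUM() expands to (plain C; the initial register value is made up):

#include <stdint.h>
#include <stdio.h>

#define POST_DLY_MASK	0x000000ffu	/* GENMASK(7, 0)   */
#define POST1_DLY_MASK	0x0000ff00u	/* GENMASK(15, 8)  */
#define POST2_DLY_MASK	0x00ff0000u	/* GENMASK(23, 16) */

static uint32_t set_fld(uint32_t mask, uint32_t field_val, uint32_t reg)
{
	/* REG_SET_FLD_NUM(): clear the field, then insert the new value. */
	return (reg & ~mask) | ((field_val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint32_t val = 0xdeadbeef;	/* pretend read of the delay register */

	val = set_fld(POST_DLY_MASK, 18, val);
	val = set_fld(POST1_DLY_MASK, 0, val);
	val = set_fld(POST2_DLY_MASK, 0, val);
	printf("0x%08x\n", val);	/* 0xde000012: delays packed, rest kept */
	return 0;
}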
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
index 79b3f441eac4..66259b0ead02 100644
--- a/drivers/accel/ivpu/ivpu_hw_reg_io.h
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -7,6 +7,7 @@
#define __IVPU_HW_REG_IO_H__
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -16,13 +17,11 @@
#define REG_IO_ERROR 0xffffffff
#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
-#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
-#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
@@ -47,31 +46,42 @@
#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
((num) == FIELD_GET(REG##_##FLD##_MASK, val))
-#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGB_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
-
-#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGV_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
+#define REGB_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regb, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+#define REGV_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regv, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+extern struct fault_attr ivpu_hw_failure;
+
+static inline int __must_check
+ivpu_hw_reg_poll_fld(struct ivpu_device *vdev, void __iomem *base,
+ u32 reg_offset, u32 reg_mask, u32 exp_masked_val, u32 timeout_us,
+ const char *func_name, const char *reg_name, const char *fld_name)
+{
+ u32 reg_val;
+ int ret;
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s started (exp_val 0x%x)\n",
+ func_name, reg_name, reg_offset, fld_name, exp_masked_val);
+
+ ret = read_poll_timeout(readl, reg_val, (reg_val & reg_mask) == exp_masked_val,
+ REG_POLL_SLEEP_US, timeout_us, false, base + reg_offset);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&ivpu_hw_failure, 1))
+ ret = -ETIMEDOUT;
+#endif
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s %s (reg_val 0x%08x)\n",
+ func_name, reg_name, reg_offset, fld_name, ret ? "ETIMEDOUT" : "OK", reg_val);
+
+ return ret;
+}
static inline u32
ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
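
The two near-identical REGB/REGV poll macros collapse into one ivpu_hw_reg_poll_fld() inline that compares against a pre-masked expected value, which also gives fault injection a single choke point. A standalone sketch of the underlying read_poll_timeout() pattern (userspace C with usleep(); the register read is stubbed):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t read_reg(void)
{
	/* Stand-in for readl(): pretend the field latches after a while. */
	static int calls;

	return ++calls >= 4 ? 0x1 : 0x0;
}

static int poll_fld(uint32_t mask, uint32_t exp_masked_val,
		    unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited_us = 0;

	for (;;) {
		uint32_t val = read_reg();

		if ((val & mask) == exp_masked_val)
			return 0;
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited_us += sleep_us;
	}
}

int main(void)
{
	printf("poll -> %d\n", poll_fld(0x1, 0x1, 50, 1000));	/* 0 on success */
	return 0;
}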
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 78b32a823241..5f00809d448a 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -15,6 +15,7 @@
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
#define IPC_MAX_RX_MSG 128
@@ -140,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
@@ -227,6 +227,7 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, stru
goto unlock;
ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+ trace_jsm("[tx]", req);
unlock:
mutex_unlock(&ipc->lock);
@@ -278,12 +279,13 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
- ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+ ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
if (jsm_msg)
memcpy(jsm_msg, rx_msg->jsm_msg, size);
+ trace_jsm("[rx]", rx_msg->jsm_msg);
}
ivpu_ipc_rx_msg_del(vdev, rx_msg);
@@ -291,15 +293,17 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
return ret;
}
-static int
+int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms)
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
{
struct ivpu_ipc_consumer cons;
int ret;
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
+ pm_runtime_enabled(vdev->drm.dev));
+
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
ret = ivpu_ipc_send(vdev, &cons, req);
@@ -325,19 +329,21 @@ consumer_del:
return ret;
}
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
if (ret != -ETIMEDOUT)
- return ret;
+ goto rpm_put;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
@@ -345,21 +351,33 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
if (hb_ret == -ETIMEDOUT)
ivpu_pm_trigger_recovery(vdev, "IPC timeout");
+rpm_put:
+ ivpu_rpm_put(vdev);
return ret;
}
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms)
{
+ struct ivpu_ipc_consumer cons;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
- ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+ ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+
+ ret = ivpu_ipc_send(vdev, &cons, req);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
+ goto consumer_del;
+ }
+
+ msleep(timeout_ms);
+consumer_del:
+ ivpu_ipc_consumer_del(vdev, &cons);
ivpu_rpm_put(vdev);
return ret;
}
@@ -441,13 +459,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- if (!list_empty(&ipc->cb_msg_list))
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_ipc_work);
}
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
struct list_head cb_msg_list;
@@ -518,7 +535,6 @@ void ivpu_ipc_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
- drm_WARN_ON(&vdev->drm, ipc->on);
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 4fe38141045e..b524a1985b9d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -90,7 +90,7 @@ void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
@@ -101,12 +101,13 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);
-
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms);
#endif /* __IVPU_IPC_H__ */
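
ivpu_ipc_send_receive() now takes its own runtime-PM reference and, when the request times out, first probes the firmware with an engine-heartbeat query before escalating to recovery; the caller gets the original error back, not the heartbeat's. A stubbed sketch of that control flow (names illustrative):

#include <errno.h>
#include <stdio.h>

static int rpm_get(void)  { printf("rpm get\n"); return 0; }
static void rpm_put(void) { printf("rpm put\n"); }
static void trigger_recovery(const char *why) { printf("recovery: %s\n", why); }

static int send_receive_internal(const char *msg)
{
	/* Stand-in: pretend the real request times out, heartbeats succeed. */
	return msg[0] == 'h' ? 0 : -ETIMEDOUT;
}

static int send_receive(const char *msg)
{
	int ret, hb_ret;

	ret = rpm_get();		/* keep the device awake for the exchange */
	if (ret < 0)
		return ret;

	ret = send_receive_internal(msg);
	if (ret != -ETIMEDOUT)
		goto rpm_put;

	/* Request timed out: is the firmware alive at all? */
	hb_ret = send_receive_internal("heartbeat");
	if (hb_ret == -ETIMEDOUT)
		trigger_recovery("IPC timeout");

rpm_put:
	rpm_put();
	return ret;			/* original error, not the heartbeat's */
}

int main(void)
{
	printf("ret = %d\n", send_receive("query"));
	return 0;
}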
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index be2e2bf0f43f..060f1fc031d3 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
@@ -17,12 +18,12 @@
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
#include "vpu_boot_api.h"
#define CMD_BUF_IDX 0
-#define JOB_ID_JOB_MASK GENMASK(7, 0)
-#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
@@ -35,24 +36,20 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
{
u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
- struct ivpu_addr_range range;
- if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
+ ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
return 0;
- range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
- range.end = vdev->hw->ranges.user.end;
- cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
- DRM_IVPU_BO_WC);
+ cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
+ primary_size, DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
- range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
- range.end = vdev->hw->ranges.shave.end;
- cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
- DRM_IVPU_BO_WC);
+ cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
+ secondary_size, DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -62,24 +59,24 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
err_free_primary:
ivpu_bo_free(cmdq->primary_preempt_buf);
+ cmdq->primary_preempt_buf = NULL;
return -ENOMEM;
}
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
- drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
- drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
- ivpu_bo_free(cmdq->primary_preempt_buf);
- ivpu_bo_free(cmdq->secondary_preempt_buf);
+ if (cmdq->primary_preempt_buf)
+ ivpu_bo_free(cmdq->primary_preempt_buf);
+ if (cmdq->secondary_preempt_buf)
+ ivpu_bo_free(cmdq->secondary_preempt_buf);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
- struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
int ret;
@@ -88,55 +85,114 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
- goto err_free_cmdq;
- }
-
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_xa;
+ goto err_free_cmdq;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
- goto err_free_cmdq_mem;
+ ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
return cmdq;
-err_free_cmdq_mem:
- ivpu_bo_free(cmdq->mem);
-err_erase_xa:
- xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
}
-static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+/**
+ * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
+ * @cmdq: Pointer to the command queue structure.
+ *
+ * Return: The number of entries that fit in the command queue memory.
+ */
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
{
- if (!cmdq)
- return;
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
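+
+/*
+ * Editorial sketch (not part of the patch): the queue BO holds a
+ * vpu_job_queue_header followed by an array of fixed-size entries, so for
+ * the SZ_4K buffer allocated in ivpu_cmdq_alloc() this evaluates to:
+ *
+ *   entry_count = (SZ_4K - sizeof(struct vpu_job_queue_header)) /
+ *                 sizeof(struct vpu_job_queue_entry);
+ *
+ * The concrete struct sizes are defined by the firmware API headers and are
+ * not restated here.
+ */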
+
+/**
+ * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
+ * @vdev: Pointer to the ivpu device structure.
+ * @flags: Input flags to determine the command queue flags.
+ *
+ * Return: The command queue flags derived from the input @flags and the
+ * current test-mode settings.
+ */
+static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
+{
+ u32 cmdq_flags = 0;
+
+ if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ /* Test mode can override the TURBO flag coming from the application */
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
+ cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ return cmdq_flags;
+}
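+
+/*
+ * Editorial note: IVPU_TEST_MODE_TURBO_DISABLE is applied last above, so it
+ * overrides both IVPU_TEST_MODE_TURBO_ENABLE and an application-supplied
+ * DRM_IVPU_CMDQ_FLAG_TURBO.
+ */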
+
+static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
ivpu_bo_free(cmdq->mem);
- xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq = NULL;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_alloc(file_priv);
+ if (!cmdq) {
+ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
+ }
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
+ cmdq->priority = priority;
+
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
+ cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
+ return cmdq;
+
+err_free_cmdq:
+ ivpu_cmdq_free(file_priv, cmdq);
+ return NULL;
+}
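+
+/*
+ * A minimal sketch (illustration only, with a limit/cursor pair standing in
+ * for file_priv->cmdq_limit and file_priv->cmdq_id_next) of the
+ * xa_alloc_cyclic() pattern used above: it returns 0 on success, 1 if the
+ * ID space wrapped around, and a negative errno on failure, which is why
+ * the callers in this patch test ret < 0 rather than ret != 0.
+ */
+#if 0
+static int example_alloc_id(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next)
+{
+ int ret = xa_alloc_cyclic(xa, id, entry, limit, next, GFP_KERNEL);
+
+ if (ret < 0) /* e.g. -EBUSY: no free ID within the limit */
+ return ret;
+ return 0; /* ret == 1 (ID space wrapped) still counts as success */
+}
+#endif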
+
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
- ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
+ ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
task_pid_nr(current), engine,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return ret;
- ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
+ ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
priority);
if (ret)
return ret;
@@ -149,43 +205,51 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
struct ivpu_device *vdev = file_priv->vdev;
int ret;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
- ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
+ return ret;
+ }
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+ else
+ xa_erase(&vdev->db_xa, cmdq->db_id);
return ret;
}
-static int
-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
+static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+{
+ jobq->header.head = 0;
+ jobq->header.tail = 0;
+
+ wmb(); /* Flush WC buffer for jobq->header */
+}
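+
+/*
+ * Editorial note: the queue BO is write-combined (DRM_IVPU_BO_WC), so the
+ * wmb() above flushes the zeroed head/tail out of the WC buffer before the
+ * doorbell is re-registered; ivpu_cmdq_push_job() issues the same barrier
+ * before publishing a new tail.
+ */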
+
+static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
- struct vpu_job_queue_header *jobq_header;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (cmdq->db_registered)
+ if (cmdq->db_id)
return 0;
- cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
- sizeof(struct vpu_job_queue_entry));
+ ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);
- cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
- jobq_header = &cmdq->jobq->header;
- jobq_header->engine_idx = engine;
- jobq_header->head = 0;
- jobq_header->tail = 0;
- wmb(); /* Flush WC buffer for jobq->header */
-
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
if (ret)
return ret;
}
@@ -194,83 +258,97 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
if (ret)
return ret;
- cmdq->db_registered = true;
-
return 0;
}
-static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (!cmdq->db_registered)
+ if (!cmdq->db_id)
return 0;
- cmdq->db_registered = false;
+ ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+ if (!ret)
+ ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+ cmdq->id, file_priv->ctx.id);
}
- ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
- if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
return 0;
}
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
- u8 priority)
+static inline u8 ivpu_job_to_jsm_priority(u8 priority)
{
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
- int ret;
+ if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
+ return VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;
+
+ return priority - 1;
+}
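+
+/*
+ * Editorial note: DRM_IVPU_JOB_PRIORITY_DEFAULT (0) maps to the NORMAL band,
+ * while the explicit UAPI priorities 1..4 (IDLE..REALTIME) map to scheduling
+ * bands 0..3 via the priority - 1 arithmetic above.
+ */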
+
+static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ ivpu_cmdq_unregister(file_priv, cmdq);
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+ ivpu_cmdq_free(file_priv, cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_priv, u8 priority)
+{
+ struct ivpu_cmdq *cmdq;
+ unsigned long id;
lockdep_assert_held(&file_priv->lock);
+ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->is_legacy && cmdq->priority == priority)
+ break;
+
if (!cmdq) {
- cmdq = ivpu_cmdq_alloc(file_priv);
+ cmdq = ivpu_cmdq_create(file_priv, priority, 0);
if (!cmdq)
return NULL;
- file_priv->cmdq[cmdq_idx] = cmdq;
+ cmdq->is_legacy = true;
}
- ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
- if (ret)
- return NULL;
-
return cmdq;
}
-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq_id)
{
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
lockdep_assert_held(&file_priv->lock);
- if (cmdq) {
- file_priv->cmdq[cmdq_idx] = NULL;
- ivpu_cmdq_fini(file_priv, cmdq);
- ivpu_cmdq_free(file_priv, cmdq);
+ cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
+ if (!cmdq) {
+ ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
+ return NULL;
}
+
+ return cmdq;
}
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
- u16 engine;
- u8 priority;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
- ivpu_cmdq_release_locked(file_priv, engine, priority);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_destroy(file_priv, cmdq);
}
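+
+/*
+ * Editorial note: xa_for_each() is documented to tolerate removal of the
+ * current entry, so ivpu_cmdq_destroy() may xa_erase() cmdq->id from
+ * cmdq_xa inside the loop above.
+ */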
/*
@@ -281,19 +359,14 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
*/
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
- u16 engine;
- u8 priority;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
mutex_lock(&file_priv->lock);
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
-
- if (cmdq)
- cmdq->db_registered = false;
- }
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
}
mutex_unlock(&file_priv->lock);
@@ -312,31 +385,24 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
mutex_unlock(&vdev->context_list_lock);
}
-static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
-{
- u16 engine;
- u8 priority;
-
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
-
- if (file_priv->cmdq[cmdq_idx])
- ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
- }
- }
-}
-
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
+ ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
- ivpu_cmdq_fini_all(file_priv);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_unregister(file_priv, cmdq);
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+ ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
+ file_priv->aborted = true;
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -349,24 +415,29 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
/* Check if there is space left in job queue */
if (next_entry == header->head) {
- ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
- job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
+ job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
return -EBUSY;
}
- entry = &cmdq->jobq->job[tail];
+ entry = &cmdq->jobq->slot[tail].job;
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
- (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ if (cmdq->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+ }
+
+ if (cmdq->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size =
+ ivpu_bo_size(cmdq->secondary_preempt_buf);
+ }
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -424,8 +495,8 @@ static void ivpu_job_destroy(struct ivpu_job *job)
struct ivpu_device *vdev = job->vdev;
u32 i;
- ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
- job->job_id, job->file_priv->ctx.id, job->engine_idx);
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx);
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
@@ -457,6 +528,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->file_priv = ivpu_file_priv_get(file_priv);
+ trace_job("create", job);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
return job;
@@ -469,16 +541,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
{
struct ivpu_job *job;
- xa_lock(&vdev->submitted_jobs_xa);
- job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
vdev->busy_time);
}
- xa_unlock(&vdev->submitted_jobs_xa);
-
return job;
}
@@ -486,6 +556,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
{
struct ivpu_job *job;
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ guard(mutex)(&job->file_priv->lock);
+
+ if (job->file_priv->has_mmu_faults)
+ return 0;
+
+ /*
+ * Mark the context as faulty and defer destruction of the job to the
+ * job-abort thread handler, so that MMU faults and jobs returning the
+ * context-violation status are synchronized and handled the same way.
+ */
+ job->file_priv->has_mmu_faults = true;
+ queue_work(system_wq, &vdev->context_abort_work);
+ return 0;
+ }
+
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
if (!job)
return -ENOENT;
@@ -496,13 +588,18 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
- ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ trace_job("done", job);
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
ivpu_rpm_put(vdev);
+
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
+
return 0;
}
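+
+/*
+ * Editorial note: the timeout detection is now rearmed here, under
+ * submitted_jobs_lock, instead of in the job-done IPC callback, so checking
+ * submitted_jobs_xa and rearming happen atomically with the job removal.
+ */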
@@ -511,15 +608,32 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
struct ivpu_job *job;
unsigned long id;
+ mutex_lock(&vdev->submitted_jobs_lock);
+
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
-static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id)
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
+}
+
+static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
- struct xa_limit job_id_range;
struct ivpu_cmdq *cmdq;
bool is_first_job;
int ret;
@@ -528,27 +642,35 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
if (ret < 0)
return ret;
+ mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
+ if (cmdq_id == 0)
+ cmdq = ivpu_cmdq_acquire_legacy(file_priv, priority);
+ else
+ cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
- file_priv->ctx.id, job->engine_idx, priority);
+ ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
ret = -EINVAL;
- goto err_unlock_file_priv;
+ goto err_unlock;
+ }
+
+ ret = ivpu_cmdq_register(file_priv, cmdq);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register command queue: %d\n", ret);
+ goto err_unlock;
}
- job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
- job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
+ job->cmdq_id = cmdq->id;
- xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
- if (ret) {
+ ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
+ if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
- goto err_unlock_submitted_jobs_xa;
+ goto err_unlock;
}
ret = ivpu_cmdq_push_job(cmdq, job);
@@ -566,25 +688,26 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
vdev->busy_start_ts = ktime_get();
}
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+ trace_job("submit", job);
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa);
-
mutex_unlock(&file_priv->lock);
- if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ }
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
return 0;
err_erase_xa:
- __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
- xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
mutex_unlock(&file_priv->lock);
+ mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -593,7 +716,7 @@ static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
u32 buf_count, u32 commands_offset)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
enum dma_resv_usage usage;
@@ -655,49 +778,20 @@ unlock_reservations:
return ret;
}
-static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
-{
- if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
- return DRM_IVPU_JOB_PRIORITY_NORMAL;
-
- return priority - 1;
-}
-
-int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
+ u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
+ u8 priority)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
- struct drm_ivpu_submit *params = data;
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
- u8 priority;
- if (params->engine > DRM_IVPU_ENGINE_COPY)
- return -EINVAL;
-
- if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
- return -EINVAL;
-
- if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
- return -EINVAL;
-
- if (!IS_ALIGNED(params->commands_offset, 8))
- return -EINVAL;
-
- if (!file_priv->ctx.id)
- return -EINVAL;
-
- if (file_priv->has_mmu_faults)
- return -EBADFD;
-
- buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ buf_handles = kcalloc(buffer_count, sizeof(u32), GFP_KERNEL);
if (!buf_handles)
return -ENOMEM;
- ret = copy_from_user(buf_handles,
- (void __user *)params->buffers_ptr,
- params->buffer_count * sizeof(u32));
+ ret = copy_from_user(buf_handles, buffers_ptr, buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
goto err_free_handles;
@@ -708,27 +802,24 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
- file_priv->ctx.id, params->buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n",
+ file_priv->ctx.id, cmdq_id, buffer_count);
- job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
- params->commands_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
}
- priority = ivpu_job_to_hws_priority(file_priv, params->priority);
-
down_read(&vdev->pm->reset_lock);
- ret = ivpu_job_submit(job, priority);
+ ret = ivpu_job_submit(job, priority, cmdq_id);
up_read(&vdev->pm->reset_lock);
if (ret)
goto err_signal_fence;
@@ -748,12 +839,137 @@ err_free_handles:
return ret;
}
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_submit *args = data;
+ u8 priority;
+
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ priority = ivpu_job_to_jsm_priority(args->priority);
+
+ return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
+ (void __user *)args->buffers_ptr, args->commands_offset, priority);
+}
+
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_cmdq_submit *args = data;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
+ (void __user *)args->buffers_ptr, args->commands_offset, 0);
+}
+
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_create *args = data;
+ struct ivpu_cmdq *cmdq;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
+ if (cmdq)
+ args->cmdq_id = cmdq->id;
+
+ mutex_unlock(&file_priv->lock);
+
+ ivpu_rpm_put(vdev);
+
+ return cmdq ? 0 : -ENOMEM;
+}
+
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_destroy *args = data;
+ struct ivpu_cmdq *cmdq;
+ u32 cmdq_id = 0;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
+ if (!cmdq || cmdq->is_legacy) {
+ ret = -ENOENT;
+ } else {
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ ret = 0;
+ }
+
+ mutex_unlock(&file_priv->lock);
+
+ /* Abort any pending jobs only if cmdq was destroyed */
+ if (!ret)
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+ ivpu_rpm_put(vdev);
+
+ return ret;
+}
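+
+/*
+ * Editorial note: ivpu_cmdq_abort_all_jobs() runs only after file_priv->lock
+ * is dropped because it takes vdev->submitted_jobs_lock, and
+ * ivpu_job_submit() nests these locks the other way around
+ * (submitted_jobs_lock first, then file_priv->lock).
+ */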
+
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
- int ret;
if (!jsm_msg) {
ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -766,9 +982,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
- if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
- ivpu_start_job_timeout_detection(vdev);
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -781,3 +998,57 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
+
+void ivpu_context_abort_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+ struct ivpu_file_priv *file_priv;
+ struct ivpu_job *job;
+ unsigned long ctx_id;
+ unsigned long id;
+
+ if (drm_WARN_ON(&vdev->drm, pm_runtime_get_if_active(vdev->drm.dev) <= 0))
+ return;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ if (ivpu_jsm_reset_engine(vdev, 0))
+ return;
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ if (!file_priv->has_mmu_faults || file_priv->aborted)
+ continue;
+
+ mutex_lock(&file_priv->lock);
+ ivpu_context_abort_locked(file_priv);
+ mutex_unlock(&file_priv->lock);
+ }
+ mutex_unlock(&vdev->context_list_lock);
+
+ /*
+ * We will not receive new MMU event interrupts until the existing events
+ * are discarded; however, we want to discard them only after aborting the
+ * faulty context, to avoid generating new faults from that context.
+ */
+ ivpu_mmu_discard_events(vdev);
+
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ goto runtime_put;
+
+ if (ivpu_jsm_hws_resume_engine(vdev, 0))
+ return;
+ /*
+ * In hardware scheduling mode the NPU has already stopped processing jobs
+ * and won't send us any further notifications, so we have to free the
+ * job-related resources and notify userspace ourselves.
+ */
+ mutex_lock(&vdev->submitted_jobs_lock);
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->aborted)
+ ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+
+runtime_put:
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 6accb94028c7..2e301c2eea7b 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -28,8 +28,10 @@ struct ivpu_cmdq {
struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
+ u32 id;
u32 db_id;
- bool db_registered;
+ u8 priority;
+ bool is_legacy;
};
/**
@@ -49,6 +51,7 @@ struct ivpu_job {
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
u64 cmd_buf_vpu_addr;
+ u32 cmdq_id;
u32 job_id;
u32 engine_idx;
size_t bo_count;
@@ -56,14 +59,19 @@ struct ivpu_job {
};
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 46ef16c3c069..0256b2dfefc1 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -7,6 +7,8 @@
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
@@ -48,9 +50,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
- IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
@@ -131,7 +134,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
@@ -154,15 +157,17 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret)
+ if (ret) {
ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
+ }
return ret;
}
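+
+/*
+ * Editorial note: an engine reset that fails or times out leaves the
+ * scheduler state unknown, hence the ivpu_pm_trigger_recovery() fallback
+ * here and in ivpu_jsm_hws_resume_engine() below.
+ */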
@@ -173,7 +178,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
@@ -196,7 +201,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
- VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
command, ret);
@@ -270,9 +275,8 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
req.payload.pwr_d0i3_enter.send_response = 1;
- ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
- &resp, VPU_IPC_CHAN_GEN_CMD,
- vdev->timeout.d0i3_entry_msg);
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
if (ret)
return ret;
@@ -346,15 +350,17 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
- if (engine >= VPU_ENGINE_NB)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.hws_resume_engine.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret)
+ if (ret) {
ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
+ }
return ret;
}
@@ -394,8 +400,6 @@ int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u3
req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
req.payload.hws_set_scheduling_log.notify_index = 0;
- req.payload.hws_set_scheduling_log.enable_extra_events =
- ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
@@ -409,29 +413,21 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
struct vpu_jsm_msg resp;
+ struct ivpu_hw_info *hw = vdev->hw;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
+ &req.payload.hws_priority_band_setup;
int ret;
- /* Idle */
- req.payload.hws_priority_band_setup.grace_period[0] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
- /* Normal */
- req.payload.hws_priority_band_setup.grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
- /* Focus */
- req.payload.hws_priority_band_setup.grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
- /* Realtime */
- req.payload.hws_priority_band_setup.grace_period[3] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
-
- req.payload.hws_priority_band_setup.normal_band_percentage = 10;
-
- ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
- &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ setup->grace_period[band] = hw->hws.grace_period[band];
+ setup->process_grace_period[band] = hw->hws.process_grace_period[band];
+ setup->process_quantum[band] = hw->hws.process_quantum[band];
+ }
+ setup->normal_band_percentage = 10;
+
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
@@ -544,9 +540,8 @@ int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us
req.payload.pwr_dct_control.dct_active_us = active_us;
req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
- return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
- &resp, VPU_IPC_CHAN_ASYNC_CMD,
- vdev->timeout.jsm);
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
@@ -554,7 +549,14 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
struct vpu_jsm_msg resp;
- return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
- &resp, VPU_IPC_CHAN_ASYNC_CMD,
- vdev->timeout.jsm);
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
+
+int ivpu_jsm_state_dump(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
+
+ return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.state_dump_msg);
}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
index e4e42c0ff6e6..9e84d3526a14 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -43,4 +43,6 @@ int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mas
u64 buffer_size, u32 *sample_size, u64 *info_size);
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
+int ivpu_jsm_state_dump(struct ivpu_device *vdev);
+
#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index c078e214b221..5ea010568faa 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -20,6 +20,12 @@
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK BIT(0)
+
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
@@ -141,12 +147,6 @@
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
-#define IVPU_MMU_CR0_ATSCHK BIT(4)
-#define IVPU_MMU_CR0_CMDQEN BIT(3)
-#define IVPU_MMU_CR0_EVTQEN BIT(2)
-#define IVPU_MMU_CR0_PRIQEN BIT(1)
-#define IVPU_MMU_CR0_SMMUEN BIT(0)
-
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
@@ -596,7 +596,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
- val = IVPU_MMU_CR0_CMDQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -617,12 +617,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
- val |= IVPU_MMU_CR0_EVTQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
- val |= IVPU_MMU_CR0_ATSCHK;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -631,7 +631,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- val |= IVPU_MMU_CR0_SMMUEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
@@ -696,7 +696,7 @@ unlock:
return ret;
}
-static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
+static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
@@ -708,30 +708,29 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
return -EINVAL;
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
-
- if (cd_dma != 0) {
- cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
- IVPU_MMU_CD_0_TCR_EPD1 |
- IVPU_MMU_CD_0_AA64 |
- IVPU_MMU_CD_0_R |
- IVPU_MMU_CD_0_ASET |
- IVPU_MMU_CD_0_V;
- cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
- cd[2] = 0;
- cd[3] = 0x0000000000007444;
-
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- cd[0] |= IVPU_MMU_CD_0_A;
- } else {
- memset(cd, 0, sizeof(cd));
- }
+ drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid);
+
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
+ IVPU_MMU_CD_0_TCR_EPD1 |
+ IVPU_MMU_CD_0_AA64 |
+ IVPU_MMU_CD_0_R |
+ IVPU_MMU_CD_0_ASET;
+ cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
+ cd[2] = 0;
+ cd[3] = 0x0000000000007444;
+
+ /* For global and reserved contexts generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
+ cd[0] |= IVPU_MMU_CD_0_A;
+
+ if (valid)
+ cd[0] |= IVPU_MMU_CD_0_V;
WRITE_ONCE(entry[1], cd[1]);
WRITE_ONCE(entry[2], cd[2]);
@@ -741,8 +740,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
- ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
- cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
+ ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
mutex_lock(&mmu->lock);
if (!mmu->on)
@@ -750,38 +749,18 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
- goto unlock;
+ goto err_invalidate;
ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ goto err_invalidate;
unlock:
mutex_unlock(&mmu->lock);
- return ret;
-}
-
-static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
-{
- int ret;
-
- if (ssid == 0) {
- ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
- return -EINVAL;
- }
-
- ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
+ return 0;
+err_invalidate:
+ WRITE_ONCE(entry[0], 0);
+ mutex_unlock(&mmu->lock);
return ret;
}
@@ -808,12 +787,6 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
return ret;
}
- ret = ivpu_mmu_cd_add_gbl(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
- return ret;
- }
-
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
@@ -897,28 +870,107 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
return evt;
}
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+ if (enable)
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ else
+ val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+ return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
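+
+/*
+ * Editorial note: per the SMMUv3 programming model (assumed here), a CR0
+ * update takes effect once the corresponding CR0ACK bits match the written
+ * value, which is what the REGV_POLL_FLD() on CR0ACK above waits for.
+ */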
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ /*
+ * Disable the event queue (stop the MMU from updating the producer index)
+ * so that the consumer and producer indexes can be synchronized.
+ */
+ ivpu_mmu_evtq_disable(vdev);
+
+ vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+ ivpu_mmu_evtq_enable(vdev);
+
+ drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+
+ mutex_unlock(&mmu->lock);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 val;
+
+ if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ mutex_lock(&mmu->lock);
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ val = READ_ONCE(entry[0]);
+ val &= ~IVPU_MMU_CD_0_R;
+ WRITE_ONCE(entry[0], val);
+
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ ivpu_mmu_cmdq_sync(vdev);
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+}
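+
+/*
+ * Editorial note: IVPU_MMU_CD_0_R is taken here to be the fault
+ * record/report enable bit of the SMMUv3 context descriptor, so clearing it
+ * stops the MMU from queueing further fault events for this SSID while the
+ * context is being aborted.
+ */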
+
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
+ struct ivpu_file_priv *file_priv;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
- while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
- ivpu_mmu_dump_event(vdev, event);
-
- ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ while ((event = ivpu_mmu_get_event(vdev))) {
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
+ ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
+ ivpu_mmu_dump_event(vdev, event);
ivpu_pm_trigger_recovery(vdev, "MMU event");
return;
}
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv) {
+ if (!READ_ONCE(file_priv->has_mmu_faults)) {
+ ivpu_mmu_dump_event(vdev, event);
+ WRITE_ONCE(file_priv->has_mmu_faults, true);
+ }
+ }
}
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
@@ -966,12 +1018,12 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
- return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
+ return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
- ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
+ ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 6fa35c240710..1ce7529746ad 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -40,12 +40,14 @@ struct ivpu_mmu_info {
int ivpu_mmu_init(struct ivpu_device *vdev);
void ivpu_mmu_disable(struct ivpu_device *vdev);
int ivpu_mmu_enable(struct ivpu_device *vdev);
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
+void ivpu_mmu_discard_events(struct ivpu_device *vdev);
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index bbe652a7019d..f0267efa55aa 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -90,19 +90,6 @@ static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_
}
}
-static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
-{
- dma_addr_t pgd_dma;
-
- pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
- if (!pgtable->pgd_dma_ptr)
- return -ENOMEM;
-
- pgtable->pgd_dma = pgd_dma;
-
- return 0;
-}
-
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@@ -140,6 +127,27 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
}
ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+ pgtable->pgd_dma_ptr = NULL;
+ pgtable->pgd_dma = 0;
+}
+
+static u64*
+ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
+ dma_addr_t pgd_dma;
+
+ if (pgd_dma_ptr)
+ return pgd_dma_ptr;
+
+ pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
+ if (!pgd_dma_ptr)
+ return NULL;
+
+ pgtable->pgd_dma_ptr = pgd_dma_ptr;
+ pgtable->pgd_dma = pgd_dma;
+
+ return pgd_dma_ptr;
}
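+
+/*
+ * Editorial note: the PGD is now allocated lazily on first mapping (see
+ * ivpu_mmu_context_map_page() below) instead of at context creation, so
+ * ivpu_mmu_context_init() can no longer fail and a context that never maps
+ * anything never allocates a page table.
+ */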
static u64*
@@ -237,6 +245,12 @@ ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+ drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);
+
+ /* Allocate PGD - first level page table if needed */
+ if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
+ return -ENOMEM;
+
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
@@ -418,6 +432,7 @@ int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
+ u64 start_vpu_addr = vpu_addr;
struct scatterlist *sg;
int ret;
u64 prot;
@@ -448,20 +463,36 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
- mutex_unlock(&ctx->lock);
- return ret;
+ goto err_unmap_pages;
}
vpu_addr += size;
}
+ if (!ctx->is_cd_valid) {
+ ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
+ ctx->id, ret);
+ goto err_unmap_pages;
+ }
+ ctx->is_cd_valid = true;
+ }
+
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
- mutex_unlock(&ctx->lock);
-
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ goto err_unmap_pages;
+ }
+
+ mutex_unlock(&ctx->lock);
+ return 0;
+
+err_unmap_pages:
+ ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+ mutex_unlock(&ctx->lock);
return ret;
}
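+
+/*
+ * Editorial note: with start_vpu_addr recorded on entry, every error path
+ * unwinds through err_unmap_pages and unmaps the partially mapped range
+ * [start_vpu_addr, vpu_addr), leaving the context address space unchanged
+ * on failure.
+ */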
@@ -530,109 +561,77 @@ ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *n
mutex_unlock(&ctx->lock);
}
-static int
-ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
u64 start, end;
- int ret;
mutex_init(&ctx->lock);
- ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
- return ret;
- }
-
if (!context_id) {
start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user.start;
- end = vdev->hw->ranges.dma.end;
+ start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
+ end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
}
drm_mm_init(&ctx->mm, start, end - start);
ctx->id = context_id;
-
- return 0;
}
-static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
- return;
+ if (ctx->is_cd_valid) {
+ ivpu_mmu_cd_clear(vdev, ctx->id);
+ ctx->is_cd_valid = false;
+ }
mutex_destroy(&ctx->lock);
ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
-
- ctx->pgtable.pgd_dma_ptr = NULL;
- ctx->pgtable.pgd_dma = 0;
}
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_fini(vdev, &vdev->gctx);
+ ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
-}
-
-void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
-{
- return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
-}
-
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
-{
- struct ivpu_file_priv *file_priv;
-
- xa_lock(&vdev->context_xa);
-
- file_priv = xa_load(&vdev->context_xa, ssid);
- if (file_priv)
- file_priv->has_mmu_faults = true;
-
- xa_unlock(&vdev->context_xa);
-}
-
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
-{
int ret;
- drm_WARN_ON(&vdev->drm, !ctx_id);
+ ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
- ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
- return ret;
+ mutex_lock(&vdev->rctx.lock);
+
+ if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
+ ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
+ ret = -ENOMEM;
+ goto err_ctx_fini;
}
- ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
+ ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
if (ret) {
- ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
- goto err_context_fini;
+ ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
+ goto err_ctx_fini;
}
- return 0;
+ mutex_unlock(&vdev->rctx.lock);
+ return ret;
-err_context_fini:
- ivpu_mmu_context_fini(vdev, ctx);
+err_ctx_fini:
+ mutex_unlock(&vdev->rctx.lock);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
return ret;
}
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
- drm_WARN_ON(&vdev->drm, !ctx->id);
-
- ivpu_mmu_clear_pgtable(vdev, ctx->id);
- ivpu_mmu_context_fini(vdev, ctx);
+ ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 7f9aaf3d10c2..f255310968cf 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -23,21 +23,20 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
- struct mutex lock; /* Protects: mm, pgtable */
+ struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
+ bool is_cd_valid;
u32 id;
};
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index 2f9d37f5c208..2a043baf10ca 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <linux/pm_runtime.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -11,7 +12,7 @@
#include "ivpu_ms.h"
#include "ivpu_pm.h"
-#define MS_INFO_BUFFER_SIZE SZ_16K
+#define MS_INFO_BUFFER_SIZE SZ_64K
#define MS_NUM_BUFFERS 2
#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_MIN_SAMPLE_PERIOD_NS 1000000
@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@@ -96,6 +101,8 @@ err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
+ int ret;
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}
@@ -281,6 +300,9 @@ unlock:
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&file_priv->ms_lock);
@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
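
The common shape of the three ioctl changes above: each handler now holds a runtime-PM reference across its ms_lock critical section, so the NPU cannot runtime-suspend while metric streamer state is manipulated. A condensed sketch (names are from the hunks above; the body is elided):

	static int example_ms_ioctl(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
	{
		int ret;

		ret = ivpu_rpm_get(vdev);	/* resume the NPU if it is suspended */
		if (ret < 0)
			return ret;

		mutex_lock(&file_priv->ms_lock);
		/* ... look up / start / stop the metric streamer instance ... */
		mutex_unlock(&file_priv->ms_lock);

		ivpu_rpm_put(vdev);		/* drop the reference, allow autosuspend */
		return 0;
	}

ivpu_ms_cleanup() instead calls pm_runtime_get_sync()/pm_runtime_put_autosuspend() directly, presumably because it runs on file close and must proceed even if the resume reports an error.
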
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 59d3170f5e35..475ddc94f1cf 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -9,26 +9,34 @@
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_drv.h"
-#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
+#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
+#include "vpu_boot_api.h"
static bool ivpu_disable_recovery;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
+#endif
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+static unsigned long ivpu_inference_timeout_ms;
+module_param_named(inference_timeout_ms, ivpu_inference_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(inference_timeout_ms, "Inference maximum duration, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
@@ -37,8 +45,10 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_cmdq_reset_all_contexts(vdev);
ivpu_ipc_reset(vdev);
+ ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
+ fw->last_heartbeat = 0;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
@@ -73,8 +83,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
- pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -110,40 +120,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_fw_log_dump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -155,16 +182,11 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
return;
}
- if (ivpu_is_fpga(vdev)) {
- ivpu_err(vdev, "Recovery not available on FPGA\n");
- return;
- }
-
/* Trigger recovery if it's not in progress */
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_long_wq, &vdev->pm->recovery_work);
+ queue_work(system_unbound_wq, &vdev->pm->recovery_work);
}
}
@@ -172,7 +194,30 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ unsigned long inference_timeout_ms = ivpu_inference_timeout_ms ? ivpu_inference_timeout_ms :
+ vdev->timeout.inference;
+ u64 inference_max_retries;
+ u64 heartbeat;
+
+ if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
+ goto recovery;
+ }
+
+ inference_max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) >= inference_max_retries) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit (%lld) exceeded\n",
+ inference_max_retries);
+ goto recovery;
+ }
+
+ vdev->fw->last_heartbeat = heartbeat;
+ ivpu_start_job_timeout_detection(vdev);
+ return;
+recovery:
+ atomic_set(&vdev->job_timeout_counter, 0);
ivpu_pm_trigger_recovery(vdev, "TDR");
}
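
The retry budget above is plain integer arithmetic: the delayed work re-arms itself while the firmware heartbeat keeps advancing, up to DIV_ROUND_UP(inference_timeout_ms, timeout_ms) times. A worked example with illustrative values (not the driver defaults):

	/* Illustrative only: 2000 ms TDR period, 30000 ms inference budget */
	unsigned long timeout_ms = 2000;
	unsigned long inference_timeout_ms = 30000;
	u64 max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);	/* == 15 */

	/*
	 * job_timeout_counter is bumped on every firing of the work; once it
	 * reaches max_retries, recovery is forced even if the heartbeat still moves.
	 */
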
@@ -187,6 +232,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+ atomic_set(&vdev->job_timeout_counter, 0);
}
int ivpu_pm_suspend_cb(struct device *dev)
@@ -195,6 +241,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
unsigned long timeout;
+ trace_pm("suspend");
ivpu_dbg(vdev, PM, "Suspend..\n");
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
@@ -212,6 +259,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_pm_prepare_warm_boot(vdev);
ivpu_dbg(vdev, PM, "Suspend done.\n");
+ trace_pm("suspend done");
return 0;
}
@@ -222,6 +270,7 @@ int ivpu_pm_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("resume");
ivpu_dbg(vdev, PM, "Resume..\n");
ret = ivpu_resume(vdev);
@@ -229,6 +278,7 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
ivpu_dbg(vdev, PM, "Resume done.\n");
+ trace_pm("resume done");
return ret;
}
@@ -243,6 +293,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+ trace_pm("runtime suspend");
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
ivpu_mmu_disable(vdev);
@@ -262,13 +313,14 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
if (!is_idle || ret_d0i3) {
ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
atomic_inc(&vdev->pm->reset_counter);
- ivpu_fw_log_dump(vdev);
+ ivpu_dev_coredump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
}
ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
+ trace_pm("runtime suspend done");
return 0;
}
@@ -279,6 +331,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("runtime resume");
ivpu_dbg(vdev, PM, "Runtime resume..\n");
ret = ivpu_resume(vdev);
@@ -286,6 +339,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
ivpu_dbg(vdev, PM, "Runtime resume done.\n");
+ trace_pm("runtime resume done");
return ret;
}
@@ -295,7 +349,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -311,16 +368,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -328,18 +382,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
@@ -364,21 +412,21 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ pm_runtime_set_active(dev);
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
{
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
- cancel_work_sync(&vdev->pm->recovery_work);
+ disable_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
- pm_runtime_set_active(dev);
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -409,16 +457,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
- ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret);
+ ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = active_percent;
-
- ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
- active_percent, active_us, inactive_us);
return 0;
}
@@ -426,27 +475,29 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
- ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret);
+ ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = 0;
-
- ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
bool enable;
int ret;
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
- if (vdev->pm->dct_active_percent)
+ if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index b70efe6c36e4..a2aa7a27f32e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -25,7 +25,7 @@ struct ivpu_pm_info {
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
@@ -45,6 +45,6 @@ void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
int ivpu_pm_dct_init(struct ivpu_device *vdev);
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
int ivpu_pm_dct_disable(struct ivpu_device *vdev);
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
#endif /* __IVPU_PM_H__ */
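
The DCT duty-cycle split in ivpu_pm_dct_enable() above is a straight percentage of the period. A worked example, assuming DCT_PERIOD_US is 10000 (the real constant lives in ivpu_pm.h and may differ):

	u8 active_percent = 30;
	u32 active_us = (DCT_PERIOD_US * active_percent) / 100;	/* 3000 us in D0 */
	u32 inactive_us = DCT_PERIOD_US - active_us;		/* 7000 us in D0i2 */
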
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 913669f1786e..268ab7744a8b 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -1,15 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/units.h>
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
-/*
+/**
+ * DOC: npu_busy_time_us
+ *
* npu_busy_time_us is the time that the device spent executing jobs.
* The time is counted when and only when there are jobs submitted to firmware.
*
@@ -28,19 +35,112 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
struct ivpu_device *vdev = to_ivpu_device(drm);
ktime_t total, now = 0;
- xa_lock(&vdev->submitted_jobs_xa);
+ mutex_lock(&vdev->submitted_jobs_lock);
+
total = vdev->busy_time;
if (!xa_empty(&vdev->submitted_jobs_xa))
now = ktime_sub(ktime_get(), vdev->busy_start_ts);
- xa_unlock(&vdev->submitted_jobs_xa);
+ mutex_unlock(&vdev->submitted_jobs_lock);
return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
}
static DEVICE_ATTR_RO(npu_busy_time_us);
+/**
+ * DOC: npu_memory_utilization
+ *
+ * The npu_memory_utilization attribute reports the current NPU memory utilization, in bytes.
+ *
+ */
+static ssize_t
+npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ struct ivpu_bo *bo;
+ u64 total_npu_memory = 0;
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
+ total_npu_memory += bo->base.base.size;
+ mutex_unlock(&vdev->bo_list_lock);
+
+ return sysfs_emit(buf, "%lld\n", total_npu_memory);
+}
+
+static DEVICE_ATTR_RO(npu_memory_utilization);
+
+/**
+ * DOC: sched_mode
+ *
+ * The sched_mode attribute reports the current NPU scheduling mode.
+ *
+ * It returns one of the following strings:
+ * - "HW" - Hardware Scheduler mode
+ * - "OS" - Operating System Scheduler mode
+ *
+ */
+static ssize_t
+sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+
+ return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
+}
+
+static DEVICE_ATTR_RO(sched_mode);
+
+/**
+ * DOC: npu_max_frequency_mhz
+ *
+ * The npu_max_frequency_mhz attribute shows the maximum frequency, in MHz,
+ * of the NPU's data processing unit.
+ */
+static ssize_t
+npu_max_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = ivpu_hw_dpu_max_freq_get(vdev);
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_max_frequency_mhz);
+
+/**
+ * DOC: npu_current_frequency_mhz
+ *
+ * The npu_current_frequency_mhz attribute shows the current frequency, in MHz,
+ * of the NPU's data processing unit.
+ */
+static ssize_t
+npu_current_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = 0;
+
+ /* Read frequency only if device is active, otherwise frequency is 0 */
+ if (pm_runtime_get_if_active(vdev->drm.dev) > 0) {
+ freq = ivpu_hw_dpu_freq_get(vdev);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+ }
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_current_frequency_mhz);
+
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
+ &dev_attr_npu_memory_utilization.attr,
+ &dev_attr_sched_mode.attr,
+ &dev_attr_npu_max_frequency_mhz.attr,
+ &dev_attr_npu_current_frequency_mhz.attr,
NULL,
};
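
The new attributes surface as plain text files under the device's sysfs directory. A minimal userspace reader (the PCI address in the path is a placeholder, not a real device):

	#include <stdio.h>

	int main(void)
	{
		/* Path is illustrative; substitute the NPU's PCI address */
		FILE *f = fopen("/sys/bus/pci/devices/0000:00:0b.0/npu_current_frequency_mhz", "r");
		unsigned long mhz;

		if (f && fscanf(f, "%lu", &mhz) == 1)
			printf("NPU DPU frequency: %lu MHz\n", mhz);
		if (f)
			fclose(f);
		return 0;
	}

A reading of 0 is expected while the device is runtime-suspended, per the pm_runtime_get_if_active() guard above.
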
diff --git a/drivers/accel/ivpu/ivpu_trace.h b/drivers/accel/ivpu/ivpu_trace.h
new file mode 100644
index 000000000000..eb792038e701
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#if !defined(__IVPU_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IVPU_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include "ivpu_drv.h"
+#include "ivpu_job.h"
+#include "vpu_jsm_api.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_ipc.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vpu
+#define TRACE_INCLUDE_FILE ivpu_trace
+
+TRACE_EVENT(pm,
+ TP_PROTO(const char *event),
+ TP_ARGS(event),
+ TP_STRUCT__entry(__field(const char *, event)),
+ TP_fast_assign(__entry->event = event;),
+ TP_printk("%s", __entry->event)
+);
+
+TRACE_EVENT(job,
+ TP_PROTO(const char *event, struct ivpu_job *job),
+ TP_ARGS(event, job),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(u32, ctx_id)
+ __field(u32, engine_id)
+ __field(u32, job_id)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->ctx_id = job->file_priv->ctx.id;
+ __entry->engine_id = job->engine_idx;
+ __entry->job_id = job->job_id;),
+ TP_printk("%s context:%d engine:%d job:%d",
+ __entry->event,
+ __entry->ctx_id,
+ __entry->engine_id,
+ __entry->job_id)
+);
+
+TRACE_EVENT(jsm,
+ TP_PROTO(const char *event, struct vpu_jsm_msg *msg),
+ TP_ARGS(event, msg),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(const char *, type)
+ __field(enum vpu_ipc_msg_status, status)
+ __field(u32, request_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->type = ivpu_jsm_msg_type_to_str(msg->type);
+ __entry->status = msg->status;
+ __entry->request_id = msg->request_id;
+ __entry->result = msg->result;),
+ TP_printk("%s type:%s, status:%#x, id:%#x, result:%#x",
+ __entry->event,
+ __entry->type,
+ __entry->status,
+ __entry->request_id,
+ __entry->result)
+);
+
+#endif /* __IVPU_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
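
Each TRACE_EVENT() above expands into a trace_<name>() call that the driver invokes at the instrumented sites; the trace_pm() pairs added to the PM callbacks earlier in this patch are the canonical users. Call-side sketch:

	#include "ivpu_trace.h"

	static void example_suspend_path(struct ivpu_device *vdev)
	{
		trace_pm("suspend");		/* emitted under the "vpu" trace system */
		/* ... actual suspend work ... */
		trace_pm("suspend done");
	}

With tracing enabled, the events appear under the "vpu" system (per TRACE_SYSTEM above), e.g. /sys/kernel/tracing/events/vpu/.
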
diff --git a/drivers/accel/ivpu/ivpu_trace_points.c b/drivers/accel/ivpu/ivpu_trace_points.c
new file mode 100644
index 000000000000..f8fb99de0de3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace_points.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "ivpu_trace.h"
+#endif
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 82954b91b748..218468bbbcad 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2023, Intel Corporation.
+ * Copyright (c) 2020-2024, Intel Corporation.
*/
#ifndef VPU_BOOT_API_H
#define VPU_BOOT_API_H
/*
- * =========== FW API version information beginning ================
- * The bellow values will be used to construct the version info this way:
+ * The below values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
* VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes
@@ -27,19 +26,18 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 24
+#define VPU_BOOT_API_VER_MINOR 28
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 0
+#define VPU_BOOT_API_VER_PATCH 3
/*
* Index in the API version table
* Must be unique for each API
*/
#define VPU_BOOT_API_VER_INDEX 0
-/* ------------ FW API version information end ---------------------*/
#pragma pack(push, 4)
@@ -78,8 +76,15 @@ struct vpu_firmware_header {
* submission queue size and device capabilities.
*/
u32 preemption_buffer_2_size;
+ /*
+ * Maximum preemption buffer size that the FW can use: no need for the host
+ * driver to allocate more space than that specified by these fields.
+ * A value of 0 means no declared limit.
+ */
+ u32 preemption_buffer_1_max_size;
+ u32 preemption_buffer_2_max_size;
/* Space reserved for future preemption-related fields. */
- u32 preemption_reserved[6];
+ u32 preemption_reserved[4];
/* FW image read only section start address, 4KB aligned */
u64 ro_section_start_address;
/* FW image read only section size, 4KB aligned */
@@ -136,7 +141,7 @@ enum vpu_trace_destination {
/*
* Processor bit shifts (for loggable HW components).
*/
-#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_RESERVED 0
#define VPU_TRACE_PROC_BIT_LRT 1
#define VPU_TRACE_PROC_BIT_LNN 2
#define VPU_TRACE_PROC_BIT_SHV_0 3
@@ -164,8 +169,6 @@ enum vpu_trace_destination {
/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST
struct vpu_boot_l2_cache_config {
u8 use;
@@ -199,6 +202,17 @@ struct vpu_warm_boot_section {
*/
#define POWER_PROFILE_SURVIVABILITY 0x1
+/**
+ * Enum for dvfs_mode boot param.
+ */
+enum vpu_governor {
+ VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
+ VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
+ VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
+ VPU_GOV_POWER_SAVE = 3, /* Power save governor */
+ VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
+};
+
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
@@ -301,7 +315,14 @@ struct vpu_boot_params {
u32 temp_sensor_period_ms;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
- /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+ /**
+ * DVFS Mode:
+ * 0 - Default, DVFS mode selected by the firmware
+ * 1 - Max Performance
+ * 2 - On Demand
+ * 3 - Power Save
+ * 4 - On Demand Priority Aware
+ */
u32 dvfs_mode;
/**
* Depending on DVFS Mode:
@@ -332,8 +353,8 @@ struct vpu_boot_params {
u64 d0i3_entry_vpu_ts;
/*
* The system time of the host operating system in microseconds.
- * E.g the number of microseconds since 1st of January 1970, or whatever date the
- * host operating system uses to maintain system time.
+ * E.g. the number of microseconds since 1st of January 1970, or whatever
+ * date the host operating system uses to maintain system time.
* This value will be used to track system time on the VPU.
* The KMD is required to update this value on every VPU reset.
*/
@@ -382,10 +403,7 @@ struct vpu_boot_params {
u32 pad6[734];
};
-/*
- * Magic numbers set between host and vpu to detect corruptio of tracing init
- */
-
+/* Magic numbers set between host and vpu to detect corruption of tracing init */
#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
/* Tracing buffer message format definitions */
@@ -405,7 +423,9 @@ struct vpu_tracing_buffer_header {
u32 host_canary_start;
/* offset from start of buffer for trace entries */
u32 read_index;
- u32 pad_to_cache_line_size_0[14];
+ /* keeps track of wrapping on the reader side */
+ u32 read_wrap_count;
+ u32 pad_to_cache_line_size_0[13];
/* End of first cache line */
/**
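
The new read_wrap_count pairs with read_index to give the reader an unambiguous position in the circular tracing buffer. A sketch of the implied reader-side update (an assumption about usage; only the field layout comes from the header above):

	static void example_advance_read(struct vpu_tracing_buffer_header *hdr,
					 u32 entry_size, u32 buf_size)
	{
		hdr->read_index += entry_size;
		if (hdr->read_index >= buf_size) {
			hdr->read_index -= buf_size;
			hdr->read_wrap_count++;	/* one full pass over the buffer */
		}
	}
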
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 33f462b1a25d..4b6b2b3d2583 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,7 +22,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 16
+#define VPU_JSM_API_VER_MINOR 29
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -36,7 +36,7 @@
/*
* Number of Priority Bands for Hardware Scheduling
- * Bands: RealTime, Focus, Normal, Idle
+ * Bands: Idle(0), Normal(1), Focus(2), RealTime(3)
*/
#define VPU_HWS_NUM_PRIORITY_BANDS 4
@@ -53,8 +53,7 @@
* Engine indexes.
*/
#define VPU_ENGINE_COMPUTE 0
-#define VPU_ENGINE_COPY 1
-#define VPU_ENGINE_NB 2
+#define VPU_ENGINE_NB 1
/*
* VPU status values.
@@ -74,6 +73,7 @@
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
/* Job status returned when the job was preempted mid-inference */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
/*
* Host <-> VPU IPC channels.
@@ -86,18 +86,60 @@
/*
* Job flags bit masks.
*/
-#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
-#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
+enum {
+ /*
+ * Null submission mask.
+ * When set, batch buffer's commands are not processed but returned as
+ * successful immediately, except fences and timestamps.
+ * When cleared, batch buffer's commands are processed normally.
+ * Used for testing and profiling purposes.
+ */
+ VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U),
+ /*
+ * Inline command mask.
+ * When set, the object in job queue is an inline command (see struct vpu_inline_cmd below).
+ * When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below).
+ */
+ VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U),
+ /*
+ * VPU private data mask.
+ * Reserved for the VPU to store private data about the job (or inline command)
+ * while being processed.
+ */
+ VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U
+};
/*
- * Sizes of the reserved areas in jobs, in bytes.
+ * Job queue flags bit masks.
*/
-#define VPU_JOB_RESERVED_BYTES 8
+enum {
+ /*
+ * No job done notification mask.
+ * When set, indicates that no job done notification should be sent for any
+ * job from this queue. When cleared, indicates that job done notification
+ * should be sent for every job completed from this queue.
+ */
+ VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U),
+ /*
+ * Native fence usage mask.
+ * When set, indicates that job queue uses native fences (as inline commands
+ * in job queue). Such queues may also use legacy fences (as commands in batch buffers).
+ * When cleared, indicates the job queue only uses legacy fences.
+ * NOTES:
+ * 1. For queues using native fences, VPU expects that all jobs in the queue
+ * are immediately followed by an inline command object. This object is expected
+ * to be a fence signal command in most cases, but can also be a NOP in case the host
+	 * does not need per-job fence signalling. Other inline command objects can be
+ * inserted between "job and inline command" pairs.
+ * 2. Native fence queues are only supported on VPU 40xx onwards.
+ */
+ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-/*
- * Sizes of the reserved areas in job queues, in bytes.
- */
-#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+ /*
+ * Enable turbo mode for testing NPU performance; not recommended for regular usage.
+ */
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+};
/*
* Max length (including trailing NULL char) of trace entity name (e.g., the
@@ -141,23 +183,114 @@
#define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
/*
+ * Inline commands types.
+ */
+/*
+ * NOP.
+ * VPU does nothing other than consuming the inline command object.
+ */
+#define VPU_INLINE_CMD_TYPE_NOP 0x0
+/*
+ * Fence wait.
+ * VPU waits for the fence current value to reach monitored value.
+ * Fence wait operations are executed upon job dispatching. While waiting for
+ * the fence to be satisfied, VPU blocks fetching of the next objects in the queue.
+ * Jobs present in the queue prior to the fence wait object may be processed
+ * concurrently.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_WAIT 0x1
+/*
+ * Fence signal.
+ * VPU sets the fence current value to the provided value. If new current value
+ * is equal to or higher than monitored value, VPU sends fence signalled notification
+ * to the host. Fence signal operations are executed upon completion of all the jobs
+ * present in the queue prior to them, and in-order relative to each other in the queue.
+ * But jobs in-between them may be processed concurrently and may complete out-of-order.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
+
+/*
+ * Job scheduling priority bands for both hardware scheduling and OS scheduling.
+ */
+enum vpu_job_scheduling_priority_band {
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
+};
+
+/*
* Job format.
+ * Jobs define the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
- u32 job_id; /**< Job ID */
- u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
- u64 root_page_table_addr; /**< Address of root page table to use for this job */
- u64 root_page_table_update_counter; /**< Page tables update events counter */
- u64 primary_preempt_buf_addr;
+ /**< Address of VPU commands batch buffer */
+ u64 batch_buf_addr;
+ /**< Job ID */
+ u32 job_id;
+ /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ u32 flags;
+ /**
+ * Doorbell ring timestamp taken by KMD from SoC's global system clock, in
+ * microseconds. NPU can convert this value to its own fixed clock's timebase,
+ * to match other profiling timestamps.
+ */
+ u64 doorbell_timestamp;
+ /**< Extra id for job tracking, used only in the firmware perf traces */
+ u64 host_tracking_id;
/**< Address of the primary preemption buffer to use for this job */
- u32 primary_preempt_buf_size;
+ u64 primary_preempt_buf_addr;
/**< Size of the primary preemption buffer to use for this job */
- u32 secondary_preempt_buf_size;
+ u32 primary_preempt_buf_size;
/**< Size of secondary preemption buffer to use for this job */
- u64 secondary_preempt_buf_addr;
+ u32 secondary_preempt_buf_size;
/**< Address of secondary preemption buffer to use for this job */
- u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+ u64 secondary_preempt_buf_addr;
+ u64 reserved_0;
+};
+
+/*
+ * Inline command format.
+ * Inline commands are the commands executed at scheduler level (typically,
+ * synchronization directives). Inline command and job objects must be of
+ * the same size and have flags field at same offset.
+ */
+struct vpu_inline_cmd {
+ u64 reserved_0;
+ /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ u32 type;
+ /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ u32 flags;
+ /* Inline command payload. Depends on inline command type. */
+ union {
+ /* Fence (wait and signal) commands' payload. */
+ struct {
+ /* Fence object handle. */
+ u64 fence_handle;
+ /* User VA of the current fence value. */
+ u64 current_value_va;
+ /* User VA of the monitored fence value (read-only). */
+ u64 monitored_value_va;
+ /* Value to wait for or write in fence location. */
+ u64 value;
+ /* User VA of the log buffer in which to add log entry on completion. */
+ u64 log_buffer_va;
+ /* NPU private data. */
+ u64 npu_private_data;
+ } fence;
+ /* Other commands do not have a payload. */
+ /* Payload definition for future inline commands can be inserted here. */
+ u64 reserved_1[6];
+ } payload;
+};
+
+/*
+ * Job queue slots can be populated either with job objects or inline command objects.
+ */
+union vpu_jobq_slot {
+ struct vpu_job_queue_entry job;
+ struct vpu_inline_cmd inline_cmd;
};
/*
@@ -167,7 +300,21 @@ struct vpu_job_queue_header {
u32 engine_idx;
u32 head;
u32 tail;
- u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+ u32 flags;
+ /* Set to 1 to indicate priority_band field is valid */
+ u32 priority_band_valid;
+ /*
+ * Priority for the work of this job queue, valid only if the HWS is NOT used
+ * and the `priority_band_valid` is set to 1. It is applied only during
+ * the VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the `priority_band` to optimize the power
+ * management logic, but it will not affect the order of jobs.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
+ u32 priority_band;
+	/* Within the realtime band, assigns a further priority level, limited to the 0..31 range */
+ u32 realtime_priority_level;
+ u32 reserved_0[9];
};
/*
@@ -175,7 +322,7 @@ struct vpu_job_queue_header {
*/
struct vpu_job_queue {
struct vpu_job_queue_header header;
- struct vpu_job_queue_entry job[];
+ union vpu_jobq_slot slot[];
};
/**
@@ -197,9 +344,7 @@ enum vpu_trace_entity_type {
struct vpu_hws_log_buffer_header {
/* Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU overwrites the 0th entry;
- * initialised by host to 0.
- */
+ /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
/*
* This is the number of buffers that can be stored in the log buffer provided by the host.
@@ -230,14 +375,80 @@ struct vpu_hws_log_buffer_entry {
u64 operation_data[2];
};
+/* Native fence log buffer types. */
+enum vpu_hws_native_fence_log_type {
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1,
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
+};
+
+/* HWS native fence log buffer header. */
+struct vpu_hws_native_fence_log_header {
+ union {
+ struct {
+ /* Index of the first free entry in buffer. */
+ u32 first_free_entry_idx;
+ /* Incremented each time NPU wraps around the buffer to write next entry. */
+ u32 wraparound_count;
+ };
+ /* Field allowing atomic update of both fields above. */
+ u64 atomic_wraparound_and_entry_idx;
+ };
+ /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ u64 type;
+ /* Allocated number of entries in the log buffer. */
+ u64 entry_nb;
+ u64 reserved[2];
+};
+
+/* Native fence log operation types. */
+enum vpu_hws_native_fence_log_op {
+ VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
+ VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
+};
+
+/* HWS native fence log entry. */
+struct vpu_hws_native_fence_log_entry {
+ /* Newly signaled/unblocked fence value. */
+ u64 fence_value;
+ /* Native fence object handle to which this operation belongs. */
+ u64 fence_handle;
+ /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ u64 op_type;
+ u64 reserved_0;
+ /*
+ * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
+ * wait was started (in NPU SysTime).
+ */
+ u64 fence_wait_start_ts;
+ u64 reserved_1;
+ /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ u64 fence_end_ts;
+};
+
+/* Native fence log buffer. */
+struct vpu_hws_native_fence_log_buffer {
+ struct vpu_hws_native_fence_log_header header;
+ struct vpu_hws_native_fence_log_entry entry[];
+};
+
/*
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
+
/* IPC Host -> Device, Async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ /**
+ * Preempt engine. The NPU stops (preempts) all the jobs currently
+ * executing on the target engine making the engine become idle and ready to
+ * execute new jobs.
+ * NOTE: The NPU does not remove unstarted jobs (if any) from job queues of
+ * the target engine, but it stops processing them (until the queue doorbell
+	 * is rung again); the host is responsible for resetting the job queue, either
+ * after preemption or when resubmitting jobs to the queue.
+ */
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
VPU_JSM_MSG_REGISTER_DB = 0x1102,
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
@@ -323,9 +534,10 @@ enum vpu_ipc_msg_type {
* NOTE: Please introduce new ASYNC commands before this one. *
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
+
/* IPC Host -> Device, General commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
- VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
* Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
@@ -335,8 +547,12 @@ enum vpu_ipc_msg_type {
* Perform the save procedure for the D0i3 entry
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
+
/* IPC Device -> Host, Job completion */
VPU_JSM_MSG_JOB_DONE = 0x2100,
+ /* IPC Device -> Host, Fence signalled */
+ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
+
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
@@ -422,6 +638,7 @@ enum vpu_ipc_msg_type {
* NOTE: Please introduce new ASYNC responses before this one. *
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
+
/* IPC Device -> Host, General command completion */
VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
@@ -577,12 +794,22 @@ struct vpu_jsm_metric_streamer_update {
/** Metric group mask that identifies metric streamer instance. */
u64 metric_group_mask;
/**
- * Address and size of the buffer where the VPU will write metric data. If
- * the buffer address is 0 or same as the currently used buffer the VPU will
- * continue writing metric data to the current buffer. In this case the
- * buffer size is ignored and the size of the current buffer is unchanged.
- * If the address is non-zero and differs from the current buffer address the
- * VPU will immediately switch data collection to the new buffer.
+ * Address and size of the buffer where the VPU will write metric data.
+	 * These fields determine what the update operation does:
+	 * 1. the client needs information about the number of collected samples and
+	 * the amount of data written to the current buffer
+	 * 2. the client wants to switch to a new buffer
+ *
+ * Case 1. is identified by the buffer address being 0 or the same as the
+ * currently used buffer address. In this case the buffer size is ignored and
+ * the size of the current buffer is unchanged. The VPU will return an update
+ * in the vpu_jsm_metric_streamer_done structure. The internal writing position
+ * into the buffer is not changed.
+ *
+ * Case 2. is identified by the address being non-zero and differs from the
+ * current buffer address. The VPU will immediately switch data collection to
+ * the new buffer. Then the VPU will return an update in the
+ * vpu_jsm_metric_streamer_done structure.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -600,11 +827,6 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
-struct vpu_ipc_msg_payload_blob_deinit {
- /* 64-bit unique ID for the blob to be de-initialized. */
- u64 blob_id;
-};
-
struct vpu_ipc_msg_payload_job_done {
/* Engine to which the job was submitted. */
u32 engine_idx;
@@ -622,6 +844,21 @@ struct vpu_ipc_msg_payload_job_done {
u64 cmdq_id;
};
+/*
+ * Notification message upon native fence signalling.
+ * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
+ */
+struct vpu_ipc_msg_payload_native_fence_signalled {
+ /* Engine ID. */
+ u32 engine_idx;
+ /* Host SSID. */
+ u32 host_ssid;
+ /* CMDQ ID */
+ u64 cmdq_id;
+ /* Fence object handle. */
+ u64 fence_handle;
+};
+
struct vpu_jsm_engine_reset_context {
/* Host SSID */
u32 host_ssid;
@@ -700,11 +937,6 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-struct vpu_ipc_msg_payload_blob_deinit_done {
- /* 64-bit unique ID for the blob de-initialized. */
- u64 blob_id;
-};
-
/* HWS priority band setup request / response */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
@@ -715,6 +947,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Default quantum in 100ns units for scheduling across processes
* within a priority band
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
@@ -727,8 +960,10 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* in situations when it's starved by the focus band.
*/
u32 normal_band_percentage;
- /* Reserved */
- u32 reserved_0;
+ /*
+ * TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
+ */
+ u32 tdr_timeout;
};
/*
@@ -794,7 +1029,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
- /* Priority band to assign to work of this context */
+ /*
+ * Priority band to assign to work of this context.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
u32 priority_band;
/* Inside realtime band assigns a further priority */
u32 realtime_priority_level;
@@ -802,7 +1040,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
s32 in_process_priority;
/* Zero padding / Reserved */
u32 reserved_1;
- /* Context quantum relative to other contexts of same priority in the same process */
+ /*
+ * Context quantum relative to other contexts of same priority in the same process
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
+ */
u64 context_quantum;
/* Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
@@ -869,9 +1110,7 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
*/
u64 notify_index;
/*
- * Enable extra events to be output to log for debug of scheduling algorithm.
- * Interpreted by VPU as a boolean to enable or disable, expected values are
- * 0 and 1.
+	 * This field is deprecated; it will be removed once the KMD is updated to
+	 * support its removal.
*/
u32 enable_extra_events;
/* Zero Padding */
@@ -1243,10 +1482,10 @@ union vpu_ipc_msg_payload {
struct vpu_jsm_metric_streamer_start metric_streamer_start;
struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
struct vpu_jsm_metric_streamer_update metric_streamer_update;
- struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
struct vpu_ipc_msg_payload_ssid_release ssid_release;
struct vpu_jsm_hws_register_db hws_register_db;
struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled;
struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
struct vpu_ipc_msg_payload_register_db_done register_db_done;
@@ -1254,7 +1493,6 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
struct vpu_jsm_metric_streamer_done metric_streamer_done;
- struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
struct vpu_ipc_msg_payload_trace_config trace_config;
struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
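
The comment on struct vpu_inline_cmd above states the layout invariant that makes union vpu_jobq_slot work: both slot types are the same size and keep flags at the same offset, so the scheduler can test VPU_JOB_FLAGS_INLINE_CMD_MASK before knowing which member it is reading. Expressed as compile-time checks (a sketch; the driver itself may or may not carry such asserts):

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static_assert(sizeof(struct vpu_job_queue_entry) == sizeof(struct vpu_inline_cmd));
	static_assert(offsetof(struct vpu_job_queue_entry, flags) ==
		      offsetof(struct vpu_inline_cmd, flags));

Both hold for the definitions above: each slot is 64 bytes, and flags sits at offset 12 in both.
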
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index a9f866230058..5e405a19c157 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -8,7 +8,6 @@ config DRM_ACCEL_QAIC
depends on DRM_ACCEL
depends on PCI && HAS_IOMEM
depends on MHI_BUS
- depends on MMU
select CRC32
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 35e883515629..1106b876f737 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,6 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
+ qaic_ras.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index ada9b1eb0787..13a14c6c6168 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,6 +20,11 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
+static const char *fw_image_paths[FAMILY_MAX] = {
+ [FAMILY_AIC100] = "qcom/aic100/sbl.bin",
+ [FAMILY_AIC200] = "qcom/aic200/sbl.bin",
+};
+
static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
@@ -405,6 +410,329 @@ static const struct mhi_channel_config aic100_channels[] = {
.auto_queue = false,
.wake_capable = false,
},
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
+};
+
+static const struct mhi_channel_config aic200_channels[] = {
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 0,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 1,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 2,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 3,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 6,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 7,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 10,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 11,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 12,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 13,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 14,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 15,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 16,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 17,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
};
static struct mhi_event_config aic100_events[] = {
@@ -422,16 +750,44 @@ static struct mhi_event_config aic100_events[] = {
},
};
-static struct mhi_controller_config aic100_config = {
- .max_channels = 128,
- .timeout_ms = 0, /* controlled by mhi_timeout */
- .buf_len = 0,
- .num_channels = ARRAY_SIZE(aic100_channels),
- .ch_cfg = aic100_channels,
- .num_events = ARRAY_SIZE(aic100_events),
- .event_cfg = aic100_events,
- .use_bounce_buf = false,
- .m2_no_db = false,
+static struct mhi_event_config aic200_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 0,
+ .channel = U32_MAX,
+ .priority = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config mhi_cntrl_configs[] = {
+ [FAMILY_AIC100] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic100_channels),
+ .ch_cfg = aic100_channels,
+ .num_events = ARRAY_SIZE(aic100_events),
+ .event_cfg = aic100_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
+ [FAMILY_AIC200] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic200_channels),
+ .ch_cfg = aic200_channels,
+ .num_events = ARRAY_SIZE(aic200_events),
+ .event_cfg = aic200_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
};
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
@@ -513,8 +869,9 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi)
+ int mhi_irq, bool shared_msi, int family)
{
+ struct mhi_controller_config mhi_config = mhi_cntrl_configs[family];
struct mhi_controller *mhi_cntrl;
int ret;
@@ -549,11 +906,18 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
mhi_cntrl->irq_flags = IRQF_SHARED;
- mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
+ mhi_cntrl->fw_image = fw_image_paths[family];
+
+ if (family == FAMILY_AIC200) {
+ mhi_cntrl->name = "AIC200";
+ mhi_cntrl->seg_len = SZ_512K;
+ } else {
+ mhi_cntrl->name = "AIC100";
+ }
/* use latest configured timeout */
- aic100_config.timeout_ms = mhi_timeout_ms;
- ret = mhi_register_controller(mhi_cntrl, &aic100_config);
+ mhi_config.timeout_ms = mhi_timeout_ms;
+ ret = mhi_register_controller(mhi_cntrl, &mhi_config);
if (ret) {
pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
return ERR_PTR(ret);
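
Editor's note: the controller config is now selected by family from a table and copied to the stack before registration, so the per-probe timeout_ms update no longer mutates shared static data the way the old aic100_config assignment did. A minimal sketch of the pattern (names hypothetical):

	/* Sketch: copy a shared config so per-device tweaks stay local. */
	struct cfg { unsigned int timeout_ms; };

	static const struct cfg table[] = {
		[0] = { .timeout_ms = 1000 },
		[1] = { .timeout_ms = 2000 },
	};

	static void register_one(int family, unsigned int module_timeout)
	{
		struct cfg local = table[family];	/* struct copy */

		local.timeout_ms = module_timeout;	/* table[] is untouched */
		/* ... pass &local to the registration call ... */
	}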
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 500e7f4af2af..8939f6ae185e 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi);
+ int mhi_irq, bool shared_msi, int family);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc6..c31081e42cee 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -32,6 +32,12 @@
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
+enum aic_families {
+ FAMILY_AIC100,
+ FAMILY_AIC200,
+ FAMILY_MAX,
+};
+
enum __packed dev_states {
/* Device is offline or will be very soon */
QAIC_OFFLINE,
@@ -113,10 +119,10 @@ struct qaic_device {
struct pci_dev *pdev;
/* Req. ID of request that will be queued next in MHI control device */
u32 next_seq_num;
- /* Base address of bar 0 */
- void __iomem *bar_0;
- /* Base address of bar 2 */
- void __iomem *bar_2;
+ /* Base address of the MHI bar */
+ void __iomem *bar_mhi;
+ /* Base address of the DBCs bar */
+ void __iomem *bar_dbc;
/* Controller structure for MHI devices */
struct mhi_controller *mhi_cntrl;
/* MHI control channel device */
@@ -161,6 +167,14 @@ struct qaic_device {
struct workqueue_struct *bootlog_wq;
/* Synchronizes access of pages in MHI bootlog device */
struct mutex bootlog_mutex;
+ /* MHI RAS channel device */
+ struct mhi_device *ras_ch;
+ /* Correctable error count */
+ unsigned int ce_count;
+ /* Uncorrectable error count */
+ unsigned int ue_count;
+ /* Uncorrectable non-fatal error count */
+ unsigned int ue_nf_count;
};
struct qaic_drm_device {
@@ -207,8 +221,6 @@ struct qaic_bo {
bool sliced;
/* Request ID of this BO if it is queued for execution */
u16 req_id;
- /* Handle assigned to this BO */
- u32 handle;
/* Wait on this for completion of DMA transfer of this BO */
struct completion xfer_done;
/*
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..797289e9d780 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
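
Editor's note: the new check replaces a plain offset + size comparison, which can wrap around for large u64 inputs and falsely pass the bounds test. check_add_overflow() from linux/overflow.h reports the wrap explicitly; a self-contained sketch of the same validation:

	#include <linux/overflow.h>
	#include <linux/types.h>

	/* Sketch: reject a slice whose offset + size wraps or exceeds the BO. */
	static bool slice_in_bounds(u64 offset, u64 size, u64 total_size)
	{
		u64 end;

		if (check_add_overflow(offset, size, &end))
			return false;	/* the u64 addition wrapped */

		return end <= total_size;
	}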
@@ -604,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
struct scatterlist *sg;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
@@ -625,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
@@ -726,7 +731,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -865,7 +869,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
@@ -889,7 +893,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
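
Editor's note: the repeated open-coded import_attach tests become drm_gem_is_imported(), the drm_gem.h helper for detecting PRIME-imported objects. Its exact body has varied across kernel versions, so treat this as an approximation rather than the current definition:

	/* Approximate shape of the helper in include/drm/drm_gem.h. */
	static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
	{
		/* Imported objects carry an attachment to the exporter's dma-buf. */
		return !!obj->import_attach;
	}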
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index 20b653d99e52..a991b8198dc4 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -64,20 +64,9 @@ static int bootlog_show(struct seq_file *s, void *unused)
return 0;
}
-static int bootlog_fops_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bootlog_show, inode->i_private);
-}
-
-static const struct file_operations bootlog_fops = {
- .owner = THIS_MODULE,
- .open = bootlog_fops_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bootlog);
-static int read_dbc_fifo_size(struct seq_file *s, void *unused)
+static int fifo_size_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
@@ -85,20 +74,9 @@ static int read_dbc_fifo_size(struct seq_file *s, void *unused)
return 0;
}
-static int fifo_size_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_fifo_size, inode->i_private);
-}
-
-static const struct file_operations fifo_size_fops = {
- .owner = THIS_MODULE,
- .open = fifo_size_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fifo_size);
-static int read_dbc_queued(struct seq_file *s, void *unused)
+static int queued_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
u32 tail = 0, head = 0;
@@ -115,18 +93,7 @@ static int read_dbc_queued(struct seq_file *s, void *unused)
return 0;
}
-static int queued_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_queued, inode->i_private);
-}
-
-static const struct file_operations queued_fops = {
- .owner = THIS_MODULE,
- .open = queued_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(queued);
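
Editor's note: each of the three removed open/fops boilerplate blocks collapses into DEFINE_SHOW_ATTRIBUTE(name), which only requires a matching name##_show() seq_file callback; hence the renames to fifo_size_show() and queued_show() above. The macro in include/linux/seq_file.h expands to approximately:

	#define DEFINE_SHOW_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		return single_open(file, __name ## _show, inode->i_private);	\
	}									\
										\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	}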
void qaic_debugfs_init(struct qaic_drm_device *qddev)
{
@@ -273,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -286,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
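
Editor's note: the dropped flush_workqueue() calls were redundant, since destroy_workqueue() already drains all queued and in-flight work before tearing the workqueue down, so the teardown reduces to a single call.

	#include <linux/workqueue.h>

	/* Sketch: destroy_workqueue() implies a drain; no explicit flush first. */
	static void teardown(struct workqueue_struct *wq)
	{
		destroy_workqueue(wq);	/* waits for pending work to finish */
	}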
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index bf10156c334e..e31bcb0ecfc9 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -29,17 +29,52 @@
#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
+#include "qaic_ras.h"
#include "qaic_timesync.h"
#include "sahara.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
-#define PCI_DEV_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC080 0xa080
+#define PCI_DEVICE_ID_QCOM_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC200 0xa110
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0
+struct qaic_device_config {
+ /* Indicates the AIC family the device belongs to */
+ int family;
+ /* A bitmask representing the available BARs */
+ int bar_mask;
+ /* An index value used to identify the MHI controller BAR */
+ unsigned int mhi_bar_idx;
+ /* An index value used to identify the DBCs BAR */
+ unsigned int dbc_bar_idx;
+};
+
+static const struct qaic_device_config aic080_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic100_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic200_config = {
+ .family = FAMILY_AIC200,
+ .bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 1,
+ .dbc_bar_idx = 2,
+};
+
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
@@ -53,12 +88,12 @@ static void qaicm_wq_release(struct drm_device *dev, void *res)
destroy_workqueue(wq);
}
-static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
struct workqueue_struct *wq;
int ret;
- wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+ wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
if (!wq)
return ERR_PTR(-ENOMEM);
ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
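
Editor's note: feeding a caller-supplied string to alloc_workqueue()'s printf-style format parameter would misbehave if the name ever contained a '%'; the fix pins the format to "%s" and passes the name as an argument instead. The safe pattern in isolation:

	#include <linux/workqueue.h>

	/* Sketch: never pass external strings as a printf-style format. */
	static struct workqueue_struct *make_wq(const char *name)
	{
		return alloc_workqueue("%s", WQ_UNBOUND, 0, name);
	}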
@@ -207,7 +242,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
@@ -352,7 +386,8 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct qaic_device *create_qdev(struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
@@ -365,12 +400,10 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC100) {
- qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
- if (!qdev->dbc)
- return NULL;
- }
+ qdev->num_dbc = 16;
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ if (!qdev->dbc)
+ return NULL;
qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
if (IS_ERR(qddev))
@@ -426,17 +459,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return qdev;
}
-static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
int bars;
int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */
- if (bars != (BIT(0) | BIT(2) | BIT(4))) {
- pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
- __func__, bars);
+ if (bars != config->bar_mask) {
+ pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
+ __func__, config->bar_mask, bars);
return -EINVAL;
}
@@ -449,13 +483,13 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
return ret;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
- if (IS_ERR(qdev->bar_0))
- return PTR_ERR(qdev->bar_0);
+ qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
+ if (IS_ERR(qdev->bar_mhi))
+ return PTR_ERR(qdev->bar_mhi);
- qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
- if (IS_ERR(qdev->bar_2))
- return PTR_ERR(qdev->bar_2);
+ qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
+ if (IS_ERR(qdev->bar_dbc))
+ return PTR_ERR(qdev->bar_dbc);
/* Managed release since we use pcim_enable_device above */
pci_set_master(pdev);
@@ -465,14 +499,15 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
+ int irq_count = qdev->num_dbc + 1;
int mhi_irq;
int ret;
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret == -ENOSPC) {
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0)
return ret;
@@ -485,7 +520,8 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
* interrupted, it shouldn't race with itself.
*/
qdev->single_msi = true;
- pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
+ irq_count);
} else if (ret < 0) {
return ret;
}
@@ -515,21 +551,22 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
struct qaic_device *qdev;
int mhi_irq;
int ret;
int i;
- qdev = create_qdev(pdev, id);
+ qdev = create_qdev(pdev, config);
if (!qdev)
return -ENOMEM;
- ret = init_pci(qdev, pdev);
+ ret = init_pci(qdev, pdev, config);
if (ret)
return ret;
for (i = 0; i < qdev->num_dbc; ++i)
- qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
+ qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
if (mhi_irq < 0)
@@ -539,8 +576,8 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
- qdev->single_msi);
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
+ qdev->single_msi, config->family);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
@@ -607,7 +644,9 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ { PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);
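
Editor's note: PCI_DEVICE_DATA() pastes its first two arguments into PCI_VENDOR_ID_##vend and PCI_DEVICE_ID_##vend##_##dev, which is why the old PCI_DEV_AIC100 define had to be renamed to PCI_DEVICE_ID_QCOM_AIC100 earlier in this patch. The macro in include/linux/pci.h is roughly:

	#define PCI_DEVICE_DATA(vend, dev, data) \
		.vendor = PCI_VENDOR_ID_##vend, \
		.device = PCI_DEVICE_ID_##vend##_##dev, \
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
		.driver_data = (kernel_ulong_t)(data)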
@@ -657,6 +696,10 @@ static int __init qaic_init(void)
if (ret)
pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+ ret = qaic_ras_register();
+ if (ret)
+ pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+
return 0;
free_mhi:
@@ -684,6 +727,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
sahara_unregister();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
new file mode 100644
index 000000000000..914ffc4a9970
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+
+#include "qaic.h"
+#include "qaic_ras.h"
+
+#define MAGIC 0x55AA
+#define VERSION 0x2
+#define HDR_SZ 12
+#define NUM_TEMP_LVL 3
+#define POWER_BREAK BIT(0)
+
+enum msg_type {
+ MSG_PUSH, /* async push from device */
+ MSG_REQ, /* sync request to device */
+ MSG_RESP, /* sync response from device */
+};
+
+enum err_type {
+ CE, /* correctable error */
+ UE, /* uncorrectable error */
+ UE_NF, /* uncorrectable error that is non-fatal, expect a disruption */
+ ERR_TYPE_MAX,
+};
+
+static const char * const err_type_str[] = {
+ [CE] = "Correctable",
+ [UE] = "Uncorrectable",
+ [UE_NF] = "Uncorrectable Non-Fatal",
+};
+
+static const char * const err_class_str[] = {
+ [CE] = "Warning",
+ [UE] = "Fatal",
+ [UE_NF] = "Warning",
+};
+
+enum err_source {
+ SOC_MEM,
+ PCIE,
+ DDR,
+ SYS_BUS1,
+ SYS_BUS2,
+ NSP_MEM,
+ TSENS,
+};
+
+static const char * const err_src_str[TSENS + 1] = {
+ [SOC_MEM] = "SoC Memory",
+ [PCIE] = "PCIE",
+ [DDR] = "DDR",
+ [SYS_BUS1] = "System Bus source 1",
+ [SYS_BUS2] = "System Bus source 2",
+ [NSP_MEM] = "NSP Memory",
+ [TSENS] = "Temperature Sensors",
+};
+
+struct ras_data {
+ /* header start */
+ /* Magic number to validate the message */
+ u16 magic;
+ /* RAS version number */
+ u16 ver;
+ u32 seq_num;
+ /* RAS message type */
+ u8 type;
+ u8 id;
+ /* Size of the RAS message, excluding the header, in bytes */
+ u16 len;
+ /* header end */
+ s32 result;
+ /*
+ * Error source
+ * 0 : SoC Memory
+ * 1 : PCIE
+ * 2 : DDR
+ * 3 : System Bus source 1
+ * 4 : System Bus source 2
+ * 5 : NSP Memory
+ * 6 : Temperature Sensors
+ */
+ u32 source;
+ /*
+ * Stores the error type, there are three types of error in RAS
+ * 0 : correctable error (CE)
+ * 1 : uncorrectable error (UE)
+ * 2 : uncorrectable error that is non-fatal (UE_NF)
+ */
+ u32 err_type;
+ u32 err_threshold;
+ u32 ce_count;
+ u32 ue_count;
+ u32 intr_num;
+ /* Data specific to error source */
+ u8 syndrome[64];
+} __packed;
+
+struct soc_mem_syndrome {
+ u64 error_address[8];
+} __packed;
+
+struct nsp_mem_syndrome {
+ u32 error_address[8];
+ u8 nsp_id;
+} __packed;
+
+struct ddr_syndrome {
+ u32 count;
+ u32 irq_status;
+ u32 data_31_0[2];
+ u32 data_63_32[2];
+ u32 data_95_64[2];
+ u32 data_127_96[2];
+ u32 addr_lsb;
+ u16 addr_msb;
+ u16 parity_bits;
+ u16 instance;
+ u16 err_type;
+} __packed;
+
+struct tsens_syndrome {
+ u32 threshold_type;
+ s32 temp;
+} __packed;
+
+struct sysbus1_syndrome {
+ u32 slave;
+ u32 err_type;
+ u16 addr[8];
+ u8 instance;
+} __packed;
+
+struct sysbus2_syndrome {
+ u32 lsb3;
+ u32 msb3;
+ u32 lsb2;
+ u32 msb2;
+ u32 ext_id;
+ u16 path;
+ u16 op_type;
+ u16 len;
+ u16 redirect;
+ u8 valid;
+ u8 word_error;
+ u8 non_secure;
+ u8 opc;
+ u8 error_code;
+ u8 trans_type;
+ u8 addr_space;
+ u8 instance;
+} __packed;
+
+struct pcie_syndrome {
+ /* CE info */
+ u32 bad_tlp;
+ u32 bad_dllp;
+ u32 replay_rollover;
+ u32 replay_timeout;
+ u32 rx_err;
+ u32 internal_ce_count;
+ /* UE_NF info */
+ u32 fc_timeout;
+ u32 poison_tlp;
+ u32 ecrc_err;
+ u32 unsupported_req;
+ u32 completer_abort;
+ u32 completion_timeout;
+ /* UE info */
+ u32 addr;
+ u8 index;
+ /*
+ * Flags indicating specific PCIe events
+ * BIT(0): Power break (low power)
+ * BIT(1) to BIT(7): Reserved
+ */
+ u8 flag;
+} __packed;
+
+static const char * const threshold_type_str[NUM_TEMP_LVL] = {
+ [0] = "lower",
+ [1] = "upper",
+ [2] = "critical",
+};
+
+static void ras_msg_to_cpu(struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ int i;
+
+ le16_to_cpus(&msg->magic);
+ le16_to_cpus(&msg->ver);
+ le32_to_cpus(&msg->seq_num);
+ le16_to_cpus(&msg->len);
+ le32_to_cpus(&msg->result);
+ le32_to_cpus(&msg->source);
+ le32_to_cpus(&msg->err_type);
+ le32_to_cpus(&msg->err_threshold);
+ le32_to_cpus(&msg->ce_count);
+ le32_to_cpus(&msg->ue_count);
+ le32_to_cpus(&msg->intr_num);
+
+ switch (msg->source) {
+ case SOC_MEM:
+ for (i = 0; i < 8; i++)
+ le64_to_cpus(&soc_syndrome->error_address[i]);
+ break;
+ case PCIE:
+ le32_to_cpus(&pcie_syndrome->bad_tlp);
+ le32_to_cpus(&pcie_syndrome->bad_dllp);
+ le32_to_cpus(&pcie_syndrome->replay_rollover);
+ le32_to_cpus(&pcie_syndrome->replay_timeout);
+ le32_to_cpus(&pcie_syndrome->rx_err);
+ le32_to_cpus(&pcie_syndrome->internal_ce_count);
+ le32_to_cpus(&pcie_syndrome->fc_timeout);
+ le32_to_cpus(&pcie_syndrome->poison_tlp);
+ le32_to_cpus(&pcie_syndrome->ecrc_err);
+ le32_to_cpus(&pcie_syndrome->unsupported_req);
+ le32_to_cpus(&pcie_syndrome->completer_abort);
+ le32_to_cpus(&pcie_syndrome->completion_timeout);
+ le32_to_cpus(&pcie_syndrome->addr);
+ break;
+ case DDR:
+ le16_to_cpus(&ddr_syndrome->instance);
+ le16_to_cpus(&ddr_syndrome->err_type);
+ le32_to_cpus(&ddr_syndrome->count);
+ le32_to_cpus(&ddr_syndrome->irq_status);
+ le32_to_cpus(&ddr_syndrome->data_31_0[0]);
+ le32_to_cpus(&ddr_syndrome->data_31_0[1]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[0]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[1]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[0]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[1]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[0]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[1]);
+ le16_to_cpus(&ddr_syndrome->parity_bits);
+ le16_to_cpus(&ddr_syndrome->addr_msb);
+ le32_to_cpus(&ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ le32_to_cpus(&sysbus1_syndrome->slave);
+ le32_to_cpus(&sysbus1_syndrome->err_type);
+ for (i = 0; i < 8; i++)
+ le16_to_cpus(&sysbus1_syndrome->addr[i]);
+ break;
+ case SYS_BUS2:
+ le16_to_cpus(&sysbus2_syndrome->op_type);
+ le16_to_cpus(&sysbus2_syndrome->len);
+ le16_to_cpus(&sysbus2_syndrome->redirect);
+ le16_to_cpus(&sysbus2_syndrome->path);
+ le32_to_cpus(&sysbus2_syndrome->ext_id);
+ le32_to_cpus(&sysbus2_syndrome->lsb2);
+ le32_to_cpus(&sysbus2_syndrome->msb2);
+ le32_to_cpus(&sysbus2_syndrome->lsb3);
+ le32_to_cpus(&sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ for (i = 0; i < 8; i++)
+ le32_to_cpus(&nsp_syndrome->error_address[i]);
+ break;
+ case TSENS:
+ le32_to_cpus(&tsens_syndrome->threshold_type);
+ le32_to_cpus(&tsens_syndrome->temp);
+ break;
+ }
+}
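
Editor's note: the *_to_cpus() helpers convert a field in place, which is what lets ras_msg_to_cpu() overlay the per-source syndrome structs directly on the receive buffer and fix fields up without copying. A minimal illustration of the two conversion flavors:

	#include <asm/byteorder.h>
	#include <linux/types.h>

	/* Sketch: in-place vs. value-returning byte-order conversion. */
	static void fixup_in_place(u32 *field)
	{
		le32_to_cpus(field);		/* rewrites *field */
	}

	static u32 read_le_value(const __le32 *field)
	{
		return le32_to_cpu(*field);	/* returns the converted value */
	}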
+
+static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ char *class;
+ char *level;
+
+ if (msg->magic != MAGIC) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic);
+ return;
+ }
+
+ if (!msg->ver || msg->ver > VERSION) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver);
+ return;
+ }
+
+ if (msg->type != MSG_PUSH) {
+ pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n");
+ return;
+ }
+
+ if (msg->len != sizeof(*msg) - HDR_SZ) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len);
+ return;
+ }
+
+ if (msg->err_type >= ERR_TYPE_MAX) {
+ pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type);
+ return;
+ }
+
+ if (msg->err_type == UE)
+ level = KERN_ERR;
+ else
+ level = KERN_WARNING;
+
+ switch (msg->source) {
+ case SOC_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ soc_syndrome->error_address[0],
+ soc_syndrome->error_address[1],
+ soc_syndrome->error_address[2],
+ soc_syndrome->error_address[3],
+ soc_syndrome->error_address[4],
+ soc_syndrome->error_address[5],
+ soc_syndrome->error_address[6],
+ soc_syndrome->error_address[7]);
+ break;
+ case PCIE:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold);
+
+ switch (msg->err_type) {
+ case CE:
+ /*
+ * Modeled after AER prints. This continues the dev_printk() from a few
+ * lines up. We reduce duplication of code, but also avoid re-printing the
+ * PCI device info so that the end result looks uniform to the log user.
+ */
+ printk(KERN_WARNING pr_fmt("Syndrome:\n Bad TLP count %d\n Bad DLLP count %d\n Replay Rollover count %d\n Replay Timeout count %d\n Recv Error count %d\n Internal CE count %d\n"),
+ pcie_syndrome->bad_tlp,
+ pcie_syndrome->bad_dllp,
+ pcie_syndrome->replay_rollover,
+ pcie_syndrome->replay_timeout,
+ pcie_syndrome->rx_err,
+ pcie_syndrome->internal_ce_count);
+ if (msg->ver > 0x1)
+ pr_warn(" Power break %s\n",
+ pcie_syndrome->flag & POWER_BREAK ? "ON" : "OFF");
+ break;
+ case UE:
+ printk(KERN_ERR pr_fmt("Syndrome:\n Index %d\n Address 0x%x\n"),
+ pcie_syndrome->index, pcie_syndrome->addr);
+ break;
+ case UE_NF:
+ printk(KERN_WARNING pr_fmt("Syndrome:\n FC timeout count %d\n Poisoned TLP count %d\n ECRC error count %d\n Unsupported request count %d\n Completer abort count %d\n Completion timeout count %d\n"),
+ pcie_syndrome->fc_timeout,
+ pcie_syndrome->poison_tlp,
+ pcie_syndrome->ecrc_err,
+ pcie_syndrome->unsupported_req,
+ pcie_syndrome->completer_abort,
+ pcie_syndrome->completion_timeout);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DDR:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n Instance %d\n Count %d\n Data 31_0 0x%x 0x%x\n Data 63_32 0x%x 0x%x\n Data 95_64 0x%x 0x%x\n Data 127_96 0x%x 0x%x\n Parity bits 0x%x\n Address msb 0x%x\n Address lsb 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ ddr_syndrome->instance,
+ ddr_syndrome->count,
+ ddr_syndrome->data_31_0[1],
+ ddr_syndrome->data_31_0[0],
+ ddr_syndrome->data_63_32[1],
+ ddr_syndrome->data_63_32[0],
+ ddr_syndrome->data_95_64[1],
+ ddr_syndrome->data_95_64[0],
+ ddr_syndrome->data_127_96[1],
+ ddr_syndrome->data_127_96[0],
+ ddr_syndrome->parity_bits,
+ ddr_syndrome->addr_msb,
+ ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n %s\n err_type %d\n address0 0x%x\n address1 0x%x\n address2 0x%x\n address3 0x%x\n address4 0x%x\n address5 0x%x\n address6 0x%x\n address7 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus1_syndrome->instance,
+ sysbus1_syndrome->slave ? "Slave" : "Master",
+ sysbus1_syndrome->err_type,
+ sysbus1_syndrome->addr[0],
+ sysbus1_syndrome->addr[1],
+ sysbus1_syndrome->addr[2],
+ sysbus1_syndrome->addr[3],
+ sysbus1_syndrome->addr[4],
+ sysbus1_syndrome->addr[5],
+ sysbus1_syndrome->addr[6],
+ sysbus1_syndrome->addr[7]);
+ break;
+ case SYS_BUS2:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n valid %d\n word error %d\n non-secure %d\n opc %d\n error code %d\n transaction type %d\n address space %d\n operation type %d\n len %d\n redirect %d\n path %d\n ext_id %d\n lsb2 %d\n msb2 %d\n lsb3 %d\n msb3 %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus2_syndrome->instance,
+ sysbus2_syndrome->valid,
+ sysbus2_syndrome->word_error,
+ sysbus2_syndrome->non_secure,
+ sysbus2_syndrome->opc,
+ sysbus2_syndrome->error_code,
+ sysbus2_syndrome->trans_type,
+ sysbus2_syndrome->addr_space,
+ sysbus2_syndrome->op_type,
+ sysbus2_syndrome->len,
+ sysbus2_syndrome->redirect,
+ sysbus2_syndrome->path,
+ sysbus2_syndrome->ext_id,
+ sysbus2_syndrome->lsb2,
+ sysbus2_syndrome->msb2,
+ sysbus2_syndrome->lsb3,
+ sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n NSP ID %d\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ nsp_syndrome->nsp_id,
+ nsp_syndrome->error_address[0],
+ nsp_syndrome->error_address[1],
+ nsp_syndrome->error_address[2],
+ nsp_syndrome->error_address[3],
+ nsp_syndrome->error_address[4],
+ nsp_syndrome->error_address[5],
+ nsp_syndrome->error_address[6],
+ nsp_syndrome->error_address[7]);
+ break;
+ case TSENS:
+ if (tsens_syndrome->threshold_type >= NUM_TEMP_LVL) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid temp threshold %d\n",
+ tsens_syndrome->threshold_type);
+ break;
+ }
+
+ if (msg->err_type)
+ class = "Fatal";
+ else if (tsens_syndrome->threshold_type)
+ class = "Critical";
+ else
+ class = "Warning";
+
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n %s threshold\n %d deg C\n",
+ class,
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ threshold_type_str[tsens_syndrome->threshold_type],
+ tsens_syndrome->temp);
+ break;
+ }
+
+ /* Uncorrectable errors are fatal */
+ if (msg->err_type == UE)
+ mhi_soc_reset(qdev->mhi_cntrl);
+
+ switch (msg->err_type) {
+ case CE:
+ if (qdev->ce_count != UINT_MAX)
+ qdev->ce_count++;
+ break;
+ case UE:
+ if (qdev->ue_count != UINT_MAX)
+ qdev->ue_count++;
+ break;
+ case UE_NF:
+ if (qdev->ue_nf_count != UINT_MAX)
+ qdev->ue_nf_count++;
+ break;
+ default:
+ /* not possible */
+ break;
+ }
+}
+
+static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", qdev->ce_count);
+}
+
+static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", qdev->ue_count);
+}
+
+static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", qdev->ue_nf_count);
+}
+
+static DEVICE_ATTR_RO(ce_count);
+static DEVICE_ATTR_RO(ue_count);
+static DEVICE_ATTR_RO(ue_nonfatal_count);
+
+static struct attribute *ras_attrs[] = {
+ &dev_attr_ce_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ue_nonfatal_count.attr,
+ NULL,
+};
+
+static struct attribute_group ras_group = {
+ .attrs = ras_attrs,
+};
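
Editor's note: the attribute group lands the three counters directly in the PCI device's sysfs directory. A hypothetical userspace reader (the BDF in the path is an example, not something defined by this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ce = 0;
		FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/ce_count", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &ce) == 1)
			printf("correctable errors: %u\n", ce);
		fclose(f);
		return 0;
	}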
+
+static int qaic_ras_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ras_data *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp, sizeof(*resp), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ ret = device_add_group(&qdev->pdev->dev, &ras_group);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ pci_dbg(qdev->pdev, "ras add sysfs failed %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ras_ch = mhi_dev;
+
+ return ret;
+}
+
+static void qaic_ras_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ qdev->ras_ch = NULL;
+ device_remove_group(&qdev->pdev->dev, &ras_group);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void qaic_ras_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) {}
+
+static void qaic_ras_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ras_data *msg = mhi_result->buf_addr;
+ int ret;
+
+ if (mhi_result->transaction_status) {
+ kfree(msg);
+ return;
+ }
+
+ ras_msg_to_cpu(msg);
+ decode_ras_msg(qdev, msg);
+
+ ret = mhi_queue_buf(qdev->ras_ch, DMA_FROM_DEVICE, msg, sizeof(*msg), MHI_EOT);
+ if (ret) {
+ dev_err(&mhi_dev->dev, "Cannot requeue RAS recv buf %d\n", ret);
+ kfree(msg);
+ }
+}
+
+static const struct mhi_device_id qaic_ras_mhi_match_table[] = {
+ { .chan = "QAIC_STATUS", },
+ {},
+};
+
+static struct mhi_driver qaic_ras_mhi_driver = {
+ .id_table = qaic_ras_mhi_match_table,
+ .remove = qaic_ras_mhi_remove,
+ .probe = qaic_ras_mhi_probe,
+ .ul_xfer_cb = qaic_ras_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ras_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ras",
+ },
+};
+
+int qaic_ras_register(void)
+{
+ return mhi_driver_register(&qaic_ras_mhi_driver);
+}
+
+void qaic_ras_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ras_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ras.h b/drivers/accel/qaic/qaic_ras.h
new file mode 100644
index 000000000000..d44a4eeeb060
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __QAIC_RAS_H__
+#define __QAIC_RAS_H__
+
+int qaic_ras_register(void);
+void qaic_ras_unregister(void);
+
+#endif /* __QAIC_RAS_H__ */
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 301f4462d51b..3fac540f8e03 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -129,7 +129,7 @@ static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_resu
static void qaic_timesync_timer(struct timer_list *t)
{
- struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct mqts_dev *mqtsdev = timer_container_of(mqtsdev, t, timer);
struct qts_host_time_sync_msg_data *sync_msg;
u64 device_qtimer_us;
u64 device_qtimer;
@@ -201,7 +201,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
goto free_sync_msg;
/* Qtimer register pointer */
- mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET;
timer_setup(timer, qaic_timesync_timer, 0);
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
@@ -221,7 +221,7 @@ static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
- del_timer_sync(&mqtsdev->timer);
+ timer_delete_sync(&mqtsdev->timer);
mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
kfree(mqtsdev->sync_msg);
kfree(mqtsdev);
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index bf94bbab6be5..3ebcc1f7ff58 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
@@ -9,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include "sahara.h"
@@ -36,12 +38,14 @@
#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE U32_MAX
#define SAHARA_VERSION 2
#define SAHARA_SUCCESS 0
+#define SAHARA_TABLE_ENTRY_STR_LEN 20
#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
@@ -53,6 +57,8 @@
#define SAHARA_END_OF_IMAGE_LENGTH 0x10
#define SAHARA_DONE_LENGTH 0x8
#define SAHARA_RESET_LENGTH 0x8
+#define SAHARA_MEM_DEBUG64_LENGTH 0x18
+#define SAHARA_MEM_READ64_LENGTH 0x18
struct sahara_packet {
__le32 cmd;
@@ -80,21 +86,98 @@ struct sahara_packet {
__le32 image;
__le32 status;
} end_of_image;
+ struct {
+ __le64 table_address;
+ __le64 table_length;
+ } memory_debug64;
+ struct {
+ __le64 memory_address;
+ __le64 memory_length;
+ } memory_read64;
};
};
+struct sahara_debug_table_entry64 {
+ __le64 type;
+ __le64 address;
+ __le64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+struct sahara_dump_table_entry {
+ u64 type;
+ u64 address;
+ u64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
+#define SAHARA_DUMP_V1_VER 1
+struct sahara_memory_dump_meta_v1 {
+ u64 magic;
+ u64 version;
+ u64 dump_size;
+ u64 table_size;
+};
+
+/*
+ * Layout of crashdump provided to user via devcoredump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct sahara_memory_dump_meta_v1 |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct |
+ * | sahara_dump_table_entry |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ *
+ * First is the metadata header. Userspace can use the magic number to verify
+ * the content type, and then check the version for the rest of the format.
+ * New versions should keep the magic number location/value, and version
+ * location, but increment the version value.
+ *
+ * For v1, the metadata lists the size of the entire dump (header + table +
+ * dump) and the size of the table. Then the dump image table, which describes
+ * the contents of the dump. Finally all the images are listed in order, with
+ * no deadspace in between. Userspace can use the sizes listed in the image
+ * table to reconstruct the individual images.
+ */
+
struct sahara_context {
struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
struct sahara_packet *rx;
- struct work_struct work;
+ struct work_struct fw_work;
+ struct work_struct dump_work;
struct mhi_device *mhi_dev;
- const char **image_table;
+ const char * const *image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
+ u64 dump_table_address;
+ u64 dump_table_length;
+ size_t rx_size;
+ size_t rx_size_requested;
+ void *mem_dump;
+ size_t mem_dump_sz;
+ struct sahara_dump_table_entry *dump_image;
+ u64 dump_image_offset;
+ void *mem_dump_freespace;
+ u64 dump_images_left;
+ bool is_mem_dump_mode;
};
-static const char *aic100_image_table[] = {
+static const char * const aic100_image_table[] = {
[1] = "qcom/aic100/fw1.bin",
[2] = "qcom/aic100/fw2.bin",
[4] = "qcom/aic100/fw4.bin",
@@ -105,6 +188,34 @@ static const char *aic100_image_table[] = {
[10] = "qcom/aic100/fw10.bin",
};
+static const char * const aic200_image_table[] = {
+ [5] = "qcom/aic200/uefi.elf",
+ [12] = "qcom/aic200/aic200-nsp.bin",
+ [23] = "qcom/aic200/aop.mbn",
+ [32] = "qcom/aic200/tz.mbn",
+ [33] = "qcom/aic200/hypvm.mbn",
+ [39] = "qcom/aic200/aic200_abl.elf",
+ [40] = "qcom/aic200/apdp.mbn",
+ [41] = "qcom/aic200/devcfg.mbn",
+ [42] = "qcom/aic200/sec.elf",
+ [43] = "qcom/aic200/aic200-hlos.elf",
+ [49] = "qcom/aic200/shrm.elf",
+ [50] = "qcom/aic200/cpucp.elf",
+ [51] = "qcom/aic200/aop_devcfg.mbn",
+ [57] = "qcom/aic200/cpucp_dtbs.elf",
+ [62] = "qcom/aic200/uefi_dtbs.elf",
+ [63] = "qcom/aic200/xbl_ac_config.mbn",
+ [64] = "qcom/aic200/tz_ac_config.mbn",
+ [65] = "qcom/aic200/hyp_ac_config.mbn",
+ [66] = "qcom/aic200/pdp.elf",
+ [67] = "qcom/aic200/pdp_cdb.elf",
+ [68] = "qcom/aic200/sdi.mbn",
+ [69] = "qcom/aic200/dcd.mbn",
+ [73] = "qcom/aic200/gearvm.mbn",
+ [74] = "qcom/aic200/sti.bin",
+ [75] = "qcom/aic200/pvs.bin",
+};
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -153,6 +264,8 @@ static void sahara_send_reset(struct sahara_context *context)
{
int ret;
+ context->is_mem_dump_mode = false;
+
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -186,7 +299,8 @@ static void sahara_hello(struct sahara_context *context)
}
if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
- le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
le32_to_cpu(context->rx->hello.mode));
return;
@@ -320,9 +434,70 @@ static void sahara_end_of_image(struct sahara_context *context)
dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
+static void sahara_memory_debug64(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
+ le32_to_cpu(context->rx->length),
+ le64_to_cpu(context->rx->memory_debug64.table_address),
+ le64_to_cpu(context->rx->memory_debug64.table_length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
+ context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);
+
+ if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
+ !context->dump_table_length) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
+ context->dump_table_length);
+ return;
+ }
+
+ /*
+ * From this point, the protocol flips. We make memory_read requests to
+ * the device, and the device responds with the raw data. If the device
+ * has an error, it will send an End of Image command. First we need to
+ * request the memory dump table so that we know where all the pieces
+ * of the dump that we need to consume are located.
+ */
+
+ context->is_mem_dump_mode = true;
+
+ /*
+ * Assume that the table is smaller than our MTU so that we can read it
+ * in one shot. The spec does not put an upper limit on the table, but
+ * no known device will exceed this.
+ */
+ if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
+ context->dump_table_length);
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);
+
+ context->rx_size_requested = context->dump_table_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
+}
+
static void sahara_processing(struct work_struct *work)
{
- struct sahara_context *context = container_of(work, struct sahara_context, work);
+ struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
int ret;
switch (le32_to_cpu(context->rx->cmd)) {
@@ -338,6 +513,12 @@ static void sahara_processing(struct work_struct *work)
case SAHARA_DONE_RESP_CMD:
/* Intentional do nothing as we don't need to exit an app */
break;
+ case SAHARA_RESET_RESP_CMD:
+ /* Intentional do nothing as no action is needed for the reset response */
+ break;
+ case SAHARA_MEM_DEBUG64_CMD:
+ sahara_memory_debug64(context);
+ break;
default:
dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
le32_to_cpu(context->rx->cmd));
@@ -350,6 +531,217 @@ static void sahara_processing(struct work_struct *work)
dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}
+static void sahara_parse_dump_table(struct sahara_context *context)
+{
+ struct sahara_dump_table_entry *image_out_table;
+ struct sahara_debug_table_entry64 *dev_table;
+ struct sahara_memory_dump_meta_v1 *dump_meta;
+ u64 table_nents;
+ u64 dump_length;
+ int ret;
+ u64 i;
+
+ table_nents = context->dump_table_length / sizeof(*dev_table);
+ context->dump_images_left = table_nents;
+ dump_length = 0;
+
+ dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
+ for (i = 0; i < table_nents; ++i) {
+ /* Do not trust the device, ensure the strings are terminated */
+ dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+ dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+
+ dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ dev_dbg(&context->mhi_dev->dev,
+ "Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
+ i,
+ le64_to_cpu(dev_table[i].type),
+ le64_to_cpu(dev_table[i].address),
+ le64_to_cpu(dev_table[i].length),
+ dev_table[i].description,
+ dev_table[i].filename);
+ }
+
+ dump_length = size_add(dump_length, sizeof(*dump_meta));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->mem_dump_sz = dump_length;
+ context->mem_dump = vzalloc(dump_length);
+ if (!context->mem_dump) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ /* Populate the dump metadata and table for userspace */
+ dump_meta = context->mem_dump;
+ dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
+ dump_meta->version = SAHARA_DUMP_V1_VER;
+ dump_meta->dump_size = dump_length;
+ dump_meta->table_size = context->dump_table_length;
+
+ image_out_table = context->mem_dump + sizeof(*dump_meta);
+ for (i = 0; i < table_nents; ++i) {
+ image_out_table[i].type = le64_to_cpu(dev_table[i].type);
+ image_out_table[i].address = le64_to_cpu(dev_table[i].address);
+ image_out_table[i].length = le64_to_cpu(dev_table[i].length);
+ strscpy(image_out_table[i].description, dev_table[i].description,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ strscpy(image_out_table[i].filename,
+ dev_table[i].filename,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ }
+
+ context->mem_dump_freespace = &image_out_table[i];
+
+ /* Done parsing the table, switch to image dump mode */
+ context->dump_table_length = 0;
+
+ /* Request the first chunk of the first image */
+ context->dump_image = &image_out_table[0];
+ dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->dump_image_offset = dump_length;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_parse_dump_image(struct sahara_context *context)
+{
+ u64 dump_length;
+ int ret;
+
+ memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
+ context->mem_dump_freespace += context->rx_size;
+
+ if (context->dump_image_offset >= context->dump_image->length) {
+ /* Need to move to next image */
+ context->dump_image++;
+ context->dump_images_left--;
+ context->dump_image_offset = 0;
+
+ if (!context->dump_images_left) {
+ /* Dump done */
+ dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
+ context->mem_dump,
+ context->mem_dump_sz,
+ GFP_KERNEL);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+ return;
+ }
+ }
+
+ /* Get next image chunk */
+ dump_length = context->dump_image->length - context->dump_image_offset;
+ dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address =
+ cpu_to_le64(context->dump_image->address + context->dump_image_offset);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->dump_image_offset += dump_length;
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev,
+ "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_dump_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
+ int ret;
+
+ /*
+ * We should get the expected raw data, but if the device has an error
+ * it is supposed to send EOI with an error code.
+ */
+ if (context->rx_size != context->rx_size_requested &&
+ context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
+ context->rx_size_requested,
+ context->rx_size);
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected EOI response to read_data. Status: %d\n",
+ le32_to_cpu(context->rx->end_of_image.status));
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Invalid EOI response to read_data. CMD: %d\n",
+ le32_to_cpu(context->rx->cmd));
+ goto error;
+ }
+
+ /*
+ * Need to know if we received the dump table, or part of a dump image.
+ * Since we get raw data, we cannot tell from the data itself. Instead,
+ * we use the stored dump_table_length, which we zero after we read and
+ * process the entire table.
+ */
+ if (context->dump_table_length)
+ sahara_parse_dump_table(context);
+ else
+ sahara_parse_dump_image(context);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+
+ return;
+
+error:
+ vfree(context->mem_dump);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -382,9 +774,17 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
}
context->mhi_dev = mhi_dev;
- INIT_WORK(&context->work, sahara_processing);
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
+ INIT_WORK(&context->fw_work, sahara_processing);
+ INIT_WORK(&context->dump_work, sahara_dump_processing);
+
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ }
+
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
@@ -405,7 +805,9 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- cancel_work_sync(&context->work);
+ cancel_work_sync(&context->fw_work);
+ cancel_work_sync(&context->dump_work);
+ vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
@@ -418,8 +820,14 @@ static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- if (!mhi_result->transaction_status)
- schedule_work(&context->work);
+ if (!mhi_result->transaction_status) {
+ context->rx_size = mhi_result->bytes_xferd;
+ if (context->is_mem_dump_mode)
+ schedule_work(&context->dump_work);
+ else
+ schedule_work(&context->fw_work);
+ }
}
static const struct mhi_device_id sahara_mhi_match_table[] = {
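
Editor's note: tying this file together, given the v1 layout documented in the comment block earlier in the file (meta header, image table, then the images packed back to back), a userspace consumer could split a devcoredump blob roughly as follows. The struct definitions mirror the kernel-side ones; this is a sketch under the assumption that both table entry formats are 64 bytes, as they are in this driver, not an official tool:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DUMP_V1_MAGIC 0x1234567890abcdefULL
	#define ENTRY_STR_LEN 20

	struct dump_meta {
		uint64_t magic;
		uint64_t version;
		uint64_t dump_size;
		uint64_t table_size;
	};

	struct dump_entry {
		uint64_t type;
		uint64_t address;
		uint64_t length;
		char description[ENTRY_STR_LEN];
		char filename[ENTRY_STR_LEN];
	};

	/* Sketch: list the images inside a v1 crashdump blob. */
	static int parse_dump(const unsigned char *blob, size_t len)
	{
		const struct dump_meta *meta = (const void *)blob;
		const struct dump_entry *table;
		uint64_t nents, off, i;

		if (len < sizeof(*meta) || meta->magic != DUMP_V1_MAGIC ||
		    meta->version != 1)
			return -1;

		nents = meta->table_size / sizeof(*table);
		table = (const void *)(blob + sizeof(*meta));
		off = sizeof(*meta) + nents * sizeof(*table);

		for (i = 0; i < nents; i++) {
			printf("%-20s %#llx bytes at blob offset %#llx\n",
			       table[i].filename,
			       (unsigned long long)table[i].length,
			       (unsigned long long)off);
			off += table[i].length;
		}
		return off <= len ? 0 : -1;
	}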
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+ Choose this option if you have a Rockchip SoC that contains a
+ compatible Neural Processing Unit (NPU), such as the RK3588. Called by
+ Rockchip either RKNN or RKNPU, it accelerates inference of neural
+ networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will most often be used as part of a media pipeline that
+ * ends by presenting frames on a display, choose 50 ms (~3 frames at
+ * 60 Hz) as the autosuspend delay; that keeps the device powered up
+ * while the pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
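rocket_core_init() above enables runtime PM with a 50 ms autosuspend delay and
immediately takes and drops a reference to read the version registers. The same
bracketing idiom recurs in the job-submission code later in this series; a
hedged sketch of the consumer side (do_hw_work() is an illustrative helper, not
part of the driver):

    #include <linux/pm_runtime.h>

    static int do_hw_work(struct device *dev)
    {
    	int ret;

    	ret = pm_runtime_resume_and_get(dev);	/* power up, take a reference */
    	if (ret < 0)
    		return ret;

    	/* ... program registers, wait for the job to complete ... */

    	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
    	pm_runtime_put_autosuspend(dev);	/* may power down after 50 ms */
    	return 0;
    }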
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
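The CNA and CORE accessors above subtract the unit's first register because the
"cna" and "core" MMIO resources are mapped starting at REG_CNA_S_STATUS and
REG_CORE_S_STATUS respectively, while the offsets in the generated
rocket_registers.h are absolute. A worked expansion (cna_set_pointer() is an
illustrative helper, not driver code; the register values are from the
generated header at the end of this series):

    /* rocket_cna_writel(core, S_POINTER, v) rebases the absolute offset:
     * REG_CNA_S_POINTER (0x1004) - REG_CNA_S_STATUS (0x1000) = byte 4 of
     * the "cna" mapping. */
    static inline void cna_set_pointer(struct rocket_core *core, u32 v)
    {
    	writel(v, core->cna_iomem + (REG_CNA_S_POINTER - REG_CNA_S_STATUS));
    }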
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device, used to expose a single DRM device to userspace, which
+ * schedules jobs to all RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain*
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
+static int rocket_probe(struct platform_device *pdev)
+{
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ unsigned int core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
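The four ioctls registered above (CREATE_BO, SUBMIT, PREP_BO, FINI_BO) are the
whole userspace interface; the Mesa driver mentioned in the Kconfig help is
their intended consumer. A hedged userspace sketch of buffer allocation,
assuming the conventional DRM_IOCTL_ROCKET_* wrapper macros in
include/uapi/drm/rocket_accel.h (that header is not part of this hunk, so the
exact macro name is an assumption):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/rocket_accel.h>

    /* Allocate a BO on an already-open accel node (e.g. /dev/accel/accel0). */
    static int rocket_alloc(int fd, uint64_t size, uint32_t *handle)
    {
    	struct drm_rocket_create_bo req = { .size = size };

    	if (ioctl(fd, DRM_IOCTL_ROCKET_CREATE_BO, &req))
    		return -1;

    	*handle = req.handle;	/* req.dma_address holds the NPU address */
    	return 0;
    }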
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..0551e11cc184
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+ if (ret)
+ goto err;
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+ else if (ret > 0)
+ ret = 0;
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
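rocket_ioctl_prep_bo() waits for pending writes and syncs the pages for CPU
access; rocket_ioctl_fini_bo() hands ownership back to the device. Userspace is
expected to bracket CPU access with the pair. A sketch under the same
macro-name assumptions as the allocation example above:

    /* Bracket a CPU read of inference results: wait + sync in, sync back out. */
    static int read_results(int fd, uint32_t handle, int64_t timeout_ns)
    {
    	struct drm_rocket_prep_bo prep = {
    		.handle = handle,
    		.timeout_ns = timeout_ns,
    	};
    	struct drm_rocket_fini_bo fini = { .handle = handle };

    	if (ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep))
    		return -1;	/* timed out, busy or interrupted */

    	/* ... copy results out of the mmap()ed buffer here ... */

    	return ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);
    }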
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* Taken from the rknpu driver; the TRM marks this bit as reserved */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d\n", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ return fence;
+
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0) {
+ pm_runtime_put(core->dev);
+ return fence;
+ }
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(NULL, core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job)
+ pm_runtime_put_noidle(core->dev);
+
+ iommu_detach_group(NULL, core->iommu_group);
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+ dev_err(core->dev, "NPU job timed out");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+ if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 ||
+ raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1))
+ return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+ dev_err(core->dev, "failed to request job irq");
+ return ret;
+ }
+
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+ dev_err(core->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+
+ return 0;
+
+err_sched:
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+ if (!scheds)
+ return -ENOMEM;
+
+ for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+ if (WARN_ON(ret))
+ return ret;
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ drm_sched_entity_destroy(entity);
+ kfree(entity->sched_list);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ goto exit;
+ }
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
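Both rocket_ioctl_submit() and rocket_copy_tasks() above accept user structs
larger than the kernel's own definitions (job_struct_size, task_struct_size):
they stride through the user array by the caller's size while copying only the
prefix the kernel understands, so fields can be appended to the uapi later
without breaking old kernels. A minimal sketch of that forward-compatibility
idiom (copy_one() is illustrative, not driver code):

    #include <linux/uaccess.h>

    static int copy_one(void __user *base, unsigned int idx,
    		    size_t user_stride, void *dst, size_t kern_size)
    {
    	/* Older userspace passing a smaller struct is rejected up front. */
    	if (user_stride < kern_size)
    		return -EINVAL;

    	/* Stride by the *user's* struct size so any unknown trailing
    	 * fields are skipped rather than misparsed. */
    	if (copy_from_user(dst, base + idx * user_stride, kern_size))
    		return -EFAULT;

    	return 0;
    }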
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
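+/*
+ * Illustrative sketch: the MASK/SHIFT pairs also support decoding a value
+ * read back from the hardware, e.g. for the CORE_S_STATUS fields defined
+ * below ("reg" is an assumed, already-read register word, not something
+ * this header provides):
+ *
+ *	uint32_t status0 = (reg & CORE_S_STATUS_STATUS_0__MASK) >>
+ *			   CORE_S_STATUS_STATUS_0__SHIFT;
+ */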
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
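+/*
+ * Illustrative sketch: single-bit enables follow the same packing pattern,
+ * so a write such as the following would set the enable bit ("base" is an
+ * assumed ioremap'd register base, not something this header provides):
+ *
+ *	writel(DPU_OPERATION_ENABLE_OP_EN(1),
+ *	       base + REG_DPU_OPERATION_ENABLE);
+ */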
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
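+/*
+ * Usage sketch, not part of the generated register list: each field
+ * helper above shifts a value into position and masks it to the field
+ * width, so a complete register image is built by OR-ing helpers
+ * together.  Assuming a writel()-style MMIO accessor and an __iomem
+ * base pointer (hypothetical names, used only for illustration):
+ *
+ *	uint32_t bs_cfg = DPU_BS_CFG_BS_ALU_ALGO(2) |
+ *			  DPU_BS_CFG_BS_RELU_BYPASS(1) |
+ *			  DPU_BS_CFG_BS_MUL_BYPASS(1) |
+ *			  DPU_BS_CFG_BS_ALU_BYPASS(1);
+ *	writel(bs_cfg, base + REG_DPU_BS_CFG);
+ */
+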
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
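+/*
+ * Note, illustrative only: the eight EW_OP_VALUE registers above sit at
+ * consecutive 32-bit offsets (0x4090..0x40ac), so the operand array can
+ * be programmed in a loop.  A minimal sketch, assuming a writel()-style
+ * accessor and hypothetical "base"/"operand" names:
+ *
+ *	for (i = 0; i < 8; i++)
+ *		writel(operand[i], base + REG_DPU_EW_OP_VALUE_0 + i * 4);
+ */
+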
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
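+/*
+ * Note, inferred from the field names rather than from a datasheet: LUT
+ * entries appear to be accessed indirectly.  LUT_ACCESS_CFG selects the
+ * table (LUT_TABLE_ID), the entry index (LUT_ADDR) and the transfer
+ * direction (LUT_ACCESS_TYPE); LUT_ACCESS_DATA then carries one 16-bit
+ * entry per access.  A hypothetical write of entry i into table 0:
+ *
+ *	writel(DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(1) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(0) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_ADDR(i),
+ *	       base + REG_DPU_LUT_ACCESS_CFG);
+ *	writel(DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(entry),
+ *	       base + REG_DPU_LUT_ACCESS_DATA);
+ */
+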
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
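+/*
+ * Note, an assumption based on the field names, not on documentation:
+ * the PP_EN/PP_MODE/PP_CLEAR, POINTER and EXECUTER fields look like a
+ * ping-pong scheme for double-buffered register groups, where software
+ * fills one copy of the group while the hardware executes from the
+ * other.  Selecting group 0 for CPU writes might then look like:
+ *
+ *	writel(DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(1) |
+ *	       DPU_RDMA_RDMA_S_POINTER_POINTER(0),
+ *	       base + REG_DPU_RDMA_RDMA_S_POINTER);
+ */
+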
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
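+/*
+ * PPU register block (0x6000).  As elsewhere in this file, the block opens
+ * with an S_STATUS/S_POINTER pair; the PP_* fields suggest the usual
+ * double-buffered ("ping-pong") register-group scheme, where POINTER
+ * selects the group being programmed and EXECUTER reports the group in
+ * flight.  This reading is inferred from the field names, not stated in
+ * the patch.
+ */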
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
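+/*
+ * FLYING_MODE below presumably selects on-the-fly input streamed straight
+ * from the DPU rather than fetched from memory; the DPU_FLYIN bit in
+ * REG_PPU_DATA_FORMAT further down points the same way.  Inference only.
+ */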
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
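+/*
+ * Pooling kernel geometry.  A plausible (purely illustrative) 3x3,
+ * stride-2 configuration, assuming the fields take raw sizes rather than
+ * size-minus-one encodings:
+ *
+ *	u32 k = PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(3) |
+ *	        PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(3) |
+ *	        PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(2) |
+ *	        PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(2);
+ */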
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
+
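+/*
+ * The 17-bit RECIP_KERNEL_* values are most likely fixed-point reciprocals
+ * of the pooling kernel dimensions, letting average pooling multiply
+ * instead of divide; e.g. for a width-3 kernel something like
+ * (1 << 16) / 3 = 21845.  The exact fixed-point format is an assumption.
+ */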
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
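+/*
+ * DDMA register block (0x8000).  These look like global DMA tuning knobs
+ * rather than per-job state: outstanding-request limits, per-client
+ * read/write arbitration weights (feature/kernel/DPU/PDP/PC), QoS levels
+ * and AXI channel attributes.
+ */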
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
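+/*
+ * The RD_AR* fields below match the standard AXI read-address channel
+ * attributes of the same names (ARLOCK/ARCACHE/ARPROT/ARBURST/ARSIZE) and
+ * presumably drive those signals directly; the WR_AW* fields in the next
+ * register do the same for the write-address channel.
+ */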
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
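+/*
+ * "IDEL" below keeps the vendor's register naming; it presumably reports
+ * that the DMA engine is idle.
+ */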
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
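+/*
+ * SDMA register block (0x9000): a field-for-field mirror of the DDMA
+ * block at 0x8000 above, at the same relative offsets.  The D/S split is
+ * not explained here; separate DMA paths (perhaps external DRAM vs.
+ * on-chip SRAM) would fit the naming, but that is a guess.
+ */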
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
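A note on the generated helpers above: each field gets a __MASK, a __SHIFT, and a pack function that shifts a value into position and masks off anything that would spill into neighbouring fields, so a full register value is built by OR-ing the helpers together. A minimal sketch of programming the SDMA read-QoS register this way (the function name, base pointer, and priority values are illustrative, not part of the generated header):

#include <linux/io.h>

/* Illustrative sketch: pack per-client read priorities and write the
 * result at the register's documented offset. All values are examples.
 */
static void rocket_sdma_set_rd_qos(void __iomem *base)
{
	uint32_t val;

	val = SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(1) |
	      SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(2) |
	      SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(2) |
	      SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(3) |
	      SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(3);

	iowrite32(val, base + REG_SDMA_CFG_DMA_RD_QOS);
}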
diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile
index 6f6a83565c0d..14ba1cca87f4 100644
--- a/drivers/accessibility/speakup/Makefile
+++ b/drivers/accessibility/speakup/Makefile
@@ -40,9 +40,7 @@ hostprogs += makemapdata
makemapdata-objs := makemapdata.o
quiet_cmd_mkmap = MKMAP $@
- cmd_mkmap = TOPDIR=$(srctree) \
- SPKDIR=$(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD),$(srctree)/drivers/accessibility/speakup) \
- $(obj)/makemapdata > $@
+ cmd_mkmap = TOPDIR=$(srctree) SPKDIR=$(src) $(obj)/makemapdata > $@
$(obj)/mapdata.h: $(obj)/makemapdata
$(call cmd,mkmap)
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index f677ad2177c2..e68cf1d83787 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -1172,13 +1172,13 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
if (cursor_track == read_all_mode) {
switch (value) {
case KVAL(K_SHIFT):
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
read_all_doc(vc);
break;
case KVAL(K_CTRL):
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
@@ -1399,7 +1399,7 @@ static void start_read_all_timer(struct vc_data *vc, enum read_all_command comma
static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command)
{
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
speakup_fake_down_arrow();
start_read_all_timer(vc, command);
}
@@ -1415,7 +1415,7 @@ static void read_all_doc(struct vc_data *vc)
cursor_track = read_all_mode;
spk_reset_index_count(0);
if (get_sentence_buf(vc, 0) == -1) {
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
if (!in_keyboard_notifier)
speakup_fake_down_arrow();
start_read_all_timer(vc, RA_DOWN_ARROW);
@@ -1428,7 +1428,7 @@ static void read_all_doc(struct vc_data *vc)
static void stop_read_all(struct vc_data *vc)
{
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
@@ -1528,7 +1528,7 @@ static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
start_read_all_timer(vc, value + 1);
@@ -1692,7 +1692,7 @@ static void cursor_done(struct timer_list *unused)
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_con != fg_console) {
is_cursor = 0;
@@ -2333,7 +2333,7 @@ static void __exit speakup_exit(void)
speakup_unregister_devsynth();
speakup_cancel_selection();
speakup_cancel_paste();
- del_timer_sync(&cursor_timer);
+ timer_delete_sync(&cursor_timer);
kthread_stop(speakup_task);
speakup_task = NULL;
mutex_lock(&spk_mutex);
@@ -2437,7 +2437,7 @@ error_task:
error_vtnotifier:
unregister_keyboard_notifier(&keyboard_notifier_block);
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
error_kbdnotifier:
speakup_unregister_devsynth();
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index 85062e605d79..d8addbf3ad0d 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -521,7 +521,7 @@ void synth_release(void)
spin_lock_irqsave(&speakup_info.spinlock, flags);
pr_info("releasing synth %s\n", synth->name);
synth->alive = 0;
- del_timer(&thread_timer);
+ timer_delete(&thread_timer);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->attributes.name)
sysfs_remove_group(speakup_kobj, &synth->attributes);
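The speakup hunks above are a mechanical API rename: del_timer() became timer_delete() and del_timer_sync() became timer_delete_sync(), with unchanged semantics. A small sketch of the pattern, with a hypothetical timer name:

#include <linux/timer.h>

static struct timer_list demo_timer;		/* hypothetical timer */

static void demo_teardown(void)
{
	/* Formerly del_timer(): deactivate a pending timer; may return
	 * while a concurrently running callback is still executing.
	 */
	timer_delete(&demo_timer);

	/* Formerly del_timer_sync(): additionally waits for a running
	 * callback to finish, as used in speakup_exit() above.
	 */
	timer_delete_sync(&demo_timer);
}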
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d67f63d93b2a..ca00a5dbcf75 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -132,8 +132,17 @@ config ACPI_REV_OVERRIDE_POSSIBLE
makes it possible to force the kernel to return "5" as the supported
ACPI revision via the "acpi_rev_override" command line switch.
+config ACPI_EC
+ bool "Embedded Controller"
+ depends on HAS_IOPORT
+ default X86 || LOONGARCH
+ help
+ This driver handles communication with the microcontroller
+ on many x86/LoongArch laptops and other machines.
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
+ depends on ACPI_EC
help
Say N to disable Embedded Controller /sys/kernel/debug interface
@@ -385,6 +394,6 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
config ACPI_DEBUG
bool "Debug Statements"
help
The ACPI subsystem can produce debug output. Saying Y enables this
output and increases the kernel size by around 50K.
@@ -433,7 +443,7 @@ config ACPI_HOTPLUG_IOAPIC
config ACPI_SBS
tristate "Smart Battery System"
- depends on X86
+ depends on X86 && ACPI_EC
select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
@@ -443,7 +453,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
- tristate "Hardware Error Device"
+ bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via
@@ -451,7 +461,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64 || LOONGARCH)
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -537,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
@@ -567,6 +581,9 @@ config ACPI_FFH
Enable this feature if you want to set up and install the FFH Address
Space handler to handle FFH OpRegion in the firmware.
+config ACPI_MRRM
+ bool
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 61ca4afe83dc..d1b0affb844f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
@@ -41,7 +45,7 @@ acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
-acpi-y += ec.o
+acpi-$(CONFIG_ACPI_EC) += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
@@ -62,6 +66,7 @@ acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o
+acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 7c5b040a83e8..1f69be8f51a2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -290,7 +290,7 @@ static void acpi_ac_remove(struct platform_device *pdev)
static struct platform_driver acpi_ac_driver = {
.probe = acpi_ac_probe,
- .remove_new = acpi_ac_remove,
+ .remove = acpi_ac_remove,
.driver = {
.name = "ac",
.acpi_match_table = ac_device_ids,
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 800f97868448..49539f7528c6 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -86,7 +86,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (!clk_data->name)
return -ENOMEM;
- strcpy(clk_data->name, obj->string.pointer);
+ strscpy(clk_data->name, obj->string.pointer, obj->string.length);
} else {
/* Set default name to mclk if entry missing in firmware */
clk_data->name = "mclk";
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
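The acpi_dbg.c changes above widen the read/write bookkeeping from int to ssize_t/size_t so large counts are neither truncated nor misread as negative, and switch min() to min_t() because the kernel's min() refuses operands of differing types. A standalone sketch of the conversion idiom (names are illustrative):

#include <linux/minmax.h>

/* Clamp a user-requested length against the contiguous space left in a
 * circular buffer. circ_space is int-typed in this sketch; min_t()
 * forces both operands to one explicit type so the comparison is
 * well-defined and builds cleanly.
 */
static size_t demo_clamp_len(size_t requested, int circ_space)
{
	return min_t(size_t, requested, circ_space);
}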
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index ca87a0939135..f6b9562779de 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -15,6 +15,7 @@
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
@@ -234,7 +235,7 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
@@ -251,6 +252,10 @@ static int __init extlog_init(void)
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
+ if (!extlog_l1_hdr) {
+ rc = -ENOMEM;
+ goto err_release_l1_hdr;
+ }
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
@@ -268,6 +273,10 @@ static int __init extlog_init(void)
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
+ if (!extlog_l1_addr) {
+ rc = -ENOMEM;
+ goto err_release_l1_dir;
+ }
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
@@ -279,6 +288,10 @@ static int __init extlog_init(void)
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
+ if (!elog_addr) {
+ rc = -ENOMEM;
+ goto err_release_elog;
+ }
rc = -ENOMEM;
/* allocate buffer to save elog record */
@@ -300,6 +313,8 @@ err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
+err_release_l1_hdr:
+ release_mem_region(l1_dirbase, l1_hdr_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 794962c5c88e..b8d98b1b48ae 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
return 0;
}
- err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr.bit_width - 1,
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
new file mode 100644
index 000000000000..47ea3ccc2142
--- /dev/null
+++ b/drivers/acpi/acpi_mrrm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ *
+ * Memory Range and Region Mapping (MRRM) structure
+ *
+ * Parse and report the platform's MRRM table in /sys.
+ */
+
+#define pr_fmt(fmt) "acpi/mrrm: " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+/* By default, assume one memory region covering all system memory, per the spec */
+static int max_mem_region = 1;
+
+/* Access for use by resctrl file system */
+int acpi_mrrm_max_mem_region(void)
+{
+ return max_mem_region;
+}
+
+struct mrrm_mem_range_entry {
+ u64 base;
+ u64 length;
+ int node;
+ u8 local_region_id;
+ u8 remote_region_id;
+};
+
+static struct mrrm_mem_range_entry *mrrm_mem_range_entry;
+static u32 mrrm_mem_entry_num;
+
+static int get_node_num(struct mrrm_mem_range_entry *e)
+{
+ unsigned int nid;
+
+ for_each_online_node(nid) {
+ for (int z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = NODE_DATA(nid)->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+ if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
+ return zone_to_nid(zone);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static __init int acpi_parse_mrrm(struct acpi_table_header *table)
+{
+ struct acpi_mrrm_mem_range_entry *mre_entry;
+ struct acpi_table_mrrm *mrrm;
+ void *mre, *mrrm_end;
+ int mre_count = 0;
+
+ mrrm = (struct acpi_table_mrrm *)table;
+ if (!mrrm)
+ return -ENODEV;
+
+ if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
+ return -EOPNOTSUPP;
+
+ mrrm_end = (void *)mrrm + mrrm->header.length - 1;
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ mre_entry = mre;
+ mre_count++;
+ mre += mre_entry->header.length;
+ }
+ if (!mre_count) {
+ pr_info(FW_BUG "No ranges listed in MRRM table\n");
+ return -EINVAL;
+ }
+
+ mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!mrrm_mem_range_entry)
+ return -ENOMEM;
+
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ struct mrrm_mem_range_entry *e;
+
+ mre_entry = mre;
+ e = mrrm_mem_range_entry + mrrm_mem_entry_num;
+
+ e->base = mre_entry->addr_base;
+ e->length = mre_entry->addr_len;
+ e->node = get_node_num(e);
+
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL)
+ e->local_region_id = mre_entry->local_region_id;
+ else
+ e->local_region_id = -1;
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE)
+ e->remote_region_id = mre_entry->remote_region_id;
+ else
+ e->remote_region_id = -1;
+
+ mrrm_mem_entry_num++;
+ mre += mre_entry->header.length;
+ }
+
+ max_mem_region = mrrm->max_mem_region;
+
+ return 0;
+}
+
+#define RANGE_ATTR(name, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ struct mrrm_mem_range_entry *mre; \
+ const char *kname = kobject_name(kobj); \
+ int n, ret; \
+ \
+ ret = kstrtoint(kname + 5, 10, &n); \
+ if (ret) \
+ return ret; \
+ \
+ mre = mrrm_mem_range_entry + n; \
+ \
+ return sysfs_emit(buf, fmt, mre->name); \
+} \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+RANGE_ATTR(base, "0x%llx\n");
+RANGE_ATTR(length, "0x%llx\n");
+RANGE_ATTR(node, "%d\n");
+RANGE_ATTR(local_region_id, "%d\n");
+RANGE_ATTR(remote_region_id, "%d\n");
+
+static struct attribute *memory_range_attrs[] = {
+ &base_attr.attr,
+ &length_attr.attr,
+ &node_attr.attr,
+ &local_region_id_attr.attr,
+ &remote_region_id_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(memory_range);
+
+static __init int add_boot_memory_ranges(void)
+{
+ struct kobject *pkobj, *kobj;
+ int ret = -EINVAL;
+ char *name;
+
+ pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+
+ for (int i = 0; i < mrrm_mem_entry_num; i++) {
+ name = kasprintf(GFP_KERNEL, "range%d", i);
+ if (!name) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ kobj = kobject_create_and_add(name, pkobj);
+
+ ret = sysfs_create_groups(kobj, memory_range_groups);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static __init int mrrm_init(void)
+{
+ int ret;
+
+ ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm);
+ if (ret < 0)
+ return ret;
+
+ return add_boot_memory_ranges();
+}
+device_initcall(mrrm_init);
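Because acpi_kobj is the /sys/firmware/acpi directory, the kobjects and attribute groups registered above surface each range as /sys/firmware/acpi/memory_ranges/rangeN/{base,length,node,local_region_id,remote_region_id}. A minimal user-space sketch reading one attribute (assumes an MRRM-capable platform where at least range0 exists):

#include <stdio.h>

int main(void)
{
	/* Hypothetical path: present only when the MRRM table was parsed. */
	const char *path = "/sys/firmware/acpi/memory_ranges/range0/base";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("range0 base: %s", buf);	/* e.g. "0x0" */
	fclose(f);
	return 0;
}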
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 42b7220d4cfd..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -32,7 +33,7 @@
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
@@ -46,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
@@ -462,7 +461,7 @@ MODULE_DEVICE_TABLE(acpi, pad_device_ids);
static struct platform_driver acpi_pad_driver = {
.probe = acpi_pad_probe,
- .remove_new = acpi_pad_remove,
+ .remove = acpi_pad_remove,
.driver = {
.dev_groups = acpi_pad_groups,
.name = "processor_aggregator",
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 07a034a53aca..97064e943768 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,6 @@
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
- void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
@@ -81,14 +80,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
ret = AE_SUPPORT;
goto err_free_channel;
}
- data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!data->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- ctx->subspace_id);
- ret = AE_NO_MEMORY;
- goto err_free_channel;
- }
*region_context = data;
return AE_OK;
@@ -113,7 +104,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
reinit_completion(&data->done);
/* Write to Shared Memory */
- memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+ memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
@@ -134,7 +125,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
mbox_chan_txdone(data->pcc_chan->mchan, ret);
- memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+ memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);
return AE_OK;
}
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 01abf26764b0..4ad88187dc7a 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -120,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"IBM0071"},
/* smsc-ircc2 */
{"SMCf010"},
- /* sb1000 */
- {"GIC1000"},
/* parport_pc */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
@@ -355,8 +353,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INT3F0D"},
{"INTC1080"},
{"INTC1081"},
+ {"INTC1099"},
{""},
};
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 7cf6101cb4c7..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -275,7 +275,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
static int acpi_processor_get_info(struct acpi_device *device)
{
- union acpi_object object = { 0 };
+ union acpi_object object = { .processor = { 0 } };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int device_declaration = 0;
@@ -815,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -994,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index b831cb8e53dc..33418dd6768a 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -233,7 +233,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
+ return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt.tz, rt.daylight);
}
@@ -428,7 +428,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
{
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
- return sprintf(buf, "0x%02X\n", dd->capabilities);
+ return sysfs_emit(buf, "0x%02X\n", dd->capabilities);
}
static DEVICE_ATTR_RO(caps);
@@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev)
pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
+
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
@@ -684,7 +687,7 @@ static struct platform_driver acpi_tad_driver = {
.acpi_match_table = acpi_tad_ids,
},
.probe = acpi_tad_probe,
- .remove_new = acpi_tad_remove,
+ .remove = acpi_tad_remove,
};
MODULE_DEVICE_TABLE(acpi, acpi_tad_ids);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..103f29661576 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <acpi/video.h>
#include <linux/uaccess.h>
+#include <linux/string_choices.h>
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -610,16 +611,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return the length of the EDID (a positive value) on success, or a negative
+ * error code on failure.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +649,24 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ /*
+ * Some buggy implementations incorrectly return the EDID buffer in an ACPI package.
+ * In this case, extract the buffer from the package.
+ */
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1)
+ obj = &obj->package.elements[0];
+
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(buffer.pointer);
+ return ret;
}
/* bus */
@@ -1435,9 +1456,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1496,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
@@ -2034,9 +2047,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
- video->flags.multihead ? "yes" : "no",
- video->flags.rom ? "yes" : "no",
- video->flags.post ? "yes" : "no");
+ str_yes_no(video->flags.multihead),
+ str_yes_no(video->flags.rom),
+ str_yes_no(video->flags.post));
mutex_lock(&video_list_lock);
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
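After the rework above, acpi_video_get_edid() returns a kmemdup()'d buffer through @edid instead of a pointer into a live ACPI object, so ownership passes to the caller. A hedged sketch of the new calling convention (the device argument and display type are placeholders):

#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/video.h>

static void demo_fetch_edid(struct acpi_device *adev)
{
	void *edid;
	int len;

	len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
	if (len < 0)
		return;			/* no EDID available */

	/* ... consume len bytes of EDID data ... */

	kfree(edid);			/* buffer is caller-owned now */
}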
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 9d4cbd956627..d7d4649ce66f 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2022 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 4536dc9d3979..662231f4f881 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c6ba6a36cfb5..24998f2d7539 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 911875c5a5f1..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 73eecbf62f06..5d48a344b35f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 1c5218b79fc2..b40fb3a5ac8a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 309ce8efb4f6..c8a750d2674c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 79bbfe00d241..6aec56c65fa0 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_status acpi_hw_enable_all_runtime_gpes(void);
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 955114c926bd..1ee6ac9b2baf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,9 @@ void
acpi_ex_trace_point(acpi_trace_event_type type,
u8 begin, u8 *aml, char *pathname);
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 6f4fe47c955b..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de83dd22292b..4e9402c02410 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9448bc026b9b..13f050fecb49 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8fc02946d3cd..6ffcc7a0a0c2 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index da96d80e6b3a..a2a9e51d7ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6dad786a382c..65a15dee092b 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index ef068f4c864a..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index d772ff9ca07d..e8a92be5adae 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f8fee94ba708..e690f604cfa0 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index b6ae979b01b6..ebef72bf58d0 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index edfdbbef81c1..3990d509bbab 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index effe52b40dce..c5b544a006c5 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 4e88f9fc2a28..54d6e51e0b9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */
-/* restore default alignment */
-
-#pragma pack()
-
/* Union of all resource descriptors, so we can allocate the worst case */
union aml_resource {
@@ -562,6 +558,10 @@ union aml_resource {
u8 byte_item;
};
+/* restore default alignment */
+
+#pragma pack()
+
/* Interfaces used by both the disassembler and compiler */
void
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index e874c1dddefa..554ae35108bd 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 4354c175e12e..e2f00c54cb36 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 80c69af06948..c1f79d7a2026 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index c5c8380a3114..274b74255551 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 532401ecdab0..df132c9089c7 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 6e0e362e461f..57cd9e2d1109 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index e809c2aed78a..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -483,6 +482,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
+ if (this_walk_state->num_operands < obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
+ }
+
/* Init for new method, possibly wait on method mutex */
status =
@@ -539,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index eca50517ad82..5393de4dbc4c 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
+ acpi_ex_trace_args(params, index);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 555f148d666b..1bf7eec49899 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dd3059000885..5699b0872848 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index ecf793fe9919..1ed2386fab82 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index fb9ed5e1da89..baf6a1f27605 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
+ u32 prev_num_operands = walk_state->num_operands;
+ u32 new_num_operands;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
/* Create the interpreter arguments, in reverse order */
+ new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
- acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+ walk_state->num_operands = (u8)(i);
+ acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
+
+ /* Restore operand count */
+ walk_state->num_operands = (u8)(prev_num_operands);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a43336f05206..5c5c6d8a4e48 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index f7b8496c8bdd..666419b6a5c6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 541235f498c2..bfc54c914757 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 1fdd07ae862c..375a8fa43d9d 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 75338a13c802..02aaddb89df9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e78c5b9ad52..6cdd39c987b8 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 989dc01af03f..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 934b201d3820..ba65b2ea49b2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 58e1890ab25b..fadd93caf1d5 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 38f408cf13ce..eb769739420e 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index ee3b1ea656d4..d15b1d75c8ec 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 1c8cb6d924df..5a35dae945e2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index e68e876d3b84..04a23a6c3bb1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cf53b9535f18..fa3475da7ea9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 46d1b3f5582d..b03952798af5 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini - ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 24fa6433d562..86a8d41c079c 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 48bf845191d2..4b052908d2e7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 4eeeb3b7ab7e..60dacec1b121 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 95f78383bbdb..bccc672c934c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
- acpi_os_release_mutex(handler_obj->address_space.
- context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 2fb78b35565b..c248c9b162fa 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 473115309860..4d7dd0fc6b07 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bb1be42daee1..fded9bfc2436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -226,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ memcpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
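The strncpy()-to-memcpy() switch above is behavior-preserving: the copy length is exactly the source string length, so strncpy() would neither stop early nor NUL-pad; it only trips -Wstringop-truncation warnings. A standalone illustration (hypothetical buffers, not patch code):

#include <string.h>

void copy_string_to_buffer(void)
{
	char src[] = "ABCD";            /* plays the role of string.pointer, length 4 */
	unsigned char dst[4];           /* plays the role of buffer.pointer */

	/* Copies exactly 4 bytes; dst is intentionally not
	 * NUL-terminated, which is what strncpy() warned about. */
	memcpy(dst, src, sizeof(dst));
}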
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 1bea9d97652c..052c69567997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 3f86bfada510..81a07a52b73c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2e2da8790224..d8aeebaab70a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 61ff36189ace..ced3ff9d0a86 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index cf6c812a8b6d..0771934c0455 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c6f2a9166ac0..07cbac58ed21 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 65c487facdda..1fa013197fcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 9a448165bfeb..76ab73c37e90 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 20fb34b68bee..6ac7e0ca5c9d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 743c258bf2e8..a94fa4d70e99 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index d3091f619909..bf08110ed6d2 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 1af35e143ba9..cb078e39abf7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 82b1fa2d201f..1b1a006e82de 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c49b9f8de723..a390a1c2b0ab 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 873de01b8ad2..dd83631090fc 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 24a78b5e266c..4589de3f3012 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3a437e6ace5c..782ee353a709 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5241f4c01c76..6d2581ec22ad 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ;
break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 575c7a39f1aa..cbc42207496d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b01ae015e1b5..0470b2639831 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 37c3131a82fa..5b168fbc03e8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 2c384bd52b9c..7f843c9d8a06 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index f1730221ff13..36934d4f26fb 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type,
if (pathname) {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
acpi_ex_get_trace_event_name(type),
- begin ? "Begin" : "End", aml, pathname));
+ begin ? "Begin" : "End", pathname));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
"%s %s [0x%p] execution.\n",
@@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_trace_args
+ *
+ * PARAMETERS: params - AML method arguments
+ * count - Number of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace any arguments
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ex_trace_args);
+
+ for (i = 0; i < count; i++) {
+ union acpi_operand_object *obj_desc = params[i];
+
+ if (!i) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value));
+ break;
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->string.length) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ continue;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ break;
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+ if (i + 1 == count) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
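acpi_ex_trace_args() above takes an explicit count, while ACPICA's internal parameter arrays are conventionally NULL-terminated. A hedged sketch of the kind of call site this helper implies (the actual caller is not part of this hunk; names here are illustrative, not ACPICA's):

/* Hypothetical caller sketch. */
static void trace_method_entry(union acpi_operand_object **params)
{
	u32 count = 0;

	while (params && params[count]) {       /* NULL-terminated array */
		count++;
	}
	acpi_ex_trace_args(params, count);
}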
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_start_trace_method
*
* PARAMETERS: method_node - Node of the method
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index f4d4a033f166..cc10c0732218 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 790f342dcd25..a1e1fa787566 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index a9ba9190408b..631fd8e2b774 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index e0c847ab8324..386f4759c317 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e0921f08b71a..87d78bef6323 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 192c04b5a599..a5e0bccae6a4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b8de458f0368..496fd9e49f0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c31f803995c6..847cd1b2493d 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 8dbf83aeb455..9aabe30416da 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 3efb46f0dc54..366d54a1d157 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 7e5a683ae957..f05a92b88642 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90a26cb0c472..6dc20486ad51 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index fa116ebe49a3..d5b16aaec233 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 86d126fdb27d..03373e7f7978 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index fcb9de0f77a2..6ec4c646fff7 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d91153f65700..22aeeeb56cff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -194,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
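ACPI_NONSTRING, added here and in nsrepair2.c below, marks a fixed four-byte name segment as intentionally not NUL-terminated. A sketch of the assumed expansion (the real definition lives in the ACPICA platform headers, not in this hunk):

#if defined(__GNUC__)
#define ACPI_NONSTRING __attribute__((__nonstring__))
#else
#define ACPI_NONSTRING
#endif

/* "_SB_" fills all four bytes; without the attribute, newer GCC warns
 * (-Wunterminated-string-initialization) about the missing terminator. */
char name[4] ACPI_NONSTRING = "_SB_";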
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 31e551cf4ea6..959e6379bc4c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index cf57bd69616d..81995ee48c49 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index dd37fc108fce..ca137ce5674f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index b8657004190d..accfdcfb7e62 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 1bb7b71f07f1..8dbb870f40d2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
acpi_repair_function repair_function;
} acpi_repair_info;
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 06ffdb6808f5..49cc07e2ac5a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index eee396a77bae..a2ac06a26e92 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5d5bcf165298..1db831545ec8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 28582adfc0ac..6f6ae38ec044 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index d0fd55636129..c989cadf271c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 54471083ba54..496a1c1d5b0b 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
- acpi_status status2;
+ acpi_status return_status = status;
+ u8 ascending = TRUE;
ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
- if (walk_state->ascending_callback != NULL) {
+ if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}
if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = AE_CTRL_TERMINATE;
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = status;
}
}
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
+ status = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ascending = FALSE;
+ if (ACPI_SUCCESS(return_status) ||
+ return_status == AE_CTRL_TERMINATE) {
+ return_status = status;
+ }
}
}
@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
} while (op);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(return_status);
}
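The psobject.c rewrite above replaces two early-return cleanup paths with a single walk that completes every op on the scope stack. The shape is easier to see in a toy model: remember the first terminate/failure status, stop calling the ascending callback after it, and keep completing to the end (illustrative C, not ACPICA code):

#include <stdbool.h>
#include <stdio.h>

enum status { AE_OK, AE_CTRL_TERMINATE, AE_ERROR };

static enum status ascend(int op)      /* stand-in ascending callback */
{
	return op == 3 ? AE_CTRL_TERMINATE : AE_OK;
}

static enum status complete_op(int op) /* stand-in for complete_this_op */
{
	printf("completed op %d\n", op);
	return AE_OK;
}

int main(void)
{
	int ops[] = { 5, 4, 3, 2, 1 };  /* pretend scope stack */
	enum status status, return_status = AE_OK;
	bool ascending = true;

	for (int i = 0; i < 5; i++) {
		if (ascending) {
			status = ascend(ops[i]);
			if (status != AE_OK) {
				ascending = false;       /* no more callbacks */
				return_status = status;  /* but keep cleaning */
			}
		}
		status = complete_op(ops[i]);
		if (status != AE_OK && return_status == AE_OK)
			return_status = status;          /* first error wins */
	}
	return (int)return_status;
}

Every op still gets completed exactly once, which the old do/while cleanup loops guaranteed only on the AE_CTRL_TERMINATE path.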
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 39e31030e5f4..bf6103986f48 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index bccf606e08b4..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 10a072953d78..55a416e56fd8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a0035bde7556..c4e4483f0a0b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 7f7f5ecd4011..5a285d3f2cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index d550c4af4702..ada1dc304d25 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index d92817c72b8d..2f3ebcd8aebe 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6f4eace0ba69..d480de075a90 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 27384ee245f0..f92010e667cd 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,18 +272,13 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
- struct aml_resource_address address;
-
ACPI_FUNCTION_ENTRY();
- /* Avoid undefined behavior: member access within misaligned address */
-
- memcpy(&address, aml, sizeof(address));
-
/* Validate the Resource Type */
- if ((address.resource_type > 2) &&
- (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
+ if ((aml->address.resource_type > 2) &&
+ (aml->address.resource_type < 0xC0) &&
+ (aml->address.resource_type != 0x0A)) {
return (FALSE);
}
@@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- address.specific_flags;
+ aml->address.specific_flags;
}
return (TRUE);
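This hunk, and the matching ones in rscalc.c, rslist.c, and utresrc.c below, drop the on-stack memcpy workaround and dereference the AML resource in place again. That is only defined behavior if union aml_resource is byte-packed, which is the assumption being restored here (the packing pragmas live in the ACPICA amlresrc.h headers, not in this patch):

/* Assumed layout contract -- sketch of the packed address header. */
#pragma pack(1)
struct aml_resource_address {
	u8  descriptor_type;
	u16 resource_length;   /* may sit at an odd offset in the AML */
	u8  resource_type;
	u8  flags;
	u8  specific_flags;
};
#pragma pack()

With pack(1) in effect, the compiler assumes no alignment for these members, so access through an arbitrarily aligned AML pointer stays defined.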
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 6e7a152d6459..242daf45e20e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus
- common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
minimum_aml_resource_length =
acpi_gbl_resource_aml_serial_bus_sizes
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
extra_struct_bytes +=
- common_serial_bus.resource_length -
+ aml_resource->common_serial_bus.
+ resource_length -
minimum_aml_resource_length;
break;
}
@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [common_serial_bus.type] + extra_struct_bytes;
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 164c96e063c6..e46efaa889cd 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
- if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
}
} else {
conversion_table =
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index a1f10e4409a3..5b98e09fff76 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 3c126c6d306b..c6658b2f3027 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 1c1b2e284bd9..d71a73216380 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -57,8 +57,8 @@ acpi_tb_find_table(char *signature,
memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_COPY_NAMESEG(header.signature, signature);
- strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dc003c20e4d..ee9b85bc238b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 58b02e4b254b..049f6c2f1e32 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -121,6 +121,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
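CDAT is the one table handled here without the standard 36-byte ACPI header, so the usual signature print cannot apply and the length has to come from CDAT's own header. A sketch of that header as the cast above assumes it (field layout per the CDAT specification):

struct acpi_table_cdat {
	u32 length;            /* length of the entire CDAT, in bytes */
	u8  revision;
	u8  checksum;
	u8  reserved[6];
	u32 sequence;
};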
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dad7425fce3f..fa64851c7b62 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 275b52dc42e9..a8f07d2641b6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0f2a7343de3a..2a17c60a9a39 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 5b413bbab338..961577ba9486 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index be94d2fd99a7..c673d6c95e0a 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index c1fb70457e20..2418a312733a 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 2be37676edd7..259c28d3fecd 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index b054bb5eeaf0..f6e6e98e9523 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 85a85f7cf750..cabec193febb 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
} else {
/* The cache is empty, create a new object */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_MEM_TRACKING(cache->total_allocated++);
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
if ((cache->total_allocated - cache->total_freed) >
cache->max_occupied) {
cache->max_occupied =
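Moving ACPI_MEM_TRACKING() inside the #ifdef above is a no-op for generated code, since the macro already compiles away without ACPI_DBG_TRACK_ALLOCATIONS; the move just keeps all tracking statements under one guard. The assumed definition:

/* Assumed definition, from the ACPICA internal macro headers. */
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
#define ACPI_MEM_TRACKING(a)    a
#else
#define ACPI_MEM_TRACKING(a)
#endif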
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index b483894c3629..e6f6030b3a3f 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 2e17e657dfa4..80458e70ac2b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 3d71bd9245c7..9f197e293c7e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 95a4b7509e01..b82130d1a8bc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c85bfa13ac1e..e8180099d01f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -404,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
- message = "Incremement";
+ message = "Increment";
break;
case REF_DECREMENT:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3e5173d03953..abc6583ed369 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 820820ea8119..97c55a113bae 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index e62802791dcf..8cd050e9cad5 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 15c2ce91d403..eb88335dea2c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6d78504e9fbc..4bef97e8223a 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index ee6d72385c5c..123dbcbc60bc 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index f4aae8f0d3a8..272e46208263 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 99b85fd6eccf..f6ac16729e42 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29d2977d0746..d9bd80e2d32a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 42b30b9f9312..423d10569736 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
pos = string;
- if (size != ACPI_UINT32_MAX) {
- end = string + size;
- } else {
- end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
- }
+ size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
+ end = string + size;
for (; *format; ++format) {
if (*format != '%') {
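The old vsnprintf() code only special-cased the ACPI_UINT32_MAX sentinel; any other large size could still make string + size wrap past the top of the address space. Clamping size to the room left below ACPI_MAX_PTR makes the end pointer safe for every input. A small model of the arithmetic with stand-in types:

#include <stdint.h>

static char *clamp_end(char *string, uintptr_t size)
{
	uintptr_t room = UINTPTR_MAX - (uintptr_t)string;

	/* size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string)); */
	if (size > room)
		size = room;
	return string + size;   /* cannot wrap now */
}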
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cff7901f7866..e1cc3d348750 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
/* Validate the bus_type field */
- if ((common_serial_bus.type == 0) ||
- (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- common_serial_bus.type));
+ aml_resource->common_serial_bus.
+ type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index f5f5da441458..a99c4c9e3d39 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 8f10b413e928..0682554934ca 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index aa2e923462b7..56942b5f026b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 70ae0afa7939..c1702f8fba67 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3cfe7e7475f2..070c07d68dfb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
+ select ARM_SDE_INTERFACE if ARM64
help
Generic Hardware Error Source provides a way to report
 	  platform hardware errors (such as those from the chipset). It
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cd2766c69d78..77c10a7a7a9f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -131,7 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
int apei_osc_setup(void);
-int einj_get_available_error_type(u32 *type);
+int einj_get_available_error_type(u32 *type, int einj_action);
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4);
int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 5c22720f43cc..3c87953dbd19 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,7 +21,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/unaligned.h>
#include "apei-internal.h"
@@ -33,6 +33,8 @@
#define SLEEP_UNIT_MAX 5000 /* 5ms */
 /* Firmware should respond within 1 second */
#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
+#define COMPONENT_LEN 16
+#define ACPI65_EINJV2_SUPP BIT(30)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -49,6 +51,28 @@
*/
static int acpi5;
+struct syndrome_array {
+ union {
+ u8 acpi_id[COMPONENT_LEN];
+ u8 device_id[COMPONENT_LEN];
+ u8 pcie_sbdf[COMPONENT_LEN];
+ u8 vendor_id[COMPONENT_LEN];
+ } comp_id;
+ union {
+ u8 proc_synd[COMPONENT_LEN];
+ u8 mem_synd[COMPONENT_LEN];
+ u8 pcie_synd[COMPONENT_LEN];
+ u8 vendor_synd[COMPONENT_LEN];
+ } comp_synd;
+};
+
+struct einjv2_extension_struct {
+ u32 length;
+ u16 revision;
+ u16 component_arr_count;
+ struct syndrome_array component_arr[] __counted_by(component_arr_count);
+};
+
struct set_error_type_with_address {
u32 type;
u32 vendor_extension;
@@ -57,11 +81,13 @@ struct set_error_type_with_address {
u64 memory_address;
u64 memory_address_range;
u32 pcie_sbdf;
+ struct einjv2_extension_struct einjv2_struct;
};
enum {
SETWA_FLAGS_APICID = 1,
SETWA_FLAGS_MEM = 2,
SETWA_FLAGS_PCIE_SBDF = 4,
+ SETWA_FLAGS_EINJV2 = 8,
};
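The new einjv2_extension_struct ends in a flexible array annotated with __counted_by(), which lets fortified builds bounds-check element accesses against component_arr_count. A hedged sketch of how such a struct is sized and allocated (the allocation is illustrative; the driver itself maps firmware memory rather than allocating):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct einjv2_extension_struct *alloc_ext(u16 n)
{
        struct einjv2_extension_struct *ext;

        /* struct_size() = sizeof(*ext) + n * sizeof(ext->component_arr[0]), overflow-checked */
        ext = kzalloc(struct_size(ext, component_arr, n), GFP_KERNEL);
        if (!ext)
                return NULL;

        /* Keep the counter in sync before touching component_arr[] */
        ext->component_arr_count = n;
        return ext;
}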
/*
@@ -83,6 +109,11 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 max_nr_components;
+static u32 available_error_type;
+static u32 available_error_type_v2;
+static struct syndrome_array *syndrome_data;
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -149,7 +180,9 @@ static DEFINE_MUTEX(einj_mutex);
*/
bool einj_initialized __ro_after_init;
-static void *einj_param;
+static void __iomem *einj_param;
+static u32 v5param_size;
+static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
@@ -157,13 +190,13 @@ static void einj_exec_ctx_init(struct apei_exec_context *ctx)
EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
-static int __einj_get_available_error_type(u32 *type)
+static int __einj_get_available_error_type(u32 *type, int einj_action)
{
struct apei_exec_context ctx;
int rc;
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ rc = apei_exec_run(&ctx, einj_action);
if (rc)
return rc;
*type = apei_exec_ctx_get_output(&ctx);
@@ -172,17 +205,34 @@ static int __einj_get_available_error_type(u32 *type)
}
/* Get error injection capabilities of the platform */
-int einj_get_available_error_type(u32 *type)
+int einj_get_available_error_type(u32 *type, int einj_action)
{
int rc;
mutex_lock(&einj_mutex);
- rc = __einj_get_available_error_type(type);
+ rc = __einj_get_available_error_type(type, einj_action);
mutex_unlock(&einj_mutex);
return rc;
}
+static int einj_get_available_error_types(u32 *type1, u32 *type2)
+{
+ int rc;
+
+ rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (*type1 & ACPI65_EINJV2_SUPP) {
+ rc = einj_get_available_error_type(type2,
+ ACPI_EINJV2_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int einj_timedout(u64 *t)
{
if ((s64)*t < SLEEP_UNIT_MIN) {
@@ -214,24 +264,26 @@ static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
int offset = v5param->vendor_extension;
- struct vendor_error_type_extension *v;
+ struct vendor_error_type_extension v;
+ struct vendor_error_type_extension __iomem *p;
u32 sbdf;
if (!offset)
return;
- v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
- if (!v)
+ p = acpi_os_map_iomem(paddr + offset, sizeof(*p));
+ if (!p)
return;
- get_oem_vendor_struct(paddr, offset, v);
- sbdf = v->pcie_sbdf;
+ memcpy_fromio(&v, p, sizeof(v));
+ get_oem_vendor_struct(paddr, offset, &v);
+ sbdf = v.pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
- v->vendor_id, v->device_id, v->rev_id);
- acpi_os_unmap_iomem(v, sizeof(*v));
+ v.vendor_id, v.device_id, v.rev_id);
+ acpi_os_unmap_iomem(p, sizeof(v));
}
-static void *einj_get_parameter_address(void)
+static void __iomem *einj_get_parameter_address(void)
{
int i;
u64 pa_v4 = 0, pa_v5 = 0;
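check_vendor_extension() now treats the mapping strictly as __iomem: the structure is snapshotted with memcpy_fromio() before any field is read, instead of dereferencing the mapped pointer. The general shape of that discipline, as a small sketch (the struct here is hypothetical):

#include <linux/io.h>

struct vendor_snapshot {
        u32 pcie_sbdf;
        u32 vendor_id;
};

static int read_vendor(void __iomem *p, struct vendor_snapshot *out)
{
        if (!p)
                return -ENODEV;
        /* Ordered, access-size-safe copy out of device/firmware memory */
        memcpy_fromio(out, p, sizeof(*out));
        return 0;
}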
@@ -252,26 +304,50 @@ static void *einj_get_parameter_address(void)
entry++;
}
if (pa_v5) {
- struct set_error_type_with_address *v5param;
+ struct set_error_type_with_address v5param;
+ struct set_error_type_with_address __iomem *p;
- v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param));
- if (v5param) {
+ v5param_size = sizeof(v5param);
+ p = acpi_os_map_iomem(pa_v5, sizeof(*p));
+ if (p) {
+ int offset, len;
+
+ memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
- check_vendor_extension(pa_v5, v5param);
- return v5param;
+ check_vendor_extension(pa_v5, &v5param);
+ if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
+ len = v5param.einjv2_struct.length;
+ offset = offsetof(struct einjv2_extension_struct, component_arr);
+ max_nr_components = (len - offset) /
+ sizeof(v5param.einjv2_struct.component_arr[0]);
+ /*
+ * The first call to acpi_os_map_iomem above does not include the
+ * component array, instead it is used to read and calculate maximum
+			 * component array; instead it is used to read and calculate the
+			 * maximum number of components supported by the system. Below,
+			 * the mapping is expanded to include the component array.
+ acpi_os_unmap_iomem(p, v5param_size);
+ offset = offsetof(struct set_error_type_with_address, einjv2_struct);
+ v5param_size = offset + struct_size(&v5param.einjv2_struct,
+ component_arr, max_nr_components);
+ p = acpi_os_map_iomem(pa_v5, v5param_size);
+ }
+ return p;
}
}
if (param_extension && pa_v4) {
- struct einj_parameter *v4param;
+ struct einj_parameter v4param;
+ struct einj_parameter __iomem *p;
- v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param));
- if (!v4param)
+ p = acpi_os_map_iomem(pa_v4, sizeof(*p));
+ if (!p)
return NULL;
- if (v4param->reserved1 || v4param->reserved2) {
- acpi_os_unmap_iomem(v4param, sizeof(*v4param));
+ memcpy_fromio(&v4param, p, sizeof(v4param));
+ if (v4param.reserved1 || v4param.reserved2) {
+ acpi_os_unmap_iomem(p, sizeof(v4param));
return NULL;
}
- return v4param;
+ return p;
}
return NULL;
@@ -317,7 +393,8 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u64 param1, u64 param2)
{
- struct acpi_einj_trigger *trigger_tab = NULL;
+ struct acpi_einj_trigger trigger_tab;
+ struct acpi_einj_trigger *full_trigger_tab;
struct apei_exec_context trigger_ctx;
struct apei_resources trigger_resources;
struct acpi_whea_header *trigger_entry;
@@ -325,54 +402,60 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u32 table_size;
int rc = -EIO;
struct acpi_generic_address *trigger_param_region = NULL;
+ struct acpi_einj_trigger __iomem *p = NULL;
- r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ r = request_mem_region(trigger_paddr, sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
- sizeof(*trigger_tab) - 1);
+ sizeof(trigger_tab) - 1);
goto out;
}
- trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
- if (!trigger_tab) {
+ p = ioremap_cache(trigger_paddr, sizeof(*p));
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
- rc = einj_check_trigger_header(trigger_tab);
+ memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab));
+ rc = einj_check_trigger_header(&trigger_tab);
if (rc) {
pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
/* No action structures in the TRIGGER_ERROR table, nothing to do */
- if (!trigger_tab->entry_count)
+ if (!trigger_tab.entry_count)
goto out_rel_header;
rc = -EIO;
- table_size = trigger_tab->table_size;
- r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab),
+ table_size = trigger_tab.table_size;
+ full_trigger_tab = kmalloc(table_size, GFP_KERNEL);
+ if (!full_trigger_tab)
+ goto out_rel_header;
+ r = request_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
- (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + sizeof(trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
- goto out_rel_header;
+ goto out_free_trigger_tab;
}
- iounmap(trigger_tab);
- trigger_tab = ioremap_cache(trigger_paddr, table_size);
- if (!trigger_tab) {
+ iounmap(p);
+ p = ioremap_cache(trigger_paddr, table_size);
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
+ memcpy_fromio(full_trigger_tab, p, table_size);
trigger_entry = (struct acpi_whea_header *)
- ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger));
apei_resources_init(&trigger_resources);
apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
ARRAY_SIZE(einj_ins_type),
- trigger_entry, trigger_tab->entry_count);
+ trigger_entry, trigger_tab.entry_count);
rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
if (rc)
goto out_fini;
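__einj_error_trigger() keeps the two-stage mapping, map just the header, read table_size, then remap the whole table, but now also copies the table into a kmalloc() buffer so all parsing happens through an ordinary kernel pointer. A condensed sketch of that shape (the header fields mirror struct acpi_einj_trigger; error handling is abbreviated):

#include <linux/io.h>
#include <linux/slab.h>

struct tbl_hdr { u32 header_size; u32 revision; u32 table_size; u32 entry_count; };

static void *copy_whole_table(phys_addr_t pa)
{
        struct tbl_hdr hdr;
        void __iomem *p;
        void *copy = NULL;

        p = ioremap_cache(pa, sizeof(hdr));
        if (!p)
                return NULL;
        memcpy_fromio(&hdr, p, sizeof(hdr));
        iounmap(p);

        if (hdr.table_size < sizeof(hdr))       /* don't trust firmware blindly */
                return NULL;

        p = ioremap_cache(pa, hdr.table_size);
        if (!p)
                return NULL;
        copy = kmalloc(hdr.table_size, GFP_KERNEL);
        if (copy)
                memcpy_fromio(copy, p, hdr.table_size);
        iounmap(p);
        return copy;
}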
@@ -390,7 +473,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
apei_resources_init(&addr_resources);
trigger_param_region = einj_get_trigger_parameter_region(
- trigger_tab, param1, param2);
+ full_trigger_tab, param1, param2);
if (trigger_param_region) {
rc = apei_resources_add(&addr_resources,
trigger_param_region->address,
@@ -419,23 +502,33 @@ out_release:
out_fini:
apei_resources_fini(&trigger_resources);
out_rel_entry:
- release_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab));
+out_free_trigger_tab:
+ kfree(full_trigger_tab);
out_rel_header:
- release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr, sizeof(trigger_tab));
out:
- if (trigger_tab)
- iounmap(trigger_tab);
+ if (p)
+ iounmap(p);
return rc;
}
+static bool is_end_of_list(u8 *val)
+{
+ for (int i = 0; i < COMPONENT_LEN; ++i) {
+ if (val[i] != 0xFF)
+ return false;
+ }
+ return true;
+}
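An equivalent one-liner exists in <linux/string.h>: memchr_inv() returns NULL when every byte matches, so is_end_of_list() could presumably also be written as

        return !memchr_inv(val, 0xFF, COMPONENT_LEN);

with the open-coded loop kept, one assumes, for simplicity.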
static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
- int rc;
+ int i, rc;
einj_exec_ctx_init(&ctx);
@@ -444,8 +537,13 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
apei_exec_ctx_set_input(&ctx, type);
if (acpi5) {
- struct set_error_type_with_address *v5param = einj_param;
+ struct set_error_type_with_address *v5param;
+ v5param = kmalloc(v5param_size, GFP_KERNEL);
+ if (!v5param)
+ return -ENOMEM;
+
+ memcpy_fromio(v5param, einj_param, v5param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -465,8 +563,21 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
v5param->flags = flags;
v5param->memory_address = param1;
v5param->memory_address_range = param2;
- v5param->apicid = param3;
- v5param->pcie_sbdf = param4;
+
+ if (is_v2) {
+ for (i = 0; i < max_nr_components; i++) {
+ if (is_end_of_list(syndrome_data[i].comp_id.acpi_id))
+ break;
+ v5param->einjv2_struct.component_arr[i].comp_id =
+ syndrome_data[i].comp_id;
+ v5param->einjv2_struct.component_arr[i].comp_synd =
+ syndrome_data[i].comp_synd;
+ }
+ v5param->einjv2_struct.component_arr_count = i;
+ } else {
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
+ }
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -490,15 +601,19 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
+ memcpy_toio(einj_param, v5param, v5param_size);
+ kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
if (rc)
return rc;
if (einj_param) {
- struct einj_parameter *v4param = einj_param;
+ struct einj_parameter v4param;
- v4param->param1 = param1;
- v4param->param2 = param2;
+ memcpy_fromio(&v4param, einj_param, sizeof(v4param));
+ v4param.param1 = param1;
+ v4param.param2 = param2;
+ memcpy_toio(einj_param, &v4param, sizeof(v4param));
}
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
@@ -541,6 +656,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all types of address except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when we check if
+ * region intersects with known resource. So do an allow list check for
+	 * a region intersects with a known resource. So do an allow-list check
+	 * for IORES_DESCs that are definitely or most likely not MMIO.
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -549,10 +701,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
- if (flags && (flags &
- ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM |
+ SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2)))
return -EINVAL;
+ /* check if type is a valid EINJv2 error type */
+ if (is_v2) {
+ if (!(type & available_error_type_v2))
+ return -EINVAL;
+ }
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -582,19 +739,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+	 * anything that is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
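To make the mask arithmetic concrete: with param1 = 0x12345678 and param2 = 0xfffffffffffff000, base_addr = param1 & param2 = 0x12345000 and size = ~param2 + 1 = 0x1000; that is, the injection target is the whole 4 KiB page containing param1, which is why the code insists that param2 cover at least PAGE_MASK granularity.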
@@ -630,6 +783,8 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
+static char einj_buf[32];
+static bool einj_v2_enabled;
static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(0), "Processor Correctable" },
{ BIT(1), "Processor Uncorrectable non-fatal" },
@@ -646,29 +801,35 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(31), "Vendor Defined Error Types" },
};
+static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = {
+ { BIT(0), "EINJV2 Processor Error" },
+ { BIT(1), "EINJV2 Memory Error" },
+ { BIT(2), "EINJV2 PCI Express Error" },
+};
+
static int available_error_type_show(struct seq_file *m, void *v)
{
- int rc;
- u32 error_type = 0;
- rc = einj_get_available_error_type(&error_type);
- if (rc)
- return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (error_type & einj_error_type_string[pos].mask)
+ if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
-
+ if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) {
+ for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) {
+ if (available_error_type_v2 & einjv2_error_type_string[pos].mask)
+ seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask,
+ einjv2_error_type_string[pos].str);
+ }
+ }
return 0;
}
DEFINE_SHOW_ATTRIBUTE(available_error_type);
-static int error_type_get(void *data, u64 *val)
+static ssize_t error_type_get(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- *val = error_type;
-
- return 0;
+ return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf));
}
bool einj_is_cxl_error_type(u64 type)
@@ -678,8 +839,7 @@ bool einj_is_cxl_error_type(u64 type)
int einj_validate_error_type(u64 type)
{
- u32 tval, vendor, available_error_type = 0;
- int rc;
+ u32 tval, vendor;
/* Only low 32 bits for error type are valid */
if (type & GENMASK_ULL(63, 32))
@@ -695,20 +855,36 @@ int einj_validate_error_type(u64 type)
/* Only one error type can be specified */
if (tval & (tval - 1))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(type & available_error_type))
+ if (!vendor)
+ if (!(type & (available_error_type | available_error_type_v2)))
return -EINVAL;
- }
return 0;
}
-static int error_type_set(void *data, u64 val)
+static ssize_t error_type_set(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
+ u64 val;
+
+ /* Leave the last character for the NUL terminator */
+ if (count > sizeof(einj_buf) - 1)
+ return -EINVAL;
+
+ memset(einj_buf, 0, sizeof(einj_buf));
+ if (copy_from_user(einj_buf, buf, count))
+ return -EFAULT;
+
+ if (strncmp(einj_buf, "V2_", 3) == 0) {
+ if (!sscanf(einj_buf, "V2_%llx", &val))
+ return -EINVAL;
+ is_v2 = true;
+ } else {
+ if (!sscanf(einj_buf, "%llx", &val))
+ return -EINVAL;
+ is_v2 = false;
+ }
rc = einj_validate_error_type(val);
if (rc)
@@ -716,17 +892,24 @@ static int error_type_set(void *data, u64 val)
error_type = val;
- return 0;
+ return count;
}
-DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set,
- "0x%llx\n");
+static const struct file_operations error_type_fops = {
+ .read = error_type_get,
+ .write = error_type_set,
+};
static int error_inject_set(void *data, u64 val)
{
if (!error_type)
return -EINVAL;
+ if (is_v2)
+ error_flags |= SETWA_FLAGS_EINJV2;
+ else
+ error_flags &= ~SETWA_FLAGS_EINJV2;
+
return einj_error_inject(error_type, error_flags, error_param1, error_param2,
error_param3, error_param4);
}
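Switching error_type from DEFINE_DEBUGFS_ATTRIBUTE to hand-rolled file operations is what allows the optional "V2_" prefix (writing V2_0x2, for instance, selects an EINJv2 memory error). The accepted formats, demonstrated with plain sscanf() semantics in a self-contained user-space snippet:

#include <stdio.h>

int main(void)
{
        unsigned long long val;

        /* sscanf() returns the number of successful conversions; 0 means no match */
        if (sscanf("V2_0x2", "V2_%llx", &val) == 1)
                printf("EINJv2 type 0x%llx\n", val);    /* prints 0x2 */
        if (sscanf("0x8", "%llx", &val) == 1)
                printf("legacy type 0x%llx\n", val);    /* prints 0x8 */
        return 0;
}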
@@ -749,17 +932,104 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_probe(struct platform_device *pdev)
+static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off)
+{
+ char output[2 * COMPONENT_LEN + 1];
+ u8 *data = f->f_inode->i_private;
+ int i;
+
+ if (*off >= sizeof(output))
+ return 0;
+
+ for (i = 0; i < COMPONENT_LEN; i++)
+ sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]);
+ output[2 * COMPONENT_LEN] = '\n';
+
+ return simple_read_from_buffer(buf, count, off, output, sizeof(output));
+}
+
+static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off)
+{
+ char input[2 + 2 * COMPONENT_LEN + 2];
+ u8 *save = f->f_inode->i_private;
+ u8 tmp[COMPONENT_LEN];
+ char byte[3] = {};
+ char *s, *e;
+ ssize_t c;
+ long val;
+ int i;
+
+ /* Require that user supply whole input line in one write(2) syscall */
+ if (*off)
+ return -EINVAL;
+
+ c = simple_write_to_buffer(input, sizeof(input), off, buf, count);
+ if (c < 0)
+ return c;
+
+ if (c < 1 || input[c - 1] != '\n')
+ return -EINVAL;
+
+ /* Empty line means invalidate this entry */
+ if (c == 1) {
+ memset(save, 0xff, COMPONENT_LEN);
+ return c;
+ }
+
+ if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X'))
+ s = input + 2;
+ else
+ s = input;
+ e = input + c - 1;
+
+ for (i = 0; i < COMPONENT_LEN; i++) {
+ byte[1] = *--e;
+ byte[0] = e > s ? *--e : '0';
+ if (kstrtol(byte, 16, &val))
+ return -EINVAL;
+ tmp[i] = val;
+ if (e <= s)
+ break;
+ }
+ while (++i < COMPONENT_LEN)
+ tmp[i] = 0;
+
+ memcpy(save, tmp, COMPONENT_LEN);
+
+ return c;
+}
+
+static const struct file_operations u128_fops = {
+ .read = u128_read,
+ .write = u128_write,
+};
+
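Worked example of the codec above: writing "0x1234\n" is parsed from the end two nibbles at a time, so save[0] = 0x34, save[1] = 0x12, and the remaining fourteen bytes are zeroed; the value is stored little-endian. u128_read() then prints data[15] down to data[0], reproducing the 32-digit string ending in 1234, and writing a bare newline resets the entry to all 0xFF, the end-of-list marker that is_end_of_list() checks.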
+static bool setup_einjv2_component_files(void)
+{
+ char name[32];
+
+ syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL);
+ if (!syndrome_data)
+ return false;
+
+ for (int i = 0; i < max_nr_components; i++) {
+ sprintf(name, "component_id%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_id, &u128_fops);
+ sprintf(name, "component_syndrome%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_synd, &u128_fops);
+ }
+
+ return true;
+}
+
+static int __init einj_probe(struct faux_device *fdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
- if (acpi_disabled) {
- pr_debug("ACPI disabled.\n");
- return -ENODEV;
- }
-
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
@@ -777,6 +1047,10 @@ static int __init einj_probe(struct platform_device *pdev)
goto err_put_table;
}
+ rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2);
+ if (rc)
+ goto err_put_table;
+
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
@@ -821,6 +1095,8 @@ static int __init einj_probe(struct platform_device *pdev)
&error_param4);
debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
+ if (available_error_type & ACPI65_EINJV2_SUPP)
+ einj_v2_enabled = setup_einjv2_component_files();
}
if (vendor_dev[0]) {
@@ -851,13 +1127,13 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
if (einj_param) {
acpi_size size = (acpi5) ?
- sizeof(struct set_error_type_with_address) :
+ v5param_size :
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
@@ -869,47 +1145,34 @@ static void __exit einj_remove(struct platform_device *pdev)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ kfree(syndrome_data);
acpi_put_table((struct acpi_table_header *)einj_tab);
}
-static struct platform_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct platform_driver einj_driver __refdata = {
- .remove_new = __exit_p(einj_remove),
- .driver = {
- .name = "acpi-einj",
- },
+static struct faux_device *einj_dev;
+static struct faux_device_ops einj_device_ops = {
+ .probe = einj_probe,
+ .remove = einj_remove,
};
static int __init einj_init(void)
{
- struct platform_device_info einj_dev_info = {
- .name = "acpi-einj",
- .id = -1,
- };
- int rc;
+ if (acpi_disabled) {
+ pr_debug("ACPI disabled.\n");
+ return -ENODEV;
+ }
- einj_dev = platform_device_register_full(&einj_dev_info);
- if (IS_ERR(einj_dev))
- return PTR_ERR(einj_dev);
+ einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- rc = platform_driver_probe(&einj_driver, einj_probe);
- einj_initialized = rc == 0;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- platform_driver_unregister(&einj_driver);
-
- platform_device_unregister(einj_dev);
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
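The conversion from a platform device to a faux device follows the pattern below, a minimal sketch of the drivers/base/faux.c API as it is used in this patch. Faux devices suit drivers like EINJ that want probe/remove callbacks but have no physical bus presence:

#include <linux/module.h>
#include <linux/device/faux.h>

static int demo_probe(struct faux_device *fdev)
{
        return 0;               /* bind-time setup goes here */
}

static void demo_remove(struct faux_device *fdev)
{
        /* teardown goes here */
}

static struct faux_device_ops demo_ops = {
        .probe  = demo_probe,
        .remove = demo_remove,
};

static struct faux_device *demo;

static int __init demo_init(void)
{
        demo = faux_device_create("demo", NULL, &demo_ops);
        return demo ? 0 : -ENODEV;      /* NULL on failure, like the EINJ code checks */
}

static void __exit demo_exit(void)
{
        faux_device_destroy(demo);
}

module_init(demo_init);
module_exit(demo_exit);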
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index a4e709937236..e70a416ec925 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -30,7 +30,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
int cxl_err, rc;
u32 available_error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE);
if (rc)
return rc;
@@ -45,7 +45,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, "CXL");
static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
{
@@ -83,7 +83,7 @@ int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0),
0, 0);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, "CXL");
int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
{
@@ -104,10 +104,10 @@ int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
return einj_error_inject(type, 0x4, 0, 0, 0, param4);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, "CXL");
bool einj_cxl_is_initialized(void)
{
return einj_initialized;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, "CXL");
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 246076341e8c..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ada93cfde9ba..97ee19f2cae0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
-static int ghes_panic_timeout __read_mostly = 30;
-
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -466,28 +464,41 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-/*
- * Called as task_work before returning to user-space.
- * Ensure any queued work has been done before we return to the context that
- * triggered the notification.
+/**
+ * struct ghes_task_work - for synchronous RAS event
+ *
+ * @twork: callback_head for task work
+ * @pfn: page frame number of corrupted page
+ * @flags: work control flags
+ *
+ * Structure to pass task work to be handled before
+ * returning to user-space via task_work_add().
*/
-static void ghes_kick_task_work(struct callback_head *head)
+struct ghes_task_work {
+ struct callback_head twork;
+ u64 pfn;
+ int flags;
+};
+
+static void memory_failure_cb(struct callback_head *twork)
{
- struct acpi_hest_generic_status *estatus;
- struct ghes_estatus_node *estatus_node;
- u32 node_len;
+ struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork);
+ int ret;
- estatus_node = container_of(head, struct ghes_estatus_node, task_work);
- if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
- memory_failure_queue_kick(estatus_node->task_work_cpu);
+ ret = memory_failure(twcb->pfn, twcb->flags);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
- estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+ if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
+ return;
+
+ pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ twcb->pfn, current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
}
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
+ struct ghes_task_work *twcb;
unsigned long pfn;
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
@@ -501,6 +512,18 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
return false;
}
+ if (flags == MF_ACTION_REQUIRED && current->mm) {
+ twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
+ if (!twcb)
+ return false;
+
+ twcb->pfn = pfn;
+ twcb->flags = flags;
+ init_task_work(&twcb->twork, memory_failure_cb);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
+ return true;
+ }
+
memory_failure_queue(pfn, flags);
return true;
}
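ghes_do_memory_failure() now runs memory_failure() via task_work for action-required faults, so recovery executes in the faulting task's own context just before it returns to user space. The bare pattern looks roughly like this (a sketch: the real code allocates from the pre-sized ghes_estatus_pool because this path can run from NMI-like contexts where kmalloc() is unsafe):

#include <linux/task_work.h>
#include <linux/slab.h>

struct demo_twork {
        struct callback_head twork;
        unsigned long pfn;
};

static void demo_cb(struct callback_head *twork)
{
        struct demo_twork *dw = container_of(twork, struct demo_twork, twork);

        pr_info("handling pfn %#lx in task context\n", dw->pfn);
        kfree(dw);
}

static int demo_defer(unsigned long pfn)
{
        struct demo_twork *dw = kmalloc(sizeof(*dw), GFP_ATOMIC);

        if (!dw)
                return -ENOMEM;
        dw->pfn = pfn;
        init_task_work(&dw->twork, demo_cb);
        /* Runs demo_cb() when "current" next returns to user space */
        return task_work_add(current, &dw->twork, TWA_RESUME);
}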
@@ -676,6 +699,105 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries */
+#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
+static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
+ CXL_CPER_PROT_ERR_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
+static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
+struct work_struct *cxl_cper_prot_err_work;
+
+static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+ pr_err_ratelimited("CXL CPER invalid agent type\n");
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+ return;
+ }
+
+ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+ prot_err->err_len);
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+ pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+ if (!cxl_cper_prot_err_work)
+ return;
+
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
+
+ dvsec_start = (u8 *)(prot_err + 1);
+ cap_start = dvsec_start + prot_err->dvsec_len;
+
+ memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
+ wd.severity = cper_severity_to_aer(severity);
+ break;
+ default:
+ pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
+ prot_err->agent_type);
+ return;
+ }
+
+ if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_prot_err_work);
+#endif
+}
+
+int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
+
+int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
+
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_prot_err_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
+
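The register/unregister/kfifo-get triple defines a hand-off protocol: GHES produces protocol-error records into a fixed-depth kfifo and pokes a consumer-owned work_struct. The consumer side implied by this API would look roughly like the sketch below (hypothetical names; the real consumer lives in the CXL core):

#include <linux/workqueue.h>

static void demo_prot_err_fn(struct work_struct *work)
{
        struct cxl_cper_prot_err_work_data wd;

        /* Drain everything GHES queued before this work ran */
        while (cxl_cper_prot_err_kfifo_get(&wd))
                pr_info("CXL protocol error, AER severity %d\n", wd.severity);
}

static DECLARE_WORK(demo_prot_err_work, demo_prot_err_fn);

static int __init demo_init(void)
{
        return cxl_cper_register_prot_err_work(&demo_prot_err_work);
}

static void __exit demo_exit(void)
{
        cxl_cper_unregister_prot_err_work(&demo_prot_err_work);
}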
/* Room for 8 entries for each of the 4 event log queues */
#define CXL_CPER_FIFO_DEPTH 32
DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
@@ -726,7 +848,7 @@ int cxl_cper_register_work(struct work_struct *work)
cxl_cper_work = work;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");
int cxl_cper_unregister_work(struct work_struct *work)
{
@@ -737,15 +859,15 @@ int cxl_cper_unregister_work(struct work_struct *work)
cxl_cper_work = NULL;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
return kfifo_get(&cxl_cper_fifo, wd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
-static bool ghes_do_proc(struct ghes *ghes,
+static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -779,6 +901,10 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_prot_err(prot_err, gdata->error_severity);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
@@ -801,7 +927,16 @@ static bool ghes_do_proc(struct ghes *ghes,
}
}
- return queued;
+ /*
+ * If no memory failure work is queued for abnormal synchronous
+ * errors, do a force kill.
+ */
+ if (sync && !queued) {
+ dev_err(ghes->dev,
+ HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
+ current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
+ }
}
static void __ghes_print_estatus(const char *pfx,
@@ -983,14 +1118,18 @@ static void __ghes_panic(struct ghes *ghes,
struct acpi_hest_generic_status *estatus,
u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
+ const char *msg = GHES_PFX "Fatal hardware error";
+
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
- /* reboot to log the error! */
if (!panic_timeout)
- panic_timeout = ghes_panic_timeout;
- panic("Fatal hardware error!");
+ pr_emerg("%s but panic disabled\n", msg);
+
+ panic(msg);
}
static int ghes_proc(struct ghes *ghes)
@@ -1035,7 +1174,7 @@ static void ghes_add_timer(struct ghes *ghes)
static void ghes_poll_func(struct timer_list *t)
{
- struct ghes *ghes = from_timer(ghes, t, timer);
+ struct ghes *ghes = timer_container_of(ghes, t, timer);
unsigned long flags;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
@@ -1068,12 +1207,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
- rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
@@ -1103,9 +1240,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- bool task_work_pending;
u32 len, node_len;
- int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -1120,25 +1255,16 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
+
+ ghes_do_proc(estatus_node->ghes, estatus);
+
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
-
- if (task_work_pending && current->mm) {
- estatus_node->task_work.func = ghes_kick_task_work;
- estatus_node->task_work_cpu = smp_processor_id();
- ret = task_work_add(current, &estatus_node->task_work,
- TWA_RESUME);
- if (ret)
- estatus_node->task_work.func = NULL;
- }
-
- if (!estatus_node->task_work.func)
- gen_pool_free(ghes_estatus_pool,
- (unsigned long)estatus_node, node_len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
llnode = next;
}
@@ -1199,7 +1325,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
- estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
@@ -1605,14 +1730,14 @@ static struct platform_driver ghes_platform_driver = {
.name = "GHES",
},
.probe = ghes_probe,
- .remove_new = ghes_remove,
+ .remove = ghes_remove,
};
void __init acpi_ghes_init(void)
{
int rc;
- sdei_init();
+ acpi_sdei_init();
if (acpi_disabled)
return;
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index f5f21dd0d277..e0df3daa4abf 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -88,7 +88,7 @@ static struct platform_driver agdi_driver = {
.name = "agdi",
},
.probe = agdi_probe,
- .remove_new = agdi_remove,
+ .remove = agdi_remove,
};
void __init acpi_agdi_init(void)
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index 52b2abf88689..f30f138352b7 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -26,6 +26,11 @@ void acpi_arch_dma_setup(struct device *dev)
else
end = (1ULL << 32) - 1;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ return;
+ }
+
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
end = dma_range_map_max(map);
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index c0e77c1c8e09..fd995a1d3d24 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,19 +36,25 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline __init void *next_platform_timer(void *platform_timer)
+static __init bool platform_timer_valid(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
- platform_timer += gh->length;
- if (platform_timer < acpi_gtdt_desc.gtdt_end)
- return platform_timer;
+ return (platform_timer >= (void *)(acpi_gtdt_desc.gtdt + 1) &&
+ platform_timer < acpi_gtdt_desc.gtdt_end &&
+ gh->length != 0 &&
+ platform_timer + gh->length <= acpi_gtdt_desc.gtdt_end);
+}
+
+static __init void *next_platform_timer(void *platform_timer)
+{
+ struct acpi_gtdt_header *gh = platform_timer;
- return NULL;
+ return platform_timer + gh->length;
}
#define for_each_platform_timer(_g) \
- for (_g = acpi_gtdt_desc.platform_timer; _g; \
+ for (_g = acpi_gtdt_desc.platform_timer; platform_timer_valid(_g);\
_g = next_platform_timer(_g))
static inline bool is_timer_block(void *platform_timer)
@@ -157,6 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
+ u32 cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -176,14 +183,22 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
return 0;
}
- platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
- if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) {
- pr_err(FW_BUG "invalid timer data.\n");
- return -EINVAL;
+ acpi_gtdt_desc.platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
+ for_each_platform_timer(platform_timer)
+ cnt++;
+
+ if (cnt != gtdt->platform_timer_count) {
+ cnt = min(cnt, gtdt->platform_timer_count);
+ pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt);
}
- acpi_gtdt_desc.platform_timer = platform_timer;
+
+ if (!cnt) {
+ acpi_gtdt_desc.platform_timer = NULL;
+ return 0;
+ }
+
if (platform_timer_count)
- *platform_timer_count = gtdt->platform_timer_count;
+ *platform_timer_count = cnt;
return 0;
}
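The reworked iterator is the standard defence for variable-length firmware records: check that each header fits, has a sane length, and ends inside the table before trusting it, then count what survives. The generic shape, under the assumption of a { type, length } record header like GTDT's struct acpi_gtdt_header:

#include <linux/types.h>

struct rec_hdr {
        u8  type;
        u16 length;
} __packed;

static unsigned int count_records(void *start, void *end)
{
        unsigned int n = 0;

        for (struct rec_hdr *r = start;
             (void *)r + sizeof(*r) <= end &&
             r->length >= sizeof(*r) &&
             (void *)r + r->length <= end;
             r = (void *)r + r->length)
                n++;

        return n;
}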
@@ -283,7 +298,7 @@ error:
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
- } while (i-- >= 0 && gtdt_frame--);
+ } while (i-- > 0 && gtdt_frame--);
return -EINVAL;
}
@@ -352,7 +367,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
- res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ res[2] = DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
@@ -373,11 +388,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -399,20 +414,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ gwdt_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", gwdt_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c
index d0c8aed90fd1..7a47d8095a7d 100644
--- a/drivers/acpi/arm64/init.c
+++ b/drivers/acpi/arm64/init.c
@@ -2,7 +2,7 @@
#include <linux/acpi.h>
#include "init.h"
-void __init acpi_arm_init(void)
+void __init acpi_arch_init(void)
{
if (IS_ENABLED(CONFIG_ACPI_AGDI))
acpi_agdi_init();
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 4c745a26226b..65f0f56ad753 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
- if (!new_sids)
+ if (!new_sids) {
+ kfree(sids);
return NULL;
+ }
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
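The fix encodes the general krealloc rule: on failure it returns NULL and leaves the original allocation intact, so assigning the result straight back to the only pointer would leak it. In isolation:

#include <linux/slab.h>

static u32 *grow_array(u32 *arr, size_t new_n)
{
        u32 *tmp = krealloc_array(arr, new_n, sizeof(*arr), GFP_KERNEL);

        if (!tmp) {
                kfree(arr);     /* old buffer is still valid, release it */
                return NULL;
        }
        return tmp;             /* may or may not equal arr */
}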
@@ -1218,6 +1220,17 @@ static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
+static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
+{
+ struct acpi_iort_memory_access *memory_access;
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ memory_access =
+ (struct acpi_iort_memory_access *)&pci_rc->memory_properties;
+ return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
+}
+
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
{
@@ -1335,6 +1348,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && iort_pci_rc_supports_ats(node))
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ if (fwspec && iort_pci_rc_supports_canwbs(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
} else {
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
@@ -1703,6 +1718,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
{"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 65fa3444367a..67b76492c839 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -92,7 +92,7 @@ enum {
struct acpi_battery {
struct mutex lock;
- struct mutex sysfs_lock;
+ struct mutex update_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
@@ -279,8 +279,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
- val->intval = battery->capacity_now * 100/
- full_capacity;
+ val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL,
+ full_capacity);
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
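Concretely: with capacity_now = 2994 and full_capacity = 3000, the old expression 2994 * 100 / 3000 truncates 99.8 down to 99, while DIV_ROUND_CLOSEST_ULL(2994 * 100ULL, 3000) rounds to 100; the 100ULL also forces the multiplication into 64 bits, avoiding overflow for large capacity values.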
@@ -717,7 +717,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
}
list_del_init(&hook->list);
- pr_info("extension unregistered: %s\n", hook->name);
+ pr_info("hook unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
@@ -751,18 +751,18 @@ void battery_hook_register(struct acpi_battery_hook *hook)
if (hook->add_battery(battery->bat, hook)) {
/*
 			 * If add_battery() returns non-zero,
- * the registration of the extension has failed,
+ * the registration of the hook has failed,
* and we will not add it to the list of loaded
* hooks.
*/
- pr_err("extension failed to load: %s", hook->name);
+ pr_err("hook failed to load: %s", hook->name);
battery_hook_unregister_unlocked(hook);
goto end;
}
power_supply_changed(battery->bat);
}
- pr_info("new extension: %s\n", hook->name);
+ pr_info("new hook: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
@@ -805,10 +805,10 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
- * The notification of the extensions has failed, to
- * prevent further errors we will unload the extension.
+				 * The notification of the hook has failed; to
+				 * prevent further errors, we unload the hook.
*/
- pr_err("error in extension, unloading: %s",
+ pr_err("error in hook, unloading: %s",
hook_node->name);
battery_hook_unregister_unlocked(hook_node);
}
@@ -853,6 +853,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
struct power_supply_config psy_cfg = {
.drv_data = battery,
.attr_grp = acpi_battery_groups,
+ .no_wakeup_source = true,
};
bool full_cap_broken = false;
@@ -888,7 +889,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat_desc.get_property = acpi_battery_get_property;
- battery->bat = power_supply_register_no_ws(&battery->device->dev,
+ battery->bat = power_supply_register(&battery->device->dev,
&battery->bat_desc, &psy_cfg);
if (IS_ERR(battery->bat)) {
@@ -903,15 +904,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- mutex_lock(&battery->sysfs_lock);
- if (!battery->bat) {
- mutex_unlock(&battery->sysfs_lock);
+ if (!battery->bat)
return;
- }
+
battery_hook_remove_battery(battery);
power_supply_unregister(battery->bat);
battery->bat = NULL;
- mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
@@ -1071,6 +1069,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
if (!battery)
return;
+
+ guard(mutex)(&battery->update_lock);
+
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
@@ -1093,21 +1094,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
}
static int battery_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
- int result;
- switch (mode) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
+ if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
+ guard(mutex)(&battery->update_lock);
+
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
+ int result;
+
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1119,7 +1121,6 @@ static int battery_notify(struct notifier_block *nb,
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
- break;
}
return 0;
@@ -1197,6 +1198,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
+ guard(mutex)(&battery->update_lock);
+
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
@@ -1207,6 +1210,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
return ret;
}
+static void sysfs_battery_cleanup(struct acpi_battery *battery)
+{
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1218,15 +1228,21 @@ static int acpi_battery_add(struct acpi_device *device)
if (device->dep_unmet)
return -EPROBE_DEFER;
- battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
+ battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- mutex_init(&battery->lock);
- mutex_init(&battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->lock);
+ if (result)
+ return result;
+
+ result = devm_mutex_init(&device->dev, &battery->update_lock);
+ if (result)
+ return result;
+
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
@@ -1238,7 +1254,9 @@ static int acpi_battery_add(struct acpi_device *device)
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
- register_pm_notifier(&battery->pm_nb);
+ result = register_pm_notifier(&battery->pm_nb);
+ if (result)
+ goto fail;
device_init_wakeup(&device->dev, 1);
@@ -1253,10 +1271,7 @@ fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
- sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
+ sysfs_battery_cleanup(battery);
return result;
}
@@ -1275,14 +1290,12 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
- sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
}
-#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1296,14 +1309,14 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
+
+ guard(mutex)(&battery->update_lock);
+
acpi_battery_update(battery, true);
return 0;
}
-#else
-#define acpi_battery_resume NULL
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
@@ -1313,7 +1326,7 @@ static struct acpi_driver acpi_battery_driver = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
},
- .drv.pm = &acpi_battery_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_battery_pm),
.drv.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
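The guard(mutex)(...) statements sprinkled through this patch come from <linux/cleanup.h>: the lock is acquired at the declaration and dropped automatically when the enclosing scope exits, on every return path. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_update(int v)
{
        guard(mutex)(&demo_lock);       /* unlocked automatically on any return */

        if (v < 0)
                return -EINVAL;         /* early return still releases the lock */

        demo_state = v;
        return 0;
}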
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index d1d9c9289087..0fdd581ef96f 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -29,7 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
-static BIN_ATTR_SIMPLE_RO(image);
+static __ro_after_init BIN_ATTR_SIMPLE_RO(image);
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
@@ -40,7 +40,7 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
-static struct bin_attribute *bgrt_bin_attributes[] = {
+static const struct bin_attribute *const bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 16917dc3ad60..a984ccd4a2a0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1406,7 +1406,7 @@ static int __init acpi_bus_init(void)
goto error1;
/*
- * Register the for all standard device notifications.
+ * Register for all standard device notifications.
*/
status =
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
@@ -1434,6 +1434,8 @@ static int __init acpi_bus_init(void)
struct kobject *acpi_kobj;
EXPORT_SYMBOL_GPL(acpi_kobj);
+void __weak __init acpi_arch_init(void) { }
+
static int __init acpi_init(void)
{
int result;
@@ -1444,8 +1446,10 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj)
- pr_debug("%s: kset create error\n", __func__);
+ if (!acpi_kobj) {
+ pr_err("Failed to register kobject\n");
+ return -ENOMEM;
+ }
init_prmt();
acpi_init_pcc();
@@ -1461,8 +1465,7 @@ static int __init acpi_init(void)
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
- acpi_arm_init();
- acpi_riscv_init();
+ acpi_arch_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 7773e6b860e7..0a7026040188 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,6 +24,7 @@
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
+#define ACPI_BUTTON_NOTIFY_WAKE 0x02
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
@@ -443,7 +444,12 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
struct input_dev *input;
int keycode;
- if (event != ACPI_BUTTON_NOTIFY_STATUS) {
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_STATUS:
+ break;
+ case ACPI_BUTTON_NOTIFY_WAKE:
+ break;
+ default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
@@ -452,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
- if (button->suspended)
+ if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;
input = button->input;
@@ -629,7 +635,7 @@ static int acpi_button_add(struct acpi_device *device)
break;
default:
status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY, handler,
+ ACPI_ALL_NOTIFY, handler,
device);
break;
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index c3fc2c05d868..ab4651205e8a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -47,7 +47,6 @@
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
- void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -95,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
0x8 + (offs))
/* Check if a CPC register is in PCC */
@@ -129,6 +128,20 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
+
+/*
+ * Each bit indicates the optionality of the register in per-cpu
+ * cpc_regs[] with the corresponding index. 0 means mandatory and 1
+ * means optional.
+ */
+#define REG_OPTIONAL (0x1FC7D0)
+
+/*
+ * Use the index of the register in per-cpu cpc_regs[] to check if
+ * it's an optional one.
+ */
+#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
+
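
REG_OPTIONAL packs the per-register optionality from the CPPC spec into one literal; a standalone decode (userspace sketch, assuming the indices follow the kernel's enum cppc_regs ordering with 21 entries) shows which bits it sets:

```c
#include <stdio.h>

#define REG_OPTIONAL		(0x1FC7D0)
#define IS_OPTIONAL_CPC_REG(i)	(REG_OPTIONAL & (1U << (i)))

int main(void)
{
	/* Prints "optional" for bits 4, 6-10 and 14-20; e.g. index 5
	 * (DESIRED_PERF) is mandatory while 16 (AUTO_ACT_WINDOW) is not. */
	for (unsigned int i = 0; i < 21; i++)
		printf("cpc_regs[%2u]: %s\n", i,
		       IS_OPTIONAL_CPC_REG(i) ? "optional" : "mandatory");
	return 0;
}
```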
/*
* Arbitrary Retries in case the remote processor is slow to respond
* to PCC commands. Keeping it high enough to cover emulators where
@@ -223,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
@@ -258,7 +271,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
unsigned int time_delta;
/*
@@ -463,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -571,15 +584,6 @@ static int register_pcc_channel(int pcc_ss_idx)
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
- pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- pcc_ss_idx);
- return -ENOMEM;
- }
-
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -671,10 +675,6 @@ static int pcc_data_alloc(int pcc_ss_id)
* )
*/
-#ifndef arch_init_invariance_cppc
-static inline void arch_init_invariance_cppc(void) { }
-#endif
-
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@ -867,7 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- spin_lock_init(&cpc_ptr->rmw_lock);
+ raw_spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -905,8 +905,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- arch_init_invariance_cppc();
-
kfree(output.pointer);
return 0;
@@ -1017,7 +1015,8 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 val_u32;
acpi_status status;
@@ -1087,10 +1086,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
struct cpc_desc *cpc_desc;
+ unsigned long flags;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1126,7 +1127,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return -ENODEV;
}
- spin_lock(&cpc_desc->rmw_lock);
+ raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
switch (size) {
case 8:
prev_val = readb_relaxed(vaddr);
@@ -1141,11 +1142,10 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
prev_val = readq_relaxed(vaddr);
break;
default:
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
- val |= prev_val;
}
switch (size) {
@@ -1174,48 +1174,111 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return ret_val;
}
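
Two fixes land in cpc_write() above: the read-modify-write lock becomes a raw spinlock taken with interrupts disabled (a sleeping or non-IRQ-safe lock here can deadlock on PREEMPT_RT or in IRQ-off callers), and the stray `val |= prev_val;` that OR-ed stale register bits back into the already-masked value is dropped. The resulting pattern, sketched with illustrative names:

```c
static DEFINE_RAW_SPINLOCK(rmw_lock);	/* illustrative stand-in */

static void rmw_write32(void __iomem *vaddr, u32 mask, u32 bits)
{
	unsigned long flags;
	u32 prev;

	/* raw_ + irqsave: safe from any context, including PREEMPT_RT
	 * where a plain spinlock may sleep. */
	raw_spin_lock_irqsave(&rmw_lock, flags);
	prev = readl_relaxed(vaddr);
	/* Merge exactly once through the mask; no extra OR with prev. */
	writel_relaxed((prev & ~mask) | (bits & mask), vaddr);
	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}
```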
-static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ ret = cpc_read(cpu, reg, val);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;
+ if (val == NULL)
+ return -EINVAL;
+
if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
+ !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
+ IS_NULL_REG(&reg->cpc_entry.reg))) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
- if (pcc_ss_id < 0)
- return -EIO;
+ if (CPC_IN_PCC(reg))
+ return cppc_get_reg_val_in_pcc(cpu, reg, val);
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
+ return cpc_read(cpu, reg, val);
+}
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, reg, perf);
- else
- ret = -EIO;
+static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
- up_write(&pcc_ss_data->pcc_lock);
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+ ret = cpc_write(cpu, reg, val);
+ if (ret)
return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
}
- cpc_read(cpunum, reg, perf);
+ reg = &cpc_desc->cpc_regs[reg_idx];
- return 0;
+ /* if a register is writeable, it must be a buffer and not null */
+ if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (CPC_IN_PCC(reg))
+ return cppc_set_reg_val_in_pcc(cpu, reg, val);
+
+ return cpc_write(cpu, reg, val);
}
/**
@@ -1227,7 +1290,7 @@ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+ return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
@@ -1240,7 +1303,7 @@ EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
@@ -1252,7 +1315,7 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
*/
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+ return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
@@ -1265,7 +1328,7 @@ EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+ return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
@@ -1539,53 +1602,110 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
- * cppc_get_auto_sel_caps - Read autonomous selection register.
- * @cpunum : CPU from which to read register.
- * @perf_caps : struct where autonomous selection register value is updated.
+ * cppc_set_epp() - Write the EPP register.
+ * @cpu: CPU on which to write register.
+ * @epp_val: Value to write to the EPP register.
*/
-int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+int cppc_set_epp(int cpu, u64 epp_val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *auto_sel_reg;
- u64 auto_sel;
+ if (epp_val > CPPC_ENERGY_PERF_MAX)
+ return -EINVAL;
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
- return -ENODEV;
- }
+ return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp);
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+/**
+ * cppc_get_auto_act_window() - Read autonomous activity window register.
+ * @cpu: CPU from which to read register.
+ * @auto_act_window: Location to store the activity window value, in usec.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ unsigned int exp;
+ u64 val, sig;
+ int ret;
+
+ if (auto_act_window == NULL)
+ return -EINVAL;
+
+ ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
+ if (ret)
+ return ret;
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
+ sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
+ *auto_act_window = sig * int_pow(10, exp);
- if (CPC_IN_PCC(auto_sel_reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);
- if (pcc_ss_id < 0)
- return -ENODEV;
+/**
+ * cppc_set_auto_act_window() - Write autonomous activity window register.
+ * @cpu: CPU on which to write register.
+ * @auto_act_window: usec value to write to the autonomous activity window register.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value written to the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ /* The max value to store is 1270000000 */
+ u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
+ int exp = 0;
+ u64 val;
- pcc_ss_data = pcc_data[pcc_ss_id];
+ if (auto_act_window > max_val)
+ return -EINVAL;
- down_write(&pcc_ss_data->pcc_lock);
+ /*
+ * The max significand is 127; when auto_act_window is larger than
+ * 129, discard the precision of the last digit and increase the
+ * exponent by 1.
+ */
+ while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
+ auto_act_window /= 10;
+ exp += 1;
+ }
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
- cpc_read(cpunum, auto_sel_reg, &auto_sel);
- perf_caps->auto_sel = (bool)auto_sel;
- } else {
- ret = -EIO;
- }
+ /* For 128 and 129, cut it to 127. */
+ if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
+ auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
- up_write(&pcc_ss_data->pcc_lock);
+ val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;
+
+ return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
+
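
The encode/decode arithmetic above is easy to verify in isolation. A userspace sketch, with the CPPC_AUTO_ACT_WINDOW_* constants assumed to be 127 (max significand), 7 (significand bit width, which is also the exponent shift) and 129 (carry threshold):

```c
#include <stdio.h>
#include <stdint.h>

#define MAX_SIG		127	/* CPPC_AUTO_ACT_WINDOW_MAX_SIG, assumed */
#define SIG_BITS	7	/* ..._SIG_BIT_SIZE, assumed */
#define CARRY_THRESH	129	/* ..._SIG_CARRY_THRESH, assumed */

static uint64_t pow10u(unsigned int e)
{
	uint64_t v = 1;

	while (e--)
		v *= 10;
	return v;
}

/* Mirrors cppc_set_auto_act_window(); callers must pre-reject values
 * above 127 * 10^7 as the kernel does, or exp can overflow 3 bits. */
static uint64_t encode_act_window(uint64_t usec)
{
	unsigned int exp = 0;

	while (usec > CARRY_THRESH) {	/* drop one decimal digit per step */
		usec /= 10;
		exp++;
	}
	if (usec > MAX_SIG)		/* 128 and 129 round down to 127 */
		usec = MAX_SIG;
	return ((uint64_t)exp << SIG_BITS) + usec;
}

int main(void)
{
	uint64_t raw = encode_act_window(1234);	/* sig 123, exp 1 */

	/* Decode as cppc_get_auto_act_window() does: 123 * 10^1 = 1230,
	 * i.e. the last digit of 1234 was traded for range. */
	printf("raw=%#llx -> %llu usec\n", (unsigned long long)raw,
	       (unsigned long long)((raw & MAX_SIG) * pow10u(raw >> SIG_BITS)));
	return 0;
}
```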
+/**
+ * cppc_get_auto_sel() - Read autonomous selection register.
+ * @cpu: CPU from which to read register.
+ * @enable: Location to store the autonomous selection state.
+ */
+int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ u64 auto_sel;
+ int ret;
+
+ if (enable == NULL)
+ return -EINVAL;
+ ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
+ if (ret)
return ret;
- }
+
+ *enable = (bool)auto_sel;
return 0;
}
-EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
/**
* cppc_set_auto_sel - Write autonomous selection register.
@@ -1594,43 +1714,7 @@ EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *auto_sel_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -ENODEV;
- }
-
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
-
- if (CPC_IN_PCC(auto_sel_reg)) {
- if (pcc_ss_id < 0) {
- pr_debug("Invalid pcc_ss_id\n");
- return -ENODEV;
- }
-
- if (CPC_SUPPORTED(auto_sel_reg)) {
- ret = cpc_write(cpu, auto_sel_reg, enable);
- if (ret)
- return ret;
- }
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platform */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
- }
-
- return ret;
+ return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
@@ -1644,38 +1728,7 @@ EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
*/
int cppc_set_enable(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *enable_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -EINVAL;
- }
-
- enable_reg = &cpc_desc->cpc_regs[ENABLE];
-
- if (CPC_IN_PCC(enable_reg)) {
-
- if (pcc_ss_id < 0)
- return -EIO;
-
- ret = cpc_write(cpu, enable_reg, enable);
- if (ret)
- return ret;
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platfrom */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- return ret;
- }
-
- return cpc_write(cpu, enable_reg, enable);
+ return cppc_set_reg_val(cpu, ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
@@ -1823,7 +1876,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
-unsigned int cppc_get_transition_latency(int cpu_num)
+int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values
@@ -1836,31 +1889,29 @@ unsigned int cppc_get_transition_latency(int cpu_num)
* completion of a command before issuing the next command,
* in microseconds.
*/
- unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
+ int latency_ns = 0;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
- else if (!CPC_IN_PCC(desired_reg))
- return CPUFREQ_ETERNAL;
- if (pcc_ss_id < 0)
- return CPUFREQ_ETERNAL;
+ if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
+ return -ENODATA;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
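
The retyped cppc_get_transition_latency() now reports failure as -ENODATA instead of the unsigned CPUFREQ_ETERNAL sentinel, which is why latency_ns becomes a plain int and the max() calls gain max_t(). The arithmetic deserves a worked example: MPAR caps PCC commands per 60 s window, so command spacing is 60 * 10^9 / mpar nanoseconds, with nominal latency and MRTT (both in usec) acting as floors:

```c
#include <stdio.h>

/* Standalone re-statement of the computation; inputs are illustrative. */
static int transition_latency_ns(int mpar, int nominal_us, int mrtt_us)
{
	int latency_ns = 0;

	if (mpar)	/* spread commands across the 60 s MPAR window */
		latency_ns = 60 * (1000 * 1000 * 1000 / mpar);
	if (nominal_us * 1000 > latency_ns)
		latency_ns = nominal_us * 1000;
	if (mrtt_us * 1000 > latency_ns)
		latency_ns = mrtt_us * 1000;
	return latency_ns;
}

int main(void)
{
	/* mpar=600 -> 60 * 1666666 ns, i.e. roughly 100 ms per command,
	 * which dominates a 2 ms nominal latency and 60 us MRTT. */
	printf("%d ns\n", transition_latency_ns(600, 2000, 60));
	return 0;
}
```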
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3b4d048c4941..4e0583274b8f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1119,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ dev_pm_set_strict_midlayer(dev, true);
+
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
@@ -1147,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev)
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
+
+ dev_pm_set_strict_midlayer(dev, false);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1161,7 +1165,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete);
*/
int acpi_subsys_suspend(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1320,7 +1324,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
*/
int acpi_subsys_poweroff(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1362,6 +1366,8 @@ static int acpi_subsys_poweroff_noirq(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
+
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
@@ -1382,6 +1388,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_restore_early,
#endif
},
+ .detach = acpi_dev_pm_detach,
};
/**
@@ -1465,7 +1472,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup_disable(adev);
}
- dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 3961fc47152c..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -464,7 +464,7 @@ static ssize_t description_show(struct device *dev,
buf[result++] = '\n';
- kfree(str_obj);
+ ACPI_FREE(str_obj);
return result;
}
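
kfree() was the wrong counterpart here: the _STR buffer comes from ACPICA's own allocator via ACPI_ALLOCATE_BUFFER, so it must be released through ACPI_FREE(). A sketch of the pairing (the helper name is hypothetical):

```c
static void show_str_length(acpi_handle handle)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_STR", NULL, &buf)))
		return;

	obj = buf.pointer;
	if (obj && obj->type == ACPI_TYPE_BUFFER)
		pr_info("_STR payload: %u bytes\n", obj->buffer.length);

	ACPI_FREE(buf.pointer);	/* matches ACPICA's allocator, not kfree() */
}
```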
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index d202730fafd8..952216c67d58 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -152,13 +152,14 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1064", 0},
{"INTC106B", 0},
{"INTC10A3", 0},
+ {"INTC10D7", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
static struct platform_driver pch_fivr_driver = {
.probe = pch_fivr_add,
- .remove_new = pch_fivr_remove,
+ .remove = pch_fivr_remove,
.driver = {
.name = "dptf_pch_fivr",
.acpi_match_table = pch_fivr_device_ids,
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 8023b3e23315..776914f31b9e 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -236,13 +236,17 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
+ {"INTC10D8", 0},
+ {"INTC10D9", 0},
+ {"INTC1100", 0},
+ {"INTC1101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
static struct platform_driver dptf_power_driver = {
.probe = dptf_power_add,
- .remove_new = dptf_power_remove,
+ .remove = dptf_power_remove,
.driver = {
.name = "dptf_power",
.acpi_match_table = int3407_device_ids,
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 014ada759954..a222df059a16 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -55,6 +55,19 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC10A3"},
{"INTC10A4"},
{"INTC10A5"},
+ {"INTC10D4"},
+ {"INTC10D5"},
+ {"INTC10D6"},
+ {"INTC10D7"},
+ {"INTC10D8"},
+ {"INTC10D9"},
+ {"INTC10FC"},
+ {"INTC10FD"},
+ {"INTC10FE"},
+ {"INTC10FF"},
+ {"INTC1100"},
+ {"INTC1101"},
+ {"INTC1102"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 25399f6dde7e..7855bbf752b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -1677,8 +1679,8 @@ static int acpi_ec_add(struct acpi_device *device)
struct acpi_ec *ec;
int ret;
- strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_EC_CLASS);
if (boot_ec && (boot_ec->handle == device->handle ||
!strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
@@ -2031,6 +2033,25 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strlen(ecdt_ptr->id)) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+ * However, some ThinkBook machines have an ECDT table with a valid EC
+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+ *
+ * Because of this, only check whether the ID string is empty, so that
+ * only the obviously broken tables are rejected.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
@@ -2301,6 +2322,40 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
+ {
+ // TUXEDO InfinityBook Pro AMD Gen9
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index d199a19bb292..96a9aaaaf9f7 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -28,8 +28,8 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
struct acpi_bus_event event;
- strcpy(event.device_class, dev->pnp.device_class);
- strcpy(event.bus_id, dev->pnp.bus_id);
+ strscpy(event.device_class, dev->pnp.device_class);
+ strscpy(event.bus_id, dev->pnp.bus_id);
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 11778c93254b..5c35cbc7f6ff 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -185,7 +185,7 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
- .remove_new = ged_remove,
+ .remove = ged_remove,
.shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index db25a3898af7..8a28a72a7c6a 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -19,6 +19,8 @@
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
+ {"INTC10D6", }, /* Fan for Panther Lake generation */ \
+ {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
@@ -48,6 +50,7 @@ struct acpi_fan_fst {
struct acpi_fan {
bool acpi4;
+ bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index f4f6e2381f1d..c1afb7b5ed3d 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -22,9 +22,9 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ count = sysfs_emit(buf, "not-defined:");
else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+ count = sysfs_emit(buf, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
count += sysfs_emit_at(buf, count, "not-defined:");
@@ -59,7 +59,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
if (status)
return status;
- return sprintf(buf, "%lld\n", fst.speed);
+ return sysfs_emit(buf, "%lld\n", fst.speed);
}
static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
@@ -67,7 +67,7 @@ static ssize_t show_fine_grain_control(struct device *dev, struct device_attribu
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan *fan = acpi_driver_data(acpi_dev);
- return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+ return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl);
}
int acpi_fan_create_attributes(struct acpi_device *device)
@@ -75,15 +75,6 @@ int acpi_fan_create_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i, status;
- sysfs_attr_init(&fan->fine_grain_control.attr);
- fan->fine_grain_control.show = show_fine_grain_control;
- fan->fine_grain_control.store = NULL;
- fan->fine_grain_control.attr.name = "fine_grain_control";
- fan->fine_grain_control.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
- if (status)
- return status;
-
/* _FST is present if we are here */
sysfs_attr_init(&fan->fst_speed.attr);
fan->fst_speed.show = show_fan_speed;
@@ -92,7 +83,19 @@ int acpi_fan_create_attributes(struct acpi_device *device)
fan->fst_speed.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
if (status)
- goto rem_fine_grain_attr;
+ return status;
+
+ if (!fan->acpi4)
+ return 0;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ goto rem_fst_attr;
for (i = 0; i < fan->fps_count; ++i) {
struct acpi_fan_fps *fps = &fan->fps[i];
@@ -109,18 +112,18 @@ int acpi_fan_create_attributes(struct acpi_device *device)
for (j = 0; j < i; ++j)
sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- goto rem_fst_attr;
+ goto rem_fine_grain_attr;
}
}
return 0;
-rem_fst_attr:
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
-
rem_fine_grain_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
return status;
}
@@ -129,9 +132,13 @@ void acpi_fan_delete_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i;
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+ if (!fan->acpi4)
+ return;
+
for (i = 0; i < fan->fps_count; ++i)
sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
}
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 7cea4495f19b..04ff608f2ff0 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -102,7 +102,7 @@ match_fps:
break;
}
if (i == fan->fps_count) {
- dev_dbg(&device->dev, "Invalid control value returned\n");
+ dev_dbg(&device->dev, "No matching fps control value\n");
return -EINVAL;
}
@@ -203,14 +203,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL") &&
- acpi_has_method(device->handle, "_FST");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -327,7 +319,14 @@ static int acpi_fan_probe(struct platform_device *pdev)
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_is_acpi4(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
+ fan->has_fst = true;
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
+ }
+
+ if (fan->acpi4) {
result = acpi_fan_get_fif(device);
if (result)
return result;
@@ -335,7 +334,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_get_fps(device);
if (result)
return result;
+ }
+ if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
if (result)
return result;
@@ -343,9 +344,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_create_attributes(device);
if (result)
return result;
+ }
- fan->acpi4 = true;
- } else {
+ if (!fan->acpi4) {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
@@ -371,21 +372,27 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_unregister;
+ }
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
- goto err_end;
+ goto err_remove_link;
}
return 0;
+err_remove_link:
+ sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
+err_unregister:
+ thermal_cooling_device_unregister(cdev);
err_end:
- if (fan->acpi4)
+ if (fan->has_fst)
acpi_fan_delete_attributes(device);
return result;
@@ -395,7 +402,7 @@ static void acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
- if (fan->acpi4) {
+ if (fan->has_fst) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
acpi_fan_delete_attributes(device);
@@ -448,7 +455,7 @@ static const struct dev_pm_ops acpi_fan_pm = {
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
- .remove_new = acpi_fan_remove,
+ .remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index bd0d31a398fa..e8d90605106e 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -43,6 +43,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_fan_input:
return 0444;
case hwmon_fan_target:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
@@ -57,6 +61,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_power:
switch (attr) {
case hwmon_power_input:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 7652515a6be1..3499f86c411e 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
.remove = acpi_hed_remove,
},
};
-module_acpi_driver(acpi_hed_driver);
+
+static int __init acpi_hed_driver_init(void)
+{
+ return acpi_bus_register_driver(&acpi_hed_driver);
+}
+subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ced7dff9a5db..63354972ab0b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -140,6 +140,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
@@ -175,6 +176,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
@@ -215,6 +222,8 @@ extern struct acpi_ec *first_ec;
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
+#ifdef CONFIG_ACPI_EC
+
void acpi_ec_init(void);
void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
@@ -231,6 +240,29 @@ void acpi_ec_flush_work(void);
bool acpi_ec_dispatch_gpe(void);
#endif
+#else
+
+static inline void acpi_ec_init(void) {}
+static inline void acpi_ec_ecdt_probe(void) {}
+static inline void acpi_ec_dsdt_probe(void) {}
+static inline void acpi_ec_block_transactions(void) {}
+static inline void acpi_ec_unblock_transactions(void) {}
+static inline int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ return -ENXIO;
+}
+static inline void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) {}
+static inline void acpi_ec_register_opregions(struct acpi_device *adev) {}
+
+static inline void acpi_ec_flush_work(void) {}
+static inline bool acpi_ec_dispatch_gpe(void)
+{
+ return false;
+}
+
+#endif
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..76a856c32c4d 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/*
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index 92b658f92dc0..5b85989f96be 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
if (!fwnode_property_present(adev_fwnode, "rotation")) {
struct acpi_pld_info *pld;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_get_physical_device_location(handle, &pld)) {
swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
PROPERTY_ENTRY_U32("rotation",
pld->rotation * 45U);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 5429ec9ef06f..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;
- if (cmd == ND_CMD_CALL)
+ if (cmd == ND_CMD_CALL) {
+ if (!buf || buf_len < sizeof(*call_pkg))
+ return -EINVAL;
+
call_pkg = buf;
+ }
+
func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
@@ -480,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (family > NVDIMM_BUS_FAMILY_MAX ||
+ if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,
@@ -2632,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
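
Two hardenings appear in acpi_nfit_ctl() above: ND_CMD_CALL payloads are length-checked before the call_pkg cast, and the family index taken from the ioctl payload is bounds-checked on its raw value before array_index_nospec() clamps it against speculative out-of-bounds reads. The generic shape of the latter:

```c
#include <linux/nospec.h>

/* Sketch only: "table" and "mask" stand in for the nd_desc fields. */
static int family_lookup(const int *table, unsigned long mask,
			 unsigned int family, unsigned int max)
{
	/* 1) architectural check on the raw, caller-supplied index */
	if (family > max || !test_bit(family, &mask))
		return -EINVAL;

	/* 2) clamp under speculation so a mispredicted branch cannot
	 *    index table[] out of bounds */
	family = array_index_nospec(family, max + 1);

	return table[family];
}
```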
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 3902759abcba..bce6f6a18426 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -55,10 +55,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned long security_flags = 0;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_get_security_state cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -120,10 +119,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
static int intel_security_freeze(struct nvdimm *nvdimm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_freeze_lock cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -153,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
NVDIMM_INTEL_SET_PASSPHRASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_set_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
@@ -195,10 +192,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_unlock_unit cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -234,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_disable_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -277,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_secure_erase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
@@ -318,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_query_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -354,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -407,10 +399,9 @@ const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
struct nd_intel_bus_fw_activate_businfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate_businfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
.nd_family = NVDIMM_BUS_FAMILY_INTEL,
@@ -518,33 +509,31 @@ static enum nvdimm_fwa_capability intel_bus_fwa_capability(
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
- .nd_family = NVDIMM_BUS_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
- .nd_size_out =
- sizeof(struct nd_intel_bus_fw_activate),
- .nd_fw_size =
- sizeof(struct nd_intel_bus_fw_activate),
- },
+ ) nd_cmd;
+ int rc;
+
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ };
+ nd_cmd.cmd = (struct nd_intel_bus_fw_activate) {
/*
* Even though activate is run from a suspended context,
* for safety, still ask platform firmware to force
* quiesce devices by default. Let a module
* parameter override that policy.
*/
- .cmd = {
- .iodev_state = acpi_desc->fwa_noidle
- ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
- : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
- },
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
};
- int rc;
-
switch (intel_bus_fwa_state(nd_desc)) {
case NVDIMM_FWA_ARMED:
case NVDIMM_FWA_ARM_OVERFLOW:
@@ -582,10 +571,9 @@ const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
struct nd_intel_fw_activate_dimminfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_dimminfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -688,27 +676,24 @@ static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_arm cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
- .nd_family = NVDIMM_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
- .nd_size_out =
- sizeof(struct nd_intel_fw_activate_arm),
- .nd_fw_size =
- sizeof(struct nd_intel_fw_activate_arm),
- },
- .cmd = {
- .activate_arm = arm == NVDIMM_FWA_ARM
- ? ND_INTEL_DIMM_FWA_ARM
- : ND_INTEL_DIMM_FWA_DISARM,
- },
- };
+ ) nd_cmd;
int rc;
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out = sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm),
+ };
+ nd_cmd.cmd = (struct nd_intel_fw_activate_arm) {
+ .activate_arm = arm == NVDIMM_FWA_ARM ?
+ ND_INTEL_DIMM_FWA_ARM :
+ ND_INTEL_DIMM_FWA_DISARM,
+ };
+
switch (intel_fwa_state(nvdimm)) {
case NVDIMM_FWA_INVALID:
return -ENXIO;
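
Every intel.c hunk above performs the same conversion: the old wrapper struct placed a fixed payload after struct nd_cmd_pkg, leaving its nd_payload[] flexible array mid-struct. TRAILING_OVERLAP() instead overlays the payload member at nd_payload's offset, so .pkg and .cmd alias as before without the flexible-array-in-the-middle layout. Usage, condensed from the first hunk (fields shown are illustrative):

```c
TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
	struct nd_intel_get_security_state cmd;
) nd_cmd = {
	.pkg = {
		.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
		.nd_family = NVDIMM_FAMILY_INTEL,
		.nd_size_out = sizeof(struct nd_intel_get_security_state),
	},
};

/* nd_cmd.cmd occupies the bytes of nd_cmd.pkg.nd_payload, so a payload
 * filled in by acpi_nfit_ctl() can be read back through nd_cmd.cmd. */
```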
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 1a902a02390f..5a36d57289b4 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -74,7 +74,6 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
- bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -108,6 +107,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *cache_size)
+{
+ unsigned int pxm = node_to_pxm(nid);
+ struct memory_target *target;
+ struct target_cache *tcache;
+ struct resource *res;
+
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENOENT;
+
+ list_for_each_entry(tcache, &target->caches, node) {
+ if (tcache->cache_attrs.address_mode !=
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+ continue;
+
+ res = &target->memregions;
+ if (!resource_contains(res, backing_res))
+ continue;
+
+ *cache_size = tcache->cache_attrs.size;
+ return 0;
+ }
+
+ *cache_size = 0;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
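
hmat_get_extended_linear_cache_size() is exported in the CXL namespace for region setup code and reports a size of 0 when no extended-linear cache fronts the backing range. A hypothetical caller sketch:

```c
/* Hypothetical caller; importing module needs MODULE_IMPORT_NS("CXL"). */
static void report_elc(struct resource *backing_res, int nid)
{
	resource_size_t size;

	if (hmat_get_extended_linear_cache_size(backing_res, nid, &size))
		return;		/* no HMAT target for this proximity domain */

	if (size)
		pr_info("node %d: %pa of extended linear cache\n", nid, &size);
}
```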
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
@@ -151,7 +189,7 @@ int acpi_get_genport_coordinates(u32 uid,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
@@ -329,35 +367,6 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- struct memory_target *target;
- int pxm;
-
- if (nid == NUMA_NO_NODE)
- return -EINVAL;
-
- pxm = node_to_pxm(nid);
- guard(mutex)(&target_lock);
- target = find_mem_target(pxm);
- if (!target)
- return -ENODEV;
-
- hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
- coord->read_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
- coord->write_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
- coord->read_bandwidth, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
- coord->write_bandwidth, access);
- target->ext_updated = true;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
-
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -442,9 +451,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +464,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +494,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -506,6 +515,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ /* Extended Linear mode is only valid if cache is direct mapped */
+ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+ tcache->cache_attrs.address_mode =
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+ }
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
@@ -546,9 +560,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
@@ -729,10 +743,6 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
- /* Don't update if an external agent has changed the data. */
- if (target->ext_updated)
- return;
-
/* Don't update for generic port if there's no device handle */
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
@@ -918,10 +928,10 @@ static int hmat_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_target *target;
- struct memory_notify *mnb = arg;
- int pxm, nid = mnb->status_change_nid;
+ struct node_notify *nn = arg;
+ int pxm, nid = nn->nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_OK;
pxm = node_to_pxm(nid);
@@ -1074,7 +1084,7 @@ static __init int hmat_init(void)
hmat_register_targets();
/* Keep the table and structures if the notifier may use them */
- if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+ if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI))
goto out_put;
if (!hmat_set_default_dram_perf())
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index bec0dcd1f9c3..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,10 +14,12 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
#include <linux/numa_memblks.h>
+#include <linux/string_choices.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
@@ -51,6 +53,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
@@ -81,6 +84,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid - 1 fake-numa nodes into account in both
+ * pxm_to_node_map()/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already binded to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "%d max nid when expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d total number of nodes\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
@@ -92,8 +190,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
p->apic_id, p->local_sapic_eid,
p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -105,8 +202,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(unsigned long long)p->base_address,
(unsigned long long)p->length,
p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED) ?
- "enabled" : "disabled",
+ str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
" hot-pluggable" : "",
(p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
@@ -121,8 +217,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
p->apic_id,
p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -133,8 +228,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_GICC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
}
break;
@@ -152,8 +246,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
*(u16 *)(&p->device_handle[0]),
*(u16 *)(&p->device_handle[2]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
} else {
/*
* In this case we can rely on the device having a
@@ -163,8 +256,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(char *)(&p->device_handle[0]),
(char *)(&p->device_handle[8]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
}
}
break;
@@ -176,8 +268,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
}
break;
@@ -339,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -363,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
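
A note on the alignment math above: 1UL << __ffs(start | end) is the lowest set bit of the OR of both window bounds, i.e. the largest power of two that divides both base and end. A minimal user-space sketch of the same computation, with __builtin_ctzll() standing in for the kernel's __ffs() (both are undefined for a zero input; the window values below are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Largest power-of-two alignment shared by the base and end of a
 * window; mirrors the kernel's 1UL << __ffs(start | end). Caller
 * must guarantee start | end != 0.
 */
static uint64_t window_align(uint64_t start, uint64_t end)
{
    return 1ULL << __builtin_ctzll(start | end);
}

int main(void)
{
    uint64_t start = 0x1000000000ULL;        /* hypothetical 64 GiB base */
    uint64_t end   = start + 0x10000000ULL;  /* + 256 MiB window */

    /* prints 0x10000000, i.e. exactly the SZ_256M threshold */
    printf("align = %#llx\n", (unsigned long long)window_align(start, end));
    return 0;
}
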
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index df9328c850bd..f2c943b934be 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -42,7 +42,6 @@ static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
- {"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 70af3fbbebe5..5ff343096ece 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- msleep(ms);
+ u64 usec = ms * USEC_PER_MSEC, delta_us = 50;
+
+ /*
+ * Use a hrtimer because the timer wheel timers are optimized for
+ * cancellation before they expire and this timer is not going to be
+ * canceled.
+ *
+ * Set the delta between the requested sleep time and the effective
+ * deadline to at least 50 us in case there is an opportunity for timer
+ * coalescing.
+ *
+ * Moreover, longer sleeps can be assumed to need somewhat less timer
+ * precision, so sacrifice some of it for making the timer a more likely
+ * candidate for coalescing by setting the delta to 1% of the sleep time
+ * if it is above 5 ms (this value is chosen so that the delta is a
+ * continuous function of the sleep time).
+ */
+ if (ms > 5)
+ delta_us = (USEC_PER_MSEC / 100) * ms;
+
+ usleep_range(usec, usec + delta_us);
}
void acpi_os_stall(u32 us)
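
The 1% rule in acpi_os_sleep() above is chosen so that the slack is a continuous function of the sleep time: 1% of the 5 ms threshold is exactly the 50 us minimum. A standalone sketch of just the slack computation (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000ULL

/* Timer slack as computed above: at least 50 us, or 1% of the sleep
 * time for sleeps longer than 5 ms; the two branches meet at 5 ms.
 */
static uint64_t sleep_slack_us(uint64_t ms)
{
    uint64_t delta_us = 50;

    if (ms > 5)
        delta_us = (USEC_PER_MSEC / 100) * ms;

    return delta_us;
}

int main(void)
{
    for (uint64_t ms = 1; ms <= 25; ms += 4)
        printf("%llu ms -> slack %llu us\n", (unsigned long long)ms,
               (unsigned long long)sleep_slack_us(ms));
    return 0;
}
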
@@ -642,6 +662,15 @@ acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ /*
+ * set an all-ones result, as if reading from a
+ * non-existent I/O port
+ */
+ *value = GENMASK(width, 0);
+ return AE_NOT_IMPLEMENTED;
+ }
+
if (value)
*value = 0;
else
@@ -665,6 +694,9 @@ EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT))
+ return AE_NOT_IMPLEMENTED;
+
if (width <= 8) {
outb(value, port);
} else if (width <= 16) {
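
For the !CONFIG_HAS_IOPORT stubs above, the intent is that a failed read still hands back an all-ones pattern, mimicking a read from a non-existent I/O port. A rough user-space model of that behavior (the patch itself builds the mask with GENMASK(); the open-coded width-bit mask below is only an approximation):

#include <stdint.h>
#include <stdio.h>

/* Fail the port read but fill in an all-ones value of the requested
 * width, like a read from missing hardware would return.
 */
static int read_port_stub(unsigned int width, uint32_t *value)
{
    *value = (width >= 32) ? 0xffffffffu : ((1u << width) - 1);
    return -1;  /* stand-in for AE_NOT_IMPLEMENTED */
}

int main(void)
{
    uint32_t v;

    read_port_stub(16, &v);
    printf("%#x\n", v);  /* 0xffff */
    return 0;
}
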
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 630fe0a34bc6..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
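
Several hunks in this series replace open-coded ternaries with helpers from <linux/string_choices.h>. Minimal stand-ins showing what the helpers used here amount to (the in-tree versions are equivalent one-line inlines):

#include <stdbool.h>
#include <stdio.h>

static inline const char *str_enabled_disabled(bool v)
{
    return v ? "enabled" : "disabled";
}

static inline const char *str_low_high(bool v)
{
    return v ? "low" : "high";
}

int main(void)
{
    /* matches the pci_irq.c conversion: true when polarity is active-low */
    printf("polarity: %s\n", str_low_high(true));
    printf("CPU: %s\n", str_enabled_disabled(false));
    return 0;
}
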
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index b727db968f33..e4560b33b8ad 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -268,7 +268,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
link->irq.active = irq;
- acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
+ acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active);
end:
return result;
@@ -714,8 +714,8 @@ static int acpi_pci_link_add(struct acpi_device *device,
return -ENOMEM;
link->device = device;
- strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
device->driver_data = link;
mutex_lock(&acpi_link_lock);
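
The strcpy() to strscpy() conversions here and in the following files use the two-argument strscpy() form, which infers the destination size via sizeof() and always NUL-terminates. A hedged user-space approximation of the bounded-copy semantics (-7 stands in for -E2BIG):

#include <stdio.h>
#include <string.h>

static long bounded_copy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size == 0)
        return -7;
    if (len >= size) {               /* would not fit: truncate */
        memcpy(dst, src, size - 1);
        dst[size - 1] = '\0';
        return -7;
    }
    memcpy(dst, src, len + 1);       /* fits, including the NUL */
    return (long)len;
}

int main(void)
{
    char name[8];

    printf("%ld -> '%s'\n",
           bounded_copy(name, "PCI Interrupt Link", sizeof(name)), name);
    return 0;
}
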
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d0bfb3706801..74ade4160314 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -689,8 +689,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->device = device;
root->segment = segment & 0xFFFF;
- strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
@@ -858,7 +858,7 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 998264a7333d..32bdf8cbe8f2 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -272,9 +272,6 @@ static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case PFRT_LOG_IOC_GET_INFO:
info.log_level = get_pfrt_log_level(pfrt_log_dev);
- if (ret < 0)
- return ret;
-
info.log_type = pfrt_log_dev->info.log_type;
info.log_revid = pfrt_log_dev->info.log_revid;
if (copy_to_user(p, &info, sizeof(info)))
@@ -425,7 +422,7 @@ static struct platform_driver acpi_pfrt_log_driver = {
.acpi_match_table = acpi_pfrt_log_ids,
},
.probe = acpi_pfrt_log_probe,
- .remove_new = acpi_pfrt_log_remove,
+ .remove = acpi_pfrt_log_remove,
};
module_platform_driver(acpi_pfrt_log_driver);
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 8b2910995fc1..11b1c2828005 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -127,8 +127,11 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
pfru_dev->rev_id,
PFRU_FUNC_QUERY_UPDATE_CAP,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with no object\n");
return ret;
+ }
if (out_obj->package.count < CAP_NR_IDX ||
out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -141,13 +144,17 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
- out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
if (cap_hdr->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n",
+ cap_hdr->status);
goto free_acpi_buffer;
}
@@ -193,24 +200,32 @@ static int query_buffer(struct pfru_com_buf_info *info,
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with no object\n");
return ret;
+ }
if (out_obj->package.count < BUF_NR_IDX ||
out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
info->ext_status =
out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
if (info->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Extended Status:%d\n", info->ext_status);
goto free_acpi_buffer;
}
@@ -295,12 +310,16 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
m_img_hdr = data + size;
type = get_image_type(m_img_hdr, pfru_dev);
- if (type < 0)
+ if (type < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image type\n");
return false;
+ }
size = adjust_efi_size(m_img_hdr, size);
- if (size < 0)
+ if (size < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image size\n");
return false;
+ }
auth = data + size;
size += sizeof(u64) + auth->auth_info.hdr.len;
@@ -310,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version;
+ return payload_hdr->svn_ver >= cap->drv_svn;
}
static void print_update_debug_info(struct pfru_updated_result *result,
@@ -346,8 +365,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_START,
&in_obj, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed to start with no object\n");
return ret;
+ }
if (out_obj->package.count < UPDATE_NR_IDX ||
out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -355,8 +377,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
update_result.status =
out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
@@ -365,8 +390,10 @@ static int start_update(int action, struct pfru_device *pfru_dev)
if (update_result.status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Extended Status:%d\n",
update_result.ext_status);
goto free_acpi_buffer;
@@ -450,8 +477,10 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- if (len > buf_info.buf_size)
+ if (len > buf_info.buf_size) {
+ dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n");
return -EINVAL;
+ }
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
@@ -460,10 +489,14 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
- if (!buf_ptr)
+ if (!buf_ptr) {
+ dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n");
return -ENOMEM;
+ }
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Failed to copy the data from the user space buffer\n");
ret = -EINVAL;
goto unmap;
}
@@ -565,7 +598,7 @@ static struct platform_driver acpi_pfru_driver = {
.acpi_match_table = acpi_pfru_ids,
},
.probe = acpi_pfru_probe,
- .remove_new = acpi_pfru_remove,
+ .remove = acpi_pfru_remove,
};
module_platform_driver(acpi_pfru_driver);
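
The one-line applicable_image() fix above changes what gates driver payloads: the payload's security version number (SVN) is now compared against the capability's driver SVN instead of reusing the runtime-version check. An illustrative reduction of that decision, with field names taken from the diff (a sketch, not a drop-in replacement):

#include <stdio.h>

struct payload_hdr { unsigned int rt_ver; unsigned int svn_ver; };
struct update_cap  { unsigned int code_rt_version; unsigned int drv_svn; };

static int image_applicable(int is_code_inject, const struct payload_hdr *p,
                            const struct update_cap *cap)
{
    if (is_code_inject)
        return p->rt_ver >= cap->code_rt_version;

    /* driver payload: gated on SVN, as in the fixed code */
    return p->svn_ver >= cap->drv_svn;
}

int main(void)
{
    struct payload_hdr p = { .rt_ver = 2, .svn_ver = 1 };
    struct update_cap cap = { .code_rt_version = 2, .drv_svn = 3 };

    printf("driver payload applicable: %d\n", image_applicable(0, &p, &cap));
    return 0;
}
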
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d2f7fd7743a1..b43f4459a4f6 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,34 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,203 +37,679 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _common_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _common_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return _common_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @arg: struct aggregate_choices_data, with its aggregate member bitmap
+ * initially filled with ones
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *arg)
+{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+
+ handler = to_pprof_handler(dev);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
+
+ return 0;
+}
+
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
+ }
+
+ /* no profile handler registered any more */
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+ return _common_choices_show(data.aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Store and notify a class from legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
+
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+ /* no profile handler registered any more */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- }
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, data.aggregate))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
-static DEVICE_ATTR_RO(platform_profile_choices);
-static DEVICE_ATTR_RW(platform_profile);
+static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
+static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
- &dev_attr_platform_profile_choices.attr,
- &dev_attr_platform_profile.attr,
+ &attr_platform_profile_choices.attr,
+ &attr_platform_profile.attr,
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev;
+
+ dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
+ if (!dev)
+ return 0;
+
+ put_device(dev);
+
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycles profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+ /* never cycle into the custom profile, even if all drivers support it */
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(data.aggregate,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ */
+void platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof;
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
- return 0;
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ pprof = to_pprof_handler(dev);
+
+ guard(mutex)(&profile_lock);
+
+ ida_free(&platform_profile_ida, pprof->minor);
+ device_unregister(&pprof->dev);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ if (acpi_disabled)
+ return -EOPNOTSUPP;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
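
Taken together, the platform_profile rework replaces the single cur_profile handler with a class of per-handler devices. A hypothetical consumer of the new API, with the ops signatures inferred from the calls in this diff (ops->probe(drvdata, choices), ops->profile_get(dev, &val), ops->profile_set(dev, bit)); an untested illustration, not an in-tree driver:

#include <linux/bitops.h>
#include <linux/platform_profile.h>

static int foo_probe(void *drvdata, unsigned long *choices)
{
    /* advertise the profiles this device can switch between */
    set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
    set_bit(PLATFORM_PROFILE_BALANCED, choices);
    set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
    return 0;
}

static int foo_profile_get(struct device *dev,
                           enum platform_profile_option *profile)
{
    *profile = PLATFORM_PROFILE_BALANCED;  /* query the firmware here */
    return 0;
}

static int foo_profile_set(struct device *dev,
                           enum platform_profile_option profile)
{
    return 0;  /* program the firmware here */
}

static const struct platform_profile_ops foo_profile_ops = {
    .probe       = foo_probe,
    .profile_get = foo_profile_get,
    .profile_set = foo_profile_set,
};

/* in the driver's probe path, using the devm variant from this diff:
 *   ppdev = devm_platform_profile_register(dev, "foo", drvdata,
 *                                          &foo_profile_ops);
 */
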
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index c2c70139c4f1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,12 +23,14 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/acpi.h>
@@ -62,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -197,7 +202,7 @@ static int __get_state(acpi_handle handle, u8 *state)
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
- cur_state ? "on" : "off");
+ str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -240,7 +245,7 @@ static int acpi_power_get_list_state(struct list_head *list, u8 *state)
break;
}
- pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
+ pr_debug("Power resource list is %s\n", str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -950,8 +955,8 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
- strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
+ strscpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
device->flags.match_driver = true;
@@ -991,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally enabled only during thaw (once),
+ * so this wait is not a performance issue.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1012,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1023,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models too) has power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem it needs certain
+ * internal flag called "ONEN" to be set.
+ * This flag only gets set from this power resource "_OFF" method, while the
+ * actual modem power gets turned off during suspend by "GP12.PTS" method
+ * called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource "_OFF" method implementation just
+ * sets the aforementioned flag without actually doing anything else (it
+ * doesn't contain any code to actually turn off power).
+ *
+ * The above means that when, upon finishing hibernation, we try to turn this
+ * power resource back ON because its "_STA" method returns 0 (while the
+ * resource is still considered in use), its "_ON" method won't do anything
+ * since the "ONEN" flag is not set.
+ * Overall, this means the modem is dead until the laptop is rebooted, since
+ * its power was cut by "_PTS" and its PCI configuration was lost and could
+ * not be restored.
+ *
+ * The easiest way to work around the issue is to call this power resource
+ * "_OFF" method before calling the "_ON" method to make sure the "ONEN"
+ * flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1045,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1064,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
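
Both DMI quirks above are now probed once in acpi_power_resources_init() and cached in bools, so the resume and power-off paths only test a flag instead of rescanning DMI tables each time. The shape of that pattern, reduced to standalone C (names are illustrative; check_quirk_table() stands in for dmi_check_system()):

#include <stdbool.h>
#include <stdio.h>

static bool hp_quirk;

static bool check_quirk_table(void)
{
    return true;  /* pretend the DMI match fired */
}

static void init_quirks(void)  /* called once, like an __init hook */
{
    hp_quirk = check_quirk_table();
}

static void resume_power_resource(void)
{
    if (hp_quirk)
        printf("quirk path: power OFF, then ON, then settle\n");
    else
        printf("normal path: power ON\n");
}

int main(void)
{
    init_quirks();
    resume_power_resource();
    return 0;
}
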
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..54676e3d82dd 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,18 +229,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
- while ((unsigned long)entry + proc_sz < table_end) {
+ /* ignore subtable types that are smaller than a processor node */
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
+
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
+
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
-
}
return 1;
}
@@ -270,18 +272,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
- while ((unsigned long)entry + proc_sz < table_end) {
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
+ /* entry->length may not equal proc_sz; revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
+ (unsigned long)entry + entry->length <= table_end &&
+ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
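
The PPTT fix above corrects two related bugs: proc_sz was the size of a pointer rather than of struct acpi_pptt_processor, and the '<' bound rejected an entry ending exactly at table_end. A generic sketch of walking variable-length subtables with both fixes applied (simplified layout, not the real ACPI structures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct subtable {
    uint8_t type;
    uint8_t length;  /* total entry length, >= sizeof(struct subtable) */
};

static void walk(const uint8_t *table, size_t table_len)
{
    uintptr_t end = (uintptr_t)table + table_len;
    const uint8_t *p = table;

    /* <= : an entry ending exactly at table_end is still valid */
    while ((uintptr_t)p + sizeof(struct subtable) <= end) {
        const struct subtable *e = (const struct subtable *)p;

        if (e->length == 0)
            return;  /* malformed table, stop */
        if ((uintptr_t)p + e->length > end)
            return;  /* declared length overruns the table */
        printf("entry type %u, length %u\n", e->type, e->length);
        p += e->length;
    }
}

int main(void)
{
    const uint8_t buf[] = { 0, 4, 9, 9, 1, 2 };  /* two entries */

    walk(buf, sizeof(buf));
    return 0;
}
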
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 747f83f7114d..6792d4385eee 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
}
}
- pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
-
return 0;
}
@@ -152,15 +150,52 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+
+ /*
+ * Print an info message and skip the VA lookup if
+ * handler_address is NULL.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+ /*
+ * Print a warning message and skip the rest of this handler's VA
+ * lookups if handler_addr is zero, which is not expected to ever happen.
+ */
+ if (unlikely(!th->handler_addr)) {
+ pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->handler_address);
+ continue;
+ }
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+ /*
+ * According to the PRM specification, static_data_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address))
+ pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+ /*
+ * According to the PRM specification, acpi_param_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
+ pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
@@ -287,9 +322,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
- if (!handler->handler_addr ||
- !handler->static_data_buffer_addr ||
- !handler->acpi_param_buffer_addr) {
+ if (!handler->handler_addr) {
buffer->prm_status = PRM_HANDLER_ERROR;
return AE_OK;
}
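
The added warnings in acpi_parse_prmt() above are deliberately asymmetric: the PRM specification allows the static-data and ACPI-parameter buffers to be absent, so a zero VA only deserves a warning when firmware supplied a non-zero physical address. A small model of that policy (illustrative names):

#include <stdint.h>
#include <stdio.h>

static void check_mapping(const char *what, uint64_t pa, uint64_t va)
{
    /* optional buffer with PA == 0: silence is correct */
    if (!va && pa)
        fprintf(stderr, "Failed to find VA of %s, PA: %#llx\n",
                what, (unsigned long long)pa);
}

int main(void)
{
    check_mapping("static data buffer", 0, 0);  /* allowed by spec, silent */
    check_mapping("handler", 0x1000, 0);        /* real lookup failure, warns */
    return 0;
}
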
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 4322f2da6d10..c08ead07252b 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/export.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
@@ -30,17 +30,16 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
if (!dev->wakeup.flags.valid)
continue;
- seq_printf(seq, "%s\t S%d\t",
+ seq_printf(seq, "%s\t S%llu\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state);
+ dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- device_may_wakeup(&dev->dev) ?
- "enabled" : "disabled");
+ str_enabled_disabled(device_may_wakeup(&dev->dev)));
} else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
@@ -55,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- (device_may_wakeup(&dev->dev) ||
- device_may_wakeup(ldev)) ?
- "enabled" : "disabled",
+ str_enabled_disabled(device_may_wakeup(ldev) ||
+ device_may_wakeup(&dev->dev)),
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
put_device(ldev);
@@ -141,6 +139,5 @@ static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
- proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
+ proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index cb52dd000b95..5d824435b26b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -166,8 +166,7 @@ static int __acpi_processor_start(struct acpi_device *device)
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
- if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
- acpi_processor_power_init(pr);
+ acpi_processor_power_init(pr);
acpi_pss_perf_init(pr);
@@ -237,6 +236,9 @@ static struct notifier_block acpi_processor_notifier_block = {
.notifier_call = acpi_processor_notifier,
};
+void __weak acpi_processor_init_invariance_cppc(void)
+{ }
+
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
@@ -260,6 +262,8 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
+ acpi_processor_register_idle_driver();
+
result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
@@ -270,6 +274,15 @@ static int __init acpi_processor_driver_init(void)
NULL, acpi_soft_cpu_dead);
acpi_processor_throttling_init();
+
+ /*
+ * Frequency invariance calculations on AMD platforms can't be run until
+ * after acpi_cppc_processor_probe() has been called for all online CPUs
+ */
+ acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
@@ -289,6 +302,7 @@ static void __exit acpi_processor_driver_exit(void)
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
+ acpi_processor_unregister_idle_driver();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 831fa4a12159..22b051b94a86 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -24,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -49,12 +51,18 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-struct cpuidle_driver acpi_idle_driver = {
+static struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -268,6 +276,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address &&
+ !pr->power.states[ACPI_STATE_C3].address)
+ return -ENODEV;
+
return 0;
}
@@ -457,10 +469,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
- unsigned int i;
int result;
-
/* NOTE: the idle thread may not be running while calling
* this function */
@@ -477,17 +487,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
-
- /*
- * if one state of type C2 or C3 is available, mark this
- * CPU as being "idle manageable"
- */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- if (pr->power.states[i].valid) {
- pr->power.count = i;
- pr->flags.power = 1;
- }
- }
+ pr->flags.power = 1;
return 0;
}
@@ -578,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
* @dev: the target CPU
* @index: the index of suggested state
*/
-static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -590,12 +590,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
- return -ENODEV;
+ return;
}
-
- /* Never reached */
- return 0;
}
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
@@ -803,12 +802,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
- cx->type == ACPI_STATE_C3) {
- state->enter_dead = acpi_idle_play_dead;
- if (cx->type != ACPI_STATE_C3)
- drv->safe_state_index = count;
- }
+
+ state->enter_dead = acpi_idle_play_dead;
+
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
+ drv->safe_state_index = count;
+
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
@@ -999,11 +998,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1046,9 +1040,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1088,7 +1083,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1103,6 +1098,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1115,14 +1111,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1144,18 +1139,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1361,74 +1357,102 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_registered;
+void acpi_processor_register_idle_driver(void)
+{
+ struct acpi_processor *pr;
+ int ret = -ENODEV;
+ int cpu;
+
+ /*
+	 * The ACPI idle driver is used by all possible CPUs.
+	 * Install the idle handler using the processor power info of one of them.
+	 * Note that the previously set idle handler will still be used on
+	 * platforms that only support C1.
+ */
+ for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
+ pr = per_cpu(processors, cpu);
+ if (!pr)
+ continue;
+
+ ret = acpi_processor_get_power_info(pr);
+ if (!ret) {
+ pr->flags.power_setup_done = 1;
+ acpi_processor_setup_cpuidle_states(pr);
+ break;
+ }
+ }
+
+ if (ret) {
+		pr_debug("No ACPI power information for any CPU.\n");
+ return;
+ }
+
+ ret = cpuidle_register_driver(&acpi_idle_driver);
+ if (ret) {
+ pr_debug("register %s failed.\n", acpi_idle_driver.name);
+ return;
+ }
+ pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
+}
+
+void acpi_processor_unregister_idle_driver(void)
+{
+ cpuidle_unregister_driver(&acpi_idle_driver);
+}
-int acpi_processor_power_init(struct acpi_processor *pr)
+void acpi_processor_power_init(struct acpi_processor *pr)
{
- int retval;
struct cpuidle_device *dev;
+ /*
+ * The code below only works if the current cpuidle driver is the ACPI
+ * idle driver.
+ */
+ if (cpuidle_get_driver() != &acpi_idle_driver)
+ return;
+
if (disabled_by_idle_boot_param())
- return 0;
+ return;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
- /*
- * Install the idle handler if processor power management is supported.
- * Note that we use previously set idle handler will be used on
- * platforms that only support C1.
- */
- if (pr->flags.power) {
- /* Register acpi_idle_driver if not already registered */
- if (!acpi_processor_registered) {
- acpi_processor_setup_cpuidle_states(pr);
- retval = cpuidle_register_driver(&acpi_idle_driver);
- if (retval)
- return retval;
- pr_debug("%s registered with cpuidle\n",
- acpi_idle_driver.name);
- }
+ if (!pr->flags.power)
+ return;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- per_cpu(acpi_cpuidle_device, pr->id) = dev;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
- acpi_processor_setup_cpuidle_dev(pr, dev);
+ per_cpu(acpi_cpuidle_device, pr->id) = dev;
- /* Register per-cpu cpuidle_device. Cpuidle driver
- * must already be registered before registering device
- */
- retval = cpuidle_register_device(dev);
- if (retval) {
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
- return retval;
- }
- acpi_processor_registered++;
+ acpi_processor_setup_cpuidle_dev(pr, dev);
+
+ /*
+	 * Register a cpuidle device for this CPU. The cpuidle driver that will
+	 * use this device is expected to have been registered already.
+ */
+ if (cpuidle_register_device(dev)) {
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
}
- return 0;
}
-int acpi_processor_power_exit(struct acpi_processor *pr)
+void acpi_processor_power_exit(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (disabled_by_idle_boot_param())
- return 0;
+ return;
if (pr->flags.power) {
cpuidle_unregister_device(dev);
- acpi_processor_registered--;
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
-
kfree(dev);
}
pr->flags.power_setup_done = 0;
- return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
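
With registration split out of the per-CPU path, the expected call order is now: register the ACPI idle driver once, then initialize each processor's cpuidle device. A minimal sketch of an assumed caller (example_acpi_idle_init() is hypothetical; the two helpers are the ones introduced above):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <acpi/processor.h>

static int __init example_acpi_idle_init(void)
{
	int cpu;

	/* One-shot: set up cpuidle states and register the driver. */
	acpi_processor_register_idle_driver();

	/* Per CPU: allocate and register the cpuidle devices. */
	for_each_possible_cpu(cpu) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			acpi_processor_power_init(pr);
	}

	return 0;
}
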
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 4265814c74f8..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,12 +20,11 @@
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
-static DEFINE_MUTEX(performance_mutex);
-
/*
* _PPC support is implemented as a CPUfreq policy notifier:
* This means each time a CPUfreq driver registered also with
@@ -174,6 +173,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ if (ignore_ppc == 1)
+ return;
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
@@ -194,6 +196,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+
+ if (!pr->performance)
+ continue;
+
+ ret = acpi_processor_get_platform_limit(pr);
+ if (ret)
+ pr_err("Failed to update freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
@@ -209,6 +219,10 @@ void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
}
}
+#ifdef CONFIG_X86
+
+static DEFINE_MUTEX(performance_mutex);
+
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
@@ -267,7 +281,6 @@ end:
return result;
}
-#ifdef CONFIG_X86
/*
* Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
* in their ACPI data. Calculate the real values and fix up the _PSS data.
@@ -298,9 +311,6 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
px->core_frequency = (100 * (fid + 8)) >> did;
}
}
-#else
-static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
-#endif
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
@@ -440,13 +450,11 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
* the BIOS is older than the CPU and does not know its frequencies
*/
update_bios:
-#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
pr_warn(FW_BUG "BIOS needs update for CPU "
"frequency support\n");
}
-#endif
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
@@ -788,3 +796,4 @@ unlock:
mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
+#endif
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
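
Both converted helpers rely on the scope-based cleanup machinery from <linux/cleanup.h>: a pointer declared with __free(put_cpufreq_policy) has cpufreq_cpu_put() run on it automatically when it goes out of scope, removing the manual put and error-path bookkeeping. A minimal standalone sketch of the pattern (the helper name is illustrative):

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/* Illustrative: read a CPU's maximum frequency with automatic cleanup. */
static unsigned int example_cpu_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	/* No explicit cpufreq_cpu_put(); __free() emits it on every return path. */
	return policy->cpuinfo.max_freq;
}
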
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 00d045e5f524..f9c2bc1d4a3a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -18,9 +18,12 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
+#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
@@ -232,7 +235,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
- pr_warn("Exceed the limit of T-state \n");
+ pr_warn("Exceed the limit of T-state\n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 80a52a4e66dd..1b997a5497e7 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+	 * subnode object lookup is that of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
-
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * This will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+			 * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
@@ -804,13 +844,35 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
+ const char *nargs_prop)
+{
+ const struct acpi_device_data *data;
+ const union acpi_object *obj;
+ int ret;
+
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return 0;
+
+ ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+ return 0;
+
+ return obj->integer.value;
+}
+
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
+ const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
+ if (nargs_prop)
+ num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
+
/*
* Assume the following integer elements are all args. Stop counting on
* the first reference (possibly represented as a string) or end of the
@@ -882,45 +944,10 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
return &dn->fwnode;
}
-/**
- * __acpi_node_get_property_reference - returns handle to the referenced object
- * @fwnode: Firmware node to get the property from
- * @propname: Name of the property
- * @index: Index of the reference to return
- * @num_args: Maximum number of arguments after each reference
- * @args: Location to store the returned reference with optional arguments
- * (may be NULL)
- *
- * Find property with @name, verifify that it is a package containing at least
- * one object reference and if so, store the ACPI device object pointer to the
- * target object in @args->adev. If the reference includes arguments, store
- * them in the @args->args[] array.
- *
- * If there's more than one reference in the property value package, @index is
- * used to select the one to return.
- *
- * It is possible to leave holes in the property value set like in the
- * example below:
- *
- * Package () {
- * "cs-gpios",
- * Package () {
- * ^GPIO, 19, 0, 0,
- * ^GPIO, 20, 0, 0,
- * 0,
- * ^GPIO, 21, 0, 0,
- * }
- * }
- *
- * Calling this function with index %2 or index %3 return %-ENOENT. If the
- * property does not contain any more values %-ENOENT is returned. The NULL
- * entry must be single integer and preferably contain value %0.
- *
- * Return: %0 on success, negative error code on failure.
- */
-int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
- const char *propname, size_t index, size_t num_args,
- struct fwnode_reference_args *args)
+static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -996,10 +1023,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
- &element, end, num_args);
+ nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1014,10 +1041,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
- ref_fwnode, &element, end,
- num_args);
+ ref_fwnode, nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1039,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
}
+
+/**
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
+ * @propname: Name of the property
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
+ *
+ * Find property with @propname, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+ * target object in @args->adev. If the reference includes arguments, store
+ * them in the @args->args[] array.
+ *
+ * If there's more than one reference in the property value package, @index is
+ * used to select the one to return.
+ *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain value %0.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *propname, size_t index,
+ size_t num_args,
+ struct fwnode_reference_args *args)
+{
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
+}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
@@ -1187,8 +1257,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1280,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
@@ -1318,6 +1388,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
return NULL;
}
+/*
+ * acpi_get_next_present_subnode - Return the next present child node handle
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ *
+ * Like acpi_get_next_subnode(), but the device nodes returned by
+ * acpi_get_next_present_subnode() are guaranteed to be present.
+ *
+ * Returns: The fwnode handle of the next present sub-node.
+ */
+static struct fwnode_handle *
+acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ do {
+ child = acpi_get_next_subnode(fwnode, child);
+ } while (is_acpi_device_node(child) &&
+ !acpi_device_is_present(to_acpi_device_node(child)));
+
+ return child;
+}
+
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
@@ -1492,7 +1584,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
- return false;
+ return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -1558,16 +1650,6 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static int
-acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
- const char *prop, const char *nargs_prop,
- unsigned int args_count, unsigned int index,
- struct fwnode_reference_args *args)
-{
- return __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, args);
-}
-
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
@@ -1656,12 +1738,13 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
- .get_next_child_node = acpi_get_next_subnode, \
+ .get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
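
With acpi_fwnode_get_reference_args() now honoring @nargs_prop, ACPI firmware nodes gain the behavior documented for the generic fwnode API: when a cells property is named, the argument count is read from that property on the referenced node rather than taken from the fixed count. A sketch of a consumer under assumed property names (both "example-ref" and "#example-cells" are hypothetical):

#include <linux/property.h>

static int example_parse_ref(struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	/*
	 * The argument count after the reference is read from the
	 * "#example-cells" property of the referenced node; the fixed
	 * count (0 here) only applies when nargs_prop is NULL.
	 */
	ret = fwnode_property_get_reference_args(fwnode, "example-ref",
						 "#example-cells", 0, 0, &args);
	if (ret)
		return ret;

	/* args.nargs integers from the package are now in args.args[]. */
	fwnode_handle_put(args.fwnode);
	return 0;
}
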
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7fe842dae1ec..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -250,6 +251,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);
+
+ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+ res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
@@ -265,9 +269,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
- if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
- res->flags |= IORESOURCE_PREFETCH;
-
return !(res->flags & IORESOURCE_DISABLED);
}
@@ -441,6 +442,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1404VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
+ },
+ },
+ {
+ /* Asus Vivobook X1504VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -497,6 +512,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506CU* */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -520,7 +542,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
- /* MECHREV Jiaolong17KS Series GM7XG0M */
+ /* MECHREVO Jiaolong17KS Series GM7XG0M */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
},
@@ -557,6 +579,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
@@ -646,6 +674,24 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+ /* MACHENIKE L16P/L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
+ /*
+		 * On the TongFang GM5HG0A the board name is changed by the
+		 * SKIKK Vanaheim relabel, so check OEM strings instead. Note
+		 * that OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -671,11 +717,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
@@ -735,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index a96fdf1e2cb8..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 4cdff387deff..42c1a9052470 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
- pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
- pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
@@ -121,7 +119,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
*val = data.ret.value;
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
@@ -150,7 +148,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 5ef97905a727..7c00f7995e86 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -7,7 +7,9 @@
#include <linux/acpi.h>
#include "init.h"
-void __init acpi_riscv_init(void)
+void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
index 0b9a07e4031f..1680aa2aaf23 100644
--- a/drivers/acpi/riscv/init.h
+++ b/drivers/acpi/riscv/init.h
@@ -2,3 +2,4 @@
#include <linux/init.h>
void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
index cced960c2aef..d9a2154d6c6a 100644
--- a/drivers/acpi/riscv/irq.c
+++ b/drivers/acpi/riscv/irq.c
@@ -10,6 +10,8 @@
#include "init.h"
+#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0)
+
struct riscv_ext_intc_list {
acpi_handle handle;
u32 gsi_base;
@@ -17,6 +19,7 @@ struct riscv_ext_intc_list {
u32 nr_idcs;
u32 id;
u32 type;
+ u32 flag;
struct list_head list;
};
@@ -69,6 +72,22 @@ static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle
return AE_NOT_FOUND;
}
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ list_for_each_entry(ext_intc_element, &ext_intc_list, list) {
+ if (gsi_base == ext_intc_element->gsi_base &&
+ (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
u32 *id, u32 *nr_irqs, u32 *nr_idcs)
{
@@ -115,20 +134,67 @@ struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
u32 id, u32 type)
{
- struct riscv_ext_intc_list *ext_intc_element;
+ struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
if (!ext_intc_element)
return -ENOMEM;
ext_intc_element->gsi_base = gsi_base;
- ext_intc_element->nr_irqs = nr_irqs;
+
+	/* If nr_irqs is zero, mark the registration as pending and set the maximum possible range */
+ if (nr_irqs) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ } else {
+ ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING;
+ ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base;
+ }
+
ext_intc_element->nr_idcs = nr_idcs;
ext_intc_element->id = id;
- list_add_tail(&ext_intc_element->list, &ext_intc_list);
+ list_for_each_entry(node, &ext_intc_list, list) {
+ if (node->gsi_base < ext_intc_element->gsi_base)
+ break;
+ }
+
+ /* Adjust the previous node's GSI range if that has pending registration */
+ prev = list_prev_entry(node, list);
+ if (!list_entry_is_head(prev, &ext_intc_list, list)) {
+ if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING)
+ prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base;
+ }
+
+ list_add_tail(&ext_intc_element->list, &node->list);
return 0;
}
+static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI);
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
void *context, void **return_value)
{
@@ -183,6 +249,9 @@ void __init riscv_acpi_init_gsi_mapping(void)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+
+	/* Unlike PLIC/APLIC, SYSMSI doesn't have a MADT entry */
+ acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL);
}
static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
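
The pending flag covers interrupt controllers such as the RPMI system MSI whose _GSB method gives a GSI base but no range size at enumeration time: the range is provisionally registered as open-ended and later trimmed, either by the next controller registered below it or by the irqchip driver once the hardware count is known. A sketch of the second path (the probe context is hypothetical; riscv_acpi_update_gsi_range() is the helper added above):

#include <linux/printk.h>

/* Hypothetical irqchip probe step: the controller was registered with
 * nr_irqs == 0, so fix up the provisional GSI range once the real
 * interrupt count has been read from the hardware.
 */
static int example_trim_gsi_range(u32 gsi_base, u32 hw_nr_irqs)
{
	int ret;

	ret = riscv_acpi_update_gsi_range(gsi_base, hw_nr_irqs);
	if (ret)
		pr_warn("no pending GSI range at base %u\n", gsi_base);

	return ret;
}
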
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..683fcfe35c31
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+		 * It is assumed that PCI segment numbers map one-to-one
+ * with root complexes. Each segment number can represent only
+ * one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+	 * The IOMMU driver may not have been probed yet.
+	 * Defer the IOMMU configuration in that case.
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
+
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for a PCI-based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+	/*
+	 * rimt_table will be used at runtime after the RIMT init, so we don't
+	 * need to call acpi_put_table() to release the RIMT table mapping.
+	 */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
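
As the comment above rimt_iommu_register() says, the link between RIMT nodes and IOMMU instances is established by the IOMMU driver itself. A minimal sketch of what that looks like from a platform IOMMU driver's probe path (the driver and function names are hypothetical; only rimt_iommu_register() comes from this file):

#include <linux/acpi_rimt.h>
#include <linux/platform_device.h>

static int example_iommu_probe(struct platform_device *pdev)
{
	int ret;

	/* ... hardware setup and iommu_device_register() go here ... */

	/*
	 * Hook this instance up to its RIMT node so that later
	 * rimt_iommu_configure_id() calls can resolve devices to it.
	 */
	ret = rimt_iommu_register(&pdev->dev);
	if (ret)
		dev_err(&pdev->dev, "RIMT registration failed: %d\n", ret);

	return ret;
}
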
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 7a0914055fca..a3f95a3fffde 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -644,8 +644,8 @@ static int acpi_sbs_add(struct acpi_device *device)
sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
- strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SBS_CLASS);
device->driver_data = sbs;
result = acpi_charger_add(sbs);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 16f2daaa2c45..1a2bf520be23 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
+#include "internal.h"
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
@@ -236,12 +237,6 @@ static int smbus_alarm(void *context)
return 0;
}
-typedef int (*acpi_ec_query_func) (void *data);
-
-extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
- acpi_handle handle, acpi_ec_query_func func,
- void *data);
-
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
@@ -257,8 +252,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return -EIO;
}
- strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
if (!hc)
@@ -278,8 +273,6 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return 0;
}
-extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-
static void acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7ecc401fb97f..ef16d58b2949 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -723,10 +724,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
- acpi_status status;
- status = acpi_get_physical_device_location(adev->handle, &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(adev->handle, &pld))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -847,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -860,6 +861,8 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
"RSCV0001", /* RISC-V PLIC */
"RSCV0002", /* RISC-V APLIC */
+ "RSCV0005", /* RISC-V SBI MPXY MBOX */
+ "RSCV0006", /* RISC-V RPMI SYSMSI */
"PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1179,19 +1182,19 @@ static void acpi_device_get_busid(struct acpi_device *device)
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
if (!acpi_dev_parent(device)) {
- strcpy(device->pnp.bus_id, "ACPI");
+ strscpy(device->pnp.bus_id, "ACPI");
return;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
- strcpy(device->pnp.bus_id, "PWRF");
+ strscpy(device->pnp.bus_id, "PWRF");
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
- strcpy(device->pnp.bus_id, "SLPF");
+ strscpy(device->pnp.bus_id, "SLPF");
break;
case ACPI_BUS_TYPE_ECDT_EC:
- strcpy(device->pnp.bus_id, "ECDT");
+ strscpy(device->pnp.bus_id, "ECDT");
break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
@@ -1202,7 +1205,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
else
break;
}
- strcpy(device->pnp.bus_id, bus_id);
+ strscpy(device->pnp.bus_id, bus_id);
break;
}
}
@@ -1453,8 +1456,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_object_is_system_bus(handle)) {
/* \_SB, \_TZ, LNXSYBUS */
acpi_add_id(pnp, ACPI_BUS_HID);
- strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(pnp->device_class, ACPI_BUS_CLASS);
+ strscpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
+ strscpy(pnp->device_class, ACPI_BUS_CLASS);
}
break;
@@ -1631,15 +1634,11 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
- mutex_unlock(&iommu_probe_device_lock);
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * iommu_probe_device() call for dev, replay it to get things in order.
- */
- if (!err && dev->bus)
- err = iommu_probe_device(dev);
+ mutex_unlock(&iommu_probe_device_lock);
return err;
}
@@ -1769,6 +1768,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
@@ -2710,6 +2710,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
+ acpi_power_resources_init();
acpi_int340x_thermal_init();
acpi_init_lpit();
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..d4d52d5e9016 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+	 * SPCR 1.09 defines the Precise Baud Rate field, which contains a
+	 * specific non-zero baud rate that overrides the value of the
+	 * Configured Baud Rate field. If this field is zero or not present,
+	 * the Configured Baud Rate is used.
+ */
+ if (table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 687524b50085..e596224302f4 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 9e1b01c35070..57fc8bc56166 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -218,7 +218,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -228,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -396,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -408,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+	ACPI_SIG_NBFT, ACPI_SIG_SWFT };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
@@ -719,8 +719,12 @@ int __init acpi_locate_initial_tables(void)
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_warn("Failed to initialize tables, status=0x%x (%s)\n", status, msg);
return -EINVAL;
+ }
return 0;
}
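The acpi_table_checksum() helper named in the hunk header above enforces the standard ACPI rule that all bytes of a table sum to zero modulo 256; a self-contained sketch of that check:

	/* Returns 0 for a valid table: the byte sum must be 0 mod 256. */
	static u8 acpi_table_checksum_sketch(const u8 *buffer, u32 length)
	{
		u8 sum = 0;

		while (length--)
			sum += *buffer++;
		return sum;
	}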
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 78db38c7076e..8537395b417b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -796,14 +796,20 @@ static int acpi_thermal_add(struct acpi_device *device)
return -ENOMEM;
tz->device = device;
- strcpy(tz->name, device->pnp.bus_id);
- strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
+ strscpy(tz->name, device->pnp.bus_id);
+ strscpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
device->driver_data = tz;
acpi_thermal_aml_dependency_fix(tz);
- /* Get trip points [_CRT, _PSV, etc.] (required). */
+ /*
+ * Set the cooling mode [_SCP] to active cooling. This needs to happen before
+ * we retrieve the trip point values.
+ */
+ acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
+
+ /* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
crit_temp = acpi_thermal_get_critical_trip(tz);
@@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- /* Set the cooling mode [_SCP] to active cooling. */
- acpi_execute_simple_method(tz->device->handle, "_SCP",
- ACPI_THERMAL_MODE_ACTIVE);
-
/* Determine the default polling frequency [_TZP]. */
if (tzp)
tz->polling_frequency = tzp;
@@ -922,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -937,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
@@ -1082,7 +1082,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
-MODULE_IMPORT_NS(ACPI_THERMAL);
+MODULE_IMPORT_NS("ACPI_THERMAL");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
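acpi_execute_simple_method(), which the hunk above moves ahead of the trip-point reads, evaluates an AML method that takes a single integer argument. A hedged usage sketch, with the handle and mode value taken from the hunk:

	/*
	 * _SCP takes one integer argument; 0 selects active cooling.
	 * The driver deliberately ignores the status, since _SCP is optional.
	 */
	acpi_execute_simple_method(tz->device->handle, "_SCP",
				   ACPI_THERMAL_MODE_ACTIVE);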
diff --git a/drivers/acpi/thermal_lib.c b/drivers/acpi/thermal_lib.c
index 6214d6ebe1fa..f81591927e86 100644
--- a/drivers/acpi/thermal_lib.c
+++ b/drivers/acpi/thermal_lib.c
@@ -53,25 +53,25 @@ int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
return acpi_trip_temp(adev, obj_name, ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, "ACPI_THERMAL");
int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_PSV", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL");
int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_HOT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, "ACPI_THERMAL");
int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_CRT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, "ACPI_THERMAL");
static int thermal_temp(int error, int temp_decik, int *ret_temp)
{
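thermal_temp() converts ACPI's deci-Kelvin readings into the milli-Celsius values the thermal core expects. Assuming the usual kernel helper semantics, the arithmetic is:

	/* deci-Kelvin -> milli-Celsius: 0 degC = 273.15 K, so mC = dK * 100 - 273150 */
	static int deci_kelvin_to_millicelsius_sketch(int temp_decik)
	{
		return temp_decik * 100 - 273150;
	}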
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6de542d99518..526563a0d188 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -494,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);
-acpi_status
+bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
@@ -502,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
-
if (ACPI_FAILURE(status))
- return status;
+ return false;
output = buffer.pointer;
@@ -523,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
out:
kfree(buffer.pointer);
- return status;
+ return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
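With the return type switched from acpi_status to bool, callers can test the result directly instead of wrapping it in ACPI_FAILURE(). A hedged caller sketch, assuming handle is a valid acpi_handle:

	struct acpi_pld_info *pld;

	if (!acpi_get_physical_device_location(handle, &pld))
		return;		/* no _PLD, or evaluation failed */

	/* ... consume pld->panel, pld->group_position, etc. ... */
	ACPI_FREE(pld);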
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 015bd8e66c1c..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -551,6 +551,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Air 7,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Apple MacBook Air 9,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -566,6 +574,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 11,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
@@ -932,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
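Each entry above follows the standard DMI quirk pattern: every DMI_MATCH() field in an entry must match the firmware-reported strings for its callback to run, and the table is scanned in order. A minimal, purely hypothetical entry for reference:

	{
		.callback = video_detect_force_native,	/* runs only on a full match */
		/* hypothetical machine, for illustration */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},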
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 2aa69a2fba73..c13a20365c2c 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,11 +19,11 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct viot_iommu {
/* Node offset within the table */
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index b02bf770aead..ff6dc957bc11 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
@@ -67,7 +67,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 258440b899a9..6daa6372f980 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -387,9 +387,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
- /* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
-
{ }
};
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 6af546b21574..4ee30c2897a2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -295,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
* 2. Devices which also have the skip i2c/serdev quirks and which
* need the x86-android-tablets module to properly work.
+ * Sorted alphabetically.
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
@@ -308,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Acer Iconia One 8 A1-840 (non FHD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Asus ME176C tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
@@ -318,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
- /* Lenovo Yoga Book X90F/L */
+ /* Asus TF103C transformer 2-in-1 */
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Lenovo Yoga Book X90F/L */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
@@ -358,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Medion Lifetab S10346 */
@@ -392,6 +409,32 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -411,6 +454,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33F5", 0 }, /* TI Dollar Cove PMIC */
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
{ "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
{ "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
@@ -439,18 +483,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
- u64 uid;
- int ret;
+ u64 uid = 0;
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret)
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
return 0;
- dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ /* uid is left at 0 on errors and 0 is not a valid UART UID */
+ acpi_dev_uid_to_integer(adev, &uid);
+
+ /* For PCI UARTs without a UID */
+ if (!uid && dev_is_pci(controller_parent)) {
+ struct pci_dev *pdev = to_pci_dev(controller_parent);
+
+ /*
+ * Devfn values for PCI UARTs on Bay Trail SoCs, which are
+ * the only devices where this fallback is necessary.
+ */
+ if (pdev->devfn == PCI_DEVFN(0x1e, 3))
+ uid = 1;
+ else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
+ uid = 2;
+ }
+
+ if (!uid)
+ return 0;
- if (!dev_is_platform(controller_parent)) {
+ if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
/* PNP enumerated UARTs */
if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
*skip = true;
@@ -505,7 +566,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
* Set skip to true so that the tty core creates a serdev ctrl device.
* The backlight driver will manually create the serdev client device.
*/
- if (acpi_dev_hid_match(adev, "DELL0501")) {
+ if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
*skip = true;
/*
* Create a platform dev for dell-uart-backlight to bind to.
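The devfn fallback above relies on PCI packing the slot (device) and function numbers into one byte: devfn = slot << 3 | func. The decode helpers come from <linux/pci.h>:

	/* for PCI_DEVFN(0x1e, 3), i.e. Bay Trail UART 1 per the hunk above */
	u8 slot = PCI_SLOT(pdev->devfn);	/* 0x1e */
	u8 func = PCI_FUNC(pdev->devfn);	/* 3 */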
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 0230c43377c1..74e34a07ef72 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -138,7 +138,7 @@ static int amba_read_periphid(struct amba_device *dev)
void __iomem *tmp;
int i, ret;
- ret = dev_pm_domain_attach(&dev->dev, true);
+ ret = dev_pm_domain_attach(&dev->dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
goto err_out;
@@ -291,7 +291,7 @@ static int amba_probe(struct device *dev)
if (ret < 0)
break;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
break;
@@ -364,7 +364,8 @@ static int amba_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
- if (!ret && !drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -449,6 +450,12 @@ const struct bus_type amba_bustype = {
};
EXPORT_SYMBOL_GPL(amba_bustype);
+bool dev_is_amba(const struct device *dev)
+{
+ return dev->bus == &amba_bustype;
+}
+EXPORT_SYMBOL_GPL(dev_is_amba);
+
static int __init amba_init(void)
{
return bus_register(&amba_bustype);
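The new dev_is_amba() helper mirrors dev_is_pci() and dev_is_platform(): a bus-membership test that lets generic code branch without reaching into bus internals. A hedged usage sketch:

	if (dev_is_amba(dev)) {
		struct amba_device *adev = to_amba_device(dev);

		/* AMBA-specific handling, e.g. inspect adev->periphid */
	}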
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
@@ -37,14 +51,15 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
-config ANDROID_BINDER_IPC_SELFTEST
- bool "Android Binder IPC Driver Selftest"
- depends on ANDROID_BINDER_IPC
+config ANDROID_BINDER_ALLOC_KUNIT_TEST
+ tristate "KUnit Tests for Android Binder Alloc" if !KUNIT_ALL_TESTS
+ depends on ANDROID_BINDER_IPC && KUNIT
+ default KUNIT_ALL_TESTS
help
- This feature allows binder selftest to run.
+ This feature builds the binder alloc KUnit tests.
- Binder selftest checks the allocation and free of binder buffers
- exhaustively with combinations of various buffer sizes and
- alignments.
+ Each test case runs using a pared-down binder_alloc struct and
+ test-specific freelist, which allows this KUnit module to be loaded
+ for testing without interfering with a running system.
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
-obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 978740537a1a..8c99ceaa303b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,10 +68,13 @@
#include <linux/sizes.h>
#include <linux/ktime.h>
+#include <kunit/visibility.h>
+
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -79,6 +82,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -1225,6 +1230,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
+
+ if (ref->freeze) {
+ binder_dequeue_work(ref->proc, &ref->freeze->work);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ }
+
binder_stats_deleted(BINDER_STAT_REF);
}
@@ -1577,11 +1588,10 @@ static struct binder_thread *binder_get_txn_from(
{
struct binder_thread *from;
- spin_lock(&t->lock);
+ guard(spinlock)(&t->lock);
from = t->from;
if (from)
atomic_inc(&from->tmp_ref);
- spin_unlock(&t->lock);
return from;
}
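The guard(spinlock)(...) conversion above uses <linux/cleanup.h>: the lock is dropped automatically when the enclosing scope exits, which is what makes the early-return style safe without an explicit unlock. A standalone sketch with hypothetical names:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static int read_value_locked(spinlock_t *lock, const int *value)
	{
		guard(spinlock)(lock);	/* released on every return path */

		if (!value)
			return -EINVAL;
		return *value;
	}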
@@ -1965,7 +1975,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -2984,6 +2994,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
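binder_nl_family, referenced throughout binder_netlink_report(), lives in the new binder_netlink.{h,c} files, which are not part of this excerpt. A generic-netlink family with one multicast group can be sketched as below; all names here are hypothetical stand-ins:

	static const struct genl_multicast_group demo_nl_mcgrps[] = {
		{ .name = "report" },	/* index 0 <-> DEMO_NLGRP_REPORT */
	};

	static struct genl_family demo_nl_family = {
		.name		= "demo",
		.version	= 1,
		.mcgrps		= demo_nl_mcgrps,
		.n_mcgrps	= ARRAY_SIZE(demo_nl_mcgrps),
	};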
@@ -3011,8 +3084,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -3034,6 +3106,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3137,10 +3235,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (!target_node) {
binder_txn_error("%d:%d cannot find target node\n",
- thread->pid, proc->pid);
- /*
- * return_error is set above
- */
+ proc->pid, thread->pid);
+ /* return_error is set above */
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -3222,24 +3318,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3251,48 +3336,28 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3300,7 +3365,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3334,23 +3399,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3394,7 +3459,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3678,11 +3743,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3710,7 +3777,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3731,8 +3797,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3773,17 +3842,14 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3794,14 +3860,19 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -3850,7 +3921,6 @@ binder_request_freeze_notification(struct binder_proc *proc,
{
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
- bool is_frozen;
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
if (!freeze)
@@ -3866,32 +3936,31 @@ binder_request_freeze_notification(struct binder_proc *proc,
}
binder_node_lock(ref->node);
-
- if (ref->freeze || !ref->node->proc) {
- binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
- proc->pid, thread->pid,
- ref->freeze ? "already set" : "dead node");
+ if (ref->freeze) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
+ proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(freeze);
return -EINVAL;
}
- binder_inner_proc_lock(ref->node->proc);
- is_frozen = ref->node->proc->is_frozen;
- binder_inner_proc_unlock(ref->node->proc);
binder_stats_created(BINDER_STAT_FREEZE);
INIT_LIST_HEAD(&freeze->work.entry);
freeze->cookie = handle_cookie->cookie;
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
- freeze->is_frozen = is_frozen;
-
ref->freeze = freeze;
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
- binder_wakeup_proc_ilocked(proc);
- binder_inner_proc_unlock(proc);
+ if (ref->node->proc) {
+ binder_inner_proc_lock(ref->node->proc);
+ freeze->is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+ }
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
@@ -4220,20 +4289,21 @@ static int binder_thread_write(struct binder_proc *proc,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
} else {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
- proc->pid, thread->pid, (u64)data_ptr,
+ "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid,
+ (unsigned long)data_ptr - proc->alloc.vm_start,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
@@ -5050,16 +5120,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
- t->buffer->data_size, t->buffer->offsets_size,
- (u64)trd->data.ptr.buffer,
- (u64)trd->data.ptr.offsets);
+ t->buffer->data_size, t->buffer->offsets_size);
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -5151,6 +5219,16 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze;
+
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered freeze notification, %016llx\n",
+ (u64)freeze->cookie);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ } break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@@ -5231,6 +5309,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -5373,10 +5452,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+ return -EFAULT;
+
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
@@ -5391,8 +5469,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
goto out;
}
}
@@ -5406,22 +5482,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
+ if (ret < 0)
goto out;
- }
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
out:
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
return ret;
}
@@ -5434,32 +5505,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct binder_node *new_node;
kuid_t curr_euid = current_euid();
- mutex_lock(&context->context_mgr_node_lock);
+ guard(mutex)(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
- goto out;
+ return ret;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, fbo);
- if (!new_node) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_node)
+ return -ENOMEM;
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
@@ -5468,8 +5535,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
-out:
- mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
@@ -5552,6 +5617,7 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
{
+ struct binder_node *prev = NULL;
struct rb_node *n;
struct binder_ref *ref;
@@ -5560,7 +5626,10 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
struct binder_node *node;
node = rb_entry(n, struct binder_node, rb_node);
+ binder_inc_node_tmpref_ilocked(node);
binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
binder_node_lock(node);
hlist_for_each_entry(ref, &node->refs, node_entry) {
/*
@@ -5586,10 +5655,15 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
}
binder_inner_proc_unlock(ref->proc);
}
+ prev = node;
binder_node_unlock(node);
binder_inner_proc_lock(proc);
+ if (proc->is_dead)
+ break;
}
binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
@@ -5696,11 +5770,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
- /*pr_info("binder_ioctl: %d:%d %x %lx\n",
- proc->pid, current->pid, cmd, arg);*/
-
- binder_selftest_alloc(&proc->alloc);
-
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5936,10 +6005,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
binder_alloc_vma_close(&proc->alloc);
}
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
@@ -6108,7 +6178,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
debugfs_remove(proc->debugfs_entry);
if (proc->binderfs_entry) {
- binderfs_remove_file(proc->binderfs_entry);
+ simple_recursive_removal(proc->binderfs_entry, NULL);
proc->binderfs_entry = NULL;
}
@@ -6260,6 +6330,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
+ binder_release_work(proc, &proc->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -6301,14 +6372,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ guard(mutex)(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -6323,13 +6393,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -6350,14 +6420,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
- struct binder_proc *proc,
- const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w, bool hash_ptrs)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -6380,9 +6450,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
- prefix, node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
+ if (hash_ptrs)
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id,
+ (void *)(long)node->ptr,
+ (void *)(long)node->cookie);
+ else
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -6393,6 +6469,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
+ case BINDER_WORK_FROZEN_BINDER:
+ seq_printf(m, "%shas frozen binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
+ seq_printf(m, "%shas cleared freeze notification\n", prefix);
+ break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
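The hash_ptrs split exists because %p hashes kernel pointers before printing them, while the legacy %016llx format emits the raw cookie/pointer bits; casting through (void *)(long) is what routes a binder_uintptr_t into the hashing path. In miniature:

	if (hash_ptrs)
		seq_printf(m, "u%p\n", (void *)(long)node->ptr);	/* hashed */
	else
		seq_printf(m, "u%016llx\n", (u64)node->ptr);		/* raw */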
@@ -6401,7 +6483,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
- int print_always)
+ bool print_always, bool hash_ptrs)
{
struct binder_transaction *t;
struct binder_work *w;
@@ -6431,14 +6513,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work_ilocked(m, thread->proc, " ",
- " pending transaction", w);
+ " pending transaction",
+ w, hash_ptrs);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
- struct binder_node *node)
+ struct binder_node *node,
+ bool hash_ptrs)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -6446,8 +6530,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
count = hlist_count_nodes(&node->refs);
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
- node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ if (hash_ptrs)
+ seq_printf(m, " node %d: u%p c%p", node->debug_id,
+ (void *)(long)node->ptr, (void *)(long)node->cookie);
+ else
+ seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
@@ -6460,7 +6549,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
if (node->proc) {
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, node->proc, " ",
- " pending async transaction", w);
+ " pending async transaction",
+ w, hash_ptrs);
}
}
@@ -6476,8 +6566,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
binder_node_unlock(ref->node);
}
-static void print_binder_proc(struct seq_file *m,
- struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m: struct seq_file for output via seq_printf()
+ * @proc: struct binder_proc whose inner_proc_lock we hold (if any)
+ * @node: struct binder_node to print fields of
+ * @prev_node: struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @proc->nodes or the dead nodes list.
+ * Caller must hold either @proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return: pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_node *prev_node, bool hash_ptrs)
+{
+ /*
+	 * Take a temporary reference on the node so that it isn't freed while
+ * we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /*
+ * Live nodes need to drop the inner proc lock and dead nodes need to
+ * drop the binder_dead_nodes_lock before trying to take the node lock.
+ */
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ if (prev_node)
+ binder_put_node(prev_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node, hash_ptrs);
+ binder_node_inner_unlock(node);
+ if (proc)
+ binder_inner_proc_lock(proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+ bool print_all, bool hash_ptrs)
{
struct binder_work *w;
struct rb_node *n;
@@ -6490,31 +6626,19 @@ static void print_binder_proc(struct seq_file *m,
header_pos = m->count;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
- rb_node), print_all);
+ rb_node), print_all, hash_ptrs);
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the tree
- * while we print it.
- */
- binder_inc_node_tmpref_ilocked(node);
- /* Need to drop inner lock to take node lock */
- binder_inner_proc_unlock(proc);
- if (last_node)
- binder_put_node(last_node);
- binder_node_inner_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_inner_unlock(node);
- last_node = node;
- binder_inner_proc_lock(proc);
+ last_node = print_next_binder_node_ilocked(m, proc, node,
+ last_node,
+ hash_ptrs);
}
binder_inner_proc_unlock(proc);
if (last_node)
@@ -6522,23 +6646,26 @@ static void print_binder_proc(struct seq_file *m,
if (print_all) {
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc);
- n != NULL;
- n = rb_next(n))
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
print_binder_ref_olocked(m, rb_entry(n,
- struct binder_ref,
- rb_node_desc));
+ struct binder_ref,
+ rb_node_desc));
binder_proc_unlock(proc);
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work_ilocked(m, proc, " ",
- " pending transaction", w);
+ " pending transaction", w,
+ hash_ptrs);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ seq_puts(m, " has delivered freeze binder\n");
+ break;
+ }
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
@@ -6663,7 +6790,7 @@ static void print_binder_proc_stats(struct seq_file *m,
count = 0;
ready_threads = 0;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
count++;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6677,7 +6804,7 @@ static void print_binder_proc_stats(struct seq_file *m,
ready_threads,
free_async_space);
count = 0;
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n))
count++;
binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
@@ -6685,7 +6812,7 @@ static void print_binder_proc_stats(struct seq_file *m,
strong = 0;
weak = 0;
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
@@ -6712,7 +6839,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6723,31 +6850,40 @@ static int state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the list
- * while we print it.
- */
- node->tmp_refs++;
- spin_unlock(&binder_dead_nodes_lock);
- if (last_node)
- binder_put_node(last_node);
- binder_node_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_unlock(node);
- last_node = node;
- spin_lock(&binder_dead_nodes_lock);
- }
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+ last_node = print_next_binder_node_ilocked(m, NULL, node,
+ last_node,
+ hash_ptrs);
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
+ print_binder_proc(m, proc, true, hash_ptrs);
mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+	struct binder_proc *proc;
+
+	seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, false, hash_ptrs);
+ mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, false);
+ return 0;
+}
+
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, true);
return 0;
}
@@ -6769,14 +6905,13 @@ static int stats_show(struct seq_file *m, void *unused)
static int transactions_show(struct seq_file *m, void *unused)
{
- struct binder_proc *proc;
-
- seq_puts(m, "binder transactions:\n");
- mutex_lock(&binder_procs_lock);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- mutex_unlock(&binder_procs_lock);
+ print_binder_transactions(m, false);
+ return 0;
+}
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_transactions(m, true);
return 0;
}
@@ -6785,14 +6920,13 @@ static int proc_show(struct seq_file *m, void *unused)
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- mutex_lock(&binder_procs_lock);
+ guard(mutex)(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
+ print_binder_proc(m, itr, true, false);
}
}
- mutex_unlock(&binder_procs_lock);
return 0;
}
@@ -6856,8 +6990,10 @@ const struct file_operations binder_fops = {
};
DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6868,6 +7004,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "state_hashed",
+ .mode = 0444,
+ .fops = &state_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "stats",
.mode = 0444,
.fops = &stats_fops,
@@ -6880,6 +7022,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "transactions_hashed",
+ .mode = 0444,
+ .fops = &transactions_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "transaction_log",
.mode = 0444,
.fops = &transaction_log_fops,
@@ -6894,6 +7042,18 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6918,7 +7078,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -6971,16 +7131,23 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}
@@ -6997,5 +7164,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+ /// The target node of the transaction this allocation is associated with.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+ /// This is used to serialize oneway transactions on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
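+ /// Verify that the range `offset..offset + size` lies within this allocation,
+ /// rejecting both integer overflow and out-of-bounds ranges.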
+ fn size_check(&self, offset: usize, size: usize) -> Result {
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
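+ /// Hand the buffer back to the process as a delivered buffer, so that dropping this
+ /// object no longer frees it; the buffer is instead freed when userspace releases it
+ /// (e.g. with `BC_FREE_BUFFER`).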
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
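+ /// Reserve a file descriptor for every file embedded in this transaction and write
+ /// the reserved fd numbers into the buffer. The files are not installed into the fd
+ /// table until `TranslatedFds::commit` is called.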
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace as soon as possible if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+ // If we used an object type smaller than the largest object size, then we've read more
+ // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+ // The value of `obj.hdr.type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If commit is called, then these fds should be closed. (If commit is not called, then they
+ /// shouldn't be closed.)
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
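+ /// Install each reserved fd into the current fd table, consuming the reservations,
+ /// and return the list of fds that should be closed when the buffer is freed.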
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.).
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! This logic could plausibly live in `kernel::fs`, but it was rejected there for
+//! being too Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Sync for DeferredFdCloser {}
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `callback_head` field is first in the struct, so this cast correctly gives us a
+ // pointer to the field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+ // is long. Just let it execute and clean up after itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+ // previously must be dropped by the time `do_close_fd` runs, since light refcounts
+ // must be dropped before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
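+// For example, `pub_no_prefix!(binder_driver_return_protocol_, BR_OK)` expands to
+// `pub(crate) const BR_OK: u32 = binder_driver_return_protocol_BR_OK;`, re-exporting
+// the UAPI constant without its verbose prefix.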
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ // Define a wrapper around the C type. Use `MaybeUninit` to ensure that the values
+ // of padding bytes are preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
+
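+// For example, `decl_wrapper!(BinderVersion, uapi::binder_version)` defines a
+// `#[repr(transparent)]` wrapper `BinderVersion(MaybeUninit<uapi::binder_version>)`
+// that derefs to the wrapped C struct and is all-zeroes (padding included) in its
+// `Default` value.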
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: Transparent wrapper is safe to transmute.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..9921827267d0
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl core::fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..e68c3c8bc55a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
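+ // The initializer cannot fail, so the error type is uninhabited and this arm
+ // is unreachable; the empty match makes that explicit to the compiler.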
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ _removed_listener = freeze_entry.remove_node();
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+ // BR_FROZEN_BINDER notifications can cause transactions
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
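+ /// Handle the `BC_REQUEST_FREEZE_NOTIFICATION` command: register a freeze listener
+ /// on the node referenced by the given handle/cookie pair and queue an initial
+ /// notification carrying the node owner's current frozen state.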
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
+
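+ /// Handle the `BC_FREEZE_NOTIFICATION_DONE` command: mark the pending notification
+ /// for this cookie as acknowledged, and queue the deferred
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` reply if a clear was requested in the
+ /// meantime.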
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ if freeze.is_clearing {
+ // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
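+ /// Handle the `BC_CLEAR_FREEZE_NOTIFICATION` command: detach the listener from its
+ /// node and reply with `BR_CLEAR_FREEZE_NOTIFICATION_DONE`, immediately if no
+ /// notification is pending, or once the pending one is acknowledged.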
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..ade895ef791e
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another - when `do_work` is called directly
+/// on the `Node`, then it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+    /// The number of active BR_INCREFS or BR_ACQUIRE operations. (This should be at most two.)
+ ///
+ /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+    /// active operations have ended. This avoids the situation where an increment and a
+    /// decrement get reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRef` into this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+    /// Remove the `NodeRef` from this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
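+
+    // A hypothetical caller sketch (not from the original): the cheap path is tried first,
+    // and the preallocated wrapper is used only on `CouldNotDeliverCriticalIncrement`. Note
+    // that `CritIncrWrapper::new()` allocates, so a real caller must create the wrapper
+    // before taking the process lock:
+    //
+    //     let work: Option<DLArc<dyn DeliverToRead>> =
+    //         match node.incr_refcount_allow_zero2one(strong, inner) {
+    //             Ok(work) => work.map(|w| w as _),
+    //             Err(CouldNotDeliverCriticalIncrement) => {
+    //                 node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, inner)
+    //             }
+    //         };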
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ let death_list = core::mem::take(&mut self.inner.access_mut(&mut guard).death_list);
+ drop(guard);
+ for death in death_list {
+ death.into_arc().set_dead();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
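+
+    // Illustrative flow for the two methods above: if oneway transactions T1 and T2 target
+    // this node in that order, `submit_oneway` pushes T1 to the process and sets
+    // `has_oneway_transaction`, while T2 is parked in `oneway_todo`. Once userspace frees
+    // T1's buffer with `BC_FREE_BUFFER`, `pending_oneway_finished` pops T2 and pushes it,
+    // so userspace never processes two oneway transactions for this node concurrently.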
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
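+            // Allocation must not happen while holding the process spinlock, so any growth
+            // goes through `vec_alloc`, which is allocated below with the lock dropped
+            // before retrying; under the lock, only the non-allocating
+            // `push_within_capacity` is used.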
+ let mut guard = self.owner.inner.lock();
+            // Do not check for `guard.is_dead`. The `dead` flag that matters here is that of
+            // the owner of the listener, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process B.
+///
+/// This type is also used for some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount of the `Node`.
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
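+    /// Merges the counts of `other` into `self`, leaving `other` zeroed.
+    ///
+    /// Each `NodeRef` is charged at most one refcount increment of each kind against the
+    /// `Node` itself, so after merging, the `*_node_count` values are collapsed back down to
+    /// one and the surplus is subtracted from the node's own counters.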
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+/// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+/// to the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
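+///
+/// An illustrative exchange (restating the flow described above):
+///
+/// ```text
+/// userspace                               kernel
+/// BC_REQUEST_DEATH_NOTIFICATION   ---->   (registers the `NodeDeath`)
+///                                 <----   BR_DEAD_BINDER (sent when the process dies)
+/// BC_DEAD_BINDER_DONE             ---->
+/// BC_CLEAR_DEATH_NOTIFICATION    ---->
+///                                 <----   BR_CLEAR_DEATH_NOTIFICATION_DONE
+/// ```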
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
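+            // The initializer returned by `DTRWrap::new` cannot fail here, so the error
+            // type is uninhabited; the empty match proves this arm is unreachable.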
+ Err(err) => match err {},
+ }
+ }
+}
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker will use trylock methods because it locks them in a different order.
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: The `name` points at a valid c string.
+ let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+ if shrinker.is_null() {
+            // SAFETY: We initialized it, so it's okay to destroy it.
+            unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+            // `ret` is zero at this point, so report the failed allocation as `ENOMEM`.
+            return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
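+
+// An illustrative registration sketch (hypothetical caller, not part of this file): the
+// shrinker lives in a `static` and is registered exactly once during module init, as the
+// safety requirement on `new` demands:
+//
+//     static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+//     // ...later, in the module's init path:
+//     BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;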
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans of whether the pages are usable. The
+/// methods `use_range` and `stop_using_range` set all booleans in a range to true or false
+/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker to free it if the device is
+/// under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves, e.g. if two threads call
+/// `use_range` on the same index, then that's fine and neither call will return until the page is
+/// allocated and mapped.
+///
+/// The methods that read or write to a range require that the page is marked as in use. So it is
+/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
+/// write to the page.
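+///
+/// An illustrative use (a sketch of the rules above, not a doctest):
+///
+/// ```ignore
+/// range.use_range(0, 4)?; // allocate/map pages 0..4 and mark them in use
+/// // SAFETY: pages 0..4 stay in use for the duration of the call.
+/// unsafe { range.write(0, &0u64)? };
+/// range.stop_using_range(0, 4); // make the pages available to the shrinker again
+/// ```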
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
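+///
+/// Transitions, as implemented below: `use_range`/`use_page_slow` move a page from Available
+/// to Used by removing its `lru` element from the list, `stop_using_range` moves it from
+/// Used back to Available by queueing it, and the shrinker moves it from Available to Free
+/// by taking the page.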
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok for the duration of 'a.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the size of the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given page is in use, then mark it as available so that the shrinker can free it.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
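+
+    // For example (illustrative arithmetic, assuming 4 KiB pages): `iterate(3000, 6000, cb)`
+    // invokes the callback three times, as `cb(page[0], 3000, 1096)`, `cb(page[1], 0, 4096)`
+    // and `cb(page[2], 0, 808)`, since 1096 + 4096 + 808 = 6000.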
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+                // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+        // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+ // This is the destructor, so unlike the other methods, we only need to worry about races
+ // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
+ // shrinker, and after this loop, the shrinker will not access any of our pages since we
+ // removed them from the lru list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+ // SAFETY: `pages` was allocated as an `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+ // This sets the page pointer to zero before we unmap it from the vma. However, we call
+ // `zap_page_range` before we release the mmap lock, so `use_page_slow` will not be able to
+ // insert a new page until after our call to `zap_page_range`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+ // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+ // they can be freed at any point after we unlock `lru_lock`. This is with the exception of
+ // `mm_mutex` which is kept alive by holding the lock.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..f13a747e784c
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1696 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// Bitflags for the `defer_work` field of `ProcessInner`.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+    /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+    /// The number of outstanding transactions that must complete before processes
+    /// waiting on `freeze_wait` are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: bool,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+    /// Whether oneway spam detection is enabled.
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: false,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+ let _ = self.push_work(node);
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the thread's
+ /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process owns to the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows it by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+ // Node references are in a different lock to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
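+        // Reserve capacity outside the lock, since GFP_KERNEL allocations may sleep.
+        // The pushes below run under the spinlock, but with capacity already
+        // reserved they cannot allocate, so they cannot fail.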
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" },
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+        is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup again as node may have been inserted before the lock was reacquired.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+        // Find the smallest unused handle. Handle 0 is reserved for the context manager.
+        let mut target: u32 = if is_manager { 0 } else { 1 };
+ for handle in refs.by_handle.keys() {
+ if *handle > target {
+ break;
+ }
+ if *handle == target {
+ target = target.checked_add(1).ok_or(ENOMEM)?;
+ }
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+                // The error type is infallible, so this cannot fail.
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, target));
+ refs.by_handle.insert(reserve2.into_node(target, info_proc));
+ Ok(target)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ }
+ } else {
+ // All refs are cleared in process exit, so this warning is expected in that case.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so we release the lock first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the other call
+ // will wait for the other call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+                // The error type is infallible, so this cannot fail.
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+    /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate through `nodes` while also allowing you to give other parts of
+ /// the codebase exclusive access to `ProcessInner`.
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = false;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
+ drop(delivered_deaths);
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ pr_warn!(
+ "{}: removing orphan mapping {offset}:{size}\n",
+ self.pid_in_current_ns()
+ );
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // calls to synchronize_rcu() in thread drop will happen here
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = false;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
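+    // Mark the process frozen before waiting, so that it stops servicing
+    // transactions while the outstanding ones drain.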
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = true;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = false;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = false;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = false;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
+ info.sync_recv |= inner.sync_recv as u32;
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+    // It is very unlikely for there to be more than 3, since a process normally uses at most
+    // binder and hwbinder.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
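+        // Match arms must be constants, so name the combined direction bits.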
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn compat_ioctl(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ arg: usize,
+ ) -> Result {
+ Self::ioctl(this, file, cmd, arg)
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
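+        // Userspace must never gain write access to the mapping; the kernel writes
+        // transaction data into it directly.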
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
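+        // Even if the thread itself has no pending work, report readability when
+        // the process queue does.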
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
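+        // The guard must protect this thread's own process.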
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // It is an error to hit this branch, and it should not be reachable. We try to do
+ // something reasonable when the failure path happens. Most likely, the thread in
+ // question will sleep forever.
+ pr_err!("Same thread registered with `ready_threads` twice.");
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index that the new range should have in `self.ranges` after insertion.
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
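+        // The unwrap cannot fail: `reserve_new` is only called when `is_full()` is
+        // false, so `insert_within_capacity` has spare capacity.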
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+ // Divide round up
+ start_page_idx: offset.div_ceil(PAGE_SIZE),
+ // Divide round down
+ end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
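
For intuition, a quick check of the rounding above (a sketch that assumes PAGE_SIZE is 4096; the real value is target-dependent):

    // Freeing offset 5000, size 10000 covers bytes 5000..15000. The only page lying
    // entirely inside that span is page 2 (bytes 8192..12288), so the half-open
    // page-index range is 2..3.
    let freed = FreedRange::interior_pages(5000, 10000);
    assert_eq!(freed.start_page_idx, 2); // 5000.div_ceil(4096)
    assert_eq!(freed.end_page_idx, 3); // 15000 / 4096
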
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the reservation at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+ /// The value in `data` will be stored, if any. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry, its debug id, and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `reserve_new`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
+
+/// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
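
The `NeedAlloc` arm and `make_alloc` form a retry protocol: the caller attempts the reservation while holding its lock, and when `NeedAlloc` comes back it drops the lock, performs the GFP_KERNEL allocations, and tries again. A minimal caller sketch; the `proc.lock()` guard and `range_alloc` field names are hypothetical, not part of this patch:

    // Sketch of the reserve_new retry loop (hypothetical lock and field names).
    let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
    let success = loop {
        let mut guard = proc.lock(); // hypothetical process lock
        match guard.range_alloc.reserve_new(args)? {
            // Any allocations that went unused are dropped after unlocking.
            ReserveNew::Success(success) => break success,
            ReserveNew::NeedAlloc(need) => {
                drop(guard); // never allocate while holding the lock
                args = need.make_alloc()?; // GFP_KERNEL allocations happen here
            }
        }
    };
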
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..7b1a248fcb02
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+ // There's always at least one range, because index zero is either the start of a free or
+ // allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
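+ // `free_tree` is keyed by (size, offset), so the lower bound of (size, 0) is
+ // the smallest free range that still fits: a best-fit lookup in O(log n).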
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Moves the entry at the given offset from [`DescriptorState::Allocated`] back to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry, its debug id, and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+ /// Find the number and total size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
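+ // Move the state out, let the closure decide the new state, and store it back.
+ // This lets a transition consume the old state by value even on the error path.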
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
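+ // A full array holds up to `len` allocated ranges; converting can also create a
+ // free range before each of them plus one after the last, i.e. 2 * len + 1 nodes.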
+ let num_descriptors = 2 * len + 1;
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..6773b7c273ec
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
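+ // Layout mirrors `struct binder_device` in rust_binder_internal.h.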
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+ /// Should be a `BR_` constant from [`defs`], e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects are a Rust feature that lets you
+/// implement dynamic dispatch over many different types. This lets us store many different types
+/// in the todo list.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+ /// immediately, or false if it should return to the caller before processing additional work
+ /// items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `LinkedList::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
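
`ptr_align` rounds a value up to the next multiple of the pointer width and reports overflow as `None`. A quick sketch, assuming a 64-bit target where `size_of::<usize>()` is 8:

    assert_eq!(ptr_align(13), Some(16)); // (13 + 7) & !7
    assert_eq!(ptr_align(16), Some(16)); // already aligned values are unchanged
    assert_eq!(ptr_align(usize::MAX), None); // checked_add overflows
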
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+ pr_warn!("Loaded Rust Binder.");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_unlocked_ioctl),
+ compat_ioctl: Some(rust_binder_compat_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+ // `rust_binder_new_context`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_compat_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::compat_ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_unlocked_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: Safe to drop any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+ /// Takes an inode from a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+ // SAFETY: This is a dentry from `rust_binderfs_create_proc_file` that has not been deleted yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..6b497146b698
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function will stash a struct binder_device for the specific binder
+ * device in i_private of the inode.
+ * It will go on to allocate a new inode from the super block of the
+ * filesystem mount, stash a struct binder_device in its i_private field
+ * and attach a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ inode_lock(d_inode(root));
+
+ /* look it up */
+ dentry = lookup_noperm(&QSTR(req->name), root);
+ if (IS_ERR(dentry)) {
+ inode_unlock(d_inode(root));
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ if (d_really_is_positive(dentry)) {
+ /* already exists */
+ dput(dentry);
+ inode_unlock(d_inode(root));
+ ret = -EEXIST;
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_instantiate(dentry, inode);
+ fsnotify_create(root->d_inode, dentry);
+ inode_unlock(d_inode(root));
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: %0 on success, negative errno on failure.
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_noperm(&QSTR(name), parent);
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+	 * filesystem mount. But binderfs does not allow the creation of any
+	 * files, including device nodes. The only way to create binder device
+	 * nodes is through the binder-control device, which userns root is
+	 * explicitly allowed to use. So removing the SB_I_NODEV flag from
+	 * s_iflags is both necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
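+	/*
+	 * Illustrative note (not part of this patch): the module parameter is
+	 * assumed to be a comma-separated list of device names, e.g.
+	 * "binder,hwbinder,vndbinder", and each entry below gets its own
+	 * device node in this mount.
+	 */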
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * During inode eviction struct binderfs_info is needed.
+ * So first wipe the super_block then free struct binderfs_info.
+ */
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..a83ec111d2cb
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
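+
+// A minimal usage sketch (illustrative, not part of this patch): the driver
+// bumps a counter whenever it parses a command or return word, e.g.
+//
+//     GLOBAL_STATS.inc_bc(BC_TRANSACTION);
+//     GLOBAL_STATS.inc_br(BR_TRANSACTION_COMPLETE);
+//
+// and `debug_print` later renders only the nonzero counters into the
+// seq_file behind binder_logs/stats.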
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::CStr;
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+        // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+        let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+        // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
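+
+// Illustrative mapping (not part of this patch): the arrays are indexed by
+// ioctl number, so `command_string(_IOC_NR(BC_TRANSACTION) as usize)` yields
+// the C driver's printable name for that command, e.g. "BC_TRANSACTION".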
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..7e34ccd394f8
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::UserSlice,
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
+/// call and is discarded when it returns.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+/// and is applied later. Otherwise if `skip` is zero, then the size of the
+/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
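+
+// Illustrative values (not part of this patch): a plain pointer fixup looks
+// like `PointerFixupEntry { skip: 0, pointer_value: translated_ptr, target_offset: off }`,
+// while a `binder_fd_array_object` covering three fds is recorded as
+// `PointerFixupEntry { skip: 12, pointer_value: 0, target_offset: off }` so
+// those 12 bytes are left untouched for the later fd translation pass.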
+
+/// Return type of `validate_parent_fixup`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+    /// Referring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
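+
+// Illustrative call (not part of this patch): after translating a parent
+// buffer A and a child B with `parent = A`, a request such as
+// `sg_state.validate_parent_fixup(a_index, 16, 8)` (names hypothetical)
+// succeeds only if A is still on the ancestor chain, `A.fixup_min_offset`
+// is at most 16 and `16 + 8` fits within `A.length`, and it returns the
+// absolute `target_offset` of the fixup within the target allocation.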
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
+ /// into the buffer is returned.
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
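+
+// Worked example (illustrative, assuming 8-byte pointer alignment): with
+// `offset = 0` and `limit = 32`, `claim_next(12)` rounds the size up to 16
+// and returns offset 0, a following `claim_next(16)` returns offset 16 and
+// exhausts the space, and any further nonzero claim fails with EINVAL.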
+
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
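+
+// Illustrative protocol note (not part of this patch): userspace drives these
+// flags with BC_REGISTER_LOOPER, BC_ENTER_LOOPER and BC_EXIT_LOOPER. Sending
+// both entry commands on one thread marks it LOOPER_INVALID (see
+// `looper_enter` and `looper_register` below), while `is_looper` treats
+// either entry flag as membership in the thread pool.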
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `ThreadInner`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+    /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+    /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process
+    /// instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fixup that
+ // pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+                    // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
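+    // Illustrative walk-through (not part of this patch): for an sg entry at
+    // offset O with length 64 and a single pointer fixup whose target_offset
+    // is O + 24, `apply_sg` copies the sender bytes [O, O + 24), writes the
+    // 8-byte translated pointer at O + 24, skips those 8 bytes in the reader,
+    // and then copies the remaining bytes up to O + 64.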
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target transaction.
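+    ///
+    /// For example (illustrative, assuming 8-byte pointer alignment): a
+    /// transaction with 20 data bytes, two 8-byte offsets and a 24-byte extra
+    /// buffers region is laid out as data at [0, 24), offsets at [24, 40) and
+    /// scatter-gather buffers at [40, 64), with any security context appended
+    /// after that.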
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+        // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ let mut ee = self.inner.lock().extended_error;
+ ee.command = err.reply;
+ ee.param = err.as_errno();
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as a deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
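+    // Illustrative wire format (not part of this patch): the write buffer is
+    // a stream of `u32` command words, each followed by its fixed-size
+    // payload, e.g. BC_INCREFS is followed by a single `u32` handle while
+    // BC_TRANSACTION is followed by a `binder_transaction_data` struct.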
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = &buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ }
+ drop(buffer);
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+                // Fail if given an unknown command code.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
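+    /// Handles the read half of a `BINDER_WRITE_READ` ioctl: pops work items and
+    /// writes the corresponding `BR_*` returns until the user buffer is full.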
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
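+        // Looper threads may service the process-wide queue; other threads only see
+        // their thread-local work.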
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+                Err(err) => {
+                    // EINTR and EAGAIN are expected when the wait is interrupted.
+                    if err != EINTR && err != EAGAIN {
+                        pr_warn!("Failure in work getter: {:?}", err);
+                    }
+                    // Propagate the error if we haven't written anything yet.
+                    if initial_len == writer.len() {
+                        return Err(err);
+                    } else {
+                        break;
+                    }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
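+    /// Handles the `BINDER_WRITE_READ` ioctl: first drains the write buffer, then
+    /// fills the read buffer, writing the consumed counts back to userspace.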
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+    /// Make any ongoing call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
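+        // Deliver the pending error code and reset it to BR_OK so the slot can be reused.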
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..02512175d622
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
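+        // Store the target node in the allocation so its reference lives as long as the buffer.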
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+        // No race because this is called at most twice, and one of the calls is in the
+        // destructor, which is guaranteed not to race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Check whether one oneway transaction can supersede another.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
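+        // Replacement requires both transactions to be oneway and marked TF_UPDATE_TXN.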
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+            // On failure to process the list, the guard above sends a failed reply to the
+            // sender and the transaction is dropped on the recipient.
+            return Ok(true);
+ };
+
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+        // Dismiss the guard that would send a failed reply. No failure paths are allowed
+        // from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b3acbc4174fb..979c96b74cad 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,10 +23,11 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
+#include <kunit/visibility.h>
#include "binder_alloc.h"
#include "binder_trace.h"
-struct list_lru binder_freelist;
+static struct list_lru binder_freelist;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
@@ -57,13 +58,14 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
-static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
+VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);
static void binder_insert_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *new_buffer)
@@ -167,34 +169,31 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
unsigned long user_ptr)
{
- struct binder_buffer *buffer;
-
- spin_lock(&alloc->lock);
- buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
- return buffer;
+ guard(mutex)(&alloc->mutex);
+ return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +201,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+	 * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
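+	/* ret stays -ESRCH if the vma is gone or the binder_alloc is no longer mapped */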
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +362,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +372,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +392,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +401,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +422,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +655,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +676,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +687,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -618,6 +697,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
out:
return buffer;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_new_buf);
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
@@ -674,8 +754,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +814,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,18 +864,19 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);
/**
* binder_alloc_mmap_handler() - map virtual address space for proc
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -884,7 +959,7 @@ err_invalid_mm:
failure_string, ret);
return ret;
}
-
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -949,6 +1028,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
"%s: %d buffers %d, pages %d\n",
__func__, alloc->pid, buffers, page_count);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);
/**
* binder_alloc_print_allocated() - print buffer info
@@ -964,17 +1044,16 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,9 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
return count;
}
@@ -1036,18 +1114,19 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);
/**
* binder_alloc_free_page() - shrinker callback to free pages
* @item: item to free
- * @lock: lock protecting the item
+ * @lru: list_lru instance of the item
* @cb_arg: callback argument
*
* Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,44 +1134,54 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock,
void *cb_arg)
- __must_hold(lock)
+ __must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
- spin_unlock(lock);
+ spin_unlock(&lru->lock);
if (vma) {
trace_binder_unmap_user_start(alloc, index);
@@ -1102,23 +1191,29 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
- spin_lock(lock);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
return LRU_SKIP;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_page);
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
@@ -1135,6 +1230,18 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static struct shrinker *binder_shrinker;
+VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
+ struct list_lru *freelist)
+{
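+	/* The freelist is a parameter so KUnit tests can supply a private LRU. */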
+ alloc->pid = current->group_leader->pid;
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+ alloc->freelist = freelist;
+}
+EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);
+
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
@@ -1144,11 +1251,7 @@ static struct shrinker *binder_shrinker;
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->pid = current->group_leader->pid;
- alloc->mm = current->mm;
- mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
- INIT_LIST_HEAD(&alloc->buffers);
+ __binder_alloc_init(alloc, &binder_freelist);
}
int binder_alloc_shrinker_init(void)
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..d6f1f6f2d00e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,13 +9,12 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>
-extern struct list_lru binder_freelist;
struct binder_transaction;
/**
@@ -59,34 +58,44 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: index in @alloc->pages[] of the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
+ * @freelist: lru list to use for free pages (invariant after init)
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped; each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,29 +105,25 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
+ struct list_lru *freelist;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
-#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
-void binder_selftest_alloc(struct binder_alloc *alloc);
-#else
-static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
-#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock, void *cb_arg);
+ void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -151,12 +156,8 @@ void binder_alloc_print_pages(struct seq_file *m,
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
- size_t free_async_space;
-
- spin_lock(&alloc->lock);
- free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
- return free_async_space;
+ guard(mutex)(&alloc->mutex);
+ return alloc->free_async_space;
}
unsigned long
@@ -178,5 +179,11 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
binder_size_t buffer_offset,
size_t bytes);
+#if IS_ENABLED(CONFIG_KUNIT)
+void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
+size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+#endif
+
#endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
deleted file mode 100644
index 81442fe20a69..000000000000
--- a/drivers/android/binder_alloc_selftest.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* binder_alloc_selftest.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2017 Google, Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm_types.h>
-#include <linux/err.h>
-#include "binder_alloc.h"
-
-#define BUFFER_NUM 5
-#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
-
-static bool binder_selftest_run = true;
-static int binder_selftest_failures;
-static DEFINE_MUTEX(binder_selftest_lock);
-
-/**
- * enum buf_end_align_type - Page alignment of a buffer
- * end with regard to the end of the previous buffer.
- *
- * In the pictures below, buf2 refers to the buffer we
- * are aligning. buf1 refers to previous buffer by addr.
- * Symbol [ means the start of a buffer, ] means the end
- * of a buffer, and | means page boundaries.
- */
-enum buf_end_align_type {
- /**
- * @SAME_PAGE_UNALIGNED: The end of this buffer is on
- * the same page as the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 ][ ...
- * buf1 ]|[ buf2 ][ ...
- */
- SAME_PAGE_UNALIGNED = 0,
- /**
- * @SAME_PAGE_ALIGNED: When the end of the previous buffer
- * is not page aligned, the end of this buffer is on the
- * same page as the end of the previous buffer and is page
- * aligned. When the previous buffer is page aligned, the
- * end of this buffer is aligned to the next page boundary.
- * Examples:
- * buf1 ][ buf2 ]| ...
- * buf1 ]|[ buf2 ]| ...
- */
- SAME_PAGE_ALIGNED,
- /**
- * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 ][ ...
- */
- NEXT_PAGE_UNALIGNED,
- /**
- * @NEXT_PAGE_ALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is page aligned. Examples:
- * buf1 ][ buf2 | buf2 ]| ...
- * buf1 ]|[ buf2 | buf2 ]| ...
- */
- NEXT_PAGE_ALIGNED,
- /**
- * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
- * the page that follows the page after the end of the
- * previous buffer and is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
- */
- NEXT_NEXT_UNALIGNED,
- /**
- * @LOOP_END: The number of enum values in &buf_end_align_type.
- * It is used for controlling loop termination.
- */
- LOOP_END,
-};
-
-static void pr_err_size_seq(size_t *sizes, int *seq)
-{
- int i;
-
- pr_err("alloc sizes: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%zu]", sizes[i]);
- pr_cont("\n");
- pr_err("free seq: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%d]", seq[i]);
- pr_cont("\n");
-}
-
-static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- size_t size)
-{
- unsigned long page_addr;
- unsigned long end;
- int page_index;
-
- end = PAGE_ALIGN(buffer->user_data + size);
- page_addr = buffer->user_data;
- for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
- pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
- "lru" : "free", page_index);
- return false;
- }
- }
- return true;
-}
-
-static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
- if (IS_ERR(buffers[i]) ||
- !check_buffer_pages_allocated(alloc, buffers[i],
- sizes[i])) {
- pr_err_size_seq(sizes, seq);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq, size_t end)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++)
- binder_alloc_free_buf(alloc, buffers[seq[i]]);
-
- for (i = 0; i < end / PAGE_SIZE; i++) {
- /**
- * Error message on a free page can be false positive
- * if binder shrinker ran during binder_alloc_free_buf
- * calls above.
- */
- if (list_empty(&alloc->pages[i].lru)) {
- pr_err_size_seq(sizes, seq);
- pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_page(struct binder_alloc *alloc)
-{
- int i;
- unsigned long count;
-
- while ((count = list_lru_count(&binder_freelist))) {
- list_lru_walk(&binder_freelist, binder_alloc_free_page,
- NULL, count);
- }
-
- for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
- pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
- "alloc" : "lru", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_alloc_free(struct binder_alloc *alloc,
- size_t *sizes, int *seq, size_t end)
-{
- struct binder_buffer *buffers[BUFFER_NUM];
-
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-
- /* Allocate from lru. */
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- if (list_lru_count(&binder_freelist))
- pr_err("lru list should be empty but is not\n");
-
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
- binder_selftest_free_page(alloc);
-}
-
-static bool is_dup(int *seq, int index, int val)
-{
- int i;
-
- for (i = 0; i < index; i++) {
- if (seq[i] == val)
- return true;
- }
- return false;
-}
-
-/* Generate BUFFER_NUM factorial free orders. */
-static void binder_selftest_free_seq(struct binder_alloc *alloc,
- size_t *sizes, int *seq,
- int index, size_t end)
-{
- int i;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_free(alloc, sizes, seq, end);
- return;
- }
- for (i = 0; i < BUFFER_NUM; i++) {
- if (is_dup(seq, index, i))
- continue;
- seq[index] = i;
- binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
- }
-}
-
-static void binder_selftest_alloc_size(struct binder_alloc *alloc,
- size_t *end_offset)
-{
- int i;
- int seq[BUFFER_NUM] = {0};
- size_t front_sizes[BUFFER_NUM];
- size_t back_sizes[BUFFER_NUM];
- size_t last_offset, offset = 0;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- last_offset = offset;
- offset = end_offset[i];
- front_sizes[i] = offset - last_offset;
- back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
- }
- /*
- * Buffers share the first or last few pages.
- * Only BUFFER_NUM - 1 buffer sizes are adjustable since
- * we need one giant buffer before getting to the last page.
- */
- back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
- binder_selftest_free_seq(alloc, front_sizes, seq, 0,
- end_offset[BUFFER_NUM - 1]);
- binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
-}
-
-static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
- size_t *end_offset, int index)
-{
- int align;
- size_t end, prev;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_size(alloc, end_offset);
- return;
- }
- prev = index == 0 ? 0 : end_offset[index - 1];
- end = prev;
-
- BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
-
- for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
- if (align % 2)
- end = ALIGN(end, PAGE_SIZE);
- else
- end += BUFFER_MIN_SIZE;
- end_offset[index] = end;
- binder_selftest_alloc_offset(alloc, end_offset, index + 1);
- }
-}
-
-/**
- * binder_selftest_alloc() - Test alloc and free of buffer pages.
- * @alloc: Pointer to alloc struct.
- *
- * Allocate BUFFER_NUM buffers to cover all page alignment cases,
- * then free them in all orders possible. Check that pages are
- * correctly allocated, put onto lru when buffers are freed, and
- * are freed when binder_alloc_free_page is called.
- */
-void binder_selftest_alloc(struct binder_alloc *alloc)
-{
- size_t end_offset[BUFFER_NUM];
-
- if (!binder_selftest_run)
- return;
- mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
- goto done;
- pr_info("STARTED\n");
- binder_selftest_alloc_offset(alloc, end_offset, 0);
- binder_selftest_run = false;
- if (binder_selftest_failures > 0)
- pr_info("%d tests FAILED\n", binder_selftest_failures);
- else
- pr_info("PASSED\n");
-
-done:
- mutex_unlock(&binder_selftest_lock);
-}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index f8d6be682f23..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -3,7 +3,6 @@
#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H
-#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -25,8 +24,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -83,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
const struct file_operations *fops,
void *data);
-extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
@@ -96,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
{
return NULL;
}
-static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
@@ -541,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
@@ -582,4 +578,20 @@ struct binder_object {
};
};
+/**
+ * binder_add_device() - add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ */
+void binder_add_device(struct binder_device *device);
+
+/**
+ * binder_remove_device() - remove a binder device from binder_devices
+ * @device: the binder device to remove from the global list
+ */
+void binder_remove_device(struct binder_device *device);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+vm_fault_t binder_vm_fault(struct vm_fault *vmf);
+#endif
+
#endif /* _LINUX_BINDER_INTERNAL_H */
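Note on the two helpers declared above: this diff shows only their prototypes. A minimal sketch of what the binder.c side plausibly looks like follows; the lock and list names are assumptions made for illustration, not taken from this patch.

/* Sketch only: binder_devices_lock and the binder_devices hlist are
 * assumed names, not confirmed by this diff.
 */
static DEFINE_SPINLOCK(binder_devices_lock);
static HLIST_HEAD(binder_devices);

void binder_add_device(struct binder_device *device)
{
	spin_lock(&binder_devices_lock);
	hlist_add_head(&device->hlist, &binder_devices);
	spin_unlock(&binder_devices_lock);
}

void binder_remove_device(struct binder_device *device)
{
	spin_lock(&binder_devices_lock);
	hlist_del_init(&device->hlist);
	spin_unlock(&binder_devices_lock);
}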
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..d05397a50ca6
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
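The generated family above registers one multicast group and an empty ops table, so reports are pushed to listeners rather than requested. A minimal sketch of how a report could be multicast to BINDER_NLGRP_REPORT follows; BINDER_CMD_REPORT and BINDER_A_REPORT_ERROR stand in for whatever the generated uapi header actually defines and are assumptions here.

static int binder_nl_send_report(u32 error)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* BINDER_CMD_REPORT is a placeholder command id. */
	hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
	if (!hdr)
		goto free;

	/* BINDER_A_REPORT_ERROR is a placeholder attribute id. */
	if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error))
		goto free;

	genlmsg_end(skb, hdr);
	return genlmsg_multicast(&binder_nl_family, skb, 0,
				 BINDER_NLGRP_REPORT, GFP_KERNEL);

free:
	nlmsg_free(skb);
	return -EMSGSIZE;
}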
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..882c7a6b537e
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -34,27 +34,6 @@ TRACE_EVENT(binder_ioctl,
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
-DECLARE_EVENT_CLASS(binder_lock_class,
- TP_PROTO(const char *tag),
- TP_ARGS(tag),
- TP_STRUCT__entry(
- __field(const char *, tag)
- ),
- TP_fast_assign(
- __entry->tag = tag;
- ),
- TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name) \
-DEFINE_EVENT(binder_lock_class, name, \
- TP_PROTO(const char *func), \
- TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
DECLARE_EVENT_CLASS(binder_function_return_class,
TP_PROTO(int ret),
TP_ARGS(ret),
@@ -328,7 +307,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -423,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%d is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
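The new binder_netlink_report tracepoint can be watched like any other event in the binder trace system; assuming tracefs is mounted at /sys/kernel/tracing:

echo 1 > /sys/kernel/tracing/events/binder/binder_netlink_report/enable
cat /sys/kernel/tracing/trace_pipe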
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index ad1fa7abc323..be8e64eb39ec 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -59,6 +59,7 @@ struct binder_features {
bool oneway_spam_detection;
bool extended_error;
bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -76,6 +77,7 @@ static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
.freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -117,7 +119,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
struct dentry *dentry, *root;
struct binder_device *device;
char *name = NULL;
- size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
@@ -161,9 +162,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode->i_gid = info->root_gid;
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
- name_len = strlen(req->name);
- /* Make sure to include terminating NUL byte */
- name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
+ name = kstrdup(req->name, GFP_KERNEL);
if (!name)
goto err;
@@ -187,7 +186,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = lookup_noperm(&QSTR(name), root);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@@ -207,6 +206,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
@@ -272,6 +273,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(device->context.name);
kfree(device);
}
@@ -484,7 +486,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
{
struct dentry *dentry;
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
return dentry;
@@ -497,21 +499,6 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
return dentry;
}
-void binderfs_remove_file(struct dentry *dentry)
-{
- struct inode *parent_inode;
-
- parent_inode = d_inode(dentry->d_parent);
- inode_lock(parent_inode);
- if (simple_positive(dentry)) {
- dget(dentry);
- simple_unlink(parent_inode, dentry);
- d_delete(dentry);
- dput(dentry);
- }
- inode_unlock(parent_inode);
-}
-
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
const struct file_operations *fops,
void *data)
@@ -616,6 +603,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
index 956f1bd087d1..c7299ce8b374 100644
--- a/drivers/android/dbitmap.h
+++ b/drivers/android/dbitmap.h
@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
{
dmap->nbits = 0;
kfree(dmap->map);
+ dmap->map = NULL;
}
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
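The one-line dbitmap_free() change above makes teardown idempotent: since kfree(NULL) is a no-op, a stale dmap->map pointer can no longer be freed twice. Illustrative only:

static void dbitmap_teardown_twice(struct dbitmap *dmap)
{
	dbitmap_free(dmap);
	/* Safe after this change: dmap->map is NULL and kfree(NULL)
	 * is a no-op; previously this double-freed the stale pointer.
	 */
	dbitmap_free(dmap);
}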
diff --git a/drivers/android/tests/.kunitconfig b/drivers/android/tests/.kunitconfig
new file mode 100644
index 000000000000..39b76bab9d9a
--- /dev/null
+++ b/drivers/android/tests/.kunitconfig
@@ -0,0 +1,7 @@
+#
+# Copyright 2025 Google LLC.
+#
+
+CONFIG_KUNIT=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=y
diff --git a/drivers/android/tests/Makefile b/drivers/android/tests/Makefile
new file mode 100644
index 000000000000..27268418eb03
--- /dev/null
+++ b/drivers/android/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2025 Google LLC.
+#
+
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += binder_alloc_kunit.o
diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c
new file mode 100644
index 000000000000..9b884d977f76
--- /dev/null
+++ b/drivers/android/tests/binder_alloc_kunit.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for binder allocator code.
+ *
+ * Copyright 2025 Google LLC.
+ * Author: Tiffany Yang <ynaffit@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/anon_inodes.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/seq_buf.h>
+#include <linux/sizes.h>
+
+#include "../binder_alloc.h"
+#include "../binder_internal.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+#define BINDER_MMAP_SIZE SZ_128K
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)
+
+#define ALIGN_TYPE_STRLEN (12)
+
+#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)
+
+#define PRINT_ALL_CASES (0)
+
+/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
+#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by address.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ /**
+ * @LOOP_END: The number of enum values in &buf_end_align_type.
+ * It is used for controlling loop termination.
+ */
+ LOOP_END,
+};
+
+static const char *const buf_end_align_type_strs[LOOP_END] = {
+ [SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
+ [SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
+ [NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
+ [NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
+ [NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
+};
+
+struct binder_alloc_test_case_info {
+ char alignments[ALIGNMENTS_BUFLEN];
+ struct seq_buf alignments_sb;
+ size_t *buffer_sizes;
+ int *free_sequence;
+ bool front_pages;
+};
+
+static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[%d]", seq[i]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static void stringify_alignments(struct kunit *test, int *alignments,
+ struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[ %d:%s ]", i,
+ buf_end_align_type_strs[alignments[i]]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static bool check_buffer_pages_allocated(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ unsigned long page_addr;
+ unsigned long end;
+ int page_index;
+
+ end = PAGE_ALIGN(buffer->user_data + size);
+ page_addr = buffer->user_data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
+ kunit_err(test, "expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index] ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
+ failures++;
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
+ kunit_err(test, "expect lru but is %s at page index %d\n",
+ alloc->pages[i] ? "alloc" : "free", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_page(struct kunit *test,
+ struct binder_alloc *alloc)
+{
+ unsigned long failures = 0;
+ unsigned long count;
+ int i;
+
+ while ((count = list_lru_count(alloc->freelist))) {
+ list_lru_walk(alloc->freelist, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i]) {
+ kunit_err(test, "expect free but is %s at page index %d\n",
+ list_empty(page_to_lru(alloc->pages[i])) ?
+ "alloc" : "lru", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+/* Executes one full test run for the given test case. */
+static bool binder_alloc_test_alloc_free(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t end)
+{
+ unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
+ struct binder_buffer *buffers[BUFFER_NUM];
+ unsigned long failures;
+ bool failed = false;
+
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial allocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ /* Allocate from lru. */
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = list_lru_count(alloc->freelist);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "lru list should be empty after reallocation but still has %lu pages",
+ failures);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ failures = binder_alloc_test_free_page(test, alloc);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Failed to clean up allocated pages: %lu/%lu pages still installed",
+ failures, (alloc->buffer_size / PAGE_SIZE));
+
+ return failed;
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ unsigned long *runs, unsigned long *failures,
+ int index, size_t end)
+{
+ bool case_failed;
+ int i;
+
+ if (index == BUFFER_NUM) {
+ DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);
+
+ case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
+ *runs += 1;
+ *failures += case_failed;
+
+ if (case_failed || PRINT_ALL_CASES) {
+ stringify_free_seq(test, tc->free_sequence,
+ &freeseq_sb);
+ kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
+ case_failed ? "FAILED" : "PASSED",
+ tc->front_pages ? "front" : "back ",
+ seq_buf_str(&tc->alignments_sb),
+ seq_buf_str(&freeseq_sb));
+ }
+
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(tc->free_sequence, index, i))
+ continue;
+ tc->free_sequence[index] = i;
+ permute_frees(test, alloc, tc, runs, failures, index + 1, end);
+ }
+}
+
+static void gen_buf_sizes(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t *end_offset, unsigned long *runs,
+ unsigned long *failures)
+{
+ size_t last_offset, offset = 0;
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ int seq[BUFFER_NUM] = {0};
+ int i;
+
+ tc->free_sequence = seq;
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ tc->front_pages = true;
+ tc->buffer_sizes = front_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0,
+ end_offset[BUFFER_NUM - 1]);
+
+ tc->front_pages = false;
+ tc->buffer_sizes = back_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
+}
+
+static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
+ size_t *end_offset, int *alignments,
+ unsigned long *runs, unsigned long *failures,
+ int index)
+{
+ size_t end, prev;
+ int align;
+
+ if (index == BUFFER_NUM) {
+ struct binder_alloc_test_case_info tc = {0};
+
+ seq_buf_init(&tc.alignments_sb, tc.alignments,
+ ALIGNMENTS_BUFLEN);
+ stringify_alignments(test, alignments, &tc.alignments_sb);
+
+ gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ alignments[index] = align;
+ gen_buf_offsets(test, alloc, end_offset, alignments, runs,
+ failures, index + 1);
+ }
+}
+
+struct binder_alloc_test {
+ struct binder_alloc alloc;
+ struct list_lru binder_test_freelist;
+ struct file *filp;
+ unsigned long mmap_uaddr;
+};
+
+static void binder_alloc_test_init_freelist(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
+ &priv->binder_test_freelist);
+}
+
+static void binder_alloc_test_mmap(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ struct binder_alloc *alloc = &priv->alloc;
+ struct binder_buffer *buf;
+ struct rb_node *n;
+
+ KUNIT_EXPECT_EQ(test, alloc->mapped, true);
+ KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);
+
+ n = rb_first(&alloc->allocated_buffers);
+ KUNIT_EXPECT_PTR_EQ(test, n, NULL);
+
+ n = rb_first(&alloc->free_buffers);
+ buf = rb_entry(n, struct binder_buffer, rb_node);
+ KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
+ BINDER_MMAP_SIZE);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
+}
+
+/**
+ * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
+ * @test: The test context object.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page() is called.
+ */
+static void binder_alloc_exhaustive_test(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ size_t end_offset[BUFFER_NUM];
+ int alignments[BUFFER_NUM];
+ unsigned long failures = 0;
+ unsigned long runs = 0;
+
+ gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
+ &failures, 0);
+
+ KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
+ KUNIT_EXPECT_EQ(test, failures, 0);
+}
+
+/* ===== End test cases ===== */
+
+static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = vma->vm_private_data;
+
+ binder_alloc_vma_close(alloc);
+}
+
+static const struct vm_operations_struct binder_alloc_test_vm_ops = {
+ .close = binder_alloc_test_vma_close,
+ .fault = binder_vm_fault,
+};
+
+static int binder_alloc_test_mmap_handler(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = filp->private_data;
+
+ vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
+
+ vma->vm_ops = &binder_alloc_test_vm_ops;
+ vma->vm_private_data = alloc;
+
+ return binder_alloc_mmap_handler(alloc, vma);
+}
+
+static const struct file_operations binder_alloc_test_fops = {
+ .mmap = binder_alloc_test_mmap_handler,
+};
+
+static int binder_alloc_test_init(struct kunit *test)
+{
+ struct binder_alloc_test *priv;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ret = list_lru_init(&priv->binder_test_freelist);
+ if (ret) {
+ kunit_err(test, "Failed to initialize test freelist\n");
+ return ret;
+ }
+
+ /* __binder_alloc_init requires mm to be attached */
+ ret = kunit_attach_mm();
+ if (ret) {
+ kunit_err(test, "Failed to attach mm\n");
+ return ret;
+ }
+ __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);
+
+ priv->filp = anon_inode_getfile("binder_alloc_kunit",
+ &binder_alloc_test_fops, &priv->alloc,
+ O_RDWR | O_CLOEXEC);
+ if (IS_ERR_OR_NULL(priv->filp)) {
+ kunit_err(test, "Failed to open binder alloc test driver file\n");
+ return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
+ }
+
+ priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
+ PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
+ 0);
+ if (!priv->mmap_uaddr) {
+ kunit_err(test, "Could not map the test's transaction memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void binder_alloc_test_exit(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ /* Close the backing file to make sure binder_alloc_vma_close runs */
+ if (!IS_ERR_OR_NULL(priv->filp))
+ fput(priv->filp);
+
+ if (priv->alloc.mm)
+ binder_alloc_deferred_release(&priv->alloc);
+
+ /* Make sure freelist is empty */
+ KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
+ list_lru_destroy(&priv->binder_test_freelist);
+}
+
+static struct kunit_case binder_alloc_test_cases[] = {
+ KUNIT_CASE(binder_alloc_test_init_freelist),
+ KUNIT_CASE(binder_alloc_test_mmap),
+ KUNIT_CASE(binder_alloc_exhaustive_test),
+ {}
+};
+
+static struct kunit_suite binder_alloc_test_suite = {
+ .name = "binder_alloc",
+ .test_cases = binder_alloc_test_cases,
+ .init = binder_alloc_test_init,
+ .exit = binder_alloc_test_exit,
+};
+
+kunit_test_suite(binder_alloc_test_suite);
+
+MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
+MODULE_DESCRIPTION("Binder Alloc KUnit tests");
+MODULE_LICENSE("GPL");
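With the .kunitconfig added above, the suite should be runnable under the standard KUnit wrapper from the kernel tree, along the lines of:

./tools/testing/kunit/kunit.py run --kunitconfig=drivers/android/tests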
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e00536b49552..120a2b7067fc 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -117,23 +117,39 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy"
- range 0 4
+ range 0 5
default 3
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
chipsets are ubiquitous across laptops, desktops and servers.
-
- The value set has the following meanings:
+ Each policy combines power saving states and features:
+ - Partial: The Phy logic is powered but is in a reduced power
+ state. The exit latency from this state is no longer than
+ 10us.
+ - Slumber: The Phy logic is powered but is in an even lower power
+ state. The exit latency from this state is potentially
+ longer, but no longer than 10ms.
+ - DevSleep: The Phy logic may be powered down. The exit latency from
+ this state is no longer than 20 ms, unless otherwise
+ specified by DETO in the device Identify Device Data log.
+ - HIPM: Host Initiated Power Management (host automatically
+ transitions to partial and slumber).
+ - DIPM: Device Initiated Power Management (device automatically
+ transitions to partial and slumber).
+
+ The possible values for the default SATA link power management
+ policy are:
0 => Keep firmware settings
- 1 => Maximum performance
- 2 => Medium power
- 3 => Medium power with Device Initiated PM enabled
- 4 => Minimum power
-
- Note "Minimum power" is known to cause issues, including disk
- corruption, with some disks and should not be used.
+ 1 => No power savings (maximum performance)
+ 2 => HIPM (Partial)
+ 3 => HIPM (Partial) and DIPM (Partial and Slumber)
+ 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
+ 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
+
+ Excluding the value 0, higher values represent policies with higher
+ power savings.
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 547f56341705..3999305b5356 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -370,7 +370,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -386,7 +386,9 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* save initial config */
ahci_save_initial_config(&pdev->dev, hpriv);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 45f63b09828a..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -63,6 +63,7 @@ enum board_ids {
board_ahci_pcs_quirk_no_devslp,
board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
+ board_ahci_yes_fbs_atapi_dma,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_al,
@@ -109,17 +110,17 @@ static const struct scsi_host_template ahci_sht = {
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_vt8251_hardreset,
+ .reset.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_p5wdh_hardreset,
+ .reset.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_avn_hardreset,
+ .reset.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
@@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs_atapi_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS |
+ AHCI_HFLAG_ATAPI_DMA_QUIRK),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_al] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
@@ -589,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
+ .driver_data = board_ahci_yes_fbs_atapi_dma },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
@@ -663,7 +674,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
-MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+MODULE_PARM_DESC(mobile_lpm_policy,
+ "Default LPM policy. Despite its name, this parameter applies "
+ "to all chipsets, including desktop and server chipsets");
static char *ahci_mask_port_map;
module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
@@ -676,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -739,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -769,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1399,8 +1426,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
+ /*
+ * Platforms with LPM problems.
+ * If driver_data is NULL, there is no existing BIOS version with
+ * functioning LPM.
+ * If driver_data is non-NULL, then driver_data contains the DMI BIOS
+ * build date of the first BIOS version with functioning LPM (i.e. older
+ * BIOS versions have broken LPM).
+ */
static const struct dmi_system_id sysids[] = {
- /* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1427,13 +1461,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
+ .driver_data = "20180409", /* 2.35 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
+ },
+ /* 320 is broken, there is no known good version. */
+ },
+ {
/*
- * Note date based on release notes, 2.35 has been
- * reported to be good, but I've been unable to get
- * a hold of the reporter to get the DMI BIOS date.
- * TODO: fix this.
+ * AMD 500 Series Chipset SATA Controller [1022:43eb]
+ * on this motherboard timeouts on ports 5 and 6 when
+ * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
+ * hard drives. LPM with the same drive works fine on
+ * all other ports on the same controller.
*/
- .driver_data = "20180310", /* 2.35 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME,
+ "ROG STRIX B550-F GAMING (WI-FI)"),
+ },
+ /* 3621 is broken, there is no known good version. */
},
{ } /* terminate list */
};
@@ -1444,6 +1495,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
if (!dmi)
return false;
+ if (!dmi->driver_data)
+ return true;
+
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
@@ -1665,18 +1719,20 @@ static int ahci_get_irq_vector(struct ata_host *host, int port)
return pci_irq_vector(to_pci_dev(host->dev), port);
}
-static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
+static void ahci_init_irq(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
+ if (hpriv->flags & AHCI_HFLAG_NO_MSI) {
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
+ return;
+ }
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
- * of multipe MSIs is negated and use single MSI mode instead.
+ * of multiple MSIs is negated and use single MSI mode instead.
*/
if (n_ports > 1) {
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
@@ -1685,7 +1741,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
- return nvec;
+ return;
}
/*
@@ -1700,12 +1756,13 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If the host is not capable of supporting per-port vectors, fall
- * back to single MSI before finally attempting single MSI-X.
+ * back to single MSI before finally attempting single MSI-X or
+ * a legacy INTx.
*/
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
- return nvec;
- return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ return;
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_INTX);
}
static void ahci_mark_external_port(struct ata_port *ap)
@@ -1714,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
@@ -1733,15 +1799,26 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
* LPM if the port advertises itself as an external port.
*/
if (ap->pflags & ATA_PFLAG_EXTERNAL) {
- ata_port_dbg(ap, "external port, not enabling LPM\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
+ /* If no Partial or no Slumber, we cannot support DIPM. */
+ if ((ap->host->flags & ATA_HOST_NO_PART) ||
+ (ap->host->flags & ATA_HOST_NO_SSC)) {
+ ata_port_dbg(ap, "Host does not support DIPM\n");
+ ap->flags |= ATA_FLAG_NO_DIPM;
+ }
+
/* If no LPM states are supported by the HBA, do not bother with LPM */
if ((ap->host->flags & ATA_HOST_NO_PART) &&
(ap->host->flags & ATA_HOST_NO_SSC) &&
(ap->host->flags & ATA_HOST_NO_DEVSLP)) {
- ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ ata_port_dbg(ap,
+ "No LPM states supported, forcing LPM max_power\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
@@ -1869,7 +1946,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1893,7 +1970,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
- hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ hpriv->mmio = pcim_iomap(pdev, ahci_pci_bar, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
@@ -1983,10 +2062,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->private_data = hpriv;
- if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- }
+ ahci_init_irq(pdev, n_ports, hpriv);
+
hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
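Putting the two masks together: mask_port_map keeps only the ports whose bits are set, while mask_port_ext makes the driver ignore the external/hotplug capability of the ports whose bits are set. A hypothetical invocation (the PCI ID is a placeholder):

modprobe ahci mask_port_map=0x3 mask_port_ext=0000:00:1f.2=0x4

Here 0x3 exposes only ports 0 and 1 on every AHCI controller, and 0x4 stops port 2 on the one named controller from being treated as an external port (and therefore from having LPM disabled on it).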
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -246,6 +246,7 @@ enum {
AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
+ AHCI_HFLAG_ATAPI_DMA_QUIRK = BIT(29), /* force ATAPI to use DMA */
/* ap->flags bits */
@@ -328,7 +329,8 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 mask_port_map; /* mask out particular bits */
+ u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports whose external capability to ignore */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
@@ -379,6 +381,21 @@ struct ahci_host_priv {
int port);
};
+/*
+ * Return true if a port should be ignored because it is excluded from
+ * the host port map.
+ */
+static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
+ unsigned int portid)
+{
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
+}
+
extern int ahci_ignore_sss;
extern const struct attribute_group *ahci_shost_groups[];
@@ -396,8 +413,8 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 2f16524c2526..29be74fedcf0 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -288,6 +288,9 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -571,7 +574,7 @@ static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
- .remove_new = brcm_ahci_remove,
+ .remove = brcm_ahci_remove,
.shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 11a2c199a7c2..2d6a08c23d6a 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -206,6 +206,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
@@ -215,6 +218,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
@@ -402,7 +408,7 @@ MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
static struct platform_driver ceva_ahci_driver = {
.probe = ceva_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ceva_ahci_of_match,
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 55a6627d5450..f97566c420f8 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -137,13 +137,13 @@ static int ahci_da850_hardreset(struct ata_link *link,
static struct ata_port_operations ahci_da850_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_da850_softreset,
+ .reset.softreset = ahci_da850_softreset,
/*
* No need to override .pmp_softreset - it's only used for actual
* PMP-enabled ports.
*/
- .hardreset = ahci_da850_hardreset,
- .pmp_hardreset = ahci_da850_hardreset,
+ .reset.hardreset = ahci_da850_hardreset,
+ .pmp_reset.hardreset = ahci_da850_hardreset,
};
static const struct ata_port_info ahci_da850_port_info = {
@@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_da850_of_match,
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 4cb70064fb99..93faed2cfeb6 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -124,7 +124,7 @@ static int ahci_dm816_softreset(struct ata_link *link,
static struct ata_port_operations ahci_dm816_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_dm816_softreset,
+ .reset.softreset = ahci_dm816_softreset,
};
static const struct ata_port_info ahci_dm816_port_info = {
@@ -182,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
static struct platform_driver ahci_dm816_driver = {
.probe = ahci_dm816_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = AHCI_DM816_DRV_NAME,
.of_match_table = ahci_dm816_of_match,
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index ed263de3fd70..aec6d793f51a 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -478,7 +478,7 @@ MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
static struct platform_driver ahci_dwc_driver = {
.probe = ahci_dwc_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 6f955e9105e8..86aedd5923ac 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -511,7 +511,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
/*
- * set PHY Paremeters, two steps to configure the GPR13,
+ * set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
* is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
@@ -642,18 +642,19 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
int ret;
if (imxpriv->type == AHCI_IMX53)
- ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+ ret = ahci_pmp_retry_srst_ops.reset.softreset(link, class,
+ deadline);
else
- ret = ahci_ops.softreset(link, class, deadline);
+ ret = ahci_ops.reset.softreset(link, class, deadline);
return ret;
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_imx_host_stop,
- .error_handler = ahci_imx_error_handler,
- .softreset = ahci_imx_softreset,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .reset.softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
@@ -1027,7 +1028,7 @@ static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index adc851cd5578..7295b9066ae2 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -174,7 +174,7 @@ MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver mtk_ahci_driver = {
.probe = mtk_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f3187351e8a6..8744dae41612 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.suspend = ahci_mvebu_suspend,
.resume = ahci_mvebu_resume,
.driver = {
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 81fc63f6b008..c18054333f7c 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -96,7 +96,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b1a4e57578e2..0dec1a17e5b1 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -146,8 +146,8 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_qoriq_ops = {
- .inherits = &ahci_ops,
- .hardreset = ahci_qoriq_hardreset,
+ .inherits = &ahci_ops,
+ .reset.hardreset = ahci_qoriq_hardreset,
};
static const struct ata_port_info ahci_qoriq_port_info = {
@@ -357,7 +357,7 @@ static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
static struct platform_driver ahci_qoriq_driver = {
.probe = ahci_qoriq_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 59f97aa7ac75..3f16c1678402 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_seattle_driver = {
.probe = ahci_seattle_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.acpi_match_table = ahci_acpi_match,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 79a8b0aa37bf..4336c8a6e208 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -176,7 +176,6 @@ static int st_ahci_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -221,9 +220,8 @@ static int st_ahci_resume(struct device *dev)
return ahci_platform_resume_host(dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
@@ -234,11 +232,11 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &st_ahci_pm_ops,
+ .pm = pm_sleep_ptr(&st_ahci_pm_ops),
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(st_ahci_driver);
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 58b2683954dd..5d4584570ae0 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 8703c2a4658b..44584eed6374 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -608,7 +608,7 @@ deinit_controller:
static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 81a1d838c0fc..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
@@ -534,7 +531,7 @@ softreset_retry:
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
- * @host: Host that recieved the irq
+ * @host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
@@ -613,11 +610,11 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
+ .reset.softreset = xgene_ahci_softreset,
+ .pmp_reset.softreset = xgene_ahci_pmp_softreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
- .softreset = xgene_ahci_softreset,
- .pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_v1_port_info = {
@@ -630,7 +627,7 @@ static const struct ata_port_info xgene_ahci_v1_port_info = {
static struct ata_port_operations xgene_ahci_v2_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
};
@@ -859,7 +856,7 @@ disable_resources:
static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2f57ec00ab82..e70b6c089cf1 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -209,7 +209,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
-static struct pci_device_id ata_generic[] = {
+static const struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 093b940bc953..495fa096dd65 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1074,7 +1074,7 @@ static struct ata_port_operations piix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
+ .reset.prereset = piix_pata_prereset,
};
static struct ata_port_operations piix_vmw_ops = {
@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
NULL
};
@@ -1102,7 +1103,7 @@ static const struct scsi_host_template piix_sidpr_sht = {
static struct ata_port_operations piix_sidpr_sata_ops = {
.inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = piix_sidpr_scr_read,
.scr_write = piix_sidpr_scr_write,
.set_lpm = piix_sidpr_set_lpm,
@@ -1725,7 +1726,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fdfa7b266218..c79abdfcd7a9 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
@@ -162,10 +163,10 @@ struct ata_port_operations ahci_ops = {
.freeze = ahci_freeze,
.thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
+ .reset.softreset = ahci_softreset,
+ .reset.hardreset = ahci_hardreset,
+ .reset.postreset = ahci_postreset,
+ .pmp_reset.softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
@@ -192,7 +193,7 @@ EXPORT_SYMBOL_GPL(ahci_ops);
struct ata_port_operations ahci_pmp_retry_srst_ops = {
.inherits = &ahci_ops,
- .softreset = ahci_pmp_retry_softreset,
+ .reset.softreset = ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
@@ -541,6 +542,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* If mask_port_map is not set, all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
@@ -1033,7 +1035,7 @@ static void ahci_sw_activity(struct ata_link *link)
static void ahci_sw_activity_blink(struct timer_list *t)
{
- struct ahci_em_priv *emp = from_timer(emp, t, timer);
+ struct ahci_em_priv *emp = timer_container_of(emp, t, timer);
struct ata_link *link = emp->link;
struct ata_port *ap = link->ap;
@@ -1321,6 +1323,10 @@ static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
+ dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
+
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_info(dev,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 7a8064520a35..91d44302eac9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -49,6 +49,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -70,6 +73,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
disable_phys:
while (--i >= 0) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -88,6 +94,9 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -432,6 +441,20 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
return 0;
}
+static u32 ahci_platform_find_max_port_id(struct device *dev)
+{
+ u32 max_port = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ u32 port;
+
+ if (!of_property_read_u32(child, "reg", &port))
+ max_port = max(max_port, port);
+ }
+
+ return max_port;
+}
+
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -458,6 +481,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
u32 mask_port_map = 0;
+ u32 max_port;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
@@ -549,15 +573,17 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
goto err_out;
}
+ /* find maximum port id for allocating structures */
+ max_port = ahci_platform_find_max_port_id(dev);
/*
- * If no sub-node was found, we still need to set nports to
- * one in order to be able to use the
+ * Set nports according to the maximum port id. Clamp at
+ * AHCI_MAX_PORTS; a warning for any invalid port id is
+ * generated later.
+ * When the DT has no sub-nodes, max_port is 0 and nports is
+ * set to 1, in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (child_nodes)
- hpriv->nports = child_nodes;
- else
- hpriv->nports = 1;
+ hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1);
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
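A worked example of the new sizing rule: DT sub-nodes with reg = <0> and
reg = <5> make ahci_platform_find_max_port_id() return 5, so

	hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1); /* min(32, 6) == 6 */

(assuming the usual AHCI_MAX_PORTS value of 32), and phys[] can then be
indexed by every declared port id. With no sub-nodes, max_port stays 0 and
nports falls back to 1, preserving the old default.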
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index d36e71f475ab..f2140fc06ba0 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -86,7 +86,7 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
*
- * All ACPI bay / device realted events end up in this function. If
+ * All ACPI bay / device related events end up in this function. If
* the event is port-wide @dev is NULL. If the event is specific to a
* device, @dev points to it.
*
@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/**
- * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check
- * @gtm: GTM data to use
*
- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
*/
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
struct ata_device *dev;
+ int ret = ATA_CBL_PATA_UNK;
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (!gtm)
+ return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
- if (udma_mask & ~ATA_UDMA_MASK_40C)
- return 1;
+ ret = ATA_CBL_PATA40;
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C) {
+ ret = ATA_CBL_PATA80;
+ break;
+ }
}
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
@@ -832,7 +840,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
} else {
- /* SATA _GTF needs to be evaulated after _SDD and
+ /* SATA _GTF needs to be evaluated after _SDD and
* there's no reason to evaluate IDE _GTF early
* without _STM. Clear cache and schedule _GTF.
*/
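With the 80-wire test folded into ata_acpi_cbl_pata_type(), a PATA driver's
cable_detect method can return its result directly. A hedged sketch
(example_cable_detect is hypothetical; real drivers may combine this with
controller-specific detection):

	static int example_cable_detect(struct ata_port *ap)
	{
		/* ATA_CBL_PATA40, ATA_CBL_PATA80 or ATA_CBL_PATA_UNK,
		 * derived from the transfer mode the BIOS selected.
		 */
		return ata_acpi_cbl_pata_type(ap);
	}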
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..ff53f5f029b4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,8 +65,8 @@
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
- .prereset = ata_std_prereset,
- .postreset = ata_std_postreset,
+ .reset.prereset = ata_std_prereset,
+ .reset.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
@@ -88,6 +88,7 @@ struct ata_force_param {
unsigned int xfer_mask;
unsigned int quirk_on;
unsigned int quirk_off;
+ unsigned int pflags_on;
u16 lflags_on;
u16 lflags_off;
};
@@ -332,6 +333,35 @@ void ata_force_cbl(struct ata_port *ap)
}
/**
+ * ata_force_pflags - force port flags according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force port flags according to libata.force and whine about it.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_pflags(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ /* let pflags stack */
+ if (fe->param.pflags_on) {
+ ap->pflags |= fe->param.pflags_on;
+ ata_port_notice(ap,
+ "FORCE: port flag 0x%x forced -> 0x%x\n",
+ fe->param.pflags_on, ap->pflags);
+ }
+ }
+}
+
+/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
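The matching force-table entry is added near the bottom of this patch
(force_pflag_on(external, ATA_PFLAG_EXTERNAL)), so, assuming the standard
libata.force ID:name syntax, marking port 1 as external from the kernel
command line would look like:

	libata.force=1:external

Because pflags stack, this combines with any other force entries given for
the same port.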
@@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev)
}
}
#else
+static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
@@ -2123,14 +2154,46 @@ retry:
return err_mask;
}
+static inline void ata_clear_log_directory(struct ata_device *dev)
+{
+ memset(dev->gp_log_dir, 0, ATA_SECT_SIZE);
+}
+
+static int ata_read_log_directory(struct ata_device *dev)
+{
+ u16 version;
+
+ /* If the log page is already cached, do nothing. */
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version == 0x0001)
+ return 0;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->gp_log_dir, 1)) {
+ ata_clear_log_directory(dev);
+ return -EIO;
+ }
+
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version != 0x0001) {
+ ata_dev_err(dev, "Invalid log directory version 0x%04x\n",
+ version);
+ ata_clear_log_directory(dev);
+ dev->quirks |= ATA_QUIRK_NO_LOG_DIR;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ata_log_supported(struct ata_device *dev, u8 log)
{
if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
+ if (ata_read_log_directory(dev))
return 0;
- return get_unaligned_le16(&dev->sector_buf[log * 2]);
+
+ return get_unaligned_le16(&dev->gp_log_dir[log * 2]);
}
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
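The cache key here is the log directory's leading version word: a valid
directory always starts with 0x0001, so ata_read_log_directory() treats that
value as "already cached", and ata_clear_log_directory() (called from
ata_dev_configure() below) invalidates it so that revalidation rereads the
page. A sketch of the resulting lookup, mirroring ata_log_supported() above:

	/* Sketch: each log's supported word sits at byte offset log * 2. */
	u16 supported = get_unaligned_le16(&dev->gp_log_dir[ATA_LOG_NCQ_NON_DATA * 2]);
	if (supported)
		/* the device implements the NCQ Non-Data log */;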
@@ -2243,7 +2306,7 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
- "NCQ Send/Recv Log not supported\n");
+ "NCQ Non-Data Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
@@ -2390,18 +2453,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
dev->zac_zones_optimal_nonseq = U32_MAX;
dev->zac_zones_max_open = U32_MAX;
- /*
- * Always set the 'ZAC' flag for Host-managed devices.
- */
- if (dev->class == ATA_DEV_ZAC)
- dev->flags |= ATA_DFLAG_ZAC;
- else if (ata_id_zoned_cap(dev->id) == 0x01)
- /*
- * Check for host-aware devices.
- */
- dev->flags |= ATA_DFLAG_ZAC;
-
- if (!(dev->flags & ATA_DFLAG_ZAC))
+ if (!ata_dev_is_zac(dev))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
@@ -2464,7 +2516,7 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+static void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
{
kfree(dev->cdl);
dev->cdl = NULL;
@@ -2770,17 +2822,70 @@ out:
kfree(buf);
}
+/*
+ * Configure features related to link power management.
+ */
+static void ata_dev_config_lpm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (ap->flags & ATA_FLAG_NO_LPM) {
+ /*
+ * When the port does not support LPM, we cannot support it on
+ * the device either.
+ */
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ } else {
+ /*
+ * Some WD SATA-1 drives have issues with LPM, turn on NOLPM for
+ * them.
+ */
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
+ (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
+ /* ATI specific quirk */
+ if ((dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI) &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ }
+
+ if (dev->quirks & ATA_QUIRK_NOLPM &&
+ ap->target_lpm_policy != ATA_LPM_MAX_POWER) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
+ /*
+ * Device Initiated Power Management (DIPM) is normally disabled by
+ * default on a device. However, DIPM may have been enabled and that
+ * setting kept even after COMRESET because of the Software Settings
+ * Preservation feature. So if the port does not support DIPM and the
+ * device does, disable DIPM on the device.
+ */
+ if (ap->flags & ATA_FLAG_NO_DIPM && ata_id_has_dipm(dev->id)) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV)
+ ata_dev_err(dev, "Disable DIPM failed, Emask 0x%x\n",
+ err_mask);
+ }
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
+ ata_id_has_hipm(dev->id) ? " HIPM" : "",
+ ata_id_has_dipm(dev->id) ? " DIPM" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
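A note on the SATA-1 test above: (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2
isolates the speed-support bits of IDENTIFY word 76, where (on the usual
reading of that word, stated here as an assumption) bit 1 = Gen1 (1.5 Gbps),
bit 2 = Gen2 and bit 3 = Gen3. The comparison is true only when Gen1 alone is
set: a word of 0x0002 gives 0x0002 & 0xe == 0x2 (SATA-1 only), while a
Gen2-capable 0x0006 gives 0x6 and is left alone.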
@@ -2817,6 +2922,9 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
+ /* Clear the general purpose log directory cache. */
+ ata_clear_log_directory(dev);
+
/* Set quirks */
dev->quirks |= ata_dev_quirks(dev);
ata_force_quirks(dev);
@@ -2840,19 +2948,6 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
- /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
- (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (ap->flags & ATA_FLAG_NO_LPM)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
- dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
- }
-
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2939,6 +3034,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_lpm(dev);
ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
@@ -3414,7 +3510,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
}
/**
- * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
@@ -3430,7 +3526,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
* 0 on success, negative errno otherwise
*/
-int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3511,7 +3607,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
*r_failed_dev = dev;
return rc;
}
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_set_mode);
/**
* ata_wait_ready - wait for link to become ready
@@ -3897,6 +3993,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
[__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
[__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
[__ATA_QUIRK_NO_FUA] = "nofua",
@@ -4142,13 +4239,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
ATA_QUIRK_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI, },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM },
@@ -4502,7 +4602,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- ata_dev_dbg(dev, "init dev params \n");
+ ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
@@ -4544,7 +4644,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
*/
if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
- return 1;
+ return -EOPNOTSUPP;
if (ap->ops->check_atapi_dma)
return ap->ops->check_atapi_dma(qc);
@@ -5452,6 +5552,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#endif
ata_sff_port_init(ap);
+ ata_force_pflags(ap);
+
return ap;
}
EXPORT_SYMBOL_GPL(ata_port_alloc);
@@ -6264,6 +6366,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
+#define force_pflag_on(name, flags) \
+ { #name, .pflags_on = (flags) }
+
#define force_quirk_on(name, flag) \
{ #name, .quirk_on = (flag) }
@@ -6323,6 +6428,8 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
+ force_pflag_on(external, ATA_PFLAG_EXTERNAL),
+
force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
@@ -6636,12 +6743,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3b303d4ae37a..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -153,8 +153,6 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -700,7 +698,7 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ata_eh_acquire(ap);
repeat:
/* kill fast drain timer */
- del_timer_sync(&ap->fastdrain_timer);
+ timer_delete_sync(&ap->fastdrain_timer);
/* process port resume request */
ata_eh_handle_port_resume(ap);
@@ -825,7 +823,7 @@ void ata_port_wait_eh(struct ata_port *ap)
retry:
spin_lock_irqsave(ap->lock, flags);
- while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+ while (ata_port_eh_scheduled(ap)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
@@ -860,7 +858,7 @@ static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
- struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
+ struct ata_port *ap = timer_container_of(ap, t, fastdrain_timer);
unsigned long flags;
unsigned int cnt;
@@ -909,7 +907,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+static void ata_eh_set_pending(struct ata_port *ap, bool fastdrain)
{
unsigned int cnt;
@@ -949,7 +947,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
qc->flags |= ATA_QCFLAG_EH;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -971,7 +969,7 @@ void ata_std_sched_eh(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
scsi_schedule_eh(ap->scsi_host);
trace_ata_std_sched_eh(ap);
@@ -1022,7 +1020,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
int tag, nr_aborted = 0;
/* we're gonna abort all commands, no need for fast drain */
- ata_eh_set_pending(ap, 0);
+ ata_eh_set_pending(ap, false);
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
@@ -1542,8 +1540,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
- /* is it pointless to prefer PIO for "safety reasons"? */
- if (ap->flags & ATA_FLAG_PIO_DMA) {
+ /*
+ * Do not use DMA if the connected device only supports PIO, even if the
+ * port prefers PIO commands via DMA.
+ *
+ * Ideally, we should call atapi_check_dma() to check if it is safe for
+ * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
+ * Since we can't check the command, perhaps we should only use pio?
+ */
+ if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {
@@ -2066,6 +2071,188 @@ out:
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
+/*
+ * Check if a link is established. This is a relaxed version of
+ * ata_phys_link_online() which accounts for the fact that this is potentially
+ * called after changing the link power management policy, which may not be
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
+ * the PHY in partial, slumber or devsleep power management state).
+ * So check that:
+ * - A device is still present, that is, DET is 1h (Device presence detected
+ * but Phy communication not established) or 3h (Device presence detected and
+ * Phy communication established)
+ * - Communication is established, that is, IPM is not 0h, indicating that PHY
+ * is online or in a low power state.
+ */
+static bool ata_eh_link_established(struct ata_link *link)
+{
+ u32 sstatus;
+ u8 det, ipm;
+
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
+ if (sata_scr_read(link, SCR_STATUS, &sstatus))
+ return true;
+
+ det = sstatus & 0x0f;
+ ipm = (sstatus >> 8) & 0x0f;
+
+ return (det & 0x01) && ipm;
+}
+
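A worked example of the relaxed check: SStatus 0x123 decodes to DET = 3h,
SPD = 2h, IPM = 1h, so (det & 0x01) && ipm holds and the link counts as
established. SStatus 0x611 (DET = 1h, IPM = 6h, i.e. device detected while
the PHY sits in slumber) also passes here, whereas ata_phys_link_online()
would reject it because it requires DET == 3h.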
+/**
+ * ata_eh_link_set_lpm - configure SATA interface power management
+ * @link: link to configure
+ * @policy: the link power management policy
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_eh_link_set_lpm(struct ata_link *link,
+ enum ata_lpm_policy policy,
+ struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+ enum ata_lpm_policy old_policy = link->lpm_policy;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
+ unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+ unsigned int err_mask;
+ int rc;
+
+ /* if the link or host doesn't do LPM, noop */
+ if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+ (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+ return 0;
+
+ /*
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy);
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ /* find the first enabled and LPM enabled devices */
+ if (!link_dev)
+ link_dev = dev;
+
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
+ lpm_dev = dev;
+
+ hints &= ~ATA_LPM_EMPTY;
+ if (!dev_has_hipm)
+ hints &= ~ATA_LPM_HIPM;
+
+ /* disable DIPM before changing link config */
+ if (dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ if (ap) {
+ rc = ap->ops->set_lpm(link, policy, hints);
+ if (!rc && ap->slave_link)
+ rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+ } else
+ rc = sata_pmp_set_lpm(link, policy, hints);
+
+ /*
+ * Attribute link config failure to the first (LPM) enabled
+ * device on the link.
+ */
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ link->flags |= ATA_LFLAG_NO_LPM;
+ return 0;
+ }
+ dev = lpm_dev ? lpm_dev : link_dev;
+ goto fail;
+ }
+
+ /*
+ * Low level driver acked the transition. Issue DIPM command
+ * with the new policy set.
+ */
+ link->lpm_policy = policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = policy;
+
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to enable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
+ return 0;
+
+fail:
+ /* restore the old policy */
+ link->lpm_policy = old_policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = old_policy;
+
+ /* if no device or only one more chance is left, disable LPM */
+ if (!dev || ehc->tries[dev->devno] <= 2) {
+ ata_link_warn(link, "disabling LPM on the link\n");
+ link->flags |= ATA_LFLAG_NO_LPM;
+ }
+ if (r_failed_dev)
+ *r_failed_dev = dev;
+ return rc;
+}
+
/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
@@ -2599,25 +2786,28 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
+static bool ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
- return 0;
+ return false;
if (rc == -EAGAIN)
- return 1;
+ return true;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
- return 1;
- return 0;
+ return true;
+ return false;
}
int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
+ ata_reset_fn_t hardreset = reset_ops->hardreset;
+ ata_reset_fn_t softreset = reset_ops->softreset;
+ ata_prereset_fn_t prereset = reset_ops->prereset;
+ ata_postreset_fn_t postreset = reset_ops->postreset;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -3116,13 +3306,13 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* to ap->target_lpm_policy after revalidation is done.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
- rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
- r_failed_dev);
+ rc = ata_eh_link_set_lpm(link, ATA_LPM_MAX_POWER,
+ r_failed_dev);
if (rc)
goto err;
}
- if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ if (!ata_eh_link_established(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
@@ -3226,12 +3416,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
+ * ata_eh_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
@@ -3240,7 +3430,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+static int ata_eh_set_mode(struct ata_link *link,
+ struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3261,7 +3452,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
if (ap->ops->set_mode)
rc = ap->ops->set_mode(link, r_failed_dev);
else
- rc = ata_do_set_mode(link, r_failed_dev);
+ rc = ata_set_mode(link, r_failed_dev);
/* if transfer mode has changed, set DUBIOUS_XFER on device */
ata_for_each_dev(dev, link, ENABLED) {
@@ -3401,140 +3592,6 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
return rc;
}
-/**
- * ata_eh_set_lpm - configure SATA interface power management
- * @link: link to configure power management
- * @policy: the link power management policy
- * @r_failed_dev: out parameter for failed device
- *
- * Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power and
- * medium_power_with_dipm policies, and then call driver specific
- * callbacks for enabling Host Initiated Power management.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev)
-{
- struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
- enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
- unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
- unsigned int err_mask;
- int rc;
-
- /* if the link or host doesn't do LPM, noop */
- if (!IS_ENABLED(CONFIG_SATA_HOST) ||
- (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
- return 0;
-
- /*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
-
- /* find the first enabled and LPM enabled devices */
- if (!link_dev)
- link_dev = dev;
-
- if (!lpm_dev && (hipm || dipm))
- lpm_dev = dev;
-
- hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
- hints &= ~ATA_LPM_HIPM;
-
- /* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_DISABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- if (ap) {
- rc = ap->ops->set_lpm(link, policy, hints);
- if (!rc && ap->slave_link)
- rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
- } else
- rc = sata_pmp_set_lpm(link, policy, hints);
-
- /*
- * Attribute link config failure to the first (LPM) enabled
- * device on the link.
- */
- if (rc) {
- if (rc == -EOPNOTSUPP) {
- link->flags |= ATA_LFLAG_NO_LPM;
- return 0;
- }
- dev = lpm_dev ? lpm_dev : link_dev;
- goto fail;
- }
-
- /*
- * Low level driver acked the transition. Issue DIPM command
- * with the new policy set.
- */
- link->lpm_policy = policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = policy;
-
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
- ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_ENABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to enable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- link->last_lpm_change = jiffies;
- link->flags |= ATA_LFLAG_CHANGED;
-
- return 0;
-
-fail:
- /* restore the old policy */
- link->lpm_policy = old_policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = old_policy;
-
- /* if no device or only one more chance is left, disable LPM */
- if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_warn(link, "disabling LPM on the link\n");
- link->flags |= ATA_LFLAG_NO_LPM;
- }
- if (r_failed_dev)
- *r_failed_dev = dev;
- return rc;
-}
-
int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -3707,10 +3764,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
* @r_failed_link: out parameter for failed link
*
* This is the alpha and omega, eum and yang, heart and soul of
@@ -3726,9 +3780,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+int ata_eh_recover(struct ata_port *ap, struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_link)
{
struct ata_link *link;
@@ -3796,8 +3848,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (!(ehc->i.action & ATA_EH_RESET))
continue;
- rc = ata_eh_reset(link, ata_link_nr_vacant(link),
- prereset, softreset, hardreset, postreset);
+ rc = ata_eh_reset(link, ata_link_nr_vacant(link), reset_ops);
if (rc) {
ata_link_err(link, "reset failed, giving up\n");
goto out;
@@ -3878,7 +3929,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
- rc = ata_set_mode(link, &dev);
+ rc = ata_eh_set_mode(link, &dev);
if (rc)
goto rest_fail;
ehc->i.flags &= ~ATA_EHI_SETMODE;
@@ -3923,7 +3974,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
config_lpm:
/* configure link power saving */
if (link->lpm_policy != ap->target_lpm_policy) {
- rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+ rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy,
+ &dev);
if (rc)
goto rest_fail;
}
@@ -4017,59 +4069,39 @@ void ata_eh_finish(struct ata_port *ap)
}
/**
- * ata_do_eh - do standard error handling
+ * ata_std_error_handler - standard error handler
* @ap: host port to handle error for
*
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
- *
* Perform standard error handling sequence.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset)
+void ata_std_error_handler(struct ata_port *ap)
{
- struct ata_device *dev;
+ struct ata_reset_operations *reset_ops = &ap->ops->reset;
+ struct ata_link *link = &ap->link;
int rc;
+ /* Ignore built-in hardresets if SCR access is not available */
+ if ((reset_ops->hardreset == sata_std_hardreset ||
+ reset_ops->hardreset == sata_sff_hardreset) &&
+ !sata_scr_valid(link))
+ link->flags |= ATA_LFLAG_NO_HRST;
+
ata_eh_autopsy(ap);
ata_eh_report(ap);
- rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
- NULL);
+ rc = ata_eh_recover(ap, reset_ops, NULL);
if (rc) {
- ata_for_each_dev(dev, &ap->link, ALL)
+ struct ata_device *dev;
+
+ ata_for_each_dev(dev, link, ALL)
ata_dev_disable(dev);
}
ata_eh_finish(ap);
}
-
-/**
- * ata_std_error_handler - standard error handler
- * @ap: host port to handle error for
- *
- * Standard error handler
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_std_error_handler(struct ata_port *ap)
-{
- struct ata_port_operations *ops = ap->ops;
- ata_reset_fn_t hardreset = ops->hardreset;
-
- /* ignore built-in hardreset if SCR access is not available */
- if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
-}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index d5d189328ae6..57023324a56f 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -15,9 +15,9 @@
const struct ata_port_operations sata_pmp_port_ops = {
.inherits = &sata_port_ops,
- .pmp_prereset = ata_std_prereset,
- .pmp_hardreset = sata_std_hardreset,
- .pmp_postreset = ata_std_postreset,
+ .pmp_reset.prereset = ata_std_prereset,
+ .pmp_reset.hardreset = sata_std_hardreset,
+ .pmp_reset.postreset = ata_std_postreset,
.error_handler = sata_pmp_error_handler,
};
@@ -727,10 +727,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
/**
* sata_pmp_eh_recover_pmp - recover PMP
* @ap: ATA port PMP is attached to
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method
- * @hardreset: hardreset method
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
*
* Recover PMP attached to @ap. Recovery procedure is somewhat
* similar to that of ata_eh_recover() except that reset should
@@ -744,8 +741,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_link *link = &ap->link;
struct ata_eh_context *ehc = &link->eh_context;
@@ -767,8 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
struct ata_link *tlink;
/* reset */
- rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
- postreset);
+ rc = ata_eh_reset(link, 0, reset_ops);
if (rc) {
ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
@@ -932,8 +927,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
retry:
/* PMP attached? */
if (!sata_pmp_attached(ap)) {
- rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset, NULL);
+ rc = ata_eh_recover(ap, &ops->reset, NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
@@ -951,8 +945,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
}
/* recover pmp */
- rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset);
+ rc = sata_pmp_eh_recover_pmp(ap, &ops->reset);
if (rc)
goto pmp_fail;
@@ -978,8 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto pmp_fail;
/* recover links */
- rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
- ops->pmp_hardreset, ops->pmp_postreset, &link);
+ rc = ata_eh_recover(ap, &ops->pmp_reset, &link);
if (rc)
goto link_fail;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9c76fb1ad2ec..b2817a2995d6 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -900,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
+/*
+ * Check if a port supports link power management.
+ * Must be called with the port locked.
+ */
+static bool ata_scsi_lpm_supported(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ return false;
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->quirks & ATA_QUIRK_NOLPM)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ bool supported;
+
+ spin_lock_irqsave(ap->lock, flags);
+ supported = ata_scsi_lpm_supported(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+DEVICE_ATTR(link_power_management_supported, S_IRUGO,
+ ata_scsi_lpm_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
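With the attribute exported here and added to the shost attribute lists in
ahci and ata_piix above, userspace can probe for LPM support before writing a
policy: assuming the standard SCSI host sysfs layout, reading
/sys/class/scsi_host/host0/link_power_management_supported returns 1 when the
port and all enabled devices allow LPM, and 0 otherwise (in which case
writing link_power_management_policy fails with EOPNOTSUPP, per the store
handler below).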
+
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -924,13 +962,9 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
+ if (!ata_scsi_lpm_supported(ap)) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
}
ap->target_lpm_policy = policy;
@@ -1313,7 +1347,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_device_configure - Default device_configure routine for libata
+ * ata_sas_sdev_configure - Default sdev_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
@@ -1323,14 +1357,14 @@ EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
* Zero.
*/
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap)
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_device_configure);
+EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
@@ -1509,7 +1543,10 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
+ u16 extended_sense;
+ bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1527,8 +1564,9 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
+ extended_sense = get_unaligned_le16(&buf[14]);
+ aux_icc_valid = extended_sense & BIT(15);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1541,7 +1579,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1556,6 +1594,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
continue;
}
+ qc->result_tf.nsect = sense[6];
+ qc->result_tf.hob_nsect = sense[7];
+ qc->result_tf.lbal = sense[8];
+ qc->result_tf.lbam = sense[9];
+ qc->result_tf.lbah = sense[10];
+ qc->result_tf.hob_lbal = sense[11];
+ qc->result_tf.hob_lbam = sense[12];
+ qc->result_tf.hob_lbah = sense[13];
+ if (aux_icc_valid)
+ qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
+
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,
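For reference, the layout parsed here (as used by the code, not restated from
the spec): bytes 8..11 of the log form a little-endian bitmap of tags whose
sense data is valid, hence get_unaligned_le32() plus the BIT(tag) test; bit
15 of the 16-bit word at bytes 14..15 flags whether each descriptor's AUX/ICC
fields are valid; and each per-tag descriptor now has its full result
taskfile copied back (sense[6]..sense[13], plus sense[16]..sense[19] for the
auxiliary field when valid), so the sense INFORMATION field can later be
built from a real LBA.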
@@ -1619,7 +1668,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1644,8 +1693,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
@@ -1686,6 +1733,6 @@ const struct ata_port_operations sata_port_ops = {
.inherits = &ata_base_port_ops,
.qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f915e3df57a9..b43a3196e2be 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -216,17 +216,21 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
@@ -347,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -362,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
@@ -855,18 +859,14 @@ static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
- /* Must be first because BUSY means no other bits valid */
- {0x80, ABORTED_COMMAND, 0x47, 0x00},
- // Busy, fake parity for now
- {0x40, ILLEGAL_REQUEST, 0x21, 0x04},
- // Device ready, unaligned write command
- {0x20, HARDWARE_ERROR, 0x44, 0x00},
- // Device fault, internal target failure
- {0x08, ABORTED_COMMAND, 0x47, 0x00},
- // Timed out in xfer, fake parity for now
- {0x04, RECOVERED_ERROR, 0x11, 0x00},
- // Recovered ECC error Medium error, recovered
- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ /* Busy: must be first because BUSY means no other bits valid */
+ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 },
+ /* Device fault: INTERNAL TARGET FAILURE */
+ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 },
+ /* Corrected data error */
+ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 },
+
+ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
};
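The rewritten table makes the status-to-sense mapping explicit: a stuck BUSY
(ATA_BUSY, 0x80) now yields ABORTED COMMAND with asc/ascq 00/00 instead of
the old fake-parity 47/00, a device fault (ATA_DF, 0x20) maps to HARDWARE
ERROR 44/00 (INTERNAL TARGET FAILURE), and a corrected data error (ATA_CORR,
0x04) to RECOVERED ERROR 00/00; the unaligned-write and timed-out-in-xfer
entries from the old table are dropped.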
/*
@@ -938,6 +938,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"missing result TF: can't generate ATA PT sense data\n");
+ if (qc->err_mask)
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -971,8 +973,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -982,8 +983,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
if (ata_dev_disabled(dev)) {
@@ -995,8 +994,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
- "missing result TF: can't generate sense data\n");
- return;
+ "Missing result TF: reporting aborted command\n");
+ goto aborted;
}
/* Use ata_to_sense_error() to map status register bits
@@ -1007,19 +1006,15 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
- } else {
- /* Could not decode error */
- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->status, qc->err_mask);
- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
+ /* Could not decode error */
+ ata_dev_warn(dev,
+ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
+ tf->error, tf->status, qc->err_mask);
+aborted:
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1133,7 +1128,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
}
/**
- * ata_scsi_slave_alloc - Early setup of SCSI device
+ * ata_scsi_sdev_init - Early setup of SCSI device
* @sdev: SCSI device to examine
*
* This is called from scsi_alloc_sdev() when the scsi device
@@ -1143,7 +1138,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_alloc(struct scsi_device *sdev)
+int ata_scsi_sdev_init(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct device_link *link;
@@ -1166,10 +1161,10 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);
/**
- * ata_scsi_device_configure - Set SCSI device attributes
+ * ata_scsi_sdev_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
@@ -1181,8 +1176,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
@@ -1192,10 +1186,10 @@ int ata_scsi_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);
/**
- * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
@@ -1208,7 +1202,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
-void ata_scsi_slave_destroy(struct scsi_device *sdev)
+void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
@@ -1228,7 +1222,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
kfree(sdev->dma_drain_buf);
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
@@ -1334,17 +1328,8 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len;
-
- lba |= ((u64)(cdb[1] & 0x1f)) << 16;
- lba |= ((u64)cdb[2]) << 8;
- lba |= ((u64)cdb[3]);
-
- len = cdb[4];
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
+ *plen = cdb[4];
}
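Worked example for the compacted helper: a READ(6) CDB of 08 32 34 56 08 00
gives get_unaligned_be24(&cdb[1]) == 0x323456; masking with 0x1fffff keeps
only the 21 LBA bits a 6-byte CDB defines (the top three bits of byte 1 lie
outside the LBA field), so *plba = 0x123456 and *plen = 8, identical to the
old shift-and-or version, byte for byte.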
/**
@@ -1689,8 +1674,10 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
ata_scsi_set_passthru_sense_fields(qc);
if (is_ck_cond_request)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
- } else if (is_error && !have_sense) {
- ata_gen_ata_sense(qc);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
}
ata_qc_done(qc);
@@ -1781,15 +1768,10 @@ defer:
return SCSI_MLQUEUE_HOST_BUSY;
}
-struct ata_scsi_args {
- struct ata_device *dev;
- u16 *id;
- struct scsi_cmnd *cmd;
-};
-
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
@@ -1802,30 +1784,32 @@ struct ata_scsi_args {
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
- unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
+ unsigned int (*actor)(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf))
{
- unsigned int rc;
- struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
+ unsigned int len;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
- rc = actor(args, ata_scsi_rbuf);
- if (rc == 0)
+ len = actor(dev, cmd, ata_scsi_rbuf);
+ if (len) {
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ cmd->result = SAM_STAT_GOOD;
+ if (scsi_bufflen(cmd) > len)
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
+ }
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
-
- if (rc == 0)
- cmd->result = SAM_STAT_GOOD;
}
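Residual handling now falls out of the returned length: ata_scsiop_inq_std()
returns 96 bytes (header plus all eight version descriptor slots), so an
INQUIRY with a 255-byte allocation length gets a residual of 255 - 96 = 159
via scsi_set_resid(), while a simulator returning 0 leaves cmd->result alone
so it can report its own error.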
/**
- * ata_scsiop_inq_std - Simulate INQUIRY command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
@@ -1834,7 +1818,8 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 versions[] = {
0x00,
@@ -1875,40 +1860,45 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* Set the SCSI Removable Media Bit (RMB) if the ATA removable media
* device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id))
+ if (ata_id_removable(dev->id))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC) {
+ if (dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
- if (args->dev->flags & ATA_DFLAG_CDL)
+ if (dev->flags & ATA_DFLAG_CDL)
hdr[2] = 0xd; /* claim SPC-6 version compatibility */
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
- ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+ ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);
/* From SAT, use last 2 words from fw rev unless they are spaces */
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
if (strncmp(&rbuf[32], " ", 4) == 0)
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
- return 0;
+ /*
+ * Include all 8 possible version descriptors, even if not all of
+ * them are populated.
+ */
+ return 96;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
@@ -1916,7 +1906,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
int i, num_pages = 0;
static const u8 pages[] = {
@@ -1932,19 +1923,20 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
};
for (i = 0; i < sizeof(pages); i++) {
- if (pages[i] == 0xb6 &&
- !(args->dev->flags & ATA_DFLAG_ZAC))
+ if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
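
The get_unaligned_be16(&rbuf[2]) + 4 idiom, repeated by all the VPD helpers below, comes from the SPC VPD page layout: bytes 2-3 hold the big-endian PAGE LENGTH, which excludes the 4-byte page header. A standalone sketch of the same computation:

	/*
	 * SPC VPD page layout: byte 1 is the page code and bytes 2..3 the
	 * big-endian PAGE LENGTH, which excludes the 4-byte header, so the
	 * full response length is PAGE LENGTH + 4.
	 */
	static unsigned int vpd_response_len(const unsigned char *rbuf)
	{
		return (((unsigned int)rbuf[2] << 8) | rbuf[3]) + 4;
	}
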
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
@@ -1952,7 +1944,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 hdr[] = {
0,
@@ -1962,14 +1955,16 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
};
memcpy(rbuf, hdr, sizeof(hdr));
- ata_id_string(args->id, (unsigned char *) &rbuf[4],
+ ata_id_string(dev->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
@@ -1980,7 +1975,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
@@ -1992,7 +1988,7 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
@@ -2004,31 +2000,33 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
- if (ata_id_has_wwn(args->id)) {
+ if (ata_id_has_wwn(dev->id)) {
/* SAT defined lu world wide name */
/* piv=0, assoc=lu, code_set=binary, designator=NAA */
rbuf[num + 0] = 1;
rbuf[num + 1] = 3;
rbuf[num + 3] = ATA_ID_WWN_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_WWN, ATA_ID_WWN_LEN);
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
@@ -2036,7 +2034,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
@@ -2057,13 +2056,25 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
rbuf[56] = ATA_CMD_ID_ATA;
- memcpy(&rbuf[60], &args->id[0], 512);
- return 0;
+ memcpy(&rbuf[60], &dev->id[0], 512);
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B0h (Block Limits).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2076,7 +2087,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* logical than physical sector size we need to figure out what the
* latter is.
*/
- min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
+ min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
@@ -2088,7 +2099,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(dev->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
@@ -2098,14 +2109,27 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be32(1, &rbuf[28]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B1h (Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- int form_factor = ata_id_form_factor(args->id);
- int media_rotation_rate = ata_id_rotation_rate(args->id);
- u8 zoned = ata_id_zoned_cap(args->id);
+ int form_factor = ata_id_form_factor(dev->id);
+ int media_rotation_rate = ata_id_rotation_rate(dev->id);
+ u8 zoned = ata_id_zoned_cap(dev->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
@@ -2115,21 +2139,52 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
if (zoned)
rbuf[8] = (zoned << 4);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
+ * Provisioning
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Logical Block Provisioning).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
rbuf[1] = 0xb2;
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B6h (Zoned Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
+ if (!ata_dev_is_zac(dev)) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/*
* zbc-r05 SCSI Zoned Block device characteristics VPD page
*/
@@ -2139,21 +2194,39 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
/*
* URSWRZ bit is only meaningful for host-managed ZAC drives
*/
- if (args->dev->zac_zoned_cap & 1)
+ if (dev->zac_zoned_cap & 1)
rbuf[4] |= 1;
- put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
- put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
- put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+ put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
+ put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
+ put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
+ * Ranges
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ struct ata_cpr_log *cpr_log = dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
+ if (!cpr_log) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
@@ -2165,7 +2238,58 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
+}
+
+/**
+ * ata_scsiop_inquiry - Simulate INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns data associated with an INQUIRY command output.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
+{
+ const u8 *scsicmd = cmd->cmnd;
+
+ /* is CmdDt set? */
+ if (scsicmd[1] & 2) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* Is EVPD clear? */
+ if ((scsicmd[1] & 1) == 0)
+ return ata_scsiop_inq_std(dev, cmd, rbuf);
+
+ switch (scsicmd[2]) {
+ case 0x00:
+ return ata_scsiop_inq_00(dev, cmd, rbuf);
+ case 0x80:
+ return ata_scsiop_inq_80(dev, cmd, rbuf);
+ case 0x83:
+ return ata_scsiop_inq_83(dev, cmd, rbuf);
+ case 0x89:
+ return ata_scsiop_inq_89(dev, cmd, rbuf);
+ case 0xb0:
+ return ata_scsiop_inq_b0(dev, cmd, rbuf);
+ case 0xb1:
+ return ata_scsiop_inq_b1(dev, cmd, rbuf);
+ case 0xb2:
+ return ata_scsiop_inq_b2(dev, cmd, rbuf);
+ case 0xb6:
+ return ata_scsiop_inq_b6(dev, cmd, rbuf);
+ case 0xb9:
+ return ata_scsiop_inq_b9(dev, cmd, rbuf);
+ default:
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
}
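
ata_scsiop_inquiry() folds the CDB decoding that used to live in ata_scsi_simulate() into one dispatcher. The bits it tests follow the SPC INQUIRY CDB; a sketch of that decoding, with decode_inquiry_cdb as a hypothetical helper for illustration:

	/* Hypothetical helper mirroring the CDB decoding above. */
	static void decode_inquiry_cdb(const unsigned char *cdb, int *cmddt,
				       int *evpd, unsigned char *page)
	{
		*cmddt = cdb[1] & 0x02;	/* CmdDt set -> invalid field */
		*evpd  = cdb[1] & 0x01;	/* clear: std data, set: VPD page */
		*page  = cdb[2];	/* page code, valid only with EVPD */
	}
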
/**
@@ -2325,8 +2449,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
- if (dev->flags & ATA_DFLAG_CDL)
- buf[4] = 0x02; /* Support T2A and T2B pages */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -2388,7 +2512,8 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
@@ -2398,10 +2523,10 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+ u8 *scsicmd = cmd->cmnd, *p = rbuf;
static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
@@ -2466,17 +2591,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CACHE_MPAGE:
- p += ata_msense_caching(args->id, p, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
break;
case CONTROL_MPAGE:
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
- p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
default: /* invalid page code */
@@ -2494,29 +2619,33 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
- } else {
- put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
- rbuf[3] |= dpofua;
- if (ebd) {
- rbuf[7] = sizeof(sat_blk_desc);
- memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
- }
+
+ return rbuf[0] + 1;
}
- return 0;
+
+ put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+
+ return get_unaligned_be16(&rbuf[0]) + 2;
invalid_fld:
- ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
- return 1;
+ ata_scsi_set_invalid_field(dev, cmd, fp, bp);
+ return 0;
saving_not_supp:
- ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
- return 1;
+ return 0;
}
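
The two return paths encode the MODE SENSE length rules: for MODE SENSE(10) the MODE DATA LENGTH is a big-endian word that excludes its own two bytes, while for MODE SENSE(6) it is a single byte that excludes itself. As a standalone sketch:

	/* Total response length from the MODE DATA LENGTH field. */
	static unsigned int mode_sense_total_len(const unsigned char *rbuf,
						 int ten_byte_cdb)
	{
		if (ten_byte_cdb)	/* 2-byte field excluding itself */
			return ((((unsigned int)rbuf[0]) << 8) | rbuf[1]) + 2;
		return (unsigned int)rbuf[0] + 1; /* 1-byte, excl. itself */
	}
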
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
@@ -2524,9 +2653,10 @@ saving_not_supp:
* LOCKING:
* None.
*/
-static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
+ u8 *scsicmd = cmd->cmnd;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u32 sector_size; /* physical sector size in bytes */
u8 log2_per_phys;
@@ -2536,7 +2666,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (scsicmd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2551,48 +2681,59 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[5] = sector_size >> (8 * 2);
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
- } else {
- /* sector count, 64-bit */
- rbuf[0] = last_lba >> (8 * 7);
- rbuf[1] = last_lba >> (8 * 6);
- rbuf[2] = last_lba >> (8 * 5);
- rbuf[3] = last_lba >> (8 * 4);
- rbuf[4] = last_lba >> (8 * 3);
- rbuf[5] = last_lba >> (8 * 2);
- rbuf[6] = last_lba >> (8 * 1);
- rbuf[7] = last_lba;
- /* sector size */
- rbuf[ 8] = sector_size >> (8 * 3);
- rbuf[ 9] = sector_size >> (8 * 2);
- rbuf[10] = sector_size >> (8 * 1);
- rbuf[11] = sector_size;
-
- rbuf[12] = 0;
- rbuf[13] = log2_per_phys;
- rbuf[14] = (lowest_aligned >> 8) & 0x3f;
- rbuf[15] = lowest_aligned;
-
- if (ata_id_has_trim(args->id) &&
- !(dev->quirks & ATA_QUIRK_NOTRIM)) {
- rbuf[14] |= 0x80; /* LBPME */
-
- if (ata_id_has_zero_after_trim(args->id) &&
- dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
- ata_dev_info(dev, "Enabling discard_zeroes_data\n");
- rbuf[14] |= 0x40; /* LBPRZ */
- }
+ return 8;
+ }
+
+ /*
+ * READ CAPACITY 16 command is defined as a service action
+ * (SERVICE_ACTION_IN_16 command).
+ */
+ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
+ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* sector count, 64-bit */
+ rbuf[0] = last_lba >> (8 * 7);
+ rbuf[1] = last_lba >> (8 * 6);
+ rbuf[2] = last_lba >> (8 * 5);
+ rbuf[3] = last_lba >> (8 * 4);
+ rbuf[4] = last_lba >> (8 * 3);
+ rbuf[5] = last_lba >> (8 * 2);
+ rbuf[6] = last_lba >> (8 * 1);
+ rbuf[7] = last_lba;
+
+ /* sector size */
+ rbuf[ 8] = sector_size >> (8 * 3);
+ rbuf[ 9] = sector_size >> (8 * 2);
+ rbuf[10] = sector_size >> (8 * 1);
+ rbuf[11] = sector_size;
+
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
+ rbuf[13] = log2_per_phys;
+ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ rbuf[15] = lowest_aligned;
+
+ if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+ rbuf[14] |= 0x80; /* LBPME */
+
+ if (ata_id_has_zero_after_trim(dev->id) &&
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
}
- if (ata_id_zoned_cap(args->id) ||
- args->dev->class == ATA_DEV_ZAC)
- rbuf[12] = (1 << 4); /* RC_BASIS */
}
- return 0;
+
+ return 16;
}
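
ata_scsiop_read_cap() now validates the service action itself: READ CAPACITY(16) is opcode SERVICE ACTION IN(16) with the low five bits of CDB byte 1 equal to SAI_READ_CAPACITY_16. The 32-bit clamp in the READ CAPACITY(10) path is also worth spelling out; a sketch:

	/*
	 * READ CAPACITY(10) can report at most 0xffffffff; larger devices
	 * return the all-ones value to steer initiators to the 16-byte
	 * variant, matching the clamp above.
	 */
	static unsigned int read_cap10_last_lba(unsigned long long last_lba)
	{
		return last_lba >= 0xffffffffULL ?
			0xffffffffu : (unsigned int)last_lba;
	}
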
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
@@ -2600,11 +2741,12 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
- return 0;
+ return 16;
}
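
The fixed 16-byte REPORT LUNS response is the 8-byte header, with LUN LIST LENGTH = 8 in bytes 0-3, followed by a single zeroed 8-byte descriptor for LUN 0. A sketch:

	#include <string.h>

	/* Single-LUN REPORT LUNS payload: 8-byte header + one descriptor. */
	static unsigned int build_report_luns(unsigned char *rbuf)
	{
		memset(rbuf, 0, 16);	/* caller provides >= 16 bytes */
		rbuf[3] = 8;		/* LUN list length: one 8-byte entry */
		return 16;
	}
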
/*
@@ -3312,7 +3454,8 @@ invalid_opcode:
/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
- * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
@@ -3320,17 +3463,21 @@ invalid_opcode:
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *cdb = args->cmd->cmnd;
+ u8 *cdb = cmd->cmnd;
u8 supported = 0, cdlp = 0, rwcdlp = 0;
- unsigned int err = 0;
+
+ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
- err = 2;
- goto out;
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
}
switch (cdb[3]) {
@@ -3398,11 +3545,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
default:
break;
}
-out:
+
/* One command format */
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
- return err;
+
+ return 4;
}
/**
@@ -3734,12 +3882,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
- const u8 *buf, int len,
- u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3758,16 +3905,21 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL */
+ ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
- /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+ /*
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
+ */
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
+ ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
@@ -4159,9 +4311,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
- * therefore we must check if EH is pending, while holding ap->lock.
+ * therefore we must check if EH is pending or running, while holding
+ * ap->lock.
*/
- if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ if (ata_port_eh_scheduled(ap))
return SCSI_MLQUEUE_DEVICE_BUSY;
if (unlikely(!scmd->cmd_len))
@@ -4262,78 +4415,26 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
- struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
- args.dev = dev;
- args.id = dev->id;
- args.cmd = cmd;
-
switch(scsicmd[0]) {
case INQUIRY:
- if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
- else switch (scsicmd[2]) {
- case 0x00:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
- break;
- case 0x80:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
- break;
- case 0x83:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
- break;
- case 0x89:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
- break;
- case 0xb0:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
- break;
- case 0xb1:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
- break;
- case 0xb2:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
- break;
- case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- case 0xb9:
- if (dev->cpr_log)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- default:
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- }
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
break;
case MODE_SENSE:
case MODE_SENSE_10:
- ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
break;
case READ_CAPACITY:
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- break;
-
case SERVICE_ACTION_IN_16:
- if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
break;
case REPORT_LUNS:
- ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
@@ -4361,10 +4462,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
break;
/* all other commands */
@@ -4531,24 +4629,23 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
- * This function is called from ata_eh_hotplug() and responsible
- * for taking the SCSI device attached to @dev offline. This
- * function is called with host lock which protects dev->sdev
- * against clearing.
+ * This function is called from ata_eh_detach_dev() and is responsible for
+ * taking the SCSI device attached to @dev offline. This function is
+ * called with host lock which protects dev->sdev against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if attached SCSI device exists, 0 otherwise.
+ * true if attached SCSI device exists, false otherwise.
*/
-int ata_scsi_offline_dev(struct ata_device *dev)
+bool ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 67f277e1c3bf..1e2a2c33cdc8 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -31,10 +31,10 @@ const struct ata_port_operations ata_sff_port_ops = {
.freeze = ata_sff_freeze,
.thaw = ata_sff_thaw,
- .prereset = ata_sff_prereset,
- .softreset = ata_sff_softreset,
- .hardreset = sata_sff_hardreset,
- .postreset = ata_sff_postreset,
+ .reset.prereset = ata_sff_prereset,
+ .reset.softreset = ata_sff_softreset,
+ .reset.hardreset = sata_sff_hardreset,
+ .reset.postreset = ata_sff_postreset,
.error_handler = ata_sff_error_handler,
.sff_dev_select = ata_sff_dev_select,
@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
- unsigned int offset;
+ unsigned int offset, count;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -614,28 +614,30 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
- trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
+ /* don't overrun current sg */
+ count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
+
+ trace_ata_sff_pio_transfer_data(qc, offset, count);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
- if (offset + qc->sect_size > PAGE_SIZE) {
+ if (offset + count > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0,
- qc->sect_size - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
- ata_pio_xfer(qc, page, offset, qc->sect_size);
+ ata_pio_xfer(qc, page, offset, count);
}
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
@@ -749,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -2052,8 +2054,6 @@ EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
*/
void ata_sff_error_handler(struct ata_port *ap)
{
- ata_reset_fn_t softreset = ap->ops->softreset;
- ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
@@ -2075,13 +2075,7 @@ void ata_sff_error_handler(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- /* ignore built-in hardresets if SCR access is not available */
- if ((hardreset == sata_std_hardreset ||
- hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
- ap->ops->postreset);
+ ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
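
Two things change in the libata-sff.c PIO path: the per-sector transfer is clamped to the current scatterlist element (count = min(...)), so a sector that straddles sg entries can no longer overrun one, and nth_page() is replaced by plain pointer arithmetic on the struct page. The page-boundary split itself is unchanged; a byte-oriented sketch of it, assuming 4 KiB pages (PAGE_SZ, pio_xfer and pio_sector are illustrative names):

	#define PAGE_SZ 4096u

	static void pio_xfer(unsigned char *page, unsigned int off,
			     unsigned int len);

	/*
	 * Transfer 'count' bytes starting at 'offset' within 'page',
	 * splitting at the page boundary as ata_pio_sector() does.
	 */
	static void pio_sector(unsigned char *page, unsigned int offset,
			       unsigned int count)
	{
		if (offset + count > PAGE_SZ) {
			unsigned int split_len = PAGE_SZ - offset;

			pio_xfer(page, offset, split_len);
			pio_xfer(page + PAGE_SZ, 0, count - split_len);
		} else {
			pio_xfer(page, offset, count);
		}
	}
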
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e898be49df6b..62415fe67a11 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -202,7 +202,7 @@ show_ata_port_##name(struct device *dev, \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
- return scnprintf(buf, 20, format_string, cast ap->field); \
+ return sysfs_emit(buf, format_string, cast ap->field); \
}
#define ata_port_simple_attr(field, name, format_string, type) \
@@ -389,7 +389,7 @@ show_ata_dev_##field(struct device *dev, \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
- return scnprintf(buf, 20, format_string, cast ata_dev->field); \
+ return sysfs_emit(buf, format_string, cast ata_dev->field); \
}
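
Replacing scnprintf(buf, 20, ...) with sysfs_emit() drops the arbitrary 20-byte cap: sysfs show() buffers are a full page, and sysfs_emit() enforces that contract internally. A hypothetical attribute using the same pattern:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* Hypothetical show() using the sysfs_emit() idiom. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
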
#define ata_dev_simple_attr(field, format_string, type) \
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 4b83b517caec..799531218ea2 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -160,8 +160,7 @@ void zpodd_on_suspend(struct ata_device *dev)
return;
}
- expires = zpodd->last_ready +
- msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ expires = zpodd->last_ready + secs_to_jiffies(zpodd_poweroff_delay);
if (time_before(jiffies, expires))
return;
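
The zpodd change swaps the open-coded conversion for the dedicated helper; for in-range delays the two expressions are equivalent,

	   msecs_to_jiffies(zpodd_poweroff_delay * 1000)
	== secs_to_jiffies(zpodd_poweroff_delay)

with the helper dropping the easy-to-miss factor of 1000.
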
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0337be4faec7..e5b977a8d3e1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -44,6 +44,18 @@ static inline bool ata_sstatus_online(u32 sstatus)
return (sstatus & 0xf) == 0x3;
}
+static inline bool ata_dev_is_zac(struct ata_device *dev)
+{
+ /* Host managed device or host aware device */
+ return dev->class == ATA_DEV_ZAC ||
+ ata_id_zoned_cap(dev->id) == 0x01;
+}
+
+static inline bool ata_port_eh_scheduled(struct ata_port *ap)
+{
+ return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
+}
+
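
The two new libata.h helpers centralize checks that were previously open-coded at call sites: zoned capability 0x01 is host aware and ATA_DEV_ZAC covers host managed. Hypothetical callers showing the intended use (queue_allowed and wants_zone_reporting are illustrative names):

	/* Hypothetical callers of the two new helpers. */
	static bool queue_allowed(struct ata_port *ap)
	{
		/* Mirrors the __ata_scsi_queuecmd() check above. */
		return !ata_port_eh_scheduled(ap);
	}

	static bool wants_zone_reporting(struct ata_device *dev)
	{
		return ata_dev_is_zac(dev); /* host aware or host managed */
	}
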
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -90,7 +102,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
@@ -137,13 +148,10 @@ extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
extern int ata_scsi_add_hosts(struct ata_host *host,
const struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
-extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern bool ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_scsi_dev_rescan(struct work_struct *work);
@@ -172,12 +180,9 @@ extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
-extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
-extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+ struct ata_reset_operations *reset_ops);
+extern int ata_eh_recover(struct ata_port *ap,
+ struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_disk);
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index ab38871b5e00..23fff10af2ac 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -216,7 +216,7 @@ static struct ata_port_operations pacpi_ops = {
.mode_filter = pacpi_mode_filter,
.set_piomode = pacpi_set_piomode,
.set_dmamode = pacpi_set_dmamode,
- .prereset = pacpi_pre_reset,
+ .reset.prereset = pacpi_pre_reset,
.port_start = pacpi_port_start,
};
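
The tree-wide move from .prereset = ... to .reset.prereset = ..., together with the new ata_eh_reset()/ata_eh_recover() signatures taking a struct ata_reset_operations *, implies the four reset callbacks now live in an embedded struct. A sketch of that grouping, inferred from the diff; the field order and exact layout are an assumption:

	/* Inferred from the ".reset.*" initializers; layout is an assumption. */
	struct ata_reset_operations {
		ata_prereset_fn_t	prereset;
		ata_reset_fn_t		softreset;
		ata_reset_fn_t		hardreset;
		ata_postreset_fn_t	postreset;
	};
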
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index bb790edd6036..9d5cb9c34c52 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -392,11 +392,11 @@ static struct ata_port_operations ali_20_port_ops = {
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
- .inherits = &ali_dma_base_ops,
- .check_atapi_dma = ali_check_atapi_dma,
- .cable_detect = ali_c2_cable_detect,
- .dev_config = ali_lock_sectors,
- .postreset = ali_c2_c3_postreset,
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+ .dev_config = ali_lock_sectors,
+ .reset.postreset = ali_c2_c3_postreset,
};
/*
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 5b02b89748b7..a2fecadc927d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -394,7 +394,7 @@ static const struct scsi_host_template amd_sht = {
static const struct ata_port_operations amd_base_port_ops = {
.inherits = &ata_bmdma32_port_ops,
- .prereset = amd_pre_reset,
+ .reset.prereset = amd_pre_reset,
};
static struct ata_port_operations amd33_port_ops = {
@@ -429,7 +429,7 @@ static const struct ata_port_operations nv_base_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_ignore,
.mode_filter = nv_mode_filter,
- .prereset = nv_pre_reset,
+ .reset.prereset = nv_pre_reset,
.host_stop = nv_host_stop,
};
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index d0c6924d25b6..514d549286b5 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -964,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove_new = arasan_cf_remove,
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 40544282f455..6160414172a3 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -301,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -310,7 +310,7 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 8c5cc803aab3..4c612f9543f6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -264,7 +264,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
- .prereset = atiixp_prereset,
+ .reset.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index aaef5924f636..308f86f9e2f0 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_reinit_one(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id atp867x_pci_tbl[] = {
+static const struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index b811efd2cc34..73e81e160c91 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2e6eccf2902f..6fe49b303fee 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -243,7 +243,7 @@ static struct ata_port_operations efar_ops = {
.cable_detect = efar_cable_detect,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
- .prereset = efar_pre_reset,
+ .reset.prereset = efar_pre_reset,
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index f3f5b2b0ecc9..b2b9e0058333 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -879,8 +879,8 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = ep93xx_pata_softreset,
- .hardreset = ATA_OP_NULL,
+ .reset.softreset = ep93xx_pata_softreset,
+ .reset.hardreset = ATA_OP_NULL,
.sff_dev_select = ep93xx_pata_dev_select,
.sff_set_devctl = ep93xx_pata_set_devctl,
@@ -1015,7 +1015,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
- .remove_new = ep93xx_pata_remove,
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 18ceefd176df..334c4eea41ec 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -225,8 +225,8 @@ static void pata_falcon_remove_one(struct platform_device *pdev)
static struct platform_driver pata_falcon_driver = {
.probe = pata_falcon_init_one,
- .remove_new = pata_falcon_remove_one,
- .driver = {
+ .remove = pata_falcon_remove_one,
+ .driver = {
.name = "atari-falcon-ide",
},
};
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 73a9a5109238..c3a8384c3e04 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -557,7 +557,7 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove_new = pata_ftide010_remove,
+ .remove = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 94df60ac2307..8602c3889948 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -202,9 +202,9 @@ static void pata_gayle_remove_one(struct platform_device *pdev)
static struct platform_driver pata_gayle_driver = {
.probe = pata_gayle_init_one,
- .remove_new = pata_gayle_remove_one,
- .driver = {
- .name = "amiga-gayle-ide",
+ .remove = pata_gayle_remove_one,
+ .driver = {
+ .name = "amiga-gayle-ide",
},
};
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 5280e9960025..b96e8bd2a3f8 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -322,7 +322,7 @@ static const struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = hpt366_prereset,
+ .reset.prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4af22b819416..07e3a984cbb1 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -543,7 +543,7 @@ static struct ata_port_operations hpt370_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
@@ -567,7 +567,7 @@ static struct ata_port_operations hpt302_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 5b1ecccf3c83..2cc57fcf2c46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -356,7 +356,7 @@ static struct ata_port_operations hpt3xxn_port_ops = {
.cable_detect = hpt3x2n_cable_detect,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
- .prereset = hpt3x2n_pre_reset,
+ .reset.prereset = hpt3x2n_pre_reset,
};
/*
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 61d8760f09d9..70f056e47e6b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -336,7 +336,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
- .postreset = pata_icside_postreset,
+ .reset.postreset = pata_icside_postreset,
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d0aa8fc929b4..b37682b0578f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove_new = pata_imx_remove,
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index b7ac56103c8a..a6f2cfc1602e 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -81,7 +81,7 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The 8213 is a clone so very similar
*/
@@ -238,7 +238,7 @@ static struct ata_port_operations it8213_ops = {
.cable_detect = it8213_cable_detect,
.set_piomode = it8213_set_piomode,
.set_dmamode = it8213_set_dmamode,
- .prereset = it8213_pre_reset,
+ .reset.prereset = it8213_pre_reset,
};
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 8a9ee828478f..80f6a91acf6f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -298,7 +298,7 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.of_match_table = ixp4xx_pata_of_match,
},
.probe = ixp4xx_pata_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index f51fb8219762..b885f33e8980 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -113,7 +113,7 @@ static const struct scsi_host_template jmicron_sht = {
static struct ata_port_operations jmicron_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = jmicron_pre_reset,
+ .reset.prereset = jmicron_pre_reset,
};
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f2f36e55a1f4..9eefdc5df5df 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -758,7 +758,7 @@ static void pata_macio_irq_clear(struct ata_port *ap)
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
- dev_dbg(priv->dev, "Enabling & resetting... \n");
+ dev_dbg(priv->dev, "Enabling & resetting...\n");
if (priv->mediabay)
return;
@@ -812,8 +812,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int pata_macio_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -822,7 +822,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
int rc;
/* First call original */
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (rc)
return rc;
@@ -932,10 +932,10 @@ static const struct scsi_host_template pata_macio_sht = {
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
.max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE,
- .device_configure = pata_macio_device_configure,
+ .sdev_configure = pata_macio_sdev_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
@@ -1298,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
priv->dev = &pdev->dev;
/* Get MMIO regions */
- if (pci_request_regions(pdev, "pata-macio")) {
+ if (pcim_request_all_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 8119caaad605..deab67328388 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -99,7 +99,7 @@ static const struct scsi_host_template marvell_sht = {
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
- .prereset = marvell_pre_reset,
+ .reset.prereset = marvell_pre_reset,
};
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3f9258677915..210a63283f62 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -854,7 +854,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove_new = mpc52xx_ata_remove,
+ .remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 69e4baf27d72..ce310ae7c93a 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -145,7 +145,7 @@ static struct ata_port_operations mpiix_port_ops = {
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
- .prereset = mpiix_pre_reset,
+ .reset.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 44cc24d21d5f..bdb55c1a3280 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -123,7 +123,7 @@ static struct ata_port_operations ns87410_port_ops = {
.qc_issue = ns87410_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87410_set_piomode,
- .prereset = ns87410_pre_reset,
+ .reset.prereset = ns87410_pre_reset,
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 0bb9607e7348..df42ebe98db7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -183,7 +183,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
reg_tim.s.ale = 0;
/* Not used */
reg_tim.s.page = 0;
- /* Time after IORDY to coninue to assert the data */
+ /* Time after IORDY to continue to assert the data */
reg_tim.s.wait = 0;
/* Time to wait to complete the cycle. */
reg_tim.s.pause = pause;
@@ -935,14 +935,13 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+ hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- octeon_cf_ops.softreset = octeon_cf_softreset16;
+ octeon_cf_ops.reset.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 4956f0f5b93f..178b28eff170 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -89,7 +89,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index dca82d92b004..81a7f3eb5654 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -70,7 +70,7 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
@@ -214,7 +214,7 @@ static struct ata_port_operations oldpiix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
- .prereset = oldpiix_pre_reset,
+ .reset.prereset = oldpiix_pre_reset,
};
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 3d23f57eb128..3db1b95d1404 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -156,7 +156,7 @@ static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
- .prereset = opti_pre_reset,
+ .reset.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index dfc36b4ec9c6..b42dba5f4e05 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -322,7 +322,9 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int rc = ata_do_set_mode(link, r_failed);
+ int rc;
+
+ rc = ata_set_mode(link, r_failed);
if (rc == 0) {
pci_read_config_byte(pdev, 0x43, &r);
@@ -344,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.set_mode = optidma_set_mode,
- .prereset = optidma_pre_reset,
+ .reset.prereset = optidma_pre_reset,
};
static struct ata_port_operations optiplus_port_ops = {
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 93ebf566b54e..22bd3ff6b7ae 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -321,8 +321,8 @@ static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
static struct ata_port_operations pata_parport_port_ops = {
.inherits = &ata_sff_port_ops,
- .softreset = pata_parport_softreset,
- .hardreset = NULL,
+ .reset.softreset = pata_parport_softreset,
+ .reset.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_set_devctl = pata_parport_set_devctl,
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 5b602206c522..cf3810933a27 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -46,7 +46,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
struct ata_device *slave = &link->device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
@@ -58,7 +58,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
ata_dev_disable(slave);
}
}
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
}
/**
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6820c5597b14..ae914dcb0c83 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -130,7 +130,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.cable_detect = pdc2027x_cable_detect,
- .prereset = pdc2027x_prereset,
+ .reset.prereset = pdc2027x_prereset,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
@@ -295,7 +295,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/* Set the PIO timing registers using value table for 133MHz */
- ata_port_dbg(ap, "Set pio regs... \n");
+ ata_port_dbg(ap, "Set PIO regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -308,7 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to PIO mode[%u]\n", pio);
}
/**
@@ -341,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- ata_port_dbg(ap, "Set udma regs... \n");
+ ata_port_dbg(ap, "Set UDMA regs...\n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -350,14 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to UDMA mode[%u]\n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- ata_port_dbg(ap, "Set mdma regs... \n");
+ ata_port_dbg(ap, "Set MDMA regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -366,7 +366,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to MDMA mode[%u]\n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
@@ -387,7 +387,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
struct ata_device *dev;
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc < 0)
return rc;
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index ced906bf56be..beb53bd990be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -97,7 +97,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
-static struct pci_device_id ata_tosh[] = {
+static const struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 232c3dad7ee8..87479bc893b2 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -223,7 +223,7 @@ static int pata_platform_probe(struct platform_device *pdev)
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 538bd3423d85..03dbaf4a13a7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
+ if (!ap->ioaddr.cmd_addr)
+ return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
+ if (!ap->ioaddr.ctl_addr)
+ return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
+ if (!ap->ioaddr.bmdma_addr)
+ return -ENOMEM;
/*
* Adjust register offsets
@@ -306,7 +312,7 @@ static void pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove_new = pxa_ata_remove,
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 84b001097093..40ef8072c159 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -45,7 +45,7 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0fa253ad7c93..fd81e75c9402 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -164,7 +164,7 @@ static void rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove_new = rb532_pata_driver_remove,
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 0a9689862f71..6ff4c11e937d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -276,7 +276,7 @@ static struct ata_port_operations rdc_pata_ops = {
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
- .prereset = rdc_pata_prereset,
+ .reset.prereset = rdc_pata_prereset,
};
static const struct ata_port_info rdc_port_info = {
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->private_data = hpriv;
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
@@ -359,8 +359,8 @@ static void rdc_remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id rdc_pci_tbl[] = {
- { PCI_DEVICE(0x17F3, 0x1011), },
- { PCI_DEVICE(0x17F3, 0x1012), },
+ { PCI_VDEVICE(RDC, 0x1011) },
+ { PCI_VDEVICE(RDC, 0x1012) },
{ } /* terminate list */
};
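
The .prereset/.softreset/.hardreset conversions in pata_rdc.c above, and in the drivers below, all follow one pattern: the reset methods moved into embedded structs (reset, and pmp_reset for port-multiplier links) inside ata_port_operations, so only the designated-initializer paths change. A sketch, with illustrative foo_* names:

/* Sketch (foo_* names are illustrative): signatures are unchanged. */
static struct ata_port_operations foo_ops = {
	.inherits            = &ata_bmdma_port_ops,
	.reset.prereset      = foo_prereset,	/* was: .prereset */
	.reset.hardreset     = foo_hardreset,	/* was: .hardreset */
	.pmp_reset.softreset = foo_softreset,	/* was: .pmp_softreset */
};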
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 31de06b66221..2b751e393771 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -552,7 +552,7 @@ static struct ata_port_operations sis_133_for_sata_ops = {
static struct ata_port_operations sis_base_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = sis_pre_reset,
+ .reset.prereset = sis_pre_reset,
};
static struct ata_port_operations sis_133_ops = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 93882e976ede..2d24c6b3e9d9 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -248,7 +248,7 @@ static struct ata_port_operations sl82c105_port_ops = {
.bmdma_stop = sl82c105_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = sl82c105_set_piomode,
- .prereset = sl82c105_pre_reset,
+ .reset.prereset = sl82c105_pre_reset,
.sff_irq_check = sl82c105_sff_irq_check,
};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 26d448a869e2..596e86a031b3 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -170,7 +170,7 @@ static struct ata_port_operations triflex_port_ops = {
.bmdma_stop = triflex_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = triflex_set_piomode,
- .prereset = triflex_prereset,
+ .reset.prereset = triflex_prereset,
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 696b99720dcb..a8c9cf685b4b 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
+
/* Check with ACPI so we can spot BIOS reported SATA bridges */
- if (ata_acpi_init_gtm(ap) &&
- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
+ return ata_acpi_cbl_pata_type(ap);
}
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
@@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}
if (dev->class == ATA_DEV_ATAPI &&
- dmi_check_system(no_atapi_dma_dmi_table)) {
+ (dmi_check_system(no_atapi_dma_dmi_table) ||
+ config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
@@ -452,7 +451,7 @@ static struct ata_port_operations via_port_ops = {
.cable_detect = via_cable_detect,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
- .prereset = via_pre_reset,
+ .reset.prereset = via_pre_reset,
.sff_tf_load = via_tf_load,
.port_start = via_port_start,
.mode_filter = via_mode_filter,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8e6b2599f0d5..17a5a59861c3 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -140,7 +140,7 @@ static struct ata_port_operations adma_ata_ops = {
.freeze = adma_freeze,
.thaw = adma_thaw,
- .prereset = adma_prereset,
+ .reset.prereset = adma_prereset,
.port_start = adma_port_start,
.port_stop = adma_port_stop,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 52f5168e4db5..7a4f59202156 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1097,7 +1097,7 @@ static struct ata_port_operations sata_dwc_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = sata_dwc_error_handler,
- .hardreset = sata_dwc_hardreset,
+ .reset.hardreset = sata_dwc_hardreset,
.qc_issue = sata_dwc_qc_issue,
@@ -1240,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove_new = sata_dwc_remove,
+ .remove = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 01aa05f4c3f5..84da8d6ef28e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1395,9 +1395,9 @@ static struct ata_port_operations sata_fsl_ops = {
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
- .softreset = sata_fsl_softreset,
- .hardreset = sata_fsl_hardreset,
- .pmp_softreset = sata_fsl_softreset,
+ .reset.softreset = sata_fsl_softreset,
+ .reset.hardreset = sata_fsl_hardreset,
+ .pmp_reset.softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
@@ -1589,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove_new = sata_fsl_remove,
+ .remove = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f574e3c3f5b4..530ee26b3012 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,8 +26,6 @@
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
- * @sata0_reset: SATA0 reset handler
- * @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
@@ -38,8 +35,6 @@ struct sata_gemini {
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
- struct reset_control *sata0_reset;
- struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
@@ -224,18 +219,6 @@ void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg,
- unsigned int bridge)
-{
- if (bridge == 0)
- reset_control_reset(sg->sata0_reset);
- else
- reset_control_reset(sg->sata1_reset);
- msleep(10);
- return gemini_sata_setup_bridge(sg, bridge);
-}
-EXPORT_SYMBOL(gemini_sata_reset_bridge);
-
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
@@ -265,21 +248,6 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
- if (IS_ERR(sg->sata0_reset)) {
- dev_err(dev, "no SATA0 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata0_reset);
- }
- sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
- if (IS_ERR(sg->sata1_reset)) {
- dev_err(dev, "no SATA1 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata1_reset);
- }
-
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
@@ -425,7 +393,7 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove_new = gemini_sata_remove,
+ .remove = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index 6f6e691d6007..b6e4a5c86e01 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -17,6 +17,5 @@ bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
#endif
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 63ef7bb073ce..3421039f4bae 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
+ of_node_put(phy_data.np);
return 0;
}
phy_count += 1;
@@ -427,7 +428,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static struct ata_port_operations ahci_highbank_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_highbank_hardreset,
+ .reset.hardreset = ahci_highbank_hardreset,
.transmit_led_message = ecx_transmit_led_message,
};
@@ -614,12 +615,12 @@ static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove_new = ata_platform_remove_one,
- .driver = {
- .name = "highbank-ahci",
- .of_match_table = ahci_of_match,
- .pm = &ahci_highbank_pm_ops,
- },
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "highbank-ahci",
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_highbank_pm_ops,
+ },
.probe = ahci_highbank_probe,
};
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index db9c255dc9f2..46a8c20daf18 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -730,7 +730,7 @@ static struct ata_port_operations inic_port_ops = {
.freeze = inic_freeze,
.thaw = inic_thaw,
- .hardreset = inic_hardreset,
+ .reset.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 05c905827dc5..ffb396f61731 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,8 +672,8 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true,
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations mv5_ops = {
@@ -687,7 +687,7 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
+ .reset.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -709,10 +709,10 @@ static struct ata_port_operations mv6_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .softreset = mv_softreset,
- .pmp_hardreset = mv_pmp_hardreset,
- .pmp_softreset = mv_softreset,
+ .reset.hardreset = mv_hardreset,
+ .reset.softreset = mv_softreset,
+ .pmp_reset.hardreset = mv_pmp_hardreset,
+ .pmp_reset.softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
@@ -4255,7 +4255,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove_new = mv_platform_remove,
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..841e7de2bba6 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,8 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -319,8 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -382,10 +382,10 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .device_configure = nv_adma_device_configure,
+ .sdev_configure = nv_adma_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -393,10 +393,10 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .device_configure = nv_swncq_device_configure,
+ .sdev_configure = nv_swncq_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
@@ -462,7 +462,7 @@ static struct ata_port_operations nv_generic_ops = {
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
- .hardreset = nv_hardreset,
+ .reset.hardreset = nv_hardreset,
};
static struct ata_port_operations nv_nf2_ops = {
@@ -663,8 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev,
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -1871,8 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev,
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2df1a070b25a..2a005aede123 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -188,7 +188,7 @@ static struct ata_port_operations pdc_sata_ops = {
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
- .hardreset = pdc_sata_hardreset,
+ .reset.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
@@ -206,7 +206,7 @@ static struct ata_port_operations pdc_pata_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
- .softreset = pdc_pata_softreset,
+ .reset.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 8a6286159044..cfb9b5b61cd7 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -123,8 +123,8 @@ static struct ata_port_operations qs_ata_ops = {
.freeze = qs_freeze,
.thaw = qs_thaw,
- .prereset = qs_prereset,
- .softreset = ATA_OP_NULL,
+ .reset.prereset = qs_prereset,
+ .reset.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c1469d076880..487eadd4073f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -624,7 +624,7 @@ static struct ata_port_operations sata_rcar_port_ops = {
.freeze = sata_rcar_freeze,
.thaw = sata_rcar_thaw,
- .softreset = sata_rcar_softreset,
+ .reset.softreset = sata_rcar_softreset,
.scr_read = sata_rcar_scr_read,
.scr_write = sata_rcar_scr_write,
@@ -1009,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove_new = sata_rcar_remove,
+ .remove = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a99f66198a9..1b6dc950a42a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -351,7 +351,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
u32 tmp, dev_mode[2] = { };
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc)
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..d642ece9f07a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,10 +378,9 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations sil24_ops = {
@@ -394,10 +393,10 @@ static struct ata_port_operations sil24_ops = {
.freeze = sil24_freeze,
.thaw = sil24_thaw,
- .softreset = sil24_softreset,
- .hardreset = sil24_hardreset,
- .pmp_softreset = sil24_softreset,
- .pmp_hardreset = sil24_pmp_hardreset,
+ .reset.softreset = sil24_softreset,
+ .reset.hardreset = sil24_hardreset,
+ .pmp_reset.softreset = sil24_softreset,
+ .pmp_reset.hardreset = sil24_pmp_hardreset,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.dev_config = sil24_dev_config,
@@ -1317,7 +1316,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
}
pci_set_master(pdev);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index ef8724986de3..b8b6d9eff3b8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
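
The pci_intx() call sites in this series switch to the device-managed variant; the call itself is unchanged apart from the name. A sketch of the difference:

/* Sketch: pcim_intx() registers a devres action so the INTx enable/disable
 * state is restored automatically when the driver unbinds, which plain
 * pci_intx() left to the caller. */
pcim_intx(pdev, 1);	/* was: pci_intx(pdev, 1) */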
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 598a872f6a08..c5d6aa36c9c3 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -340,8 +340,8 @@ static const struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = k2_sata_softreset,
- .hardreset = k2_sata_hardreset,
+ .reset.softreset = k2_sata_softreset,
+ .reset.hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index a482741eb181..0986ebd1eb4e 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -241,7 +241,7 @@ static struct ata_port_operations pdc_20621_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
- .softreset = pdc_softreset,
+ .reset.softreset = pdc_softreset,
.error_handler = pdc_error_handler,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
- &spd0[pdc_i2c_read_data[i].ofs]);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs])) {
+ dev_err(host->dev,
+ "Failed in i2c read at index %d: device=%#x, reg=%#x\n",
+ i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
+ return -EIO;
+ }
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
+ if (size < 0)
+ return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
@@ -1294,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initialization. */
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 60ea45926cd1..44985796cc47 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -67,7 +67,7 @@ static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
- .hardreset = ATA_OP_NULL,
+ .reset.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 57cbf2cef618..68e9003ec2d4 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -25,6 +25,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/string_choices.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "2.6"
@@ -119,7 +120,7 @@ static struct ata_port_operations svia_base_ops = {
static struct ata_port_operations vt6420_sata_ops = {
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
- .prereset = vt6420_prereset,
+ .reset.prereset = vt6420_prereset,
.bmdma_start = vt6420_bmdma_start,
};
@@ -139,7 +140,7 @@ static struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_operations vt8251_ops = {
.inherits = &svia_base_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = vt8251_scr_read,
.scr_write = vt8251_scr_write,
};
@@ -359,7 +360,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
ata_port_info(ap,
"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ str_up_down(online), sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d39b87537168..a53a2dfc1e17 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d4aa0f353b6c..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,7 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (!skb->len) return 0;
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -345,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index cb00f8244e41..4fea1149e003 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2569,7 +2569,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove_new = fore200e_sba_remove,
+ .remove = fore200e_sba_remove,
};
#endif
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index fcd70e094a2e..e6a300203e6c 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -366,8 +366,8 @@ EXPORT_SYMBOL(idt77105_init);
static void __exit idt77105_exit(void)
{
/* turn off timers */
- del_timer_sync(&stats_timer);
- del_timer_sync(&restart_timer);
+ timer_delete_sync(&stats_timer);
+ timer_delete_sync(&restart_timer);
}
module_exit(idt77105_exit);
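
The timer changes in the surrounding ATM drivers are API renames with unchanged semantics: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). A sketch, with an illustrative foo_dev:

/* Sketch (foo_* names are illustrative). */
static void foo_poll(struct timer_list *t)
{
	struct foo_dev *foo = timer_container_of(foo, t, timer);
					/* was: from_timer(foo, t, timer) */
}

static void foo_stop(struct foo_dev *foo)
{
	timer_delete_sync(&foo->timer);	/* was: del_timer_sync(&foo->timer) */
}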
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index a876024d8a05..f2e91b7d79f0 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
+ return -ENOMEM;
error = -EINVAL;
@@ -1531,7 +1533,7 @@ idt77252_tx(struct idt77252_dev *card)
static void
tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = from_timer(card, t, tst_timer);
+ struct idt77252_dev *card = timer_container_of(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, paddr))
+ goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1871,6 +1875,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
+outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -2070,7 +2075,7 @@ idt77252_rate_logindex(struct idt77252_dev *card, int pcr)
static void
idt77252_est_timer(struct timer_list *t)
{
- struct rate_estimator *est = from_timer(est, t, timer);
+ struct rate_estimator *est = timer_container_of(est, t, timer);
struct vc_map *vc = est->vc;
struct idt77252_dev *card = vc->card;
unsigned long flags;
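
The dma_mapping_error() checks added to queue_skb() and add_rx_skb() above follow the standard DMA mapping idiom: a handle returned by dma_map_single() can be invalid and must be validated before use, and an invalid handle must not be unmapped. A minimal sketch, with assumed pdev/skb context:

/* Sketch (pdev and skb are assumed context, not from this patch). */
dma_addr_t paddr = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, paddr))
	return -ENOMEM;		/* invalid handle: do not unmap */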
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d213adcefe33..301e697e22ad 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3283,7 +3283,7 @@ static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);
- del_timer_sync(&ia_timer);
+ timer_delete_sync(&ia_timer);
}
module_init(ia_module_init);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 32d7aa141d96..0dfa2cdc897c 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -755,7 +755,7 @@ static void lanai_shutdown_rx_vci(const struct lanai_vcc *lvcc)
/* Shutdown transmitting on card.
* Unfortunately the lanai needs us to wait until all the data
* drains out of the buffer before we can dealloc it, so this
- * can take awhile -- up to 370ms for a full 128KB buffer
+ * can take a while -- up to 370ms for a full 128KB buffer
* assuming everyone else is quiet. In theory the time is
* boundless if there's a CBR VCC holding things up.
*/
@@ -1758,7 +1758,7 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = from_timer(lanai, t, timer);
+ struct lanai_dev *lanai = timer_container_of(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
@@ -1792,7 +1792,7 @@ static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
static inline void lanai_timed_poll_stop(struct lanai_dev *lanai)
{
- del_timer_sync(&lanai->timer);
+ timer_delete_sync(&lanai->timer);
}
/* -------------------- INTERRUPT SERVICE: */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 27153d6bc781..45952cfea06b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -300,7 +300,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");
- del_timer_sync(&ns_timer);
+ timer_delete_sync(&ns_timer);
pci_unregister_driver(&nicstar_driver);
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 32802ea9521c..7d0fa729c2fe 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -347,7 +347,7 @@ static int suni_stop(struct atm_dev *dev)
for (walk = &sunis; *walk != PRIV(dev);
walk = &PRIV((*walk)->dev)->next);
*walk = PRIV((*walk)->dev)->next;
- if (!sunis) del_timer_sync(&poll_timer);
+ if (!sunis) timer_delete_sync(&poll_timer);
spin_unlock_irqrestore(&sunis_lock,flags);
kfree(PRIV(dev));
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 21545ffba065..bedc6133f970 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -489,7 +489,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
+ depends on FB && I2C && INPUT && BACKLIGHT_CLASS_DEVICE
select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
@@ -503,6 +503,7 @@ config HT16K33
config MAX6959
tristate "Maxim MAX6958/6959 7-segment LED controller"
depends on I2C
+ select BITREVERSE
select REGMAP_I2C
select LINEDISP
help
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 6526aa51fb1d..e1a94ae3eb0c 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -37,11 +37,6 @@ module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
-unsigned int cfag12864b_getrate(void)
-{
- return cfag12864b_rate;
-}
-
/*
* cfag12864b Commands
*
@@ -249,11 +244,6 @@ void cfag12864b_disable(void)
mutex_unlock(&cfag12864b_mutex);
}
-unsigned char cfag12864b_isenabled(void)
-{
- return cfag12864b_updating;
-}
-
static void cfag12864b_update(struct work_struct *work)
{
unsigned char c;
@@ -293,10 +283,8 @@ static void cfag12864b_update(struct work_struct *work)
*/
EXPORT_SYMBOL_GPL(cfag12864b_buffer);
-EXPORT_SYMBOL_GPL(cfag12864b_getrate);
EXPORT_SYMBOL_GPL(cfag12864b_enable);
EXPORT_SYMBOL_GPL(cfag12864b_disable);
-EXPORT_SYMBOL_GPL(cfag12864b_isenabled);
/*
* Is the module inited?
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 2b74dabe7e17..24baf6b2c587 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -108,7 +108,7 @@ static void cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove_new = cfag12864bfb_remove,
+ .remove = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 19b619376d48..09020bb8ad15 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
-struct charlcd *charlcd_alloc(void)
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
+ lcd->drvdata = priv->drvdata;
return lcd;
}
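
With the driver data now allocated as a trailing member behind charlcd_priv, callers pass their private-data size to charlcd_alloc() and read the ready-made lcd->drvdata pointer instead of allocating and wiring it themselves; the hd44780 and lcd2s hunks below follow exactly this shape. A sketch, with illustrative foo_* names:

/* Sketch (foo_* names are illustrative): one allocation covers both the
 * charlcd and the driver's private data. */
struct foo_priv {
	struct i2c_client *client;
};

static int foo_probe(struct i2c_client *client)
{
	struct charlcd *lcd = charlcd_alloc(sizeof(struct foo_priv));
	struct foo_priv *priv;

	if (!lcd)
		return -ENOMEM;
	priv = lcd->drvdata;	/* already points at the trailing space */
	priv->client = client;
	/* ... on any later failure, release with charlcd_free(lcd) ... */
	return 0;
}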
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index 4d4287209d04..d10b89740bca 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -51,7 +51,7 @@ struct charlcd {
unsigned long y;
} addr;
- void *drvdata;
+ void *drvdata; /* Set by charlcd_alloc() */
};
/**
@@ -95,7 +95,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
-struct charlcd *charlcd_alloc(void);
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 025dc6855cb2..cef42656c4b0 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -222,20 +222,17 @@ static int hd44780_probe(struct platform_device *pdev)
return -EINVAL;
}
- hdc = hd44780_common_alloc();
- if (!hdc)
- return -ENOMEM;
-
- lcd = charlcd_alloc();
+ lcd = hd44780_common_alloc();
if (!lcd)
- goto fail1;
+ return -ENOMEM;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
goto fail2;
+ hdc = lcd->drvdata;
hdc->hd44780 = hd;
- lcd->drvdata = hdc;
+
for (i = 0; i < ifwidth; i++) {
hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
GPIOD_OUT_LOW);
@@ -313,9 +310,7 @@ static int hd44780_probe(struct platform_device *pdev)
fail3:
kfree(hd);
fail2:
- kfree(lcd);
-fail1:
- kfree(hdc);
+ hd44780_common_free(lcd);
return ret;
}
@@ -326,9 +321,7 @@ static void hd44780_remove(struct platform_device *pdev)
charlcd_unregister(lcd);
kfree(hdc->hd44780);
- kfree(lcd->drvdata);
-
- kfree(lcd);
+ hd44780_common_free(lcd);
}
static const struct of_device_id hd44780_of_match[] = {
@@ -339,7 +332,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);
static struct platform_driver hd44780_driver = {
.probe = hd44780_probe,
- .remove_new = hd44780_remove,
+ .remove = hd44780_remove,
.driver = {
.name = "hd44780",
.of_match_table = hd44780_of_match,
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 4ef87c3118c0..1792fe2a4460 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -351,20 +351,28 @@ int hd44780_common_redefine_char(struct charlcd *lcd, char *esc)
}
EXPORT_SYMBOL_GPL(hd44780_common_redefine_char);
-struct hd44780_common *hd44780_common_alloc(void)
+struct charlcd *hd44780_common_alloc(void)
{
- struct hd44780_common *hd;
+ struct hd44780_common *hdc;
+ struct charlcd *lcd;
- hd = kzalloc(sizeof(*hd), GFP_KERNEL);
- if (!hd)
+ lcd = charlcd_alloc(sizeof(*hdc));
+ if (!lcd)
return NULL;
- hd->ifwidth = 8;
- hd->bwidth = DEFAULT_LCD_BWIDTH;
- hd->hwidth = DEFAULT_LCD_HWIDTH;
- return hd;
+ hdc = lcd->drvdata;
+ hdc->ifwidth = 8;
+ hdc->bwidth = DEFAULT_LCD_BWIDTH;
+ hdc->hwidth = DEFAULT_LCD_HWIDTH;
+ return lcd;
}
EXPORT_SYMBOL_GPL(hd44780_common_alloc);
+void hd44780_common_free(struct charlcd *lcd)
+{
+ charlcd_free(lcd);
+}
+EXPORT_SYMBOL_GPL(hd44780_common_free);
+
MODULE_DESCRIPTION("Common functions for HD44780 (and compatibles) LCD displays");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/hd44780_common.h b/drivers/auxdisplay/hd44780_common.h
index a16aa8c29c99..4c87f55722b6 100644
--- a/drivers/auxdisplay/hd44780_common.h
+++ b/drivers/auxdisplay/hd44780_common.h
@@ -30,4 +30,6 @@ int hd44780_common_blink(struct charlcd *lcd, enum charlcd_onoff on);
int hd44780_common_fontsize(struct charlcd *lcd, enum charlcd_fontsize size);
int hd44780_common_lines(struct charlcd *lcd, enum charlcd_lines lines);
int hd44780_common_redefine_char(struct charlcd *lcd, char *esc);
-struct hd44780_common *hd44780_common_alloc(void);
+
+struct charlcd *hd44780_common_alloc(void);
+void hd44780_common_free(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a816f9e10255..0b8ba754b343 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -657,7 +657,6 @@ static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv,
static int ht16k33_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- const struct of_device_id *id;
struct ht16k33_priv *priv;
uint32_t dft_brightness;
int err;
@@ -672,9 +671,8 @@ static int ht16k33_probe(struct i2c_client *client)
return -ENOMEM;
priv->client = client;
- id = i2c_of_match_device(dev->driver->of_match_table, client);
- if (id)
- priv->type = (uintptr_t)id->data;
+ priv->type = (uintptr_t)i2c_get_match_data(client);
+
i2c_set_clientdata(client, priv);
err = ht16k33_initialize(priv);
@@ -747,7 +745,9 @@ static void ht16k33_remove(struct i2c_client *client)
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
- { "ht16k33", 0 },
+ { "3108", DISP_QUAD_7SEG },
+ { "3130", DISP_QUAD_14SEG },
+ { "ht16k33", DISP_MATRIX },
{ }
};
MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
@@ -780,5 +780,5 @@ module_i2c_driver(ht16k33_driver);
MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9ba132dc6143..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
@@ -291,11 +289,11 @@ static struct platform_driver img_ascii_lcd_driver = {
.of_match_table = img_ascii_lcd_matches,
},
.probe = img_ascii_lcd_probe,
- .remove_new = img_ascii_lcd_remove,
+ .remove = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);
MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 6422be0dfe20..045dbef49dee 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -298,20 +298,18 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
- lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
- if (!lcd2s)
- return -ENOMEM;
-
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
return err;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(sizeof(*lcd2s));
if (!lcd)
return -ENOMEM;
- lcd->drvdata = lcd2s;
+ lcd->ops = &lcd2s_ops;
+
+ lcd2s = lcd->drvdata;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -326,8 +324,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err)
goto fail1;
- lcd->ops = &lcd2s_ops;
-
err = charlcd_register(lcd2s->charlcd);
if (err)
goto fail1;
@@ -349,7 +345,7 @@ static void lcd2s_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lcd2s_i2c_id[] = {
- { "lcd2s", 0 },
+ { "lcd2s" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index 731ffdfafc4e..8590a4cd21e0 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -40,7 +40,7 @@
*/
static void linedisp_scroll(struct timer_list *t)
{
- struct linedisp *linedisp = from_timer(linedisp, t, timer);
+ struct linedisp *linedisp = timer_container_of(linedisp, t, timer);
unsigned int i, ch = linedisp->scroll_pos;
unsigned int num_chars = linedisp->num_chars;
@@ -84,7 +84,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
char *new_msg;
/* stop the scroll timer */
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
if (count == -1)
count = strlen(msg);
@@ -183,7 +183,7 @@ static ssize_t scroll_step_ms_store(struct device *dev,
linedisp->scroll_rate = msecs_to_jiffies(ms);
if (linedisp->message && linedisp->message_len > linedisp->num_chars) {
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
if (linedisp->scroll_rate)
linedisp_scroll(&linedisp->timer);
}
@@ -376,12 +376,12 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
out_del_dev:
device_del(&linedisp->dev);
out_del_timer:
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
out_put_device:
put_device(&linedisp->dev);
return err;
}
-EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");
/**
* linedisp_unregister - unregister a character line display
@@ -391,10 +391,10 @@ EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
void linedisp_unregister(struct linedisp *linedisp)
{
device_del(&linedisp->dev);
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
put_device(&linedisp->dev);
}
-EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");
MODULE_DESCRIPTION("Character line display core support");
MODULE_LICENSE("GPL");
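
The LINEDISP changes here and in the other auxdisplay modules are the two halves of one conversion: symbol namespaces became string literals, and the exporter and every importer must use the same quoted name. A sketch:

/* Sketch (FOO is an illustrative namespace name). */
EXPORT_SYMBOL_NS_GPL(foo_register, "FOO");	/* exporting module */
MODULE_IMPORT_NS("FOO");			/* each importing module */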
diff --git a/drivers/auxdisplay/max6959.c b/drivers/auxdisplay/max6959.c
index 5519c014bd29..962488197b9e 100644
--- a/drivers/auxdisplay/max6959.c
+++ b/drivers/auxdisplay/max6959.c
@@ -191,4 +191,4 @@ module_i2c_driver(max6959_i2c_driver);
MODULE_DESCRIPTION("MAX6958/6959 7-segment LED controller");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index a731f28455b4..958c0e31e84a 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -831,18 +831,12 @@ static void lcd_init(void)
struct charlcd *charlcd;
struct hd44780_common *hdc;
- hdc = hd44780_common_alloc();
- if (!hdc)
+ charlcd = hd44780_common_alloc();
+ if (!charlcd)
return;
- charlcd = charlcd_alloc();
- if (!charlcd) {
- kfree(hdc);
- return;
- }
-
+ hdc = charlcd->drvdata;
hdc->hd44780 = &lcd;
- charlcd->drvdata = hdc;
/*
* Init lcd struct with load-time values to preserve exact
@@ -1660,11 +1654,11 @@ static void panel_attach(struct parport *port)
err_lcd_unreg:
if (scan_timer.function)
- del_timer_sync(&scan_timer);
+ timer_delete_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1681,7 +1675,7 @@ static void panel_detach(struct parport *port)
return;
}
if (scan_timer.function)
- del_timer_sync(&scan_timer);
+ timer_delete_sync(&scan_timer);
if (keypad.enabled) {
misc_deregister(&keypad_dev);
@@ -1691,8 +1685,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd->drvdata);
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
}
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index 183ab3011cbb..dfb62e9ce9b4 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -36,8 +36,7 @@ static void seg_led_update(struct work_struct *work)
bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
- gpiod_set_array_value_cansleep(priv->segment_gpios->ndescs, priv->segment_gpios->desc,
- priv->segment_gpios->info, values);
+ gpiod_multi_set_value_cansleep(priv->segment_gpios, values);
}
static int seg_led_linedisp_get_map_type(struct linedisp *linedisp)
@@ -97,7 +96,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);
static struct platform_driver seg_led_driver = {
.probe = seg_led_probe,
- .remove_new = seg_led_remove,
+ .remove = seg_led_remove,
.driver = {
.name = "seg-led-gpio",
.of_match_table = seg_led_of_match,
@@ -108,4 +107,4 @@ module_platform_driver(seg_led_driver);
MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
MODULE_DESCRIPTION("7 segment LED driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 064eb52ff7e2..1786d87b29e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -167,6 +167,12 @@ config PM_QOS_KUNIT_TEST
depends on KUNIT=y
default KUNIT_ALL_TESTS
+config PM_RUNTIME_KUNIT_TEST
+ tristate "KUnit Tests for runtime PM" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on PM
+ default KUNIT_ALL_TESTS
+
config HMEM_REPORTING
bool
default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7fb21768ca36..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index e18701676426..c99f2ab105e5 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
{
int nid;
+ /* Check the validity of the memblock/node mapping */
+ if (!memblock_validate_numa_coverage(0))
+ return -EINVAL;
+
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
unsigned long start_pfn, end_pfn;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 75fcb75d5515..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -153,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
per_cpu(arch_freq_scale, i) = scale;
}
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
@@ -206,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
- cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
static int update_topology;
int topology_update_cpu_topology(void)
@@ -293,13 +242,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
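
The ?: fallback introduced above uses the GNU "elvis" operator, which evaluates its left operand once and substitutes the right operand only when the left is zero; an uninitialized (zero) reference frequency thus degrades to a neutral multiplier instead of zeroing the computed capacity. A one-line sketch:

/* Sketch: freq is 1 when capacity_freq_ref was never set for this CPU. */
unsigned long freq = per_cpu(capacity_freq_ref, cpu) ?: 1;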
@@ -366,7 +317,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
-void topology_init_cpu_capacity_cppc(void)
+static inline void topology_init_cpu_capacity_cppc(void)
{
u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
@@ -417,6 +368,10 @@ void topology_init_cpu_capacity_cppc(void)
exit:
free_raw_capacity();
}
+void acpi_processor_init_invariance_cppc(void)
+{
+ topology_init_cpu_capacity_cppc();
+}
#endif
#ifdef CONFIG_CPU_FREQ
@@ -502,6 +457,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logical CPU number of the node.
* There are basically three kinds of return values:
@@ -561,6 +520,8 @@ static int __init parse_core(struct device_node *core, int package_id,
i++;
} while (1);
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
+
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
@@ -673,6 +634,17 @@ static int __init parse_socket(struct device_node *socket)
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+ /*
+ * Reset max_smt_thread_num to 1 on failure: we need to tell the
+ * framework that SMT is not supported, but max_smt_thread_num may
+ * already hold the SMT thread count of the cores that were parsed
+ * successfully.
+ */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 7823888af4f6..04bdbff4dbe5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -92,7 +92,7 @@
* Auxiliary devices are created and registered by a subsystem-level core
* device that needs to break up its functionality into smaller fragments. One
* way to extend the scope of an auxiliary_device is to encapsulate it within a
- * domain- pecific structure defined by the parent device. This structure
+ * domain-specific structure defined by the parent device. This structure
* contains the auxiliary_device and any associated shared data/callbacks
* needed to establish the connection with the parent.
*
@@ -156,22 +156,33 @@
* },
* .ops = my_custom_ops,
* };
+ *
+ * Please note that such a custom ops approach is valid, but it is hard to
+ * implement correctly without per-device global locks to protect against
+ * auxiliary_drv removal during calls to those ops. In addition, such an
+ * implementation lacks proper module dependencies, which causes load/unload
+ * races between the auxiliary parent and device modules.
+ *
+ * The easiest way to provide these ops reliably, without needing a lock, is
+ * to EXPORT_SYMBOL*() them and rely on the existing module infrastructure
+ * for validity and correct dependency chains.
*/
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
- for (; id->name[0]; id++) {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int match_size;
+ const char *auxdev_name = dev_name(&auxdev->dev);
+ const char *p = strrchr(auxdev_name, '.');
+ int match_size;
- if (!p)
- continue;
- match_size = p - dev_name(&auxdev->dev);
+ if (!p)
+ return NULL;
+ match_size = p - auxdev_name;
+ for (; id->name[0]; id++) {
/* match against the dev_name(&auxdev->dev) prefix before the last '.' char */
if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ !strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
@@ -207,17 +218,14 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
- ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret)
- dev_pm_domain_detach(dev, true);
-
- return ret;
+ return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
@@ -227,7 +235,6 @@ static void auxiliary_bus_remove(struct device *dev)
if (auxdrv->remove)
auxdrv->remove(auxdev);
- dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
@@ -336,35 +343,6 @@ int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
EXPORT_SYMBOL_GPL(__auxiliary_device_add);
/**
- * auxiliary_find_device - auxiliary device iterator for locating a particular device.
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This function returns a reference to a device that is 'found'
- * for later use, as determined by the @match callback.
- *
- * The reference returned should be released with put_device().
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- device_match_t match)
-{
- struct device *dev;
-
- dev = bus_find_device(&auxiliary_bus_type, start, data, match);
- if (!dev)
- return NULL;
-
- return to_auxiliary_dev(dev);
-}
-EXPORT_SYMBOL_GPL(auxiliary_find_device);
-
-/**
* __auxiliary_driver_register - register a driver for auxiliary bus devices
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
@@ -414,6 +392,116 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+static void auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ of_node_put(dev->of_node);
+ kfree(auxdev);
+}
+
+/**
+ * auxiliary_device_create - create a device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL);
+ if (!auxdev)
+ return NULL;
+
+ auxdev->id = id;
+ auxdev->name = devname;
+ auxdev->dev.parent = dev;
+ auxdev->dev.platform_data = platform_data;
+ auxdev->dev.release = auxiliary_device_release;
+ device_set_of_node_from_dev(&auxdev->dev, dev);
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret) {
+ of_node_put(auxdev->dev.of_node);
+ kfree(auxdev);
+ return NULL;
+ }
+
+ ret = __auxiliary_device_add(auxdev, modname);
+ if (ret) {
+ /*
+ * It may look odd but auxdev should not be freed here.
+ * auxiliary_device_uninit() calls put_device(), which calls
+ * the device release function, freeing auxdev.
+ */
+ auxiliary_device_uninit(auxdev);
+ return NULL;
+ }
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_create);
+
+/**
+ * auxiliary_device_destroy - remove an auxiliary device
+ * @auxdev: pointer to the auxdev to be removed
+ *
+ * Helper to remove an auxiliary device created with
+ * auxiliary_device_create()
+ */
+void auxiliary_device_destroy(void *auxdev)
+{
+ struct auxiliary_device *_auxdev = auxdev;
+
+ auxiliary_device_delete(_auxdev);
+ auxiliary_device_uninit(_auxdev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_destroy);
+
+/**
+ * __devm_auxiliary_device_create - create a managed device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Device managed helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id);
+ if (!auxdev)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, auxiliary_device_destroy,
+ auxdev);
+ if (ret)
+ return NULL;
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create);
+
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
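
A hedged sketch of how a parent driver might use the helpers added above; the my_parent names are invented and error handling is reduced to the essentials. auxiliary_device_create() cleans up after itself on failure, so the caller never frees the auxdev directly, and teardown goes through auxiliary_device_destroy() exactly as the devm action in __devm_auxiliary_device_create() does:

#include <linux/auxiliary_bus.h>
#include <linux/device.h>

/* Hypothetical parent driver state. */
struct my_parent {
	struct device *dev;
	struct auxiliary_device *rpc_adev;
};

static int my_parent_spawn(struct my_parent *par, void *pdata)
{
	/* Matches the auxiliary driver named "my_parent_mod.rpc", id 0. */
	par->rpc_adev = auxiliary_device_create(par->dev, "my_parent_mod",
						"rpc", pdata, 0);
	if (!par->rpc_adev)
		return -ENODEV;
	return 0;
}

static void my_parent_despawn(struct my_parent *par)
{
	/* Delete and uninit; the release callback frees the auxdev. */
	auxiliary_device_destroy(par->rpc_adev);
}
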
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 8cf04a557bdb..86fa7fbb3548 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -137,6 +138,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
@@ -179,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+ /*
+ * The majority of (all?) read accesses to dev->driver happen either
+ * while holding the device lock or in bus/driver code that is only
+ * invoked when the device is bound to a driver, so there is no
+ * concern about the pointer changing while it is being read.
+ * However, when reading a device's uevent file we read the driver
+ * pointer without taking the device lock (so we do not block there
+ * for an arbitrary amount of time). We use WRITE_ONCE() here to
+ * prevent tearing so that READ_ONCE() can safely be used in the
+ * uevent code.
+ */
+ // FIXME - this cast should not be needed "soon"
+ WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
@@ -230,9 +248,18 @@ void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
+bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
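
The device_set_driver() helper added above relies on the WRITE_ONCE()/READ_ONCE() pairing to publish a pointer that lockless readers may observe. A reduced sketch of that pattern, with a hypothetical widget type standing in for struct device_driver; note the pairing only prevents load/store tearing, object lifetime still needs a lock or refcount, as the uevent path in core.c below shows:

#include <linux/compiler.h>

struct widget {
	const char *name;
};

static struct widget *current_widget;	/* shared; read without a lock */

static void publish_widget(struct widget *w)
{
	/* Single aligned store: readers never see a torn pointer. */
	WRITE_ONCE(current_widget, w);
}

static const char *widget_name(void)
{
	/* Snapshot once; the pointer may concurrently become NULL. */
	struct widget *w = READ_ONCE(current_widget);

	return w ? w->name : "(none)";
}
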
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 657c93c38b0d..5e75e1bce551 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
@@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
@@ -1288,7 +1291,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register);
* @groups: default attributes for the root device
*
* All 'virtual' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subystem. The root device can carry subsystem-wide
+ * with the name of the subsystem. The root device can carry subsystem-wide
* attributes. All registered devices are below this single root device.
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
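
With the signature change above, callbacks for bus_for_each_dev() use the shared device_iter_t type, i.e. int (*)(struct device *, void *). A small sketch; the counting use case is invented:

#include <linux/device.h>
#include <linux/device/bus.h>

static int count_bound(struct device *dev, void *data)
{
	int *n = data;

	if (dev->driver)
		(*n)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int count_bound_devices(const struct bus_type *bus)
{
	int n = 0;

	/* start == NULL: iterate every device on @bus from the beginning. */
	bus_for_each_dev(bus, NULL, &n, count_bound);
	return n;
}
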
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 7a7609298e18..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -58,7 +59,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
- if (!cache_leaves(cpu))
+ if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
return of_property_read_bool(np, "cache-unified");
}
+static bool match_cache_node(struct device_node *cpu,
+ const struct device_node *cache_node)
+{
+ struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+ while (cache) {
+ if (cache == cache_node) {
+ of_node_put(cache);
+ return true;
+ }
+
+ prev = cache;
+ cache = of_find_next_cache_node(cache);
+ of_node_put(prev);
+ }
+
+ return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x) (_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+ struct device_node *cache_node)
+{
+ struct device_node *cpu;
+ u32 min_id = ~0;
+
+ for_each_of_cpu_node(cpu) {
+ u64 id = of_get_cpu_hwid(cpu, 0);
+
+ id = arch_compact_of_hwid(id);
+ if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+ of_node_put(cpu);
+ return;
+ }
+
+ if (match_cache_node(cpu, cache_node))
+ min_id = min(min_id, id);
+ }
+
+ if (min_id != ~0) {
+ this_leaf->id = min_id;
+ this_leaf->attributes |= CACHE_ID;
+ }
+}
+
static void cache_of_set_props(struct cacheinfo *this_leaf,
struct device_node *np)
{
@@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
cache_get_line_size(this_leaf, np);
cache_nr_sets(this_leaf, np);
cache_associativity(this_leaf);
+ cache_of_set_id(this_leaf, np);
}
static int cache_setup_of_node(unsigned int cpu)
@@ -254,11 +304,11 @@ static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
++leaves;
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
++leaves;
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
++leaves;
if (!leaves) {
@@ -367,9 +417,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
- struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
- if (i == cpu || !sib_cpu_ci->info_list)
+ if (i == cpu || !per_cpu_cacheinfo(i))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
@@ -409,10 +457,7 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci =
- get_cpu_cacheinfo(sibling);
-
- if (sibling == cpu || !sib_cpu_ci->info_list)
+ if (sibling == cpu || !per_cpu_cacheinfo(sibling))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
@@ -463,11 +508,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
{
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
@@ -539,7 +582,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
- if (cache_leaves(cpu) <= early_leaves)
+ /*
+ * Some architectures (e.g., x86) do not use early initialization.
+ * Allocate memory now in such case.
+ */
+ if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
return 0;
kfree(per_cpu_cacheinfo(cpu));
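
The rule cache_of_set_id() implements can be restated compactly: the cache ID is the smallest hardware ID among the CPUs that share the cache, and any hwid that does not fit in 32 bits disqualifies the scheme entirely. A standalone sketch of just that selection logic, with invented inputs:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-CPU inputs: hwid plus the cache node the CPU shares. */
struct cpu_info {
	uint64_t hwid;
	int cache;
};

static bool pick_cache_id(const struct cpu_info *cpus, int ncpus,
			  int cache, uint32_t *id)
{
	uint32_t min_id = ~0u;
	int i;

	for (i = 0; i < ncpus; i++) {
		/* Any hwid with the upper 32 bits set disables cache IDs. */
		if (cpus[i].hwid >> 32)
			return false;
		if (cpus[i].cache == cache && cpus[i].hwid < min_id)
			min_id = (uint32_t)cpus[i].hwid;
	}
	if (min_id == ~0u)
		return false;	/* no CPU shares this cache */
	*id = min_id;
	return true;
}
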
diff --git a/drivers/base/class.c b/drivers/base/class.c
index cb5359235c70..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -395,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -405,7 +412,7 @@ int class_for_each_device(const struct class *class, const struct device *start,
if (!class)
return -EINVAL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return -EINVAL;
}
@@ -453,7 +460,7 @@ struct device *class_find_device(const struct class *class, const struct device
if (!class)
return NULL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return NULL;
}
@@ -594,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -626,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
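
The memset() and NULL-sp checks added above make the class iterator tolerate a class that was never registered: class_dev_iter_next() simply yields NULL. A usage sketch, with an invented loop body:

#include <linux/device.h>
#include <linux/device/class.h>

static void walk_class(const struct class *cls)
{
	struct class_dev_iter iter;
	struct device *dev;

	/* Safe even if @cls is unregistered: next() returns NULL. */
	class_dev_iter_init(&iter, cls, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "member of class %s\n", cls->name);
	class_dev_iter_exit(&iter);
}
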
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 741497324d78..024ad9471b8a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -87,17 +87,17 @@ static int component_devices_show(struct seq_file *s, void *data)
size_t i;
mutex_lock(&component_mutex);
- seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
- seq_printf(s, "%-40s %20s\n\n",
+ seq_printf(s, "%-50s %20s\n", "aggregate_device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n\n",
dev_name(m->parent), m->bound ? "bound" : "not bound");
- seq_printf(s, "%-40s %20s\n", "device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n", "device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n",
+ seq_printf(s, "%-50s %20s\n",
component ? dev_name(component->dev) : "(unknown)",
component ? (component->bound ? "bound" : "not bound") : "not registered");
}
@@ -569,10 +569,28 @@ void component_master_del(struct device *parent,
}
EXPORT_SYMBOL_GPL(component_master_del);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *adev;
+
+ guard(mutex)(&component_mutex);
+ adev = __aggregate_find(parent, ops);
+ if (!adev)
+		return false;
+
+ return adev->bound;
+}
+EXPORT_SYMBOL_GPL(component_master_is_bound);
+
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
- WARN_ON(!component->bound);
+ if (WARN_ON(!component->bound))
+ return;
+
+ dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
+ dev_name(component->dev), component, component->ops);
if (component->ops && component->ops->unbind)
component->ops->unbind(component->dev, adev->parent, data);
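
A sketch of how an aggregate driver might consult the new component_master_is_bound() helper; the ops symbol and the caller are hypothetical, and registration via component_master_add_with_match() is assumed to have happened elsewhere:

#include <linux/component.h>
#include <linux/device.h>

extern const struct component_master_ops my_aggregate_ops;

static int my_runtime_op(struct device *parent)
{
	/*
	 * Bail out while the aggregate device is not bound yet, so we
	 * never touch state that only exists after the bind callback.
	 */
	if (!component_master_is_bound(parent, &my_aggregate_ops))
		return -EAGAIN;

	/* ... safe to use resources set up in my_aggregate_ops.bind() ... */
	return 0;
}
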
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a4c853411a6b..3c533dab8fa5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,7 +26,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
-#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
@@ -288,7 +287,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
-static inline bool device_link_flag_is_sync_state_only(u32 flags)
+bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
@@ -461,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
struct device_link *link = to_devlink(dev);
const char *output;
- if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
output = "supplier unbind";
- else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
output = "consumer unbind";
else
output = "never";
@@ -477,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
@@ -486,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n",
- !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
@@ -553,7 +551,7 @@ void device_link_wait_removal(void)
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);
-static struct class devlink_class = {
+static const struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
@@ -793,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
- if (link->flags & DL_FLAG_INFERRED &&
+ if (device_link_test(link, DL_FLAG_INFERRED) &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
@@ -808,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
- !(link->flags & DL_FLAG_STATELESS)) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+ !device_link_test(link, DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
@@ -824,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
@@ -832,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
- if (!(link->flags & DL_FLAG_MANAGED)) {
+ if (!device_link_test(link, DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
@@ -941,7 +939,7 @@ static void __device_link_del(struct kref *kref)
static void device_link_put_kref(struct device_link *link)
{
- if (link->flags & DL_FLAG_STATELESS)
+ if (device_link_test(link, DL_FLAG_STATELESS))
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
@@ -1005,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1073,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
- !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
@@ -1129,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@@ -1269,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -1330,7 +1328,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1346,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
- if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
driver_deferred_probe_add(link->consumer);
}
@@ -1358,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1370,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
@@ -1422,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
device_link_drop_managed(link);
continue;
}
@@ -1437,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1462,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1499,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
@@ -1511,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
- link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1545,7 +1543,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1587,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
+ device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
continue;
status = link->status;
@@ -1744,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
static void fw_devlink_relax_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_INFERRED))
+ if (!device_link_test(link, DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
@@ -1780,7 +1778,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
- if (!(link->flags & DL_FLAG_MANAGED) ||
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
@@ -1882,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
-
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
@@ -1972,7 +1968,7 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
- * @con: Potential consumer device.
+ * @con_handle: Potential consumer device fwnode.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_lock and device link lock held.
@@ -1990,10 +1986,10 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
-static bool __fw_devlink_relax_cycles(struct device *con,
+static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
struct fwnode_handle *sup_handle)
{
- struct device *sup_dev = NULL, *par_dev = NULL;
+ struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
@@ -2010,22 +2006,22 @@ static bool __fw_devlink_relax_cycles(struct device *con,
sup_handle->flags |= FWNODE_FLAG_VISITED;
- sup_dev = get_dev_from_fwnode(sup_handle);
-
/* Termination condition. */
- if (sup_dev == con) {
+ if (sup_handle == con_handle) {
pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
+ sup_dev = get_dev_from_fwnode(sup_handle);
+ con_dev = get_dev_from_fwnode(con_handle);
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
- con->links.status == DL_DEV_NO_DRIVER) {
+ con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
@@ -2034,7 +2030,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
continue;
- if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
@@ -2049,7 +2045,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true;
@@ -2064,10 +2060,10 @@ static bool __fw_devlink_relax_cycles(struct device *con,
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
- !(dev_link->flags & DL_FLAG_CYCLE))
+ !device_link_test(dev_link, DL_FLAG_CYCLE))
continue;
- if (__fw_devlink_relax_cycles(con,
+ if (__fw_devlink_relax_cycles(con_handle,
dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
@@ -2080,6 +2076,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
@@ -2115,11 +2112,6 @@ static int fw_devlink_create_devlink(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
return 0;
- if (con->fwnode == link->consumer)
- flags = fw_devlink_get_flags(link->flags);
- else
- flags = FW_DEVLINK_FLAGS_PERMISSIVE;
-
/*
* In some cases, a device P might also be a supplier to its child node
* C. However, this would defer the probe of C until the probe of P
@@ -2140,25 +2132,23 @@ static int fw_devlink_create_devlink(struct device *con,
return -EINVAL;
/*
- * SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So, one might expect that cycle detection isn't necessary for them.
- * However, if the device link was marked as SYNC_STATE_ONLY because
- * it's part of a cycle, then we still need to do cycle detection. This
- * is because the consumer and supplier might be part of multiple cycles
- * and we need to detect all those cycles.
+ * Don't try to optimize by not calling the cycle detection logic under
+ * certain conditions. There's always some corner case that won't get
+ * detected.
*/
- if (!device_link_flag_is_sync_state_only(flags) ||
- flags & DL_FLAG_CYCLE) {
- device_links_write_lock();
- if (__fw_devlink_relax_cycles(con, sup_handle)) {
- __fwnode_link_cycle(link);
- flags = fw_devlink_get_flags(link->flags);
- pr_debug("----- cycle: end -----\n");
- dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
- sup_handle);
- }
- device_links_write_unlock();
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
+ __fwnode_link_cycle(link);
+ pr_debug("----- cycle: end -----\n");
+ pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
+ link->consumer, sup_handle);
}
+ device_links_write_unlock();
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
@@ -2181,8 +2171,8 @@ static int fw_devlink_create_devlink(struct device *con,
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
- dev_err(con, "Failed to create device link (0x%x) with %s\n",
- flags, dev_name(sup_dev));
+ dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
+ flags, dev_name(sup_dev), link->consumer);
ret = -EINVAL;
}
@@ -2631,10 +2621,38 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
+/*
+ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe, at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information that is delivered through uevents is best-effort, it may
+ * become obsolete as soon as it is generated anyways). Unbinding is more
+ * risky as driver pointer is transitioning to NULL, so READ_ONCE() should
+ * be used to make sure we are dealing with the same pointer, and to
+ * ensure that driver structure is not going to disappear from under us
+ * we take bus' drivers klist lock. The assumption that only registered
+ * driver can be bound to a device, and to unregister a driver bus code
+ * will take the same lock.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (sp) {
+ scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+ struct device_driver *drv = READ_ONCE(dev->driver);
+ if (drv)
+ add_uevent_var(env, "DRIVER=%s", drv->name);
+ }
+
+ subsys_put(sp);
+ }
+}
+
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
- struct device_driver *driver;
int retval = 0;
/* add device node properties if present */
@@ -2663,12 +2681,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- /* Synchronize with module_remove_driver() */
- rcu_read_lock();
- driver = READ_ONCE(dev->driver);
- if (driver)
- add_uevent_var(env, "DRIVER=%s", driver->name);
- rcu_read_unlock();
+ /* Add "DRIVER=%s" variable if the device is bound to a driver */
+ dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
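
The dev_driver_uevent() addition above combines READ_ONCE() with the bus's driver-list spinlock taken through scoped_guard(). A reduced sketch of that guard pattern, with a hypothetical lock and string in place of the subsys_private machinery:

#include <linux/cleanup.h>
#include <linux/compiler.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static const char *my_name;	/* cleared under my_lock on teardown */

static void my_report(struct kobj_uevent_env *env)
{
	/* The lock is dropped automatically when the scope ends. */
	scoped_guard(spinlock, &my_lock) {
		const char *name = READ_ONCE(my_name);

		if (name)
			add_uevent_var(env, "DRIVER=%s", name);
	}
}
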
@@ -3709,7 +3723,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
@@ -3980,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -3990,7 +4004,7 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4010,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4020,7 +4034,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4038,87 +4052,77 @@ int device_for_each_child_reverse(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
- * device_find_child - device iterator for locating a particular device.
- * @parent: parent struct device
- * @match: Callback function to check device
- * @data: Data to pass to match function
- *
- * This is similar to the device_for_each_child() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
+ * device_for_each_child_reverse_from - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @from: optional starting point in child list
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
*
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero and a reference to the
- * current device can be obtained, this function will return to the caller
- * and not iterate over any more devices.
+ * Iterate over @parent's child devices, starting at @from, and call @fn
+ * for each, passing it @data. This helper is identical to
+ * device_for_each_child_reverse() when @from is NULL.
*
- * NOTE: you will need to drop the reference with put_device() after use.
+ * @fn's return value is checked after each iteration. If it returns
+ * anything other than 0, iteration stops and that value is returned to
+ * the caller of device_for_each_child_reverse_from().
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
+ int error = 0;
if (!parent || !parent->p)
- return NULL;
+ return 0;
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
- break;
+ klist_iter_init_node(&parent->p->klist_children, &i,
+ (from ? &from->p->knode_parent : NULL));
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
klist_iter_exit(&i);
- return child;
+ return error;
}
-EXPORT_SYMBOL_GPL(device_find_child);
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
- * device_find_child_by_name - device iterator for locating a child device.
+ * device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @name: name of the child device
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
*
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
+EXPORT_SYMBOL_GPL(device_find_child);
int __init devices_init(void)
{
@@ -4980,6 +4984,49 @@ define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
+ const char *fmt, va_list vargsp)
+{
+ struct va_format vaf;
+ va_list vargs;
+
+ /*
+ * On x86_64 and possibly on other architectures, va_list is actually a
+ * size-1 array containing a structure. As a result, function parameter
+ * vargsp decays from T[1] to T*, and &vargsp has type T** rather than
+ * T(*)[1], which is expected by its assignment to vaf.va below.
+ *
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then using a pointer to that local copy
+ * instead, which is the approach employed here.
+ */
+ va_copy(vargs, vargsp);
+
+ vaf.fmt = fmt;
+ vaf.va = &vargs;
+
+ switch (err) {
+ case -EPROBE_DEFER:
+ device_set_deferred_probe_reason(dev, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+
+ case -ENOMEM:
+ /* Don't print anything on -ENOMEM, there's already enough output */
+ break;
+
+ default:
+ /* Log fatal final failures as errors, otherwise produce warnings */
+ if (fatal)
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ else
+ dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+ }
+
+ va_end(vargs);
+}
+
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
@@ -4992,7 +5039,7 @@ define_dev_printk_level(_dev_info, KERN_INFO);
* -EPROBE_DEFER and propagate error upwards.
* In case of -EPROBE_DEFER it sets also defer probe reason, which can be
* checked later by reading devices_deferred debugfs attribute.
- * It replaces code sequence::
+ * It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
@@ -5004,47 +5051,77 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
- * Using this helper in your probe function is totally fine even if @err is
- * known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
- * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
- * instead of "-35") and the fact that the error code is returned which allows
- * more compact error paths.
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
*
* Returns @err.
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
- struct va_format vaf;
- va_list args;
+ va_list vargs;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ va_start(vargs, fmt);
- switch (err) {
- case -EPROBE_DEFER:
- device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- break;
+ /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, true, fmt, vargs);
- case -ENOMEM:
- /*
- * We don't print anything on -ENOMEM, there is already enough
- * output.
- */
- break;
+ va_end(vargs);
- default:
- dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- break;
- }
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_err_probe);
- va_end(args);
+/**
+ * dev_warn_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements a common pattern present in probe functions for
+ * error checking: print a debug or warning message depending on whether the
+ * error value is -EPROBE_DEFER, and propagate the error upwards.
+ * In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
+ * checked later by reading the devices_deferred debugfs attribute.
+ * It replaces the following code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_warn(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_warn_probe(dev, err, ...);
+ *
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_warn() is the standardized format
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
+ *
+ * Returns @err.
+ */
+int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ va_list vargs;
+
+ va_start(vargs, fmt);
+
+ /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, false, fmt, vargs);
+
+ va_end(vargs);
return err;
}
-EXPORT_SYMBOL_GPL(dev_err_probe);
+EXPORT_SYMBOL_GPL(dev_warn_probe);
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
@@ -5118,6 +5195,67 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
+ * device_remove_of_node - Remove an of_node from a device
+ * @dev: device whose device tree node is being removed
+ */
+void device_remove_of_node(struct device *dev)
+{
+ dev = get_device(dev);
+ if (!dev)
+ return;
+
+ if (!dev->of_node)
+ goto end;
+
+ if (dev->fwnode == of_fwnode_handle(dev->of_node))
+ dev->fwnode = NULL;
+
+ of_node_put(dev->of_node);
+ dev->of_node = NULL;
+
+end:
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(device_remove_of_node);
+
+/**
+ * device_add_of_node - Add an of_node to an existing device
+ * @dev: device whose device tree node is being added
+ * @of_node: of_node to add
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int device_add_of_node(struct device *dev, struct device_node *of_node)
+{
+ int ret;
+
+ if (!of_node)
+ return -EINVAL;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ if (dev->of_node) {
+ dev_err(dev, "Cannot replace node %pOF with %pOF\n",
+ dev->of_node, of_node);
+ ret = -EBUSY;
+ goto end;
+ }
+
+ dev->of_node = of_node_get(of_node);
+
+ if (!dev->fwnode)
+ dev->fwnode = of_fwnode_handle(of_node);
+
+ ret = 0;
+end:
+ put_device(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_add_of_node);
+
+/**
* device_set_of_node_from_dev - reuse device-tree node of another device
* @dev: device whose device-tree node is being set
* @dev2: device whose device-tree node is being reused
@@ -5140,21 +5278,52 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+/**
+ * get_dev_from_fwnode - Obtain a counted reference to the struct device the
+ * struct fwnode_handle is associated with.
+ * @fwnode: The pointer to the struct fwnode_handle whose associated struct
+ * device reference to obtain.
+ *
+ * This function takes a reference on the device that the device pointer
+ * embedded in the struct fwnode_handle points to.
+ *
+ * Note that the struct device pointer embedded in struct fwnode_handle does
+ * *not* itself hold a reference on the struct device.
+ *
+ * Hence, it is a UAF (and thus a bug) to call this function if the caller can't
+ * guarantee that the last reference count of the corresponding struct device is
+ * not dropped concurrently.
+ *
+ * This is possible since struct fwnode_handle has its own reference count and
+ * hence can outlive the struct device it is associated with.
+ */
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
+{
+ return get_device((fwnode)->dev);
+}
+EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
+
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5166,13 +5335,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
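
Putting the reworked probe-logging helpers to use: dev_err_probe() for fatal errors and the new dev_warn_probe() for non-fatal ones, both logging the error symbolically and passing it through. A hedged probe-path sketch; the driver and clock names are invented:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		/* Fatal: logged via dev_err() (or dev_dbg() on -EPROBE_DEFER). */
		return dev_err_probe(dev, PTR_ERR(clk), "no core clock\n");

	clk = devm_clk_get_optional(dev, "debug");
	if (IS_ERR(clk)) {
		/* Non-fatal: same format, logged via dev_warn() instead. */
		dev_warn_probe(dev, PTR_ERR(clk),
			       "no debug clock, tracing disabled\n");
		clk = NULL;
	}

	return 0;
}
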
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fdaa24bb641a..fa0a2eef93ac 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -325,7 +325,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -599,6 +599,11 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -614,6 +619,11 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -630,6 +640,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
+ &dev_attr_indirect_target_selection.attr,
+ &dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0e4b4aba885..13ab98e033ea 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/slab.h>
@@ -550,8 +551,9 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
+ dev_pm_domain_detach(dev, dev->power.detach_power_off);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
@@ -629,8 +631,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
}
re_probe:
- // FIXME - this cast should not be needed "soon"
- dev->driver = (struct device_driver *)drv;
+ device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1015,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
ret = 0;
}
} else {
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index c795edad1b96..37faf6156d7c 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -41,7 +41,7 @@ struct devcd_entry {
* devcd_data_write()
* mod_delayed_work()
* try_to_grab_pending()
- * del_timer()
+ * timer_delete()
* debug_assert_init()
* INIT_DELAYED_WORK()
* schedule_delayed_work()
@@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk)
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -132,14 +132,10 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
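__BIN_ATTR() produces the same initializer that was previously open-coded, with 0600 standing in for S_IRUSR | S_IWUSR. Roughly, per include/linux/sysfs.h:

#define __BIN_ATTR(_name, _mode, _read, _write, _size) {		\
	.attr = { .name = __stringify(_name), .mode = _mode },		\
	.read	= _read,						\
	.write	= _write,						\
	.size	= _size,						\
}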
@@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
+ * In the above diagram, it looks like disabled_store() would be racing with a concurrently
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * is called after kfree of devcd memory after dropping its last reference with
 * put_device(). However, this will not happen, as fn(dev, data) runs
 * with its own reference to the device via klist_node, so it is not its last
 * reference. So, the above situation would not occur.
@@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2152eec0c135..c948c88d3956 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -576,7 +576,10 @@ void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devres_open_group);
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
+/*
+ * Find devres group with ID @id. If @id is NULL, look for the latest open
+ * group.
+ */
static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -687,6 +690,13 @@ int devres_release_group(struct device *dev, void *id)
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
+ } else if (list_empty(&dev->devres_head)) {
+ /*
+ * dev is probably dying via devres_release_all(): groups
+	 * have already been removed and are in the process of
+ * being released - don't touch and don't warn.
+ */
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
@@ -749,26 +759,50 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
}
EXPORT_SYMBOL_GPL(__devm_add_action);
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
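A short usage sketch for the new query helper (the names my_hw_disable()/my_enable_hw() are illustrative, not from this patch):

static void my_hw_disable(void *data)
{
	/* undo whatever the probe path enabled */
}

static int my_enable_hw(struct device *dev, void *priv)
{
	int ret;

	ret = devm_add_action_or_reset(dev, my_hw_disable, priv);
	if (ret)
		return ret;

	/* true until the action fires or is removed */
	if (devm_is_action_added(dev, my_hw_disable, priv))
		dev_dbg(dev, "cleanup action armed\n");

	return 0;
}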
+
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * entry was found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no entry was found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
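With the WARN_ON() dropped here, the old behaviour presumably moves to a thin header-side wrapper; a sketch, assuming the wrapper keeps the original name:

static inline void devm_remove_action(struct device *dev,
				      void (*action)(void *), void *data)
{
	WARN_ON(devm_remove_action_nowarn(dev, action, data));
}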
/**
* devm_release_action() - release previously added custom action
@@ -953,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc);
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
- size_t size;
- char *buf;
-
if (!s)
return NULL;
- size = strlen(s) + 1;
- buf = devm_kmalloc(dev, size, gfp);
- if (buf)
- memcpy(buf, s, size);
- return buf;
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
@@ -1090,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: source address if it is in .rodata or the return value of kmemdup()
+ * to which the function falls back otherwise.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
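A hypothetical usage sketch for the conditional duplication ("defaults" and my_init_params() are illustrative names):

static const u32 defaults[] = { 100, 200, 300 };

static int my_init_params(struct device *dev)
{
	const u32 *params;

	/* No allocation happens here when "defaults" sits in kernel
	 * .rodata: the source pointer is returned as-is. A mutable
	 * source would be duplicated via devm_kmemdup() and freed
	 * automatically on driver detach. */
	params = devm_kmemdup_const(dev, defaults, sizeof(defaults),
				    GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	return 0;
}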
+
struct pages_devres {
unsigned long addr;
unsigned int order;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..9d4e46ad8352 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
- return err;
+ end_creating_path(&path, dentry);
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -208,10 +222,10 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -232,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -242,21 +256,16 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
+ end_removing_path(&parent, dentry);
return err;
}
@@ -285,7 +294,7 @@ static int delete_path(const char *nodepath)
return err;
}
-static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+static int dev_mynode(struct device *dev, struct inode *inode)
{
/* did we create it */
if (inode->i_private != &thread)
@@ -293,13 +302,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
/* does the dev_t match */
if (is_blockdev(dev)) {
- if (!S_ISBLK(stat->mode))
+ if (!S_ISBLK(inode->i_mode))
return 0;
} else {
- if (!S_ISCHR(stat->mode))
+ if (!S_ISCHR(inode->i_mode))
return 0;
}
- if (stat->rdev != dev->devt)
+ if (inode->i_rdev != dev->devt)
return 0;
/* ours */
@@ -310,44 +319,36 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct inode *inode;
int deleted = 0;
- int err;
+ int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ inode = d_inode(dentry);
+ if (dev_mynode(dev, inode)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = inode->i_mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
@@ -443,6 +444,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +482,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b4eb5b89c4ee..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
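The restructured loop takes the reference unconditionally once a match is found, rather than folding get_device() into the loop condition. A hypothetical caller-side sketch (my_name_match()/my_find_by_name() are illustrative names):

static int my_name_match(struct device *dev, const void *data)
{
	return sysfs_streq(dev_name(dev), data);
}

static struct device *my_find_by_name(const struct device_driver *drv,
				      const char *name)
{
	/* on success the device is returned with a reference held;
	 * drop it with put_device() when done */
	return driver_find_device(drv, NULL, name, my_name_match);
}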
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..21dd02124231
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to have to deal with a bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+ const struct attribute_group **groups;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret;
+
+ if (faux_ops && faux_ops->probe) {
+ ret = faux_ops->probe(faux_dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Add groups after the probe succeeds to ensure resources are
+ * initialized correctly
+ */
+ ret = device_add_groups(dev, faux_obj->groups);
+ if (ret && faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ device_remove_groups(dev, faux_obj->groups);
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device for
+ * the caller to do something with at the proper time given the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time. If the
+ * probe callback (if one is present) does NOT succeed, the creation of the
+ * device will fail and NULL will be returned.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks and groups so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+ faux_obj->groups = groups;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev_set_name(dev, "%s", name);
+ device_set_pm_not_required(dev);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ /*
+	 * Verify that we did bind the driver to the device (i.e. probe worked);
+	 * if not, fail the creation, since it is almost impossible for the
+	 * caller to determine whether probe succeeded.
+ */
+ if (!dev->driver) {
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
+ faux_device_destroy(faux_dev);
+ faux_dev = NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device for
+ * the caller to do something with at the proper time given the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+	struct device *dev;
+
+	if (!faux_dev)
+		return;
+
+	/* compute the pointer only after the NULL check */
+	dev = &faux_dev->dev;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
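A hypothetical usage sketch for the new API (my_probe/my_faux are illustrative names, not part of this file):

static int my_probe(struct faux_device *faux_dev)
{
	dev_info(&faux_dev->dev, "faux device bound\n");
	return 0;	/* a nonzero return makes creation fail */
}

static const struct faux_device_ops my_ops = {
	.probe	= my_probe,
};

static struct faux_device *my_faux;

static int __init my_init(void)
{
	my_faux = faux_device_create("my_faux", NULL, &my_ops);
	return my_faux ? 0 : -ENODEV;
}

static void __exit my_exit(void)
{
	faux_device_destroy(my_faux);
}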
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index a03701674265..752b9a9bea03 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,8 +3,7 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
- select CRYPTO_HASH if FW_LOADER_DEBUG
- select CRYPTO_SHA256 if FW_LOADER_DEBUG
+ select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -28,7 +27,6 @@ config FW_LOADER
config FW_LOADER_DEBUG
bool "Log filenames and checksums for loaded firmware"
- depends on CRYPTO = FW_LOADER || CRYPTO=y
depends on DYNAMIC_DEBUG
depends on FW_LOADER
default FW_LOADER
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
index a065c3150897..d36befebb1b9 100644
--- a/drivers/base/firmware_loader/builtin/main.c
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -61,7 +61,7 @@ bool firmware_request_builtin(struct firmware *fw, const char *name)
return false;
}
-EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");
/**
* firmware_request_builtin_buf() - load builtin firmware into optional buffer
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 8432ab2c3b3c..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -22,10 +22,10 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -56,13 +56,13 @@ int register_firmware_config_sysctl(void)
return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
void unregister_firmware_config_sysctl(void)
{
unregister_sysctl_table(firmware_config_sysct_table_header);
firmware_config_sysct_table_header = NULL;
}
-EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 324a9a3c087a..6942c62fa59d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -806,42 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw)
}
#if defined(CONFIG_FW_LOADER_DEBUG)
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
- struct shash_desc *shash;
- struct crypto_shash *alg;
- u8 *sha256buf;
- char *outbuf;
+ u8 digest[SHA256_DIGEST_SIZE];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg))
- return;
-
- sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
- outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
- shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
- if (!sha256buf || !outbuf || !shash)
- goto out_free;
-
- shash->tfm = alg;
-
- if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_shash;
-
- for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
- sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
- outbuf[SHA256_BLOCK_SIZE] = 0;
- dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-
-out_shash:
- crypto_free_shash(alg);
-out_free:
- kfree(shash);
- kfree(outbuf);
- kfree(sha256buf);
+ sha256(fw->data, fw->size, digest);
+ dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n",
+ name, SHA256_DIGEST_SIZE, digest);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
@@ -849,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
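The library one-shot used above has this shape (include/crypto/sha2.h):
void sha256(const u8 *data, size_t len, u8 *out);
A minimal caller, for illustration:

static void example_log_hash(struct device *dev, const u8 *buf, size_t len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(buf, len, digest);
	dev_dbg(dev, "sha256: %*phN\n", SHA256_DIGEST_SIZE, digest);
}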
-/*
- * Reject firmware file names with ".." path components.
- * There are drivers that construct firmware file names from device-supplied
- * strings, and we don't want some device to be able to tell us "I would like to
- * be sent my firmware from ../../../etc/shadow, please".
- *
- * Search for ".." surrounded by either '/' or start/end of string.
- *
- * This intentionally only looks at the firmware name, not at the firmware base
- * directory or at symlink contents.
- */
-static bool name_contains_dotdot(const char *name)
-{
- size_t name_len = strlen(name);
-
- return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
- strstr(name, "/../") != NULL ||
- (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
-}
-
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -889,6 +842,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+
+ /*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from
+ * device-supplied strings, and we don't want some device to be
+ * able to tell us "I would like to be sent my firmware from
+ * ../../../etc/shadow, please".
+ *
+ * This intentionally only looks at the firmware name, not at
+ * the firmware base directory or at symlink contents.
+ */
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
@@ -1075,8 +1039,8 @@ EXPORT_SYMBOL_GPL(firmware_request_platform);
/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
- * @name: name of firmware file
* @device: device for which firmware should be cached for
+ * @name: name of firmware file
*
* There are some devices with an optimization that enables the device to not
* require loading firmware on system reboot. This optimization may still
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..add0b9b75edd 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,7 +356,7 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
@@ -374,7 +374,7 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 2060add8ef81..1cabea544a40 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -6,7 +6,7 @@
#include "firmware.h"
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+MODULE_IMPORT_NS("FIRMWARE_LOADER_PRIVATE");
extern struct firmware_fallback_config fw_fallback_config;
extern struct device_attribute dev_attr_loading;
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 67858eeb92ed..6d84a02cfa5d 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,6 +22,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
+#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str)
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
-static int sections_per_block;
-
-static inline unsigned long memory_block_id(unsigned long section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static inline unsigned long pfn_to_block_id(unsigned long pfn)
-{
- return memory_block_id(pfn_to_section_nr(pfn));
-}
-
-static inline unsigned long phys_to_block_id(unsigned long phys)
-{
- return pfn_to_block_id(PFN_DOWN(phys));
-}
+int sections_per_block;
+EXPORT_SYMBOL(sections_per_block);
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -110,6 +97,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggestion for the maximum block size; must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. This value can only decrease after initialization, as the intent is
+ * to identify the largest supported alignment for all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not pow2 aligned
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes(). This interface is
+ * intended for use by arch-init when initializing the hotplug block size.
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -455,7 +493,7 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_group *group = mem->group;
struct zone *default_zone;
int nid = mem->nid;
- int len = 0;
+ int len;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -466,22 +504,18 @@ static ssize_t valid_zones_show(struct device *dev,
* If !mem->zone, the memory block spans multiple zones and
* cannot get offlined.
*/
- default_zone = mem->zone;
- if (!default_zone)
- return sysfs_emit(buf, "%s\n", "none");
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- goto out;
+ return sysfs_emit(buf, "%s\n",
+ mem->zone ? mem->zone->name : "none");
}
default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
start_pfn, nr_pages);
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len = sysfs_emit(buf, "%s", default_zone->name);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
-out:
len += sysfs_emit_at(buf, len, "\n");
return len;
}
@@ -512,7 +546,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -524,7 +558,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
@@ -636,7 +670,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
*
* Called under device_hotplug_lock.
*/
-static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
@@ -735,21 +769,22 @@ static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
#ifdef CONFIG_NUMA
/**
- * memory_block_add_nid() - Indicate that system RAM falling into this memory
- * block device (partially) belongs to the given node.
+ * memory_block_add_nid_early() - Indicate that early system RAM falling into
+ * this memory block device (partially) belongs
+ * to the given node.
* @mem: The memory block device.
* @nid: The node id.
- * @context: The memory initialization context.
*
- * Indicate that system RAM falling into this memory block (partially) belongs
- * to the given node. If the context indicates ("early") that we are adding the
- * node during node device subsystem initialization, this will also properly
- * set/adjust mem->zone based on the zone ranges of the given node.
+ * Indicate that early system RAM falling into this memory block (partially)
+ * belongs to the given node. This will also properly set/adjust mem->zone based
+ * on the zone ranges of the given node.
+ *
+ * Memory hotplug handles this on memory block creation, where we can only have
+ * a single nid span a memory block.
*/
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context)
+void memory_block_add_nid_early(struct memory_block *mem, int nid)
{
- if (context == MEMINIT_EARLY && mem->nid != nid) {
+ if (mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
@@ -763,19 +798,18 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
}
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node. If we span multiple nodes (not applicable
- * to hotplugged memory), zone == NULL will prohibit memory offlining
- * and consequently unplug.
- */
- mem->nid = nid;
}
#endif
-static int add_memory_block(unsigned long block_id, unsigned long state,
+static int add_memory_block(unsigned long block_id, int nid, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
@@ -793,7 +827,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- mem->nid = NUMA_NO_NODE;
+ mem->nid = nid;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
@@ -820,29 +854,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int __init add_boot_memory_block(unsigned long base_section_nr)
-{
- int section_count = 0;
- unsigned long nr;
-
- for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
- nr++)
- if (present_section_nr(nr))
- section_count++;
-
- if (section_count == 0)
- return 0;
- return add_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, NULL, NULL);
-}
-
-static int add_hotplug_memory_block(unsigned long block_id,
- struct vmem_altmap *altmap,
- struct memory_group *group)
-{
- return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
-}
-
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
@@ -868,7 +879,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -882,7 +893,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, altmap, group);
+ ret = add_memory_block(block_id, nid, MEM_OFFLINE, altmap, group);
if (ret)
break;
}
@@ -962,7 +973,7 @@ static const struct attribute_group *memory_root_attr_groups[] = {
void __init memory_dev_init(void)
{
int ret;
- unsigned long block_sz, nr;
+ unsigned long block_sz, block_id, nr;
/* Validate the configured memory block size */
block_sz = memory_block_size_bytes();
@@ -975,15 +986,23 @@ void __init memory_dev_init(void)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
- * Create entries for memory sections that were found
- * during boot and have been initialized
+ * Create entries for memory sections that were found during boot
+ * and have been initialized. Use @block_id to track the last
+ * handled block and initialize it to an invalid value (ULONG_MAX)
+ * to bypass the block ID matching check for the first present
+ * block so that it can be covered.
*/
- for (nr = 0; nr <= __highest_present_section_nr;
- nr += sections_per_block) {
- ret = add_boot_memory_block(nr);
- if (ret)
- panic("%s() failed to add memory block: %d\n", __func__,
- ret);
+ block_id = ULONG_MAX;
+ for_each_present_section_nr(0, nr) {
+ if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
+ continue;
+
+ block_id = memory_block_id(nr);
+ ret = add_memory_block(block_id, NUMA_NO_NODE, MEM_ONLINE, NULL, NULL);
+ if (ret) {
+ panic("%s() failed to add memory block: %d\n",
+ __func__, ret);
+ }
}
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index c4eaa1158d54..218aaa096455 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/rcupdate.h>
#include "base.h"
static char *make_driver_name(const struct device_driver *drv)
@@ -43,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
+ /* Lookup or create built-in module entry in /sys/modules */
+ mk = lookup_or_create_module_kobject(drv->mod_name);
+ if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
+ /* lookup_or_create_module_kobject took a reference */
+ kobject_put(&mk->kobj);
}
}
@@ -102,9 +98,6 @@ void module_remove_driver(const struct device_driver *drv)
if (!drv)
return;
- /* Synchronize with dev_uevent() */
- synchronize_rcu();
-
sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eb72580288e6..83aeb0518e1d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -20,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
static const struct bus_type node_subsys = {
.name = "node",
@@ -27,7 +29,7 @@ static const struct bus_type node_subsys = {
};
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -45,10 +47,10 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -66,7 +68,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -110,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = {
NULL,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+static BLOCKING_NOTIFIER_HEAD(node_chain);
+
+int register_node_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&node_chain, nb);
+}
+EXPORT_SYMBOL(register_node_notifier);
+
+void unregister_node_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&node_chain, nb);
+}
+EXPORT_SYMBOL(unregister_node_notifier);
+
+int node_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&node_chain, val, v);
+}
+#endif
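A hypothetical consumer sketch for the new notifier chain (the callback body is illustrative):

static int my_node_callback(struct notifier_block *nb,
			    unsigned long action, void *arg)
{
	/* react to node events delivered via node_notify() */
	return NOTIFY_OK;
}

static struct notifier_block my_node_nb = {
	.notifier_call = my_node_callback,
};

static int __init my_node_setup(void)
{
	return register_node_notifier(&my_node_nb);
}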
+
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
@@ -214,10 +237,56 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
+ * node_update_perf_attrs - Update the performance values for given access class
+ * @nid: Node identifier to be updated
+ * @coord: Heterogeneous memory performance coordinates
+ * @access: The access class for the given attributes
+ */
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct node_access_nodes *access_node;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(access_node, &node->access_list, list_node) {
+ if (access_node->access != access)
+ continue;
+
+ access_node->coord = *coord;
+ for (i = 0; access_attrs[i]; i++) {
+ sysfs_notify(&access_node->dev.kobj,
+ NULL, access_attrs[i]->name);
+ }
+ break;
+ }
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access != ACCESS_COORDINATE_CPU)
+ return;
+
+ if (mempolicy_set_node_perf(nid, coord))
+ pr_info("failed to set mempolicy attrs for node %d\n", nid);
+}
+EXPORT_SYMBOL_GPL(node_update_perf_attrs);
+
+/**
* struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
* @node: List element for tracking in the node
@@ -244,12 +313,14 @@ CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
+CACHE_ATTR(address_mode, "%#x")
static struct attribute *cache_attrs[] = {
&dev_attr_indexing.attr,
&dev_attr_size.attr,
&dev_attr_line_size.attr,
&dev_attr_write_policy.attr,
+ &dev_attr_address_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache);
@@ -466,8 +537,8 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, 0UL,
+ nid, 0UL,
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
@@ -578,7 +649,7 @@ static struct attribute *node_dev_attrs[] = {
NULL
};
-static struct bin_attribute *node_dev_bin_attrs[] = {
+static const struct bin_attribute *node_dev_bin_attrs[] = {
&bin_attr_cpumap,
&bin_attr_cpulist,
NULL
@@ -586,7 +657,7 @@ static struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs = node_dev_bin_attrs
+ .bin_attrs = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
@@ -626,6 +697,7 @@ static int register_node(struct node *node, int num)
} else {
hugetlb_register_node(node);
compaction_register_node(node);
+ reclaim_register_node(node);
}
return error;
@@ -642,6 +714,7 @@ void unregister_node(struct node *node)
{
hugetlb_unregister_node(node);
compaction_unregister_node(node);
+ reclaim_unregister_node(node);
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -745,23 +818,11 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __ref get_nid_for_pfn(unsigned long pfn)
-{
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- if (system_state < SYSTEM_RUNNING)
- return early_pfn_to_nid(pfn);
-#endif
- return pfn_to_nid(pfn);
-}
-
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk,
- enum meminit_context context)
+ struct memory_block *mem_blk)
{
int ret;
- memory_block_add_nid(mem_blk, nid, context);
-
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -780,46 +841,6 @@ static void do_register_memory_block_under_node(int nid,
ret);
}
-/* register memory section under specified node if it spans that node */
-static int register_mem_block_under_node_early(struct memory_block *mem_blk,
- void *arg)
-{
- unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
- unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int nid = *(int *)arg;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
- int page_nid;
-
- /*
- * memory block could have several absent sections from start.
- * skip pfn range from absent section
- */
- if (!pfn_in_present_section(pfn)) {
- pfn = round_down(pfn + PAGES_PER_SECTION,
- PAGES_PER_SECTION) - 1;
- continue;
- }
-
- /*
- * We need to check if page belongs to nid only at the boot
- * case because node's ranges can be interleaved.
- */
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
-
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
- return 0;
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
@@ -829,7 +850,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ do_register_memory_block_under_node(nid, mem_blk);
return 0;
}
@@ -848,24 +869,45 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+/* register all memory blocks under the corresponding nodes */
+static void register_memory_blocks_under_nodes(void)
{
- walk_memory_blocks_func_t func;
+ struct memblock_region *r;
- if (context == MEMINIT_HOTPLUG)
- func = register_mem_block_under_node_hotplug;
- else
- func = register_mem_block_under_node_early;
+ for_each_mem_region(r) {
+ const unsigned long start_block_id = phys_to_block_id(r->base);
+ const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
+ const int nid = memblock_get_region_node(r);
+ unsigned long block_id;
+ if (!node_online(nid))
+ continue;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ struct memory_block *mem;
+
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ memory_block_add_nid_early(mem, nid);
+ do_register_memory_block_under_node(nid, mem);
+ put_device(&mem->dev);
+ }
+
+ }
+}
+
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
- (void *)&nid, func);
+ (void *)&nid, register_mem_block_under_node_hotplug);
return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int __register_one_node(int nid)
+int register_one_node(int nid)
{
int error;
int cpu;
@@ -879,6 +921,10 @@ int __register_one_node(int nid)
node_devices[nid] = node;
error = register_node(node_devices[nid], nid);
+ if (error) {
+ node_devices[nid] = NULL;
+ return error;
+ }
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -969,11 +1015,13 @@ void __init node_dev_init(void)
/*
* Create all node devices, which will properly link the node
- * to applicable memory block devices and already created cpu devices.
+ * to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_one_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
+
+ register_memory_blocks_under_nodes();
}
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..a5539e294d4d 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -7,19 +7,18 @@
#include <linux/acpi.h>
#include <linux/sysfs.h>
+#include <linux/string_choices.h>
#include "physical_location.h"
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
@@ -118,7 +117,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->dock ? "yes" : "no");
+ str_yes_no(dev->physical_location->dock));
}
static DEVICE_ATTR_RO(dock);
@@ -126,7 +125,7 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->lid ? "yes" : "no");
+ str_yes_no(dev->physical_location->lid));
}
static DEVICE_ATTR_RO(lid);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0e60dd650b5e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
void platform_device_msi_free_irqs_all(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+ msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
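With the domain removal added above, the alloc/free pair is now fully symmetric. A hypothetical driver-side pairing (my_write_msg() is an illustrative irq_write_msi_msg_t callback):

static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program the device's MSI address/data registers */
}

static int my_setup_irqs(struct device *dev, unsigned int nvec)
{
	int ret = platform_device_msi_init_and_alloc_irqs(dev, nvec,
							  my_write_msg);
	if (ret)
		return ret;

	/* ... request_irq() on the allocated vectors ... */
	return 0;
}

static void my_teardown_irqs(struct device *dev)
{
	/* frees the vectors and now also removes the per-device domain */
	platform_device_msi_free_irqs_all(dev);
}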
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6f2a33722c52..09450349cf32 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -982,7 +982,7 @@ struct platform_device * __init_or_module __platform_create_bundle(
struct platform_device *pdev;
int error;
- pdev = platform_device_alloc(driver->driver.name, -1);
+ pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
if (!pdev) {
error = -ENOMEM;
goto err_out;
@@ -1396,15 +1396,13 @@ static int platform_probe(struct device *_dev)
if (ret < 0)
return ret;
- ret = dev_pm_domain_attach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
goto out;
- if (drv->probe) {
+ if (drv->probe)
ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
- }
out:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -1422,7 +1420,6 @@ static void platform_remove(struct device *_dev)
if (drv->remove)
drv->remove(dev);
- dev_pm_domain_detach(_dev, true);
}
static void platform_shutdown(struct device *_dev)
@@ -1440,7 +1437,7 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
- struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
@@ -1451,7 +1448,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- if (ret || drv->driver_managed_dma)
+ /* @dev->driver may not be valid when we're called from the IOMMU layer */
+ if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- pm_clk_list_lock(psd);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- pm_clk_list_unlock(psd);
- return;
-
- remove:
- list_del(&ce->node);
- if (ce->enabled_when_prepared)
- psd->clock_op_might_sleep--;
- pm_clk_list_unlock(psd);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index cca2fd0a1aed..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -82,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: Bitmask of PD_FLAG_* values indicating whether to power the device
+ * on during attach and off during detach.
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
@@ -99,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
{
int ret;
if (dev->pm_domain)
return 0;
- ret = acpi_dev_pm_attach(dev, power_on);
+ ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
if (!ret)
ret = genpd_dev_pm_attach(dev);
+ if (dev->pm_domain)
+ dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
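As a usage sketch of the flags-based interface, mirroring the platform bus conversion earlier in this diff (the foo_* names are hypothetical):

#include <linux/device.h>
#include <linux/pm_domain.h>

/*
 * Hypothetical bus probe path: power the device on while attaching it
 * to its PM domain, and record that it should be powered off when the
 * domain is eventually detached (PD_FLAG_DETACH_POWER_OFF).
 */
static int foo_bus_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
					PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		return ret;

	return 0; /* bind the driver here */
}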
@@ -222,13 +226,15 @@ int dev_pm_domain_attach_list(struct device *dev,
if (!pds)
return -ENOMEM;
- size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links);
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
if (!pds->pd_devs) {
ret = -ENOMEM;
goto free_pds;
}
pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
link_flags |= DL_FLAG_RPM_ACTIVE;
@@ -244,6 +250,19 @@ int dev_pm_domain_attach_list(struct device *dev,
goto err_attach;
}
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
if (link_flags) {
struct device_link *link;
@@ -264,9 +283,11 @@ int dev_pm_domain_attach_list(struct device *dev,
return num_pds;
err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
dev_pm_domain_detach(pd_dev, true);
err_attach:
while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
if (pds->pd_links[i])
device_link_del(pds->pd_links[i]);
dev_pm_domain_detach(pds->pd_devs[i], true);
@@ -361,6 +382,7 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
return;
for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
if (list->pd_links[i])
device_link_del(list->pd_links[i]);
dev_pm_domain_detach(list->pd_devs[i], true);
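For the new PD_FLAG_REQUIRED_OPP handling, a consumer-side sketch; the device and domain names are made up, while the attach-data structure is the existing multi-PM-domain API:

#include <linux/kernel.h>
#include <linux/pm_domain.h>

static const char * const bar_pd_names[] = { "perf", "mx" };

/*
 * Hypothetical consumer: attach both domains and let the core tie each
 * attached virtual device to the corresponding required-OPP entry via
 * dev_pm_opp_set_config(), as done in the hunk above.
 */
static int bar_attach_domains(struct device *dev,
			      struct dev_pm_domain_list **pds)
{
	struct dev_pm_domain_attach_data pd_data = {
		.pd_names = bar_pd_names,
		.num_pd_names = ARRAY_SIZE(bar_pd_names),
		.pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
	};

	return dev_pm_domain_attach_list(dev, &pd_data, pds);
}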
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..6502720bb564 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
@@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..e83503bdc1fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -63,8 +59,23 @@ static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - Check if the system is recovering from a hibernation error.
+ *
+ * Used to query whether dev_pm_ops.thaw() is being called for the normal
+ * hibernation case or while recovering from an error.
+ *
+ * Return: true for the error case, false for the normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
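A hedged sketch of how a driver might consume this, assuming the declaration lands in linux/suspend.h; the foo_* driver and its restore helper are hypothetical:

#include <linux/pm.h>
#include <linux/suspend.h>

/* Hypothetical helper: bring the hardware back up from scratch. */
static int foo_full_restore(struct device *dev)
{
	return 0;
}

/*
 * Hypothetical ->thaw() callback: a normal thaw only needs the device
 * functional enough to write out the hibernation image, while the
 * recovery path must restore full operation.
 */
static int foo_thaw(struct device *dev)
{
	if (pm_hibernate_is_recovering())
		return foo_full_restore(dev);

	return 0;
}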
+
static const char *pm_verb(int event)
{
switch (event) {
@@ -249,7 +260,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -266,8 +277,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
@@ -323,8 +335,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -496,6 +509,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -511,12 +525,24 @@ struct dpm_watchdog {
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
+
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -530,10 +556,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -545,8 +572,8 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
- del_timer_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_delete_sync(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -583,29 +610,96 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
+static bool __dpm_async(struct device *dev, async_func_t func)
+{
+ if (dev->power.work_in_progress)
+ return true;
+
+ if (!is_async(dev))
+ return false;
+
+ dev->power.work_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+
+ return false;
+}
+
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- reinit_completion(&dev->power.completion);
+ guard(mutex)(&async_wip_mtx);
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
+ return __dpm_async(dev, func);
+}
- get_device(dev);
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
- put_device(dev);
- }
+ return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
/*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
+ * Prevent racing with dpm_clear_async_state() during initial list
+ * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+ * dpm_resume().
*/
- dev->power.async_in_progress = false;
- return false;
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ */
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
+static bool dpm_root_device(struct device *dev)
+{
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * suppliers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
}
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -628,8 +722,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend was aborted in the noirq
+ * phase before the noirq suspend callback was invoked for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend as well, in
+ * case its early resume depends on the noirq resume that has
+ * not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -642,12 +748,12 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.smart_suspend flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
if (skip_resume)
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
if (dev->pm_domain) {
@@ -685,10 +791,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -712,17 +820,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
- dpm_async_fn(dev, async_resume_noirq);
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
+ }
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -737,7 +848,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -758,6 +869,8 @@ void dpm_resume_noirq(pm_message_t state)
device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -821,10 +934,12 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -852,17 +967,20 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
- dpm_async_fn(dev, async_resume_early);
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
+ }
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -877,7 +995,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -894,6 +1012,8 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
@@ -913,8 +1033,20 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
+ dev->power.is_suspended = false;
+
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -931,9 +1063,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -971,9 +1100,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
End:
error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -983,10 +1110,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1010,7 +1139,6 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
pm_transition = state;
async_error = 0;
@@ -1018,17 +1146,20 @@ void dpm_resume(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
- dpm_async_fn(dev, async_resume);
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
+ }
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1043,7 +1174,7 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
@@ -1093,6 +1224,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1108,7 +1241,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1147,6 +1279,7 @@ void dpm_complete(pm_message_t state)
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
+ pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1154,6 +1287,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
+}
+
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return false;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1185,12 +1394,14 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1200,7 +1411,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1211,7 +1422,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
@@ -1244,7 +1455,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
@@ -1254,14 +1465,13 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
- * Skipping the resume of devices that were in use right before the
- * system suspend (as indicated by their PM-runtime usage counters)
- * would be suboptimal. Also resume them if doing that is not allowed
- * to be skipped.
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
*/
- if (atomic_read(&dev->power.usage_count) > 1 ||
- !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
- dev->power.may_skip_resume))
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)
@@ -1270,7 +1480,11 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
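A driver-side sketch of what the may_skip_resume logic above keys off; the driver is hypothetical, but both DPM_FLAG_* values are existing driver PM flags:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * Hypothetical probe: allow the PM core to skip this device's system
 * suspend callbacks when it is already runtime-suspended, and to leave
 * it suspended across resume when nothing depends on it being active.
 */
static int foo_pm_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
	pm_runtime_enable(dev);

	return 0;
}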
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1284,7 +1498,8 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1293,8 +1508,18 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
@@ -1305,22 +1530,28 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state, false);
+ device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1365,6 +1596,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1373,7 +1606,7 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1382,15 +1615,19 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
__pm_runtime_disable(dev, false);
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1424,7 +1661,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
@@ -1437,7 +1674,11 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1455,7 +1696,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1466,8 +1708,18 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
@@ -1478,22 +1730,28 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state, false);
+ device_suspend_late(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error) {
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
@@ -1565,7 +1823,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1574,13 +1832,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1592,7 +1852,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
@@ -1612,7 +1872,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1628,6 +1888,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
}
@@ -1695,14 +1956,18 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend);
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1720,7 +1985,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1733,8 +1999,18 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
@@ -1745,22 +2021,28 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state, false);
+ device_suspend(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -1769,6 +2051,46 @@ int dpm_suspend(pm_message_t state)
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ dev_for_each_link_to_supplier(link, dev) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
+
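On the driver side, the outcome of this check is observable through dev_pm_skip_suspend() (see the updated helper at the end of this file); a minimal sketch, with a hypothetical foo_power_down():

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper: put the hardware into its low-power state. */
static int foo_power_down(struct device *dev)
{
	return 0;
}

/*
 * Hypothetical ->suspend_late() callback: with "smart suspend" in
 * effect and the device already runtime-suspended, there is nothing
 * left to do here.
 */
static int foo_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	return foo_power_down(dev);
}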
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1780,6 +2102,7 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
/*
@@ -1789,6 +2112,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
@@ -1823,6 +2153,13 @@ unlock:
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1830,11 +2167,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
@@ -1849,7 +2187,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
@@ -1916,8 +2253,10 @@ int dpm_suspend_start(pm_message_t state)
error = dpm_prepare(state);
if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- else
+ else {
+ pm_restrict_gfp_mask();
error = dpm_suspend(state);
+ }
dpm_show_time(starttime, state, error, "start");
return error;
@@ -1998,6 +2337,5 @@ void device_pm_check_callbacks(struct device *dev)
bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index bd77f6734f14..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
/**
* apply_constraint - Add/modify/remove device PM QoS request.
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..477feca804c7
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev)); /* no wakeup needed */
+ pm_runtime_put(dev);
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_barrier(dev)); /* resume was pending */
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..1b11a3cd4acc 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
@@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -448,8 +460,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
/**
@@ -475,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -725,21 +751,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend
+ * and the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
@@ -776,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -1003,7 +1028,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1183,10 +1208,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
@@ -1208,7 +1235,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
- } else if (ign_usage_count) {
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
@@ -1241,10 +1269,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
+ * ignoring children and its active child count is nonzero. 1 is returned in
+ * this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0 and it is either ignoring children or its active child count
+ * is 0), 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * the usage counter of @dev is not updated either.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
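A typical caller pattern for this helper, sketched with hypothetical baz_* names: touch the hardware only when the device is demonstrably active and in use, and never resume it just for bookkeeping.

#include <linux/pm_runtime.h>

/* Hypothetical helper: read hardware counters (register access). */
static void baz_read_counters(struct device *dev)
{
}

static void baz_poll(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	baz_read_counters(dev);
	pm_runtime_put(dev);
}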
@@ -1460,20 +1494,31 @@ int pm_runtime_barrier(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
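A hedged sketch of how the new blocking helpers pair up, e.g. in core code
that must keep runtime PM from being re-enabled across a critical section
(the foo_* caller is hypothetical):

#include <linux/pm_runtime.h>

static void foo_sleep_prepare(struct device *dev)
{
	/* If runtime PM is disabled, latch RPM_BLOCKED so that a premature
	 * pm_runtime_enable() in the meantime trips the warning this patch
	 * adds below. */
	bool blocked = pm_runtime_block_if_disabled(dev);

	foo_do_transition_work(dev);	/* hypothetical */

	if (blocked)
		pm_runtime_unblock(dev);
}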
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1577,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1545,6 +1594,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
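A probe-path sketch for the new helper, assuming a hypothetical platform
driver whose hardware is powered up before runtime PM is enabled:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_hw_power_on(&pdev->dev);	/* hypothetical */
	if (ret)
		return ret;

	/* Status must match the real hardware state; devres undoes both
	 * the set_active and the enable on driver detach. */
	return devm_pm_runtime_set_active_enabled(&pdev->dev);
}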
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1567,6 +1642,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
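A sketch of the devres-managed get_noresume, useful when a driver must hold
a runtime PM usage reference for as long as it is bound (hypothetical
caller):

static int foo_component_bind(struct device *dev)
{
	/* The matching pm_runtime_put_noidle() runs automatically when
	 * the driver is unbound. */
	return devm_pm_runtime_get_noresume(dev);
}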
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
@@ -1760,12 +1853,12 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.needs_force_resume = 0;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1787,6 +1880,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1810,9 +1908,8 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- if (link->flags & DL_FLAG_PM_RUNTIME) {
+ dev_for_each_link_to_supplier(link, dev)
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
@@ -1866,7 +1963,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
*/
void pm_runtime_drop_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
@@ -1874,13 +1971,23 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1897,10 +2004,6 @@ static bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
@@ -1908,10 +2011,10 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
@@ -1923,15 +2026,16 @@ int pm_runtime_force_suspend(struct device *dev)
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of, and to prevent this function
+ * from handling the same device again should it be passed in once more.
*/
- if (pm_runtime_need_not_resume(dev)) {
+ if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
- } else {
- __update_runtime_status(dev, RPM_SUSPENDED);
- dev->power.needs_force_resume = 1;
- }
+ else
+ dev->power.needs_force_resume = true;
return 0;
@@ -1942,33 +2046,37 @@ err:
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
*
- * Typically this function may be invoked from a system resume callback.
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
@@ -1979,9 +2087,30 @@ int pm_runtime_force_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+
out:
- dev->power.needs_force_resume = 0;
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+ * Also clear needs_force_resume to make this function skip devices that
+ * it has already handled once.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
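The force helpers are typically wired up as the system sleep callbacks so
that the runtime PM callbacks are reused for system-wide transitions; a
minimal sketch with hypothetical foo_runtime_* callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};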
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -509,14 +508,6 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return sysfs_emit(buf, "%lld\n", msec);
}
-static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
- kgid_t kgid)
-{
- if (dev->power.wakeup && dev->power.wakeup->dev)
- return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
- return 0;
-}
-
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -541,6 +532,15 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
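A probe-path sketch combining the new devres wake IRQ helper with a shared
functional interrupt (foo_irq_handler and the "foo" name are hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       "foo", pdev);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);
	/* Cleared automatically on driver detach via devres. */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}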
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 752b417e8129..d1283ff1080b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida);
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
@@ -106,7 +106,6 @@ err_name:
err_ws:
return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws)
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
@@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_record(ws);
wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -179,13 +177,12 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
@@ -197,14 +194,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
- del_timer_sync(&ws->timer);
+ timer_delete_sync(&ws->timer);
/*
* Clear timer.function to make wakeup_source_not_registered() treat
* this wakeup source as not registered.
*/
ws->timer.function = NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
@@ -337,7 +333,7 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
@@ -613,7 +609,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, false);
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
spin_unlock_irqrestore(&ws->lock, flags);
@@ -693,7 +689,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
ws->last_time = now;
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
if (ws->autosleep_enabled)
@@ -763,7 +759,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
*/
static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = from_timer(ws, t, timer);
+ struct wakeup_source *ws = timer_container_of(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
+wakeup_attr(relax_count);
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..6a63860579dd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Report whether property @propname is true or false in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Report whether property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
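A usage sketch for the boolean property accessors, with an illustrative
devicetree property name:

/* DT fragment (illustrative):
 *	foo@0 {
 *		vendor,big-endian;
 *	};
 */
static void foo_parse_endianness(struct device *dev)
{
	if (device_property_read_bool(dev, "vendor,big-endian"))
		dev_info(dev, "big-endian register layout\n");
}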
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
@@ -540,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
- * otherwise @nargs is ignored. Only relevant on OF.
+ * otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
@@ -890,22 +928,49 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
+ */
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ fwnode_for_each_child_node(fwnode, child)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
+
+/**
+ * fwnode_get_named_child_node_count - number of child nodes with given name
+ * @fwnode: Node whose child nodes are counted.
+ * @name: String to match child node name against.
+ *
+ * Scan child nodes and count all the nodes with a specific name. A potential
+ * unit-address suffix after the 'at sign' in scanned node names is ignored.
+ * E.g.::
+ * fwnode_get_named_child_node_count(fwnode, "channel");
+ * would match all the nodes::
+ * channel { }, channel@0 {}, channel@0xabba {}...
+ *
+ * Return: the number of child nodes with a matching name for a given device.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+ const char *name)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_named_child_node(fwnode, child, name)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count);
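A short sketch of the new named-child counter, e.g. sizing an allocation
from the number of "channel" nodes described above (hypothetical caller):

static int foo_count_channels(struct device *dev)
{
	/* Matches channel {}, channel@0 {}, channel@0xabba {}, ... */
	return fwnode_get_named_child_node_count(dev_fwnode(dev), "channel");
}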
bool device_dma_supported(const struct device *dev)
{
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
config REGMAP
bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
- select IRQ_DOMAIN if REGMAP_IRQ
- select MDIO_BUS if REGMAP_MDIO
help
Enable support for the Register Map (regmap) access API.
@@ -58,12 +56,14 @@ config REGMAP_W1
config REGMAP_MDIO
tristate
+ select MDIO_BUS
config REGMAP_MMIO
tristate
config REGMAP_IRQ
bool
+ select IRQ_DOMAIN
config REGMAP_RAM
tristate
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 83acccdc1008..6f31240ee4a9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
unsigned long raw_spinlock_flags;
};
};
+ struct lock_class_key *lock_key;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -72,12 +73,12 @@ struct regmap {
void *bus_context;
const char *name;
- bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
struct list_head async_free;
int async_ret;
+ bool async;
#ifdef CONFIG_DEBUG_FS
bool debugfs_disable;
@@ -116,8 +117,6 @@ struct regmap {
void *val_buf, size_t val_size);
int (*write)(void *context, const void *data, size_t count);
- bool defer_caching;
-
unsigned long read_flag_mask;
unsigned long write_flag_mask;
@@ -126,6 +125,8 @@ struct regmap {
int reg_stride;
int reg_stride_order;
+ bool defer_caching;
+
/* If set, will always write field to HW. */
bool force_write_field;
@@ -160,6 +161,9 @@ struct regmap {
struct reg_sequence *patch;
int patch_regs;
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
+
/* if set, converts bulk read to single read */
bool use_single_read;
/* if set, converts bulk write to single write */
@@ -175,9 +179,6 @@ struct regmap {
void *selector_work_buf; /* Scratch buffer used for selector */
struct hwspinlock *hwlock;
-
- /* if set, the regmap core can sleep */
- bool can_sleep;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 8d27d3653ea3..2319c30283a6 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -355,6 +354,9 @@ static int regcache_maple_init(struct regmap *map)
mt_init(mt);
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
if (!map->num_reg_defaults)
return 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 188438186589..a9d17f316e55 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d3659ba3cc11..c7650fa434ad 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -21,6 +21,26 @@ static const struct regcache_ops *cache_types[] = {
&regcache_flat_ops,
};
+static int regcache_defaults_cmp(const void *a, const void *b)
+{
+ const struct reg_default *x = a;
+ const struct reg_default *y = b;
+
+ if (x->reg > y->reg)
+ return 1;
+ else if (x->reg < y->reg)
+ return -1;
+ else
+ return 0;
+}
+
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
+{
+ sort(defaults, ndefaults, sizeof(*defaults),
+ regcache_defaults_cmp, NULL);
+}
+EXPORT_SYMBOL_GPL(regcache_sort_defaults);
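A sketch of the new helper: drivers that assemble reg_defaults at runtime
can sort them into the ascending register order regmap expects (register
values are illustrative):

static struct reg_default foo_defaults[] = {
	{ .reg = 0x20, .def = 0x01 },
	{ .reg = 0x10, .def = 0xff },
};

static void foo_prepare_defaults(void)
{
	regcache_sort_defaults(foo_defaults, ARRAY_SIZE(foo_defaults));
	/* Now ordered: { 0x10, 0xff }, { 0x20, 0x01 } */
}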
+
static int regcache_hw_init(struct regmap *map)
{
int i, j;
@@ -154,7 +174,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index fb84cda92a75..c9b4c04b1cf6 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_only) {
@@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
map->cache_only = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_bypass) {
@@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
map->cache_bypass = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
return count;
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index a750e48a26b8..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -6,11 +6,13 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -19,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -33,6 +36,7 @@ struct regmap_irq_chip_data {
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
+ unsigned int *prev_status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
@@ -193,10 +197,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
/* If we've changed our wakeup count propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
- irq_set_irq_wake(d->irq, 0);
+ disable_irq_wake(d->irq);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
- irq_set_irq_wake(d->irq, 1);
+ enable_irq_wake(d->irq);
d->wake_count = 0;
@@ -332,27 +336,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
return ret;
}
-static irqreturn_t regmap_irq_thread(int irq, void *d)
+static int read_irq_data(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- bool handled = false;
u32 reg;
- if (chip->handle_pre_irq)
- chip->handle_pre_irq(chip->irq_drv_data);
-
- if (chip->runtime_pm) {
- ret = pm_runtime_get_sync(map->dev);
- if (ret < 0) {
- dev_err(map->dev, "IRQ thread failed to resume: %d\n",
- ret);
- goto exit;
- }
- }
-
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
@@ -364,14 +354,11 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
} else if (chip->num_main_regs) {
unsigned int max_main_bits;
- unsigned long size;
-
- size = chip->num_regs * sizeof(unsigned int);
max_main_bits = (chip->num_main_status_bits) ?
chip->num_main_status_bits : chip->num_regs;
/* Clear the status buf as we don't read all status regs */
- memset(data->status_buf, 0, size);
+ memset32(data->status_buf, 0, chip->num_regs);
/* We could support bulk read for main status registers
* but I don't expect to see devices with really many main
@@ -382,10 +369,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -401,10 +386,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = read_sub_irq_data(data, b);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -421,9 +404,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -439,7 +421,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- goto exit;
+ return -EIO;
}
}
@@ -450,10 +432,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
}
}
@@ -462,6 +442,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
+ return 0;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = read_irq_data(data);
+ if (ret < 0)
+ goto exit;
+
+ if (chip->status_is_level) {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int val = data->status_buf[i];
+
+ data->status_buf[i] ^= data->prev_status_buf[i];
+ data->prev_status_buf[i] = val;
+ }
+ }
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -514,12 +530,16 @@ exit:
return IRQ_NONE;
}
+static struct lock_class_key regmap_irq_lock_class;
+static struct lock_class_key regmap_irq_request_class;
+
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
+ irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
@@ -704,6 +724,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->status_buf)
goto err_alloc;
+ if (chip->status_is_level) {
+ d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
+ GFP_KERNEL);
+ if (!d->prev_status_buf)
+ goto err_alloc;
+ }
+
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
@@ -775,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another then we'll try
+ * to lock the child with the parent locked; use an explicit
+ * lock_key so lockdep can figure out what's going on.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -790,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -801,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -812,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -822,14 +855,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
/* Ack masked but set interrupts */
if (d->chip->no_status) {
/* no status register so default to all active */
- d->status_buf[i] = GENMASK(31, 0);
+ d->status_buf[i] = UINT_MAX;
} else {
reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -853,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -875,14 +908,24 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
+ /* Store current levels */
+ if (chip->status_is_level) {
+ ret = read_irq_data(d);
+ if (ret < 0)
+ goto err_mutex;
+
+ memcpy(d->prev_status_buf, d->status_buf,
+ array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
+ }
+
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
- goto err_alloc;
+ goto err_mutex;
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -899,13 +942,18 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
@@ -980,13 +1028,17 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 4bf3f1e59ed7..95c5bf2a78ee 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -126,7 +126,7 @@ static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
- { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -736,7 +736,7 @@ static void stride(struct kunit *test)
}
}
-static struct regmap_range_cfg test_range = {
+static const struct regmap_range_cfg test_range = {
.selector_reg = 1,
.selector_mask = 0xff,
@@ -1499,6 +1499,48 @@ static void cache_present(struct kunit *test)
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
+static void cache_write_zero(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+
+ /* No defaults so no registers cached. */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
+
+ /* We didn't trigger any reads */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
+
+ /* Write a zero value */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
+
+ /* Read that zero value back */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_EXPECT_EQ(test, 0, val);
+
+ /* From the cache? */
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
+
+ /* Try to throw it away */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
+}
+
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
@@ -2012,6 +2054,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 99d7fd85ca7d..29e5f3175301 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -609,4 +609,5 @@ void regmap_mmio_detach_clk(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+MODULE_DESCRIPTION("regmap MMIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index c99eada83780..86644bbd0710 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = &slave->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
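A worked example of the byte ordering implemented above (illustrative
values): writing val = 0x1234 with mbq_size = 2 starts with shift = 16;
the loop writes (0x1234 >> 8) & 0xff = 0x12 to SDW_SDCA_MBQ_CTL(reg), and
the final write of 0x34 to reg itself completes the MBQ transaction.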
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
+{
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
+
+ *val = read;
+
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
+
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return 0;
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ if (mbq_size < 0)
+ return mbq_size;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
- *val = (read1 << 8) | read0;
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
- return 0;
+ return ret;
}
static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,35 +206,69 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ded93687c1f..ce9be3989a21 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data);
+
+static int regmap_detach_dev(struct device *dev, struct regmap *map)
+{
+ if (!dev)
+ return 0;
+
+ return devres_release(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)map->name);
+}
+
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
lock_key, lock_name);
}
map->lock_arg = map;
+ map->lock_key = lock_key;
}
/*
@@ -757,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -816,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
@@ -1051,13 +1062,13 @@ skip_format_initialization:
/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
- dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}
if (range_cfg->range_max > map->max_register) {
- dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}
@@ -1162,6 +1173,8 @@ err_name:
err_map:
kfree(map);
err:
+ if (bus && bus->free_on_exit)
+ kfree(bus);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
@@ -1444,6 +1457,7 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
+ regmap_detach_dev(map->dev, map);
regcache_exit(map);
regmap_debugfs_exit(map);
@@ -2244,12 +2258,14 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* @field: Register field to operate on
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns negative errno if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested
+ * bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_field_read(field, &val);
if (ret)
@@ -3103,7 +3119,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
EXPORT_SYMBOL_GPL(regmap_fields_read);
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
- unsigned int *regs, void *val, size_t val_count)
+ const unsigned int *regs, void *val, size_t val_count)
{
u32 *u32 = val;
u16 *u16 = val;
@@ -3197,7 +3213,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
size_t val_count)
{
if (val_count == 0)
@@ -3295,7 +3311,8 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_read(map, reg, &val);
if (ret)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..be1e9e61a7bf 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
@@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
@@ -843,7 +844,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
-int software_node_register_node_group(const struct software_node **node_group)
+int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@@ -876,8 +877,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
-void software_node_unregister_node_group(
- const struct software_node **node_group)
+void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;
@@ -1079,6 +1079,7 @@ void software_node_notify(struct device *dev)
if (!swnode)
return;
+ kobject_get(&swnode->kobj);
ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
return;
@@ -1088,8 +1089,6 @@ void software_node_notify(struct device *dev)
sysfs_remove_link(&dev->kobj, "software_node");
return;
}
-
- kobject_get(&swnode->kobj);
}
void software_node_notify_remove(struct device *dev)
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 89f98be5c5b9..c890e2a5b428 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,23 +23,39 @@ static ssize_t name##_show(struct device *dev, \
#define define_siblings_read_func(name, mask) \
static ssize_t name##_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
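
Hand-expanding the rewritten macro for one instance (name=thread_siblings, mask=sibling_cpumask, with the substituted local renamed back to "mask" for readability) shows the point of the change: the sysfs read now works on a private snapshot of the topology mask, so a partial read cannot observe the mask mutating under it:

static ssize_t thread_siblings_read(struct file *file, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Snapshot before printing: the output stays self-consistent
	 * even if CPUs are hotplugged during the read. */
	cpumask_copy(mask, topology_sibling_cpumask(dev->id));
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}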
define_id_show_func(physical_package_id, "%d");
@@ -62,50 +78,50 @@ define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_core_cpus,
&bin_attr_core_cpus_list,
&bin_attr_thread_siblings,
@@ -192,3 +208,55 @@ static int __init topology_sysfs_init(void)
}
device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
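
The new attribute surfaces as /sys/devices/system/cpu/cpuN/cpu_capacity, appearing and disappearing with CPU hotplug via the dynamic cpuhp state. A small user-space sketch for reading it, with the path as created by device_create_file() above:

#include <stdio.h>

int main(void)
{
	unsigned long cap;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");

	if (!f)
		return 1;	/* CPU offline, or attribute not present */
	if (fscanf(f, "%lu", &cap) == 1)
		printf("cpu0 capacity: %lu\n", cap);
	fclose(f);
	return 0;
}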
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
index e52b6eae060d..3b83b13a57ff 100644
--- a/drivers/base/trace.h
+++ b/drivers/base/trace.h
@@ -24,18 +24,18 @@ DECLARE_EVENT_CLASS(devres,
__field(struct device *, dev)
__field(const char *, op)
__field(void *, node)
- __field(const char *, name)
+ __string(name, name)
__field(size_t, size)
),
TP_fast_assign(
__assign_str(devname);
__entry->op = op;
__entry->node = node;
- __entry->name = name;
+ __assign_str(name);
__entry->size = size;
),
TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
- __entry->op, __entry->node, __entry->name, __entry->size)
+ __entry->op, __entry->node, __get_str(name), __entry->size)
);
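
The switch from __field(const char *, name) to __string(name, name) copies the string contents into the trace record instead of storing a pointer that may dangle by the time the event is read. The three-part idiom, as a fragment of a TRACE_EVENT definition (not a complete event):

TP_STRUCT__entry(
	__string(name, name)	/* reserve space for the contents */
),
TP_fast_assign(
	__assign_str(name);	/* copy the string at trace time */
),
TP_printk("%s", __get_str(name))	/* read the embedded copy */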
DEFINE_EVENT(devres, devres_log,
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f90bac6bb09..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -26,12 +26,14 @@ static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
-static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
+static int bcma_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcma_drv_cc *cc = gpiochip_get_data(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
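
The gpio_chip .set() callback now returns int so controllers can report failed writes; bcma's register poke cannot fail, hence the unconditional 0. A sketch of a setter that can fail, with made-up names (demo_priv, demo_bus_write) standing in for any fallible bus access:

static int demo_gpio_set(struct gpio_chip *chip, unsigned int offset,
			 int value)
{
	struct demo_priv *priv = gpiochip_get_data(chip);	/* hypothetical */

	/* demo_bus_write() is a stand-in for a register write that can
	 * fail, e.g. behind I2C or SPI; its error now reaches the core. */
	return demo_bus_write(priv, offset, value);
}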
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 8ae0b918e740..20b1816c570b 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -261,7 +261,7 @@ static struct platform_driver bcma_host_soc_driver = {
.of_match_table = bcma_host_soc_of_match,
},
.probe = bcma_host_soc_probe,
- .remove_new = bcma_host_soc_remove,
+ .remove = bcma_host_soc_remove,
};
int __init bcma_host_soc_register_driver(void)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ed209f4f2798..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -130,7 +131,7 @@ config BLK_DEV_UBD_SYNC
kernel command line option. Alternatively, you can say Y here to
turn on synchronous operation by default for all block devices.
- If you're running a journalling file system (like reiserfs, for
+ If you're running a journalling file system (like xfs, for
example) in your virtual machine, you will want to say Y here. If
you care for the safety of the data in your virtual machine, Y is a
wise choice too. In all other cases (for example, if you're just
@@ -256,49 +257,6 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media (DEPRECATED)"
- depends on !UML
- depends on SCSI
- select CDROM
- help
- Note: This driver is deprecated and will be removed from the
- kernel in the near future!
-
- If you have a CDROM/DVD drive that supports packet writing, say
- Y to include support. It should work with any MMC/Mt Fuji
- compliant ATAPI or SCSI drive, which is just about any newer
- DVD/CD writer.
-
- Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
- is possible.
- DVD-RW disks must be in restricted overwrite mode.
-
- See the file <file:Documentation/cdrom/packet-writing.rst>
- for further information on the use of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called pktcdvd.
-
-config CDROM_PKTCDVD_BUFFERS
- int "Free buffers for data gathering"
- depends on CDROM_PKTCDVD
- default "8"
- help
- This controls the maximum number of active concurrent packets. More
- concurrent packets can increase write performance, but also require
- more memory. Each concurrent packet will require approximately 64Kb
- of non-swappable kernel memory, memory which will be allocated when
- a disc is opened for writing.
-
-config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching"
- depends on CDROM_PKTCDVD
- help
- If enabled, write caching will be set for the CD-R/W device. For now
- this option is dangerous unless the CD-RW media is known good, as we
- don't do deferred write error handling yet.
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
@@ -354,20 +312,11 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config BLK_DEV_RUST_NULL
- tristate "Rust null block driver (Experimental)"
- depends on RUST
- help
- This is the Rust implementation of the null block driver. For now it
- is only a minimal stub.
-
- If unsure, say N.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
help
@@ -388,12 +337,6 @@ config BLK_DEV_UBLK
definition isn't finalized yet, and might change according to future
requirements, so mark it as experimental for now.
- Say Y if you want to get better performance because task_work_add()
- can be used in IO path for replacing io_uring cmd, which will become
- shared between IO tasks and ubq daemon, meantime task_work_add() can
- can handle batch more effectively, but task_work_add() isn't exported
- for module, so ublk has to be built to kernel.
-
config BLKDEV_UBLK_LEGACY_OPCODES
bool "Support legacy command opcode"
depends on BLK_DEV_UBLK
@@ -413,4 +356,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES
source "drivers/block/rnbd/Kconfig"
+config BLK_DEV_ZONED_LOOP
+ tristate "Zoned loopback device support"
+ depends on BLK_DEV_ZONED
+ help
+ Saying Y here will allow you to create a zoned block device using
+ regular files for zones (one file per zone). This is useful to test
+ file systems, device mapper and applications that support zoned block
+ devices. No user utility is needed to create a zoned loop device: one
+ can be created (or re-started) using a command like:
+
+ echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \
+ /dev/zloop-control
+
+ See Documentation/admin-guide/blockdev/zoned_loop.rst for usage
+ details.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1105a2d4fdcb..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,6 @@
# needed for trace events
ccflags-y += -I$(src)
-obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
-rnull_mod-y := rnull.o
-
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -23,7 +20,6 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -39,7 +35,9 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49ced65bef4c..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -457,7 +457,7 @@ static int fd_motor_on(int nr)
{
nr &= 3;
- del_timer(motor_off_timer + nr);
+ timer_delete(motor_off_timer + nr);
if (!unit[nr].motor) {
unit[nr].motor = 1;
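
This and the many similar hunks below are a mechanical rename of the timer API; the semantics are unchanged. Side by side:

del_timer(&t);				/* old */
timer_delete(&t);			/* new, same behaviour */

del_timer_sync(&t);			/* old */
timer_delete_sync(&t);			/* new, waits for a running callback */

d = from_timer(d, t, timer);		/* old container_of helper */
d = timer_container_of(d, t, timer);	/* new name, same expansion */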
@@ -1393,7 +1393,7 @@ static int non_int_flush_track (unsigned long nr)
nr&=3;
writefromint = 0;
- del_timer(&post_write_timer);
+ timer_delete(&post_write_timer);
get_fdc(nr);
if (!fd_motor_on(nr)) {
writepending = 0;
@@ -1435,7 +1435,7 @@ static int get_track(int drive, int track)
}
if (unit[drive].dirty == 1) {
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track (drive);
}
errcnt = 0;
@@ -1523,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
@@ -1591,7 +1591,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
case FDDEFPRM:
return -EINVAL;
case FDFLUSH: /* unconditionally, even if not needed */
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track(drive);
break;
#ifdef RAW_IOCTL
@@ -1714,7 +1714,7 @@ static void floppy_release(struct gendisk *disk)
mutex_lock(&amiflop_mutex);
if (unit[drive].dirty == 1) {
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track (drive);
}
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 749ae1246f4c..d35caa3c69e1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */
+ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
};
enum {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2028795ec61c..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b06d1de4cc..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -745,7 +745,7 @@ rexmit_timer(struct timer_list *timer)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = from_timer(d, timer, timer);
+ d = timer_container_of(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL);
- if (d->flags & DEVFL_TKILL) {
+ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
- aoedev_downdev(d);
+ d->flags |= DEVFL_DEAD;
+ queue_work(aoe_wq, &d->work);
goto out;
}
@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
+ if (d->flags & DEVFL_DEAD)
+ aoedev_downdev(d);
+
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
@@ -1757,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 3523dd82d7a0..3a240755045b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -149,7 +149,7 @@ dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = from_timer(d, t, timer);
+ d = timer_container_of(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
+ struct request *rq, *rqnext;
int i;
+ unsigned long flags;
- d->flags &= ~DEVFL_UP;
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
+ spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
@@ -223,13 +227,21 @@ aoedev_downdev(struct aoedev *d)
/* clean out the in-process request (if any) */
aoe_failip(d);
+ /* clean out any queued block requests */
+ list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+ }
+
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to ensure all are errored */
- blk_mq_freeze_queue(d->blkq);
+ unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
- blk_mq_unfreeze_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq, memflags);
}
if (d->gd)
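
blk_mq_freeze_queue() now enters a memalloc NOIO scope and hands back the saved flag state, which the matching unfreeze must restore. The pattern used above, in isolation:

unsigned int memflags;

memflags = blk_mq_freeze_queue(q);	/* freeze + save allocation flags */
/* ... queue frozen: no new requests reach the driver ... */
blk_mq_unfreeze_queue(q, memflags);	/* unfreeze + restore flags */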
@@ -273,7 +285,7 @@ freedev(struct aoedev *d)
if (!freeing)
return;
- del_timer_sync(&d->timer);
+ timer_delete_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 6238c4c87cfc..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -28,7 +28,7 @@ static void discover_timer(struct timer_list *t)
static void __exit
aoe_exit(void)
{
- del_timer_sync(&timer);
+ timer_delete_sync(&timer);
aoenet_exit();
unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4ba98c6654be..7fe14266c12c 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -494,7 +494,7 @@ static inline void start_timeout(void)
static inline void stop_timeout(void)
{
- del_timer(&timeout_timer);
+ timer_delete(&timeout_timer);
}
/* Select the side to use. */
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ unsigned int memflags;
int ret;
if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
q = unit[drive].disk[type]->queue;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
@@ -783,7 +784,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
contents become invalid! */
BufferDrive = -1;
/* stop deselect timer */
- del_timer( &motor_off_timer );
+ timer_delete(&motor_off_timer);
FILL( 60 * (nsect / 9), 0x4e );
for( sect = 0; sect < nsect; ++sect ) {
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -1137,7 +1138,7 @@ static void fd_rwsec_done( int status )
DPRINT(("fd_rwsec_done()\n"));
if (read_track) {
- del_timer(&readtrack_timer);
+ timer_delete(&readtrack_timer);
if (!MultReadInProgress)
return;
MultReadInProgress = 0;
@@ -1355,7 +1356,7 @@ static void fd_times_out(struct timer_list *unused)
/* If the timeout occurred while the readtrack_check timer was
* active, we need to cancel it, else bad things will happen */
if (UseTrackbuffer)
- del_timer( &readtrack_timer );
+ timer_delete(&readtrack_timer);
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay( 25 );
@@ -1565,7 +1566,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
/* stop deselect timer */
- del_timer( &motor_off_timer );
+ timer_delete(&motor_off_timer);
ReqCnt = 0;
ReqCmd = rq_data_dir(fd_request);
@@ -2054,7 +2055,7 @@ static void atari_floppy_cleanup(void)
blk_mq_free_tag_set(&unit[i].tag_set);
}
- del_timer_sync(&fd_timer);
+ timer_delete_sync(&fd_timer);
atari_stram_free(DMABuffer);
}
@@ -2088,7 +2089,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2fd1ed101748..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,42 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd page for a given sector, with a reference grabbed.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
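
The lookup above is the standard RCU speculative-reference idiom (compare find_get_page()): load under RCU, take a reference only if the count is still non-zero, then re-check that the entry is the one indexed. Its skeleton, with the retry loop of the real code elided:

rcu_read_lock();
page = xas_load(&xas);
if (page && !xas_retry(&xas, page)) {
	if (!get_page_unless_zero(page)) {
		page = NULL;		/* being freed: caller retries */
	} else if (page != xas_reload(&xas)) {
		put_page(page);		/* replaced meanwhile: retry */
		page = NULL;
	}
}
rcu_read_unlock();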
/*
* Insert a new page for a given sector, if one does not already exist.
+ * The returned page comes with a reference grabbed.
*/
-static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
+ blk_opf_t opf)
{
- pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
- struct page *page;
- int ret = 0;
-
- page = brd_lookup_page(brd, sector);
- if (page)
- return 0;
+ gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
+ struct page *page, *ret;
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
if (!page)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
- ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
- if (!ret)
+ ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
+ page, gfp);
+ if (!ret) {
brd->brd_nr_pages++;
- xa_unlock(&brd->brd_pages);
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
- if (ret < 0) {
- __free_page(page);
- if (ret == -EBUSY)
- ret = 0;
+ if (!xa_is_err(ret)) {
+ get_page(ret);
+ xa_unlock(&brd->brd_pages);
+ put_page(page);
+ return ret;
}
- return ret;
+
+ xa_unlock(&brd->brd_pages);
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -92,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -100,141 +132,69 @@ static void brd_free_pages(struct brd_device *brd)
}
/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ void *kaddr;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
- kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
-}
-
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
+ if (!page && op_is_write(opf)) {
+ page = brd_insert_page(brd, sector, opf);
+ if (IS_ERR(page))
+ goto out_error;
}
-}
-
-/*
- * Process a single bvec of a bio.
- */
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
-{
- void *mem;
- int err = 0;
+ kaddr = bvec_kmap_local(&bv);
if (op_is_write(opf)) {
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
-
- err = copy_to_brd_setup(brd, sector, len, gfp);
- if (err)
- goto out;
- }
-
- mem = kmap_atomic(page);
- if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
} else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
- kunmap_atomic(mem);
+ kunmap_local(kaddr);
-out:
- return err;
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
+ return true;
+
+out_error:
+ if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
- sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
+ sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
+ sector_t aligned_end = round_down(
+ sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
struct page *page;
- size -= (aligned_sector - sector) * SECTOR_SIZE;
+ if (aligned_end <= aligned_sector)
+ return;
+
xa_lock(&brd->brd_pages);
- while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+ while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
- if (page)
- __free_page(page);
+ if (page) {
+ put_page(page);
+ brd->brd_nr_pages--;
+ }
aligned_sector += PAGE_SECTORS;
- size -= PAGE_SIZE;
}
xa_unlock(&brd->brd_pages);
}
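
A worked example of the new alignment, assuming 4 KiB pages (PAGE_SECTORS == 8): for a discard starting at sector 3 and covering 18 sectors,

/*
 *	aligned_sector = round_up(3, 8)        =  8
 *	aligned_end    = round_down(3 + 18, 8) = 16
 *
 * Only the whole page backing sectors [8, 16) is dropped; the partial
 * head [3, 8) and tail [16, 21) stay allocated, and nothing is freed
 * at all when the range contains no complete page.
 */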
@@ -242,36 +202,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
- brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
bio_endio(bio);
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- int err;
-
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- sector += len >> SECTOR_SHIFT;
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
@@ -316,8 +258,40 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
+static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
+static struct brd_device *brd_find_or_alloc_device(int i)
+{
+ struct brd_device *brd;
+
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+ return brd;
+}
+
+static void brd_free_device(struct brd_device *brd)
+{
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
+ kfree(brd);
+}
+
static int brd_alloc(int i)
{
struct brd_device *brd;
@@ -340,14 +314,9 @@ static int brd_alloc(int i)
BLK_FEAT_NOWAIT,
};
- list_for_each_entry(brd, &brd_devices, brd_list)
- if (brd->brd_number == i)
- return -EEXIST;
- brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->brd_number = i;
- list_add_tail(&brd->brd_list, &brd_devices);
+ brd = brd_find_or_alloc_device(i);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
xa_init(&brd->brd_pages);
@@ -378,8 +347,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
put_disk(disk);
out_free_dev:
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
return err;
}
@@ -398,8 +366,7 @@ static void brd_cleanup(void)
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
}
}
@@ -426,16 +393,6 @@ static int __init brd_init(void)
{
int err, i;
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
- }
-
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -451,11 +408,18 @@ static int __init brd_init(void)
* dynamically.
*/
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
+ for (i = 0; i < rd_nr; i++)
+ brd_alloc(i);
+
pr_info("brd: module loaded\n");
return 0;
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 6fb4e38fca88..495a72da04c6 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -10,7 +10,7 @@ config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
depends on PROC_FS && INET
select LRU_CACHE
- select LIBCRC32C
+ select CRC32
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e21492981f7d..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5bbd312c3e14..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
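
mempool_init_page_pool() preallocates the given number of order-0 pages as an emergency reserve; mempool_alloc() dips into that reserve only when the page allocator fails, and can still return NULL for non-sleeping masks such as the receiver's GFP_TRY. The pool's life cycle, as a minimal sketch:

mempool_t pool;
struct page *page;

if (mempool_init_page_pool(&pool, 128, 0))	/* 128 reserved order-0 pages */
	return -ENOMEM;

page = mempool_alloc(&pool, GFP_NOIO);	/* sleeps until a page is available */
/* ... use the page ... */
mempool_free(page, &pool);		/* refills the reserve first */
mempool_exit(&pool);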
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
@@ -3034,7 +3005,7 @@ void drbd_md_sync(struct drbd_device *device)
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
- del_timer(&device->md_sync_timer);
+ timer_delete(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
@@ -3591,7 +3562,8 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, md_sync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ md_sync_timer);
drbd_device_post_work(device, MD_SYNC);
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 720fc30e2ecc..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1033,7 +1033,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
/* We do some synchronous IO below, which may take some time.
* Clear the timer, to avoid scary "timer expired!" messages,
* "Superblock" is written out at least twice below, anyways. */
- del_timer(&device->md_sync_timer);
+ timer_delete(&device->md_sync_timer);
/* We won't change the "al-extents" setting, we just may need
* to move the on-disk location of the activity log ringbuffer.
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 0c9f54197768..caaf2781136d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
* It is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
data_size -= len;
}
kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -2500,7 +2312,11 @@ static int handle_write_conflicts(struct drbd_device *device,
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
list_add_tail(&peer_req->w.list, &device->done_ee);
- queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
+ /* put is in drbd_send_acks_wf() */
+ kref_get(&device->kref);
+ if (!queue_work(connection->ack_sender,
+ &peer_req->peer_device->send_acks_work))
+ kref_put(&device->kref, drbd_destroy_device);
err = -ENOENT;
goto out;
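
The queue_work() change above is a reference handover: the work item owns one device reference per successful queueing, and a failed queue (the work was already pending) must drop the reference it tried to pass along. The shape of the pattern:

kref_get(&device->kref);		/* reference travels with the work */
if (!queue_work(wq, &work))
	kref_put(&device->kref, release);	/* already queued: give it back */
/* on success, the matching kref_put() runs at the end of the work fn */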
@@ -5187,7 +5003,7 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
atomic_set(&device->rs_pending_cnt, 0);
wake_up(&device->misc_wait);
- del_timer_sync(&device->resync_timer);
+ timer_delete_sync(&device->resync_timer);
resync_timer_fn(&device->resync_timer);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
@@ -5220,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5976,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 380e6584a4ee..d15826f6ee81 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1699,7 +1699,8 @@ static bool net_timeout_reached(struct drbd_request *net_req,
void request_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, request_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ request_timer);
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
struct net_conf *nc;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4352a50fbb3f..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -442,7 +442,8 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ resync_timer);
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
@@ -1029,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1058,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1073,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1119,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1154,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1175,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1219,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1698,7 +1679,8 @@ void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
void start_resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, start_resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ start_resync_timer);
drbd_device_post_work(device, RS_START);
}
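
The timer hunks in this series convert del_timer()/del_timer_sync() to timer_delete()/timer_delete_sync() and from_timer() to timer_container_of(); the semantics are unchanged, only the names are clearer. A minimal sketch of the callback pattern (struct and field names are illustrative, not from drbd):

#include <linux/timer.h>

struct my_dev {
	struct timer_list my_timer;
	int pending;
};

static void my_timer_fn(struct timer_list *t)
{
	/* recover the containing structure from the embedded timer */
	struct my_dev *dev = timer_container_of(dev, t, my_timer);

	dev->pending = 0;
}
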
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3affb538b989..5336c3c5ca36 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -937,7 +935,7 @@ static void floppy_off(unsigned int drive)
if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))))
return;
- del_timer(motor_off_timer + drive);
+ timer_delete(motor_off_timer + drive);
/* make spindle stop in a position which minimizes spinup time
* next time */
@@ -1918,7 +1916,7 @@ static int start_motor(void (*function)(void))
mask &= ~(0x10 << UNIT(current_drive));
/* starts motor and selects floppy */
- del_timer(motor_off_timer + current_drive);
+ timer_delete(motor_off_timer + current_drive);
set_dor(current_fdc, mask, data);
/* wait_for_completion also schedules reset if needed. */
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
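
memdup_user() collapses the kmalloc()+copy_from_user() pair above into one call and reports failure as an ERR_PTR() instead of a separate NULL/-EFAULT split. A hedged sketch with a made-up struct my_cmd:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_cmd {
	u32 opcode;
	u32 len;
};

static int my_copy_cmd(void __user *uarg, struct my_cmd **out)
{
	struct my_cmd *cmd;

	/* allocate a kernel buffer and copy from userspace in one step */
	cmd = memdup_user(uarg, sizeof(*cmd));
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);	/* -EFAULT or -ENOMEM */

	*out = cmd;
	return 0;
}
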
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
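
As the hunk shows, the ->getgeo prototype now takes the gendisk directly instead of a block_device. An illustrative implementation against the new prototype (my_getgeo and the CHS values are invented for the sketch):

#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>

static int my_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
	/* synthesize CHS values from the disk capacity */
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(disk) >> 6;	/* / (4 * 16) */
	geo->start = 0;
	return 0;
}

static const struct block_device_operations my_fops = {
	.owner	= THIS_MODULE,
	.getgeo	= my_getgeo,
};
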
@@ -3411,7 +3406,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
- const void *outparam; /* parameters passed back to user space */
+ const void *outparam = NULL; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
@@ -4596,7 +4591,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
@@ -4763,7 +4757,7 @@ out_put_disk:
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive][0])
break;
- del_timer_sync(&motor_off_timer[drive]);
+ timer_delete_sync(&motor_off_timer[drive]);
put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
@@ -4984,7 +4978,7 @@ static void __exit floppy_module_exit(void)
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
- del_timer_sync(&motor_off_timer[drive]);
+ timer_delete_sync(&motor_off_timer[drive]);
if (floppy_available(drive)) {
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 78a7bb28defe..13ce229d450c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -45,8 +45,6 @@ enum {
Lo_deleting,
};
-struct loop_func_table;
-
struct loop_device {
int lo_number;
loff_t lo_offset;
@@ -54,7 +52,8 @@ struct loop_device {
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
- struct file * lo_backing_file;
+ struct file *lo_backing_file;
+ unsigned int lo_min_dio_size;
struct block_device *lo_device;
gfp_t old_gfp_mask;
@@ -68,7 +67,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -139,20 +137,35 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
loff_t loopsize;
+ int ret;
+
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
+
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
- /* Compute loopsize in bytes */
- loopsize = i_size_read(file->f_mapping->host);
- if (offset > 0)
- loopsize -= offset;
+ loopsize = stat.size;
+ }
+
+ if (lo->lo_offset > 0)
+ loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
-
- if (sizelimit > 0 && sizelimit < loopsize)
- loopsize = sizelimit;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -160,63 +173,38 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
- return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
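
The size query pattern lo_calculate_size() adopts, reduced to a hedged sketch (my_backing_size is not a driver function): vfs_getattr_nosec() asks the filesystem for a fresh STATX_SIZE instead of trusting the possibly stale cached inode size.

#include <linux/fs.h>
#include <linux/stat.h>

static loff_t my_backing_size(struct file *file)
{
	struct kstat stat;

	/* fetch an up-to-date size; i_size_read() may be stale on NFS & co. */
	if (vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0))
		return 0;	/* treat failures as an empty device */
	return stat.size;
}
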
/*
 * We support direct I/O only if lo_offset is aligned with the logical I/O size
 * of the backing device, and the logical block size of the loop device is no
 * smaller than that of the backing device.
*/
-static bool lo_bdev_can_use_dio(struct loop_device *lo,
- struct block_device *backing_bdev)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
-
- if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
+ if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
return false;
- if (lo->lo_offset & (sb_bsize - 1))
+ if (lo->lo_offset & (lo->lo_min_dio_size - 1))
return false;
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
-
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
-
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
-
- if (lo->use_dio == use_dio)
- return;
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
-
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio)
- lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
}
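
The rewritten loop_update_dio() encodes its calling conventions as runtime checks rather than comments. A generic sketch of that style, with hypothetical names:

#include <linux/blkdev.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	struct request_queue *queue;
	bool bound;
};

static void my_assert_update_context(struct my_dev *dev)
{
	/* callers must hold the device mutex ... */
	lockdep_assert_held(&dev->lock);
	/* ... and must have frozen the queue once the device is live */
	WARN_ON_ONCE(dev->bound && dev->queue->mq_freeze_depth == 0);
}
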
/**
@@ -233,72 +221,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
-{
- struct iov_iter i;
- ssize_t bw;
-
- iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
-
- bw = vfs_iter_write(file, &i, ppos, 0);
-
- if (likely(bw == bvec->bv_len))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)*ppos, bvec->bv_len);
- if (bw >= 0)
- bw = -EIO;
- return bw;
-}
-
-static int lo_write_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- int ret = 0;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
- if (ret < 0)
- break;
- cond_resched();
- }
-
- return ret;
-}
-
-static int lo_read_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- struct iov_iter i;
- ssize_t len;
-
- rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0)
- return len;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- cond_resched();
- }
-
- return 0;
-}
-
static void loop_clear_limits(struct loop_device *lo, int mode)
{
struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
@@ -311,6 +233,13 @@ static void loop_clear_limits(struct loop_device *lo, int mode)
lim.discard_granularity = 0;
}
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+	 * advertise whether they support specific fallocate operations.
+ */
queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -357,7 +286,7 @@ static void lo_complete_rq(struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;
- if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
@@ -373,14 +302,13 @@ static void lo_complete_rq(struct request *rq)
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
- if (cmd->use_aio) {
- struct bio *bio = rq->bio;
+ struct bio *bio = rq->bio;
- while (bio) {
- zero_fill_bio(bio);
- bio = bio->bi_next;
- }
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
}
+
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
@@ -395,6 +323,8 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
+ if (req_op(rq) == REQ_OP_WRITE)
+ kiocb_end_write(&cmd->iocb);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -460,20 +390,26 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
- cmd->iocb.ki_complete = lo_rw_aio_complete;
- cmd->iocb.ki_flags = IOCB_DIRECT;
- cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ cmd->iocb.ki_ioprio = req_get_ioprio(rq);
+ if (cmd->use_aio) {
+ cmd->iocb.ki_complete = lo_rw_aio_complete;
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ } else {
+ cmd->iocb.ki_complete = NULL;
+ cmd->iocb.ki_flags = 0;
+ }
- if (rw == ITER_SOURCE)
+ if (rw == ITER_SOURCE) {
+ kiocb_start_write(&cmd->iocb);
ret = file->f_op->write_iter(&cmd->iocb, &iter);
- else
+ } else
ret = file->f_op->read_iter(&cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
- return 0;
+ return -EIOCBQUEUED;
}
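
kiocb_start_write()/kiocb_end_write() bracket a file write with superblock freeze protection. In the hunk above the end call lives in the completion handler; the sketch below shows the simpler synchronous pairing (my_issue_write is illustrative):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t my_issue_write(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	/* take freeze protection for the lifetime of the write */
	kiocb_start_write(iocb);
	ret = iocb->ki_filp->f_op->write_iter(iocb, from);
	if (ret != -EIOCBQUEUED)
		kiocb_end_write(iocb);	/* async I/O ends it on completion */
	return ret;
}
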
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
@@ -481,15 +417,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- /*
- * lo_write_simple and lo_read_simple should have been covered
- * by io submit style function like lo_rw_aio(), one blocker
- * is that lo_read_simple() need to call flush_dcache_page after
- * the page is written from kernel, and it isn't easy to handle
- * this in io submit style function which submits all segments
- * of the req at one time. And direct read IO doesn't need to
- * run flush_dcache_page().
- */
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
@@ -505,27 +432,15 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
- else
- return lo_write_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
case REQ_OP_READ:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_DEST);
- else
- return lo_read_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_DEST);
default:
WARN_ON_ONCE(1);
return -EIO;
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -538,6 +453,28 @@ static void loop_reread_partitions(struct loop_device *lo)
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static unsigned int loop_query_min_dio_size(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * Use the minimal dio alignment of the file system if provided.
+ */
+ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ return st.dio_offset_align;
+
+ /*
+ * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
+ * a handful of file systems support the STATX_DIOALIGN flag.
+ */
+ if (sb_bdev)
+ return bdev_logical_block_size(sb_bdev);
+ return SECTOR_SIZE;
+}
+
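
The STATX_DIOALIGN query above has a direct userspace counterpart; a small statx(2) program (assuming a glibc with statx support) reports the same alignment constraints the driver reads via vfs_getattr():

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 || statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx))
		return 1;
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		puts("filesystem does not report STATX_DIOALIGN");
	return 0;
}
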
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
@@ -570,6 +507,28 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return 0;
}
+static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+{
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ lo->lo_min_dio_size = loop_query_min_dio_size(lo);
+}
+
+static int loop_check_backing_file(struct file *file)
+{
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -583,6 +542,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
{
struct file *file = fget(arg);
struct file *old_file;
+ unsigned int memflags;
int error;
bool partscan;
bool is_loop;
@@ -590,6 +550,12 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+ error = loop_check_backing_file(file);
+ if (error) {
+ fput(file);
+ return error;
+ }
+
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -615,19 +581,23 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
- if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
+ /*
+	 * We might switch to direct I/O mode for the loop device; write back
+	 * all dirty data in the page cache now so that the individual I/O
+	 * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
/* and ... switch */
disk_force_media_change(lo->lo_disk);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
- mapping_set_gfp_mask(file->f_mapping,
- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
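
blk_mq_freeze_queue() now returns the memalloc-noio state it entered, and blk_mq_unfreeze_queue() takes it back, so allocations made while the queue is frozen cannot recurse into I/O. The resulting idiom, sketched in isolation:

#include <linux/blk-mq.h>

static void my_update_under_freeze(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);
	/* queue drained; safe to change limits, flags or backing state */
	blk_mq_unfreeze_queue(q, memflags);
}
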
@@ -645,19 +615,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}
@@ -770,12 +741,11 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
-static void loop_config_discard(struct loop_device *lo,
- struct queue_limits *lim)
+static void loop_get_discard_config(struct loop_device *lo,
+ u32 *granularity, u32 *max_discard_sectors)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
- u32 granularity = 0, max_discard_sectors = 0;
struct kstatfs sbuf;
/*
@@ -786,27 +756,19 @@ static void loop_config_discard(struct loop_device *lo,
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
- max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
- queue_physical_block_size(backingq);
+ *max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+ *granularity = bdev_discard_granularity(bdev);
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
- max_discard_sectors = UINT_MAX >> 9;
- granularity = sbuf.f_bsize;
+ *max_discard_sectors = UINT_MAX >> 9;
+ *granularity = sbuf.f_bsize;
}
-
- lim->max_hw_discard_sectors = max_discard_sectors;
- lim->max_write_zeroes_sectors = max_discard_sectors;
- if (max_discard_sectors)
- lim->discard_granularity = granularity;
- else
- lim->discard_granularity = 0;
}
struct loop_worker {
@@ -862,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -973,25 +935,24 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
-static unsigned short loop_default_blocksize(struct loop_device *lo,
- struct block_device *backing_bdev)
+static unsigned int loop_default_blocksize(struct loop_device *lo)
{
- /* In case of direct I/O, match underlying block size */
- if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
- return bdev_logical_block_size(backing_bdev);
+ /* In case of direct I/O, match underlying minimum I/O size */
+ if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
+ return lo->lo_min_dio_size;
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
- struct queue_limits lim;
+ u32 granularity = 0, max_discard_sectors = 0;
if (S_ISBLK(inode->i_mode))
backing_bdev = I_BDEV(inode);
@@ -999,19 +960,24 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
backing_bdev = inode->i_sb->s_bdev;
if (!bsize)
- bsize = loop_default_blocksize(lo, backing_bdev);
+ bsize = loop_default_blocksize(lo);
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ loop_get_discard_config(lo, &granularity, &max_discard_sectors);
+
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
- lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
- lim.features |= BLK_FEAT_ROTATIONAL;
- loop_config_discard(lo, &lim);
- return queue_limits_commit_update(lo->lo_queue, &lim);
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
+ if (max_discard_sectors)
+ lim->discard_granularity = granularity;
+ else
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
@@ -1019,7 +985,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
@@ -1027,6 +993,13 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
+
+ error = loop_check_backing_file(file);
+ if (error) {
+ fput(file);
+ return error;
+ }
+
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
@@ -1054,8 +1027,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
- mapping = file->f_mapping;
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1064,6 +1035,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1085,20 +1057,27 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
- error = loop_reconfigure_limits(lo, config->block_size);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
if (error)
goto out_unlock;
+ /*
+	 * We might switch to direct I/O mode for the loop device; write back
+	 * all dirty data in the page cache now so that the individual I/O
+	 * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
loop_update_dio(lo);
loop_sysfs_init(lo);
- size = get_loop_size(lo, file);
+ size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1111,8 +1090,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- /* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1151,7 +1130,12 @@ static void __loop_clr_fd(struct loop_device *lo)
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- /* reset the block size to the default */
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
@@ -1245,9 +1229,9 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
+ unsigned int memflags;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
@@ -1264,38 +1248,29 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
- blk_mq_freeze_queue(lo->lo_queue);
-
- prev_lo_flags = lo->lo_flags;
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
+ lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
+ if (!err && size_changed) {
+ loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
+ loop_set_size(lo, new_size);
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
@@ -1437,7 +1412,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- size = get_loop_size(lo, lo->lo_backing_file);
+ size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
@@ -1445,36 +1420,75 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
+ unsigned int memflags;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ return 0;
}
-static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, unsigned long arg)
{
+ struct queue_limits lim;
+ unsigned int memflags;
int err = 0;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
+ /*
+ * If we don't hold exclusive handle for the device, upgrade to it
+ * here to avoid changing device under exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
+ if (err)
+ return err;
+ }
+
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err)
+ goto abort_claim;
+
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
+ goto unlock;
+ }
if (lo->lo_queue->limits.logical_block_size == arg)
- return 0;
+ goto unlock;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
- blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+unlock:
+ mutex_unlock(&lo->lo_mutex);
+abort_claim:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_block_size);
return err;
}
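
The new loop_set_block_size() upgrades a non-exclusive opener to a temporary exclusive claim so the block size cannot change under an exclusive owner. The claim/abort pairing, reduced to a sketch (my_change_block_size is a placeholder; the function pointer merely serves as the holder cookie):

#include <linux/blkdev.h>

static int my_change_block_size(struct block_device *bdev, blk_mode_t mode)
{
	int err = 0;

	/* claim the device unless the caller already holds it exclusively */
	if (!(mode & BLK_OPEN_EXCL)) {
		err = bd_prepare_to_claim(bdev, my_change_block_size, NULL);
		if (err)
			return err;
	}

	/* ... perform the change ... */

	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, my_change_block_size);
	return err;
}
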
@@ -1493,9 +1507,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
- case LOOP_SET_BLOCK_SIZE:
- err = loop_set_block_size(lo, arg);
- break;
default:
err = -EINVAL;
}
@@ -1550,9 +1561,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
+ case LOOP_SET_BLOCK_SIZE:
+ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return loop_set_block_size(lo, mode, bdev, arg);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
- case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
@@ -1855,7 +1869,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -1888,7 +1902,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
- const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
@@ -1918,7 +1931,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
failed:
/* complete non-aio request */
- if (!use_aio || ret) {
+ if (ret != -EIOCBQUEUED) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
@@ -2024,8 +2037,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 223faa9d5ffd..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2040,11 +2040,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* @dir Direction (read or write)
*
* return value
- * None
+ * 0 The IO completed successfully.
+ * -ENOMEM The DMA mapping failed.
*/
-static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
- struct mtip_cmd *command,
- struct blk_mq_hw_ctx *hctx)
+static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+ struct mtip_cmd *command,
+ struct blk_mq_hw_ctx *hctx)
{
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
@@ -2056,12 +2057,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
- nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
+ command->scatter_ents = blk_rq_map_sg(rq, command->sg);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ command->scatter_ents, dma_dir);
+ if (!nents)
+ return -ENOMEM;
- prefetch(&port->flags);
- command->scatter_ents = nents;
+ prefetch(&port->flags);
/*
* The number of retries for this command before it is
@@ -2112,11 +2115,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
- return;
+ return 0;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
+
+ return 0;
}
/*
@@ -2701,7 +2706,12 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned long timeout, timetaken;
- dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+ dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
+ if (IS_ERR(dd->mmio)) {
+ dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
+ return PTR_ERR(dd->mmio);
+ }
+
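
pcim_iomap_region() requests and ioremaps a single BAR in one managed call, returning an ERR_PTR() on failure; devres unmaps it automatically on detach, which is why the explicit pcim_iounmap_regions() calls disappear later in this patch. Sketched usage ("my_drv" is a placeholder name):

#include <linux/err.h>
#include <linux/pci.h>

static void __iomem *my_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *mmio = pcim_iomap_region(pdev, bar, "my_drv");

	if (IS_ERR(mmio))
		return NULL;	/* caller reports and propagates the error */
	return mmio;
}
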
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
@@ -3138,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
* affects performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
@@ -3310,7 +3320,9 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- mtip_hw_submit_io(dd, rq, cmd, hctx);
+ if (mtip_hw_submit_io(dd, rq, cmd, hctx))
+ return BLK_STS_IOERR;
+
return BLK_STS_OK;
}
@@ -3411,7 +3423,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -3710,17 +3721,10 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- /* Map BAR5 to memory. */
- rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
- if (rv < 0) {
- dev_err(&pdev->dev, "Unable to map regions\n");
- goto iomap_err;
- }
-
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
- goto setmask_err;
+ goto iomap_err;
}
/* Copy the info we may need later into the private data structure. */
@@ -3736,7 +3740,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto setmask_err;
+ goto iomap_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -3833,8 +3837,6 @@ msi_initialize_err:
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
-setmask_err:
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
@@ -3910,7 +3912,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b852050d8a96..1188f32a5e5e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Just part of request header or data payload is sent successfully */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -307,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -327,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -368,7 +371,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -379,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -466,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -615,6 +612,30 @@ static inline int was_interrupted(int result)
}
/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the request pending and schedule it as work.
+ *
+ * We also have to return BLK_STS_OK to the block core; otherwise the same
+ * request may be re-dispatched with a different tag, while our header went
+ * out with the old tag, which would confuse reply handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
+
+/*
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
* Returns BLK_STS_IOERR if sending failed.
*/
@@ -699,8 +720,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
@@ -737,14 +758,8 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -770,6 +785,14 @@ out:
return BLK_STS_OK;
requeue:
+ /*
+	 * Can't requeue in case we are dealing with a partial send;
+	 * this path must complete from the pending work function.
+	 */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
/* retry on a different socket */
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
@@ -778,6 +801,44 @@ requeue:
return BLK_STS_OK;
}
+/* handle partial sending */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+		/* don't bother the timeout handler with partial sends */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+}
+
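
nbd_pending_cmd_work() retries the send with an exponentially growing sleep until it completes or the request deadline approaches. The backoff skeleton, generalized (try_fn and the deadline handling are illustrative):

#include <linux/delay.h>
#include <linux/jiffies.h>

static void my_retry_until(unsigned long deadline, bool (*try_fn)(void *),
			   void *arg)
{
	unsigned int wait_ms = 2;

	while (!try_fn(arg)) {
		/* stop once the next sleep would cross the deadline */
		if (time_after(jiffies + msecs_to_jiffies(wait_ms), deadline))
			break;
		msleep(wait_ms);
		wait_ms *= 2;	/* back off: 2, 4, 8, ... ms */
	}
}
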
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
@@ -1156,6 +1217,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
@@ -1173,6 +1242,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
+ unsigned int memflags;
int err;
/* Arg will be cast to int, check it to avoid overflow */
@@ -1186,7 +1256,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
- blk_mq_freeze_queue(nbd->disk->queue);
+ memflags = blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1224,14 +1294,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
return 0;
put_socket:
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
sockfd_put(sock);
return err;
}
@@ -1410,7 +1481,17 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
- blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
+retry:
+ mutex_unlock(&nbd->config_lock);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections);
+ mutex_lock(&nbd->config_lock);
+
+	/* if another code path updated nr_hw_queues, retry until it succeeds */
+ if (num_connections != config->num_connections) {
+ num_connections = config->num_connections;
+ goto retry;
+ }
+
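
Because blk_mq_update_nr_hw_queues() must run without config_lock, the hunk re-checks the connection count after retaking the lock and loops until the value it applied is still current. The general drop-lock/revalidate shape, with placeholder names:

#include <linux/mutex.h>

struct my_obj {
	struct mutex lock;
	unsigned int count;
};

void my_unlocked_update(unsigned int count);	/* must not hold obj->lock */

static void my_apply_count(struct my_obj *obj)
{
	unsigned int count = obj->count;

retry:
	mutex_unlock(&obj->lock);
	my_unlocked_update(count);
	mutex_lock(&obj->lock);

	/* another path may have changed the count while we were unlocked */
	if (count != obj->count) {
		count = obj->count;
		goto retry;
	}
}
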
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@@ -1841,8 +1922,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -2136,9 +2216,7 @@ again:
goto out;
}
}
- ret = nbd_start_device(nbd);
- if (ret)
- goto out;
+
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
GFP_KERNEL);
@@ -2154,6 +2232,8 @@ again:
goto out;
}
set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
+
+ ret = nbd_start_device(nbd);
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {
@@ -2180,6 +2260,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 2f0431e42c49..f982027e8c85 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
@@ -266,6 +266,10 @@ static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -468,6 +472,9 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_once, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_partial_io, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -554,14 +561,14 @@ static ssize_t nullb_device_badblocks_store(struct config_item *item,
goto out;
/* enable badblocks */
cmpxchg(&t_dev->badblocks.shift, -1, 0);
- if (buf[0] == '+')
- ret = badblocks_set(&t_dev->badblocks, start,
- end - start + 1, 1);
- else
- ret = badblocks_clear(&t_dev->badblocks, start,
- end - start + 1);
- if (ret == 0)
+ if (buf[0] == '+') {
+ if (badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1))
+ ret = count;
+ } else if (badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1)) {
ret = count;
+ }
out:
kfree(orig);
return ret;
@@ -587,40 +594,43 @@ static ssize_t nullb_device_zone_offline_store(struct config_item *item,
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
- &nullb_device_attr_size,
+ &nullb_device_attr_badblocks,
+ &nullb_device_attr_badblocks_once,
+ &nullb_device_attr_badblocks_partial_io,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_cache_size,
&nullb_device_attr_completion_nsec,
- &nullb_device_attr_submit_queues,
- &nullb_device_attr_poll_queues,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_fua,
&nullb_device_attr_home_node,
- &nullb_device_attr_queue_mode,
- &nullb_device_attr_blocksize,
- &nullb_device_attr_max_sectors,
- &nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_blocking,
- &nullb_device_attr_use_per_node_hctx,
- &nullb_device_attr_power,
- &nullb_device_attr_memory_backed,
- &nullb_device_attr_discard,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_mbps,
- &nullb_device_attr_cache_size,
- &nullb_device_attr_badblocks,
- &nullb_device_attr_zoned,
- &nullb_device_attr_zone_size,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_poll_queues,
+ &nullb_device_attr_power,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_rotational,
+ &nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_shared_tags,
+ &nullb_device_attr_size,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_virt_boundary,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_capacity,
- &nullb_device_attr_zone_nr_conv,
- &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_zone_max_active,
- &nullb_device_attr_zone_append_max_sectors,
- &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_offline,
- &nullb_device_attr_zone_full,
- &nullb_device_attr_virt_boundary,
- &nullb_device_attr_no_sched,
- &nullb_device_attr_shared_tags,
- &nullb_device_attr_shared_tag_bitmap,
- &nullb_device_attr_fua,
+ &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_size,
+ &nullb_device_attr_zoned,
NULL,
};
@@ -698,15 +708,28 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,fua,"
- "completion_nsec,discard,home_node,hw_queue_depth,"
- "irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,"
- "shared_tags,size,submit_queues,use_per_node_hctx,"
- "virt_boundary,zoned,zone_capacity,zone_max_active,"
- "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full\n");
+ struct configfs_attribute **entry;
+ char delimiter = ',';
+ size_t left = PAGE_SIZE;
+ size_t written = 0;
+ int ret;
+
+ for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) {
+ if (!*(entry + 1))
+ delimiter = '\n';
+ ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name,
+ delimiter);
+ if (ret >= left) {
+ WARN_ONCE(1, "Too many null_blk features to print\n");
+ memzero_explicit(page, PAGE_SIZE);
+ return -ENOBUFS;
+ }
+ left -= ret;
+ written += ret;
+ }
+
+ return written;
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -793,6 +816,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -899,7 +923,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -922,7 +946,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -948,7 +972,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -991,7 +1015,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -1011,7 +1035,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1070,7 +1094,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1155,7 +1179,7 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
memcpy_page(dest, off + count, t_page->page, offset,
temp);
else
- zero_user(dest, off + count, temp);
+ memzero_page(dest, off + count, temp);
count += temp;
sector += temp >> SECTOR_SHIFT;
@@ -1241,25 +1265,37 @@ static int null_transfer(struct nullb *nullb, struct page *page,
return err;
}
-static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
+/*
+ * Transfer data for the given request. The transfer size is capped by the
+ * nr_sectors argument.
+ */
+static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
+ sector_t nr_sectors)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
int err = 0;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
+ unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
+ unsigned int transferred_bytes = 0;
struct req_iterator iter;
struct bio_vec bvec;
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
+ if (transferred_bytes + len > max_bytes)
+ len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
if (err)
break;
sector += len >> SECTOR_SHIFT;
+ transferred_bytes += len;
+ if (transferred_bytes >= max_bytes)
+ break;
}
spin_unlock_irq(&nullb->lock);
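A standalone sketch of the capping logic above: walk the request's segments, clamp the final one to the remaining byte budget, and stop once max_bytes has been transferred. The segment sizes and the 10240-byte cap are made-up values for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int segments[] = { 4096, 4096, 4096 };	/* bvec lengths */
	unsigned int max_bytes = 10240;			/* nr_sectors << 9 */
	unsigned int transferred = 0;

	for (unsigned int i = 0; i < sizeof(segments) / sizeof(segments[0]); i++) {
		unsigned int len = segments[i];

		if (transferred + len > max_bytes)
			len = max_bytes - transferred;	/* clamp the final segment */
		transferred += len;
		printf("segment %u: %u bytes (running total %u)\n",
		       i, len, transferred);
		if (transferred >= max_bytes)
			break;				/* budget exhausted */
	}
	return 0;
}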
@@ -1287,31 +1323,51 @@ static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
return sts;
}
-static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
- sector_t sector,
- sector_t nr_sectors)
+/*
+ * Check if the command should fail because of bad blocks. If so, return
+ * BLK_STS_IOERR and set *nr_sectors to the number of partial I/O sectors
+ * that can still be written or read, which may be less than the requested
+ * number of sectors.
+ *
+ * @cmd: The command to handle.
+ * @sector: The start sector for I/O.
+ * @nr_sectors: Specifies number of sectors to write or read, and returns the
+ * number of sectors to be written or read.
+ */
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors)
{
struct badblocks *bb = &cmd->nq->dev->badblocks;
- sector_t first_bad;
- int bad_sectors;
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int block_sectors = dev->blocksize >> SECTOR_SHIFT;
+ sector_t first_bad, bad_sectors;
+ unsigned int partial_io_sectors = 0;
- if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
- return BLK_STS_IOERR;
+ if (!badblocks_check(bb, sector, *nr_sectors, &first_bad, &bad_sectors))
+ return BLK_STS_OK;
- return BLK_STS_OK;
+ if (cmd->nq->dev->badblocks_once)
+ badblocks_clear(bb, first_bad, bad_sectors);
+
+ if (cmd->nq->dev->badblocks_partial_io) {
+ if (!IS_ALIGNED(first_bad, block_sectors))
+ first_bad = ALIGN_DOWN(first_bad, block_sectors);
+ if (sector < first_bad)
+ partial_io_sectors = first_bad - sector;
+ }
+ *nr_sectors = partial_io_sectors;
+
+ return BLK_STS_IOERR;
}
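The partial-I/O calculation rounds first_bad down to the device block boundary and then counts the good leading sectors. A self-contained sketch with a plain-C stand-in for the kernel's ALIGN_DOWN() (block_sectors must be a power of two) and made-up sector values:

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long sector = 8;		/* I/O start */
	unsigned long long first_bad = 21;	/* first bad sector found */
	unsigned int block_sectors = 8;		/* 4096-byte blocks, 512-byte sectors */
	unsigned long long partial = 0;

	first_bad = ALIGN_DOWN(first_bad, block_sectors);	/* -> 16 */
	if (sector < first_bad)
		partial = first_bad - sector;			/* -> 8 sectors */

	printf("partial I/O: %llu sectors before the bad block\n", partial);
	return 0;
}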
-static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_op op,
- sector_t sector,
- sector_t nr_sectors)
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return null_handle_rq(cmd);
+ return null_handle_data_transfer(cmd, nr_sectors);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1358,18 +1414,19 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
- if (dev->badblocks.shift != -1) {
- ret = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (dev->badblocks.shift != -1)
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+
+ if (dev->memory_backed && nr_sectors) {
+ ret = null_handle_memory_backed(cmd, op, sector, nr_sectors);
if (ret != BLK_STS_OK)
return ret;
}
- if (dev->memory_backed)
- return null_handle_memory_backed(cmd, op, sector, nr_sectors);
-
- return BLK_STS_OK;
+ return badblocks_ret;
}
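The reordering above means a bad-blocks hit no longer aborts the request outright: the (possibly truncated) transfer runs first and the bad-blocks status is returned last, so a partial transfer still completes with BLK_STS_IOERR. A control-flow sketch with hypothetical stand-in helpers, not the driver's real functions:

#include <stdio.h>

enum blk_status { BLK_STS_OK = 0, BLK_STS_IOERR = 10 };

/* hypothetical stand-in: shrinks the request and reports the bad block */
static enum blk_status handle_badblocks(unsigned int *nr_sectors)
{
	*nr_sectors = 8;	/* pretend only 8 good sectors precede a bad block */
	return BLK_STS_IOERR;
}

/* hypothetical stand-in for the (partial) data transfer */
static enum blk_status handle_transfer(unsigned int nr_sectors)
{
	printf("transferring %u sectors\n", nr_sectors);
	return BLK_STS_OK;
}

int main(void)
{
	unsigned int nr_sectors = 32;
	enum blk_status badblocks_ret = handle_badblocks(&nr_sectors);

	if (nr_sectors) {
		enum blk_status ret = handle_transfer(nr_sectors);

		if (ret != BLK_STS_OK)
			return ret;	/* a transfer error takes precedence */
	}
	return badblocks_ret;		/* partial success still reports IOERR */
}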
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
@@ -1418,8 +1475,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
@@ -1541,8 +1597,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1596,8 +1652,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
@@ -1638,10 +1694,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
{
- struct request *requeue_list = NULL;
- struct request **requeue_lastp = &requeue_list;
+ struct rq_list requeue_list = {};
struct blk_mq_queue_data bd = { };
blk_status_t ret;
@@ -1651,8 +1706,8 @@ static void null_queue_rqs(struct request **rqlist)
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
- rq_list_add_tail(&requeue_lastp, rq);
- } while (!rq_list_empty(*rqlist));
+ rq_list_add_tail(&requeue_list, rq);
+ } while (!rq_list_empty(rqlist));
*rqlist = requeue_list;
}
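struct rq_list pairs a head and a tail pointer, which keeps tail appends O(1) and drops the old double-pointer (requeue_lastp) bookkeeping. A generic sketch of the shape of that list, using an illustrative struct req rather than the kernel's struct request:

#include <stdio.h>
#include <stddef.h>

struct req {
	int id;
	struct req *next;
};

struct req_list {
	struct req *head;
	struct req *tail;
};

static void req_list_add_tail(struct req_list *rl, struct req *rq)
{
	rq->next = NULL;
	if (rl->tail)
		rl->tail->next = rq;
	else
		rl->head = rq;
	rl->tail = rq;
}

static int req_list_empty(const struct req_list *rl)
{
	return rl->head == NULL;
}

int main(void)
{
	struct req a = { .id = 1 }, b = { .id = 2 };
	struct req_list requeue = { NULL, NULL };	/* empty, like "= {}" above */

	req_list_add_tail(&requeue, &a);
	req_list_add_tail(&requeue, &b);
	for (struct req *rq = requeue.head; rq; rq = rq->next)
		printf("requeued request %d\n", rq->id);
	return req_list_empty(&requeue) ? 1 : 0;
}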
@@ -1784,9 +1839,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1810,9 +1864,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
@@ -1939,6 +1992,9 @@ static int null_add_dev(struct nullb_device *dev)
lim.features |= BLK_FEAT_FUA;
}
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
@@ -1975,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
- strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ strscpy(nullb->disk->disk_name, nullb->disk_name);
if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index a7bb32f73ec3..7bb6128dbaaf 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -63,6 +63,8 @@ struct nullb_device {
unsigned long flags; /* device flags */
unsigned int curr_cache;
struct badblocks badblocks;
+ bool badblocks_once;
+ bool badblocks_partial_io;
unsigned int nr_zones;
unsigned int nr_zones_imp_open;
@@ -107,6 +109,7 @@ struct nullb_device {
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
@@ -130,6 +133,10 @@ blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors);
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 9bc768b2ca56..4e5728f45989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -166,7 +166,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+ lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
@@ -353,6 +353,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct nullb_zone *zone = &dev->zones[zno];
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
@@ -412,9 +413,20 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
- if (ret != BLK_STS_OK)
- goto unlock_zone;
+ if (dev->badblocks.shift != -1) {
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+ if (badblocks_ret != BLK_STS_OK && !nr_sectors) {
+ ret = badblocks_ret;
+ goto unlock_zone;
+ }
+ }
+
+ if (dev->memory_backed) {
+ ret = null_handle_memory_backed(cmd, REQ_OP_WRITE, sector,
+ nr_sectors);
+ if (ret != BLK_STS_OK)
+ goto unlock_zone;
+ }
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
@@ -429,7 +441,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_FULL;
}
- ret = BLK_STS_OK;
+ ret = badblocks_ret;
unlock_zone:
null_unlock_zone(dev, zone);
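With partial bad-block I/O, nr_sectors may have been shrunk before the write, so the write pointer advances only by what was actually written, and the zone becomes FULL only when the pointer reaches start + capacity. A toy calculation with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long long zone_start = 0, zone_capacity = 128;
	unsigned long long wp = 120;		/* current write pointer */
	unsigned long long nr_sectors = 8;	/* sectors actually written */

	wp += nr_sectors;
	if (wp == zone_start + zone_capacity)
		printf("zone FULL at wp=%llu\n", wp);
	else
		printf("zone still open, wp=%llu\n", wp);
	return 0;
}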
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
deleted file mode 100644
index 65b96c083b3c..000000000000
--- a/drivers/block/pktcdvd.c
+++ /dev/null
@@ -1,2916 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
- * DVD-RAM devices.
- *
- * Theory of operation:
- *
- * At the lowest level, there is the standard driver for the CD/DVD device,
- * such as drivers/scsi/sr.c. This driver can handle read and write requests,
- * but it doesn't know anything about the special restrictions that apply to
- * packet writing. One restriction is that write requests must be aligned to
- * packet boundaries on the physical media, and the size of a write request
- * must be equal to the packet size. Another restriction is that a
- * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
- * command, if the previous command was a write.
- *
- * The purpose of the packet writing driver is to hide these restrictions from
- * higher layers, such as file systems, and present a block device that can be
- * randomly read and written using 2kB-sized blocks.
- *
- * The lowest layer in the packet writing driver is the packet I/O scheduler.
- * Its data is defined by the struct packet_iosched and includes two bio
- * queues with pending read and write requests. These queues are processed
- * by the pkt_iosched_process_queue() function. The write requests in this
- * queue are already properly aligned and sized. This layer is responsible for
- * issuing the flush cache commands and scheduling the I/O in a good order.
- *
- * The next layer transforms unaligned write requests to aligned writes. This
- * transformation requires reading missing pieces of data from the underlying
- * block device, assembling the pieces to full packets and queuing them to the
- * packet I/O scheduler.
- *
- * At the top layer there is a custom ->submit_bio function that forwards
- * read requests directly to the iosched queue and puts write requests in the
- * unaligned write queue. A kernel thread performs the necessary read
- * gathering to convert the unaligned writes to aligned writes and then feeds
- * them to the packet I/O scheduler.
- *
- *************************************************************************/
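A toy illustration of the read-gathering step described above (the driver is being removed here, so this sketch merely restates the idea): record which frames of a fixed-size packet the queued writes already cover, then read the uncovered frames from the media before the packet can be written whole. The frame counts and bio extents are invented:

#include <stdio.h>
#include <string.h>

#define FRAMES 32	/* frames per packet */

int main(void)
{
	char written[FRAMES];
	/* pretend two queued writes cover frames [0,8) and [12,20) */
	struct { int first, num; } bios[] = { { 0, 8 }, { 12, 8 } };

	memset(written, 0, sizeof(written));
	for (size_t i = 0; i < sizeof(bios) / sizeof(bios[0]); i++)
		for (int f = bios[i].first; f < bios[i].first + bios[i].num; f++)
			written[f] = 1;

	for (int f = 0; f < FRAMES; f++)
		if (!written[f])
			printf("frame %d must be read before writing\n", f);
	return 0;
}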
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/backing-dev.h>
-#include <linux/compat.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nospec.h>
-#include <linux/pktcdvd.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "pktcdvd"
-
-#define MAX_SPEED 0xffff
-
-static DEFINE_MUTEX(pktcdvd_mutex);
-static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
-static struct proc_dir_entry *pkt_proc;
-static int pktdev_major;
-static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
-static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
-static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
-static mempool_t psd_pool;
-static struct bio_set pkt_bio_set;
-
-/* /sys/class/pktcdvd */
-static struct class class_pktcdvd;
-static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
-
-/* forward declaration */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
-static int pkt_remove_dev(dev_t pkt_dev);
-
-static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
-{
- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
-}
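get_zone() relies on the packet size being a power of two: masking off the low bits rounds any sector down to its zone start. A one-line demonstration with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 1234, offset = 0;
	unsigned long long size = 128;	/* packet size in sectors, power of two */
	unsigned long long zone = (sector + offset) & ~(size - 1);

	printf("sector %llu belongs to zone starting at %llu\n", sector, zone);	/* 1152 */
	return 0;
}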
-
-/**********************************************************
- * sysfs interface for pktcdvd
- * by (C) 2006 Thomas Maier <balagi@justmail.de>
-
- /sys/class/pktcdvd/pktcdvd[0-7]/
- stat/reset
- stat/packets_started
- stat/packets_finished
- stat/kb_written
- stat/kb_read
- stat/kb_read_gather
- write_queue/size
- write_queue/congestion_off
- write_queue/congestion_on
- **********************************************************/
-
-static ssize_t packets_started_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
-}
-static DEVICE_ATTR_RO(packets_started);
-
-static ssize_t packets_finished_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
-}
-static DEVICE_ATTR_RO(packets_finished);
-
-static ssize_t kb_written_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
-}
-static DEVICE_ATTR_RO(kb_written);
-
-static ssize_t kb_read_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
-}
-static DEVICE_ATTR_RO(kb_read);
-
-static ssize_t kb_read_gather_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
-}
-static DEVICE_ATTR_RO(kb_read_gather);
-
-static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- if (len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
- }
- return len;
-}
-static DEVICE_ATTR_WO(reset);
-
-static struct attribute *pkt_stat_attrs[] = {
- &dev_attr_packets_finished.attr,
- &dev_attr_packets_started.attr,
- &dev_attr_kb_read.attr,
- &dev_attr_kb_written.attr,
- &dev_attr_kb_read_gather.attr,
- &dev_attr_reset.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_stat_group = {
- .name = "stat",
- .attrs = pkt_stat_attrs,
-};
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
- spin_unlock(&pd->lock);
- return n;
-}
-static DEVICE_ATTR_RO(size);
-
-static void init_write_congestion_marks(int* lo, int* hi)
-{
- if (*hi > 0) {
- *hi = max(*hi, 500);
- *hi = min(*hi, 1000000);
- if (*lo <= 0)
- *lo = *hi - 100;
- else {
- *lo = min(*lo, *hi - 100);
- *lo = max(*lo, 100);
- }
- } else {
- *hi = -1;
- *lo = -1;
- }
-}
-
-static ssize_t congestion_off_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_off_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_off = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_off);
-
-static ssize_t congestion_on_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_on = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_on);
-
-static struct attribute *pkt_wq_attrs[] = {
- &dev_attr_congestion_on.attr,
- &dev_attr_congestion_off.attr,
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_wq_group = {
- .name = "write_queue",
- .attrs = pkt_wq_attrs,
-};
-
-static const struct attribute_group *pkt_groups[] = {
- &pkt_stat_group,
- &pkt_wq_group,
- NULL,
-};
-
-static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd)) {
- pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
- MKDEV(0, 0), pd, pkt_groups,
- "%s", pd->disk->disk_name);
- if (IS_ERR(pd->dev))
- pd->dev = NULL;
- }
-}
-
-static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd))
- device_unregister(pd->dev);
-}
-
-
-/********************************************************************
- /sys/class/pktcdvd/
- add map block device
- remove unmap packet dev
- device_map show mappings
- *******************************************************************/
-
-static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
- char *data)
-{
- int n = 0;
- int idx;
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- struct pktcdvd_device *pd = pkt_devs[idx];
- if (!pd)
- continue;
- n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
- pd->disk->disk_name,
- MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(file_bdev(pd->bdev_file)->bd_dev),
- MINOR(file_bdev(pd->bdev_file)->bd_dev));
- }
- mutex_unlock(&ctl_mutex);
- return n;
-}
-static CLASS_ATTR_RO(device_map);
-
-static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
-
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- /* pkt_setup_dev() expects caller to hold reference to self */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- pkt_setup_dev(MKDEV(major, minor), NULL);
-
- module_put(THIS_MODULE);
-
- return count;
- }
-
- return -EINVAL;
-}
-static CLASS_ATTR_WO(add);
-
-static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- pkt_remove_dev(MKDEV(major, minor));
- return count;
- }
- return -EINVAL;
-}
-static CLASS_ATTR_WO(remove);
-
-static struct attribute *class_pktcdvd_attrs[] = {
- &class_attr_add.attr,
- &class_attr_remove.attr,
- &class_attr_device_map.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(class_pktcdvd);
-
-static struct class class_pktcdvd = {
- .name = DRIVER_NAME,
- .class_groups = class_pktcdvd_groups,
-};
-
-static int pkt_sysfs_init(void)
-{
- /*
- * create control files in sysfs
- * /sys/class/pktcdvd/...
- */
- return class_register(&class_pktcdvd);
-}
-
-static void pkt_sysfs_cleanup(void)
-{
- class_unregister(&class_pktcdvd);
-}
-
-/********************************************************************
- entries in debugfs
-
- /sys/kernel/debug/pktcdvd[0-7]/
- info
-
- *******************************************************************/
-
-static void pkt_count_states(struct pktcdvd_device *pd, int *states)
-{
- struct packet_data *pkt;
- int i;
-
- for (i = 0; i < PACKET_NUM_STATES; i++)
- states[i] = 0;
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- states[pkt->state]++;
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-static int pkt_seq_show(struct seq_file *m, void *p)
-{
- struct pktcdvd_device *pd = m->private;
- char *msg;
- int states[PACKET_NUM_STATES];
-
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- file_bdev(pd->bdev_file));
-
- seq_printf(m, "\nSettings:\n");
- seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
-
- if (pd->settings.write_type == 0)
- msg = "Packet";
- else
- msg = "Unknown";
- seq_printf(m, "\twrite type:\t\t%s\n", msg);
-
- seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
- seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
-
- seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
-
- if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
- msg = "Mode 1";
- else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
- msg = "Mode 2";
- else
- msg = "Unknown";
- seq_printf(m, "\tblock mode:\t\t%s\n", msg);
-
- seq_printf(m, "\nStatistics:\n");
- seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
- seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
- seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
- seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
- seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
-
- seq_printf(m, "\nMisc:\n");
- seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
- seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
- seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
- seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
- seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
- seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
-
- seq_printf(m, "\nQueue state:\n");
- seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
- seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
- seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
-
- pkt_count_states(pd, states);
- seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
- pd->write_congestion_off,
- pd->write_congestion_on);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(pkt_seq);
-
-static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
-
- pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
- pd, &pkt_seq_fops);
-}
-
-static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- debugfs_remove(pd->dfs_f_info);
- debugfs_remove(pd->dfs_d_root);
- pd->dfs_f_info = NULL;
- pd->dfs_d_root = NULL;
-}
-
-static void pkt_debugfs_init(void)
-{
- pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-}
-
-static void pkt_debugfs_cleanup(void)
-{
- debugfs_remove(pkt_debugfs_root);
- pkt_debugfs_root = NULL;
-}
-
-/* ----------------------------------------------------------*/
-
-
-static void pkt_bio_finished(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
- if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- dev_dbg(ddev, "queue empty\n");
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
- }
-}
-
-/*
- * Allocate a packet_data struct
- */
-static struct packet_data *pkt_alloc_packet_data(int frames)
-{
- int i;
- struct packet_data *pkt;
-
- pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
- if (!pkt)
- goto no_pkt;
-
- pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
- if (!pkt->w_bio)
- goto no_bio;
-
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
- pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!pkt->pages[i])
- goto no_page;
- }
-
- spin_lock_init(&pkt->lock);
- bio_list_init(&pkt->orig_bios);
-
- for (i = 0; i < frames; i++) {
- pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
- if (!pkt->r_bios[i])
- goto no_rd_bio;
- }
-
- return pkt;
-
-no_rd_bio:
- for (i = 0; i < frames; i++)
- kfree(pkt->r_bios[i]);
-no_page:
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
- if (pkt->pages[i])
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
-no_bio:
- kfree(pkt);
-no_pkt:
- return NULL;
-}
-
-/*
- * Free a packet_data struct
- */
-static void pkt_free_packet_data(struct packet_data *pkt)
-{
- int i;
-
- for (i = 0; i < pkt->frames; i++)
- kfree(pkt->r_bios[i]);
- for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
- kfree(pkt);
-}
-
-static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
-{
- struct packet_data *pkt, *next;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
-
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
- pkt_free_packet_data(pkt);
- }
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-}
-
-static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
-{
- struct packet_data *pkt;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
-
- while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
- if (!pkt) {
- pkt_shrink_pktlist(pd);
- return 0;
- }
- pkt->id = nr_packets;
- pkt->pd = pd;
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- nr_packets--;
- }
- return 1;
-}
-
-static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
-{
- struct rb_node *n = rb_next(&node->rb_node);
- if (!n)
- return NULL;
- return rb_entry(n, struct pkt_rb_node, rb_node);
-}
-
-static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- rb_erase(&node->rb_node, &pd->bio_queue);
- mempool_free(node, &pd->rb_pool);
- pd->bio_queue_size--;
- BUG_ON(pd->bio_queue_size < 0);
-}
-
-/*
- * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
- */
-static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
-{
- struct rb_node *n = pd->bio_queue.rb_node;
- struct rb_node *next;
- struct pkt_rb_node *tmp;
-
- if (!n) {
- BUG_ON(pd->bio_queue_size > 0);
- return NULL;
- }
-
- for (;;) {
- tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_iter.bi_sector)
- next = n->rb_left;
- else
- next = n->rb_right;
- if (!next)
- break;
- n = next;
- }
-
- if (s > tmp->bio->bi_iter.bi_sector) {
- tmp = pkt_rbtree_next(tmp);
- if (!tmp)
- return NULL;
- }
- BUG_ON(s > tmp->bio->bi_iter.bi_sector);
- return tmp;
-}
-
-/*
- * Insert a node into the pd->bio_queue rb tree.
- */
-static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- struct rb_node **p = &pd->bio_queue.rb_node;
- struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_iter.bi_sector;
- struct pkt_rb_node *tmp;
-
- while (*p) {
- parent = *p;
- tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_iter.bi_sector)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, &pd->bio_queue);
- pd->bio_queue_size++;
-}
-
-/*
- * Send a packet_command to the underlying block device and
- * wait for completion.
- */
-static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
- struct scsi_cmnd *scmd;
- struct request *rq;
- int ret = 0;
-
- rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- scmd = blk_mq_rq_to_pdu(rq);
-
- if (cgc->buflen) {
- ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
- GFP_NOIO);
- if (ret)
- goto out;
- }
-
- scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
-
- rq->timeout = 60*HZ;
- if (cgc->quiet)
- rq->rq_flags |= RQF_QUIET;
-
- blk_execute_rq(rq, false);
- if (scmd->result)
- ret = -EIO;
-out:
- blk_mq_free_request(rq);
- return ret;
-}
-
-static const char *sense_key_string(__u8 index)
-{
- static const char * const info[] = {
- "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check",
- };
-
- return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
-}
-
-/*
- * A generic sense dump / resolve mechanism should be implemented across
- * all ATAPI + SCSI devices.
- */
-static void pkt_dump_sense(struct pktcdvd_device *pd,
- struct packet_command *cgc)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct scsi_sense_hdr *sshdr = cgc->sshdr;
-
- if (sshdr)
- dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
- CDROM_PACKET_SIZE, cgc->cmd,
- sshdr->sense_key, sshdr->asc, sshdr->ascq,
- sense_key_string(sshdr->sense_key));
- else
- dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
-}
-
-/*
- * flush the drive cache to media
- */
-static int pkt_flush_cache(struct pktcdvd_device *pd)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_FLUSH_CACHE;
- cgc.quiet = 1;
-
- /*
- * the IMMED bit -- we default to not setting it, although that
- * would allow a much faster close, this is safer
- */
-#if 0
- cgc.cmd[1] = 1 << 1;
-#endif
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * speed is given as the normal factor, e.g. 4 for 4x
- */
-static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
- unsigned write_speed, unsigned read_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_SET_SPEED;
- put_unaligned_be16(read_speed, &cgc.cmd[2]);
- put_unaligned_be16(write_speed, &cgc.cmd[4]);
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
-
- return ret;
-}
-
-/*
- * Queue a bio for processing by the low-level CD device. Must be called
- * from process context.
- */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
-{
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- bio->bi_opf |= REQ_NOMERGE;
-
- spin_lock(&pd->iosched.lock);
- if (bio_data_dir(bio) == READ)
- bio_list_add(&pd->iosched.read_queue, bio);
- else
- bio_list_add(&pd->iosched.write_queue, bio);
- spin_unlock(&pd->iosched.lock);
-
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Process the queued read/write requests. This function handles special
- * requirements for CDRW drives:
- * - A cache flush command must be inserted before a read request if the
- * previous request was a write.
- * - Switching between reading and writing is slow, so don't do it more often
- * than necessary.
- * - Optimize for throughput at the expense of latency. This means that streaming
- * writes will never be interrupted by a read, but if the drive has to seek
- * before the next write, switch to reading instead if there are any pending
- * read requests.
- * - Set the read speed according to current usage pattern. When only reading
- * from the device, it's best to use the highest possible read speed, but
- * when switching often between reading and writing, it's better to have the
- * same read and write speeds.
- */
-static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (atomic_read(&pd->iosched.attention) == 0)
- return;
- atomic_set(&pd->iosched.attention, 0);
-
- for (;;) {
- struct bio *bio;
- int reads_queued, writes_queued;
-
- spin_lock(&pd->iosched.lock);
- reads_queued = !bio_list_empty(&pd->iosched.read_queue);
- writes_queued = !bio_list_empty(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!reads_queued && !writes_queued)
- break;
-
- if (pd->iosched.writing) {
- int need_write_seek = 1;
- spin_lock(&pd->iosched.lock);
- bio = bio_list_peek(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_iter.bi_sector ==
- pd->iosched.last_write))
- need_write_seek = 0;
- if (need_write_seek && reads_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "write, waiting\n");
- break;
- }
- pkt_flush_cache(pd);
- pd->iosched.writing = 0;
- }
- } else {
- if (!reads_queued && writes_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "read, waiting\n");
- break;
- }
- pd->iosched.writing = 1;
- }
- }
-
- spin_lock(&pd->iosched.lock);
- if (pd->iosched.writing)
- bio = bio_list_pop(&pd->iosched.write_queue);
- else
- bio = bio_list_pop(&pd->iosched.read_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!bio)
- continue;
-
- if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads +=
- bio->bi_iter.bi_size >> 10;
- else {
- pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio_end_sector(bio);
- }
- if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
- if (pd->read_speed == pd->write_speed) {
- pd->read_speed = MAX_SPEED;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- } else {
- if (pd->read_speed != pd->write_speed) {
- pd->read_speed = pd->write_speed;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- }
-
- atomic_inc(&pd->cdrw.pending_bios);
- submit_bio_noacct(bio);
- }
-}
-
-/*
- * Special care is needed if the underlying block device has a small
- * max_phys_segments value.
- */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
- /*
- * The cdrom device can handle one segment/frame
- */
- clear_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
- /*
- * We can handle this case at the expense of some extra memory
- * copies during write operations
- */
- set_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- dev_err(ddev, "cdrom max_phys_segments too small\n");
- return -EIO;
-}
-
-static void pkt_end_io_read(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
- bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
-
- if (bio->bi_status)
- atomic_inc(&pkt->io_errors);
- bio_uninit(bio);
- if (atomic_dec_and_test(&pkt->io_wait)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- pkt_bio_finished(pd);
-}
-
-static void pkt_end_io_packet_write(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
-
- pd->stats.pkt_ended++;
-
- bio_uninit(bio);
- pkt_bio_finished(pd);
- atomic_dec(&pkt->io_wait);
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Schedule reads for the holes in a packet
- */
-static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int frames_read = 0;
- struct bio *bio;
- int f;
- char written[PACKET_MAX_SIZE];
-
- BUG_ON(bio_list_empty(&pkt->orig_bios));
-
- atomic_set(&pkt->io_wait, 0);
- atomic_set(&pkt->io_errors, 0);
-
- /*
- * Figure out which frames we need to read before we can write.
- */
- memset(written, 0, sizeof(written));
- spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
- (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
- pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++)
- written[f] = 1;
- }
- spin_unlock(&pkt->lock);
-
- if (pkt->cache_valid) {
- dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
- goto out_account;
- }
-
- /*
- * Schedule reads for missing parts of the packet.
- */
- for (f = 0; f < pkt->frames; f++) {
- int p, offset;
-
- if (written[f])
- continue;
-
- bio = pkt->r_bios[f];
- bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
- REQ_OP_READ);
- bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_end_io = pkt_end_io_read;
- bio->bi_private = pkt;
-
- p = (f * CD_FRAMESIZE) / PAGE_SIZE;
- offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
- pkt->pages[p], offset);
- if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
- BUG();
-
- atomic_inc(&pkt->io_wait);
- pkt_queue_bio(pd, bio);
- frames_read++;
- }
-
-out_account:
- dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
- pd->stats.pkt_started++;
- pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
-}
-
-/*
- * Find a packet matching zone, or the least recently used packet if
- * there is no match.
- */
-static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
-{
- struct packet_data *pkt;
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
- if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
- list_del_init(&pkt->list);
- if (pkt->sector != zone)
- pkt->cache_valid = 0;
- return pkt;
- }
- }
- BUG();
- return NULL;
-}
-
-static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- if (pkt->cache_valid) {
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- } else {
- list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
- }
-}
-
-static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
- enum packet_data_state state)
-{
- static const char *state_name[] = {
- "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
- };
- enum packet_data_state old_state = pkt->state;
-
- dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
- pkt->id, pkt->sector, state_name[old_state], state_name[state]);
-
- pkt->state = state;
-}
-
-/*
- * Scan the work queue to see if we can start a new packet.
- * returns non-zero if any work was done.
- */
-static int pkt_handle_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *p;
- struct bio *bio = NULL;
- sector_t zone = 0; /* Suppress gcc warning */
- struct pkt_rb_node *node, *first_node;
- struct rb_node *n;
-
- atomic_set(&pd->scan_queue, 0);
-
- if (list_empty(&pd->cdrw.pkt_free_list)) {
- dev_dbg(ddev, "no pkt\n");
- return 0;
- }
-
- /*
- * Try to find a zone we are not already working on.
- */
- spin_lock(&pd->lock);
- first_node = pkt_rbtree_find(pd, pd->current_sector);
- if (!first_node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- first_node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- node = first_node;
- while (node) {
- bio = node->bio;
- zone = get_zone(bio->bi_iter.bi_sector, pd);
- list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
- if (p->sector == zone) {
- bio = NULL;
- goto try_next_bio;
- }
- }
- break;
-try_next_bio:
- node = pkt_rbtree_next(node);
- if (!node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- if (node == first_node)
- node = NULL;
- }
- spin_unlock(&pd->lock);
- if (!bio) {
- dev_dbg(ddev, "no bio\n");
- return 0;
- }
-
- pkt = pkt_get_packet_data(pd, zone);
-
- pd->current_sector = zone + pd->settings.size;
- pkt->sector = zone;
- BUG_ON(pkt->frames != pd->settings.size >> 2);
- pkt->write_size = 0;
-
- /*
- * Scan work queue for bios in the same zone and link them
- * to this packet.
- */
- spin_lock(&pd->lock);
- dev_dbg(ddev, "looking for zone %llx\n", zone);
- while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
- sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
-
- bio = node->bio;
- dev_dbg(ddev, "found zone=%llx\n", tmp);
- if (tmp != zone)
- break;
- pkt_rbtree_erase(pd, node);
- spin_lock(&pkt->lock);
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
- spin_unlock(&pkt->lock);
- }
- /* check write congestion marks, and if bio_queue_size is
- * below, wake up any waiters
- */
- if (pd->congested &&
- pd->bio_queue_size <= pd->write_congestion_off) {
- pd->congested = false;
- wake_up_var(&pd->congested);
- }
- spin_unlock(&pd->lock);
-
- pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
- pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
- atomic_set(&pkt->run_sm, 1);
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_add(&pkt->list, &pd->cdrw.pkt_active_list);
- spin_unlock(&pd->cdrw.active_list_lock);
-
- return 1;
-}
-
-/**
- * bio_list_copy_data - copy contents of data buffers from one chain of bios to
- * another
- * @src: source bio list
- * @dst: destination bio list
- *
- * Stops when it reaches the end of either the @src list or @dst list - that is,
- * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
- * bios).
- */
-static void bio_list_copy_data(struct bio *dst, struct bio *src)
-{
- struct bvec_iter src_iter = src->bi_iter;
- struct bvec_iter dst_iter = dst->bi_iter;
-
- while (1) {
- if (!src_iter.bi_size) {
- src = src->bi_next;
- if (!src)
- break;
-
- src_iter = src->bi_iter;
- }
-
- if (!dst_iter.bi_size) {
- dst = dst->bi_next;
- if (!dst)
- break;
-
- dst_iter = dst->bi_iter;
- }
-
- bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
- }
-}
-
-/*
- * Assemble a bio to write one packet and queue the bio for processing
- * by the underlying block device.
- */
-static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int f;
-
- bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
- pkt->frames, REQ_OP_WRITE);
- pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
-
- /* XXX: locking? */
- for (f = 0; f < pkt->frames; f++) {
- struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-
- if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
- BUG();
- }
- dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
- /*
- * Fill-in bvec with data from orig_bios.
- */
- spin_lock(&pkt->lock);
- bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
-
- pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
- spin_unlock(&pkt->lock);
-
- dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
-
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
- pkt->cache_valid = 1;
- else
- pkt->cache_valid = 0;
-
- /* Start the write request */
- atomic_set(&pkt->io_wait, 1);
- pkt_queue_bio(pd, pkt->w_bio);
-}
-
-static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
-{
- struct bio *bio;
-
- if (status)
- pkt->cache_valid = 0;
-
- /* Finish all bios corresponding to this packet */
- while ((bio = bio_list_pop(&pkt->orig_bios))) {
- bio->bi_status = status;
- bio_endio(bio);
- }
-}
-
-static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- dev_dbg(ddev, "pkt %d\n", pkt->id);
-
- for (;;) {
- switch (pkt->state) {
- case PACKET_WAITING_STATE:
- if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
- return;
-
- pkt->sleep_time = 0;
- pkt_gather_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
- break;
-
- case PACKET_READ_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (atomic_read(&pkt->io_errors) > 0) {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- } else {
- pkt_start_write(pd, pkt);
- }
- break;
-
- case PACKET_WRITE_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (!pkt->w_bio->bi_status) {
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- } else {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- }
- break;
-
- case PACKET_RECOVERY_STATE:
- dev_dbg(ddev, "No recovery possible\n");
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- break;
-
- case PACKET_FINISHED_STATE:
- pkt_finish_packet(pkt, pkt->w_bio->bi_status);
- return;
-
- default:
- BUG();
- break;
- }
- }
-}
-
-static void pkt_handle_packets(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *next;
-
- /*
- * Run state machine for active packets
- */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0) {
- atomic_set(&pkt->run_sm, 0);
- pkt_run_state_machine(pd, pkt);
- }
- }
-
- /*
- * Move no longer active packets to the free list
- */
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
- if (pkt->state == PACKET_FINISHED_STATE) {
- list_del(&pkt->list);
- pkt_put_packet_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
- atomic_set(&pd->scan_queue, 1);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-/*
- * kcdrwd is woken up when writes have been queued for one of our
- * registered devices
- */
-static int kcdrwd(void *foobar)
-{
- struct pktcdvd_device *pd = foobar;
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt;
- int states[PACKET_NUM_STATES];
- long min_sleep_time, residue;
-
- set_user_nice(current, MIN_NICE);
- set_freezable();
-
- for (;;) {
- DECLARE_WAITQUEUE(wait, current);
-
- /*
- * Wait until there is something to do
- */
- add_wait_queue(&pd->wqueue, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* Check if we need to run pkt_handle_queue */
- if (atomic_read(&pd->scan_queue) > 0)
- goto work_to_do;
-
- /* Check if we need to run the state machine for some packet */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0)
- goto work_to_do;
- }
-
- /* Check if we need to process the iosched queues */
- if (atomic_read(&pd->iosched.attention) != 0)
- goto work_to_do;
-
- /* Otherwise, go to sleep */
- pkt_count_states(pd, states);
- dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- min_sleep_time = MAX_SCHEDULE_TIMEOUT;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
- min_sleep_time = pkt->sleep_time;
- }
-
- dev_dbg(ddev, "sleeping\n");
- residue = schedule_timeout(min_sleep_time);
- dev_dbg(ddev, "wake up\n");
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (!pkt->sleep_time)
- continue;
- pkt->sleep_time -= min_sleep_time - residue;
- if (pkt->sleep_time <= 0) {
- pkt->sleep_time = 0;
- atomic_inc(&pkt->run_sm);
- }
- }
-
- if (kthread_should_stop())
- break;
- }
-work_to_do:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&pd->wqueue, &wait);
-
- if (kthread_should_stop())
- break;
-
- /*
- * if pkt_handle_queue returns true, we can queue
- * another request.
- */
- while (pkt_handle_queue(pd))
- ;
-
- /*
- * Handle packet state machine
- */
- pkt_handle_packets(pd);
-
- /*
- * Handle iosched queues
- */
- pkt_iosched_process_queue(pd);
- }
-
- return 0;
-}
-
-static void pkt_print_settings(struct pktcdvd_device *pd)
-{
- dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
- pd->settings.fp ? "Fixed" : "Variable",
- pd->settings.size >> 2,
- pd->settings.block_mode == 8 ? '1' : '2');
-}
-
-static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
-
- cgc->cmd[0] = GPCMD_MODE_SENSE_10;
- cgc->cmd[2] = page_code | (page_control << 6);
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_READ;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
- memset(cgc->buffer, 0, 2);
- cgc->cmd[0] = GPCMD_MODE_SELECT_10;
- cgc->cmd[1] = 0x10; /* PF */
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_WRITE;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
-{
- struct packet_command cgc;
- int ret;
-
- /* set up command and get the disc info */
- init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DISC_INFO;
- cgc.cmd[8] = cgc.buflen = 2;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- /* not all drives have the same disc_info length, so requeue
- * packet with the length the drive tells us it can supply
- */
- cgc.buflen = be16_to_cpu(di->disc_information_length) +
- sizeof(di->disc_information_length);
-
- if (cgc.buflen > sizeof(disc_information))
- cgc.buflen = sizeof(disc_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
-{
- struct packet_command cgc;
- int ret;
-
- init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
- cgc.cmd[1] = type & 3;
- put_unaligned_be16(track, &cgc.cmd[4]);
- cgc.cmd[8] = 8;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- cgc.buflen = be16_to_cpu(ti->track_information_length) +
- sizeof(ti->track_information_length);
-
- if (cgc.buflen > sizeof(track_information))
- cgc.buflen = sizeof(track_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
- long *last_written)
-{
- disc_information di;
- track_information ti;
- __u32 last_track;
- int ret;
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret)
- return ret;
-
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- last_track--;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
- }
-
- /* if last recorded field is valid, return it. */
- if (ti.lra_v) {
- *last_written = be32_to_cpu(ti.last_rec_address);
- } else {
- /* make it up instead */
- *last_written = be32_to_cpu(ti.track_start) +
- be32_to_cpu(ti.track_size);
- if (ti.free_blocks)
- *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
- }
- return 0;
-}
-
-/*
- * write mode select package based on pd->settings
- */
-static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- write_param_page *wp;
- char buffer[128];
- int ret, size;
-
- /* doesn't apply to DVD+RW or DVD-RAM */
- if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
- return 0;
-
- memset(buffer, 0, sizeof(buffer));
- init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- size = 2 + get_unaligned_be16(&buffer[0]);
- pd->mode_offset = get_unaligned_be16(&buffer[6]);
- if (size > sizeof(buffer))
- size = sizeof(buffer);
-
- /*
- * now get it all
- */
- init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- /*
- * write page is offset header + block descriptor length
- */
- wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
-
- wp->fp = pd->settings.fp;
- wp->track_mode = pd->settings.track_mode;
- wp->write_type = pd->settings.write_type;
- wp->data_block_type = pd->settings.block_mode;
-
- wp->multi_session = 0;
-
-#ifdef PACKET_USE_LS
- wp->link_size = 7;
- wp->ls_v = 1;
-#endif
-
- if (wp->data_block_type == PACKET_BLOCK_MODE1) {
- wp->session_format = 0;
- wp->subhdr2 = 0x20;
- } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
- wp->session_format = 0x20;
- wp->subhdr2 = 8;
-#if 0
- wp->mcn[0] = 0x80;
- memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
-#endif
- } else {
- /*
- * paranoia
- */
- dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
- return 1;
- }
- wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
-
- cgc.buflen = cgc.cmd[8] = size;
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- pkt_print_settings(pd);
- return 0;
-}
-
-/*
- * 1 -- we can write to this track, 0 -- we can't
- */
-static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- /* The track is always writable on DVD+RW/DVD-RAM */
- return 1;
- default:
- break;
- }
-
- if (!ti->packet || !ti->fp)
- return 0;
-
- /*
- * "good" settings as per Mt Fuji.
- */
- if (ti->rt == 0 && ti->blank == 0)
- return 1;
-
- if (ti->rt == 0 && ti->blank == 1)
- return 1;
-
- if (ti->rt == 1 && ti->blank == 0)
- return 1;
-
- dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 0;
-}
-
-/*
- * 1 -- we can write to this disc, 0 -- we can't
- */
-static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x0a: /* CD-RW */
- case 0xffff: /* MMC3 not supported */
- break;
- case 0x1a: /* DVD+RW */
- case 0x13: /* DVD-RW */
- case 0x12: /* DVD-RAM */
- return 1;
- default:
- dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
- return 0;
- }
-
- /*
- * for disc type 0xff we should probably reserve a new track,
- * but I'm not sure: should we leave this to user apps? probably.
- */
- if (di->disc_type == 0xff) {
- dev_notice(ddev, "unknown disc - no track?\n");
- return 0;
- }
-
- if (di->disc_type != 0x20 && di->disc_type != 0) {
- dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
- return 0;
- }
-
- if (di->erasable == 0) {
- dev_err(ddev, "disc not erasable\n");
- return 0;
- }
-
- if (di->border_status == PACKET_SESSION_RESERVED) {
- dev_err(ddev, "can't write to last track (reserved)\n");
- return 0;
- }
-
- return 1;
-}
-
-static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- unsigned char buf[12];
- disc_information di;
- track_information ti;
- int ret, track;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
- cgc.cmd[8] = 8;
- ret = pkt_generic_packet(pd, &cgc);
- pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
-
- memset(&di, 0, sizeof(disc_information));
- memset(&ti, 0, sizeof(track_information));
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret) {
- dev_err(ddev, "failed get_disc\n");
- return ret;
- }
-
- if (!pkt_writable_disc(pd, &di))
- return -EROFS;
-
- pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
-
- track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- ret = pkt_get_track_info(pd, track, 1, &ti);
- if (ret) {
- dev_err(ddev, "failed get_track\n");
- return ret;
- }
-
- if (!pkt_writable_track(pd, &ti)) {
- dev_err(ddev, "can't write to this track\n");
- return -EROFS;
- }
-
- /*
- * we keep the packet size in 512-byte units, which makes it
- * easier to deal with request calculations.
- */
- pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
- if (pd->settings.size == 0) {
- dev_notice(ddev, "detected zero packet size!\n");
- return -ENXIO;
- }
- if (pd->settings.size > PACKET_MAX_SECTORS) {
- dev_err(ddev, "packet size is too big\n");
- return -EROFS;
- }
- pd->settings.fp = ti.fp;
- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
-
- if (ti.nwa_v) {
- pd->nwa = be32_to_cpu(ti.next_writable);
- set_bit(PACKET_NWA_VALID, &pd->flags);
- }
-
- /*
- * in theory we could use lra on -RW media as well and just zero
- * blocks that haven't been written yet, but in practice that
- * is just a no-go. we'll use that for -R, naturally.
- */
- if (ti.lra_v) {
- pd->lra = be32_to_cpu(ti.last_rec_address);
- set_bit(PACKET_LRA_VALID, &pd->flags);
- } else {
- pd->lra = 0xffffffff;
- set_bit(PACKET_LRA_VALID, &pd->flags);
- }
-
- /*
- * fine for now
- */
- pd->settings.link_loss = 7;
- pd->settings.write_type = 0; /* packet */
- pd->settings.track_mode = ti.track_mode;
-
- /*
- * mode1 or mode2 disc
- */
- switch (ti.data_mode) {
- case PACKET_MODE1:
- pd->settings.block_mode = PACKET_BLOCK_MODE1;
- break;
- case PACKET_MODE2:
- pd->settings.block_mode = PACKET_BLOCK_MODE2;
- break;
- default:
- dev_err(ddev, "unknown data mode\n");
- return -EROFS;
- }
- return 0;
-}
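
The mask arithmetic above (pd->offset = ... & (pd->settings.size - 1)) only works because the packet size, kept in 512-byte sectors, is a power of two. Under that assumption the zone lookup that get_zone() performs elsewhere in this file reduces to a single mask; a sketch with invented names:

#include <stdint.h>

typedef uint64_t sector_t;

/* sketch, names invented: with pkt_size a power of two (in 512-byte
 * sectors) and "offset" the track-start misalignment computed above,
 * the packet zone containing a sector is a simple mask */
static sector_t zone_of(sector_t sector, sector_t pkt_size, sector_t offset)
{
    return (sector + offset) & ~(pkt_size - 1);
}
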
-
-/*
- * enable/disable write caching on drive
- */
-static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
- int ret;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.buflen = pd->mode_offset + 12;
-
- /*
- * caching mode page might not be there, so quiet this command
- */
- cgc.quiet = 1;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
- if (ret)
- return ret;
-
- /*
- * use drive write caching -- we need deferred error handling to be
- * able to successfully recover with this option (drive will return good
- * status as soon as the cdb is validated).
- */
- buf[pd->mode_offset + 10] |= (set << 2);
-
- cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- dev_err(ddev, "write caching control failed\n");
- pkt_dump_sense(pd, &cgc);
- } else if (!ret && set)
- dev_notice(ddev, "enabled write caching\n");
- return ret;
-}
-
-static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cgc.cmd[4] = lockflag ? 1 : 0;
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * Returns drive maximum write speed
- */
-static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
- unsigned *write_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[256+18];
- unsigned char *cap_buf;
- int ret, offset;
-
- cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sshdr = &sshdr;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
- sizeof(struct mode_page_header);
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- }
-
- offset = 20; /* Obsolete field, used by older drives */
- if (cap_buf[1] >= 28)
- offset = 28; /* Current write speed selected */
- if (cap_buf[1] >= 30) {
- /* If the drive reports at least one "Logical Unit Write
- * Speed Performance Descriptor Block", use the information
- * in the first block (it contains the highest speed).
- */
- int num_spdb = get_unaligned_be16(&cap_buf[30]);
- if (num_spdb > 0)
- offset = 34;
- }
-
- *write_speed = get_unaligned_be16(&cap_buf[offset]);
- return 0;
-}
-
-/* These tables are from cdrecord - I don't have the Orange Book */
-/* standard speed CD-RW (1-4x) */
-static char clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* high speed CD-RW (-10x) */
-static char hs_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* ultra high speed CD-RW */
-static char us_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
-};
-
-/*
- * reads the maximum media speed from ATIP
- */
-static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
- unsigned *speed)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- unsigned int size, st, sp;
- int ret;
-
- init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4; /* READ ATIP */
- cgc.cmd[8] = 2;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- size = 2 + get_unaligned_be16(&buf[0]);
- if (size > sizeof(buf))
- size = sizeof(buf);
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4;
- cgc.cmd[8] = size;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- if (!(buf[6] & 0x40)) {
- dev_notice(ddev, "disc type is not CD-RW\n");
- return 1;
- }
- if (!(buf[6] & 0x4)) {
- dev_notice(ddev, "A1 values on media are not valid, maybe not CD-RW?\n");
- return 1;
- }
-
- st = (buf[6] >> 3) & 0x7; /* disc sub-type */
-
- sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
-
- /* Info from cdrecord */
- switch (st) {
- case 0: /* standard speed */
- *speed = clv_to_speed[sp];
- break;
- case 1: /* high speed */
- *speed = hs_clv_to_speed[sp];
- break;
- case 2: /* ultra high speed */
- *speed = us_clv_to_speed[sp];
- break;
- default:
- dev_notice(ddev, "unknown disc sub-type %d\n", st);
- return 1;
- }
- if (*speed) {
- dev_info(ddev, "maximum media speed: %d\n", *speed);
- return 0;
- } else {
- dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
- return 1;
- }
-}
-
-static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- dev_dbg(ddev, "Performing OPC\n");
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.timeout = 60*HZ;
- cgc.cmd[0] = GPCMD_SEND_OPC;
- cgc.cmd[1] = 1;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
- return ret;
-}
-
-static int pkt_open_write(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- unsigned int write_speed, media_write_speed, read_speed;
-
- ret = pkt_probe_settings(pd);
- if (ret) {
- dev_dbg(ddev, "failed probe\n");
- return ret;
- }
-
- ret = pkt_set_write_settings(pd);
- if (ret) {
- dev_notice(ddev, "failed saving write settings\n");
- return -EIO;
- }
-
- pkt_write_caching(pd);
-
- ret = pkt_get_max_speed(pd, &write_speed);
- if (ret)
- write_speed = 16 * 177;
- switch (pd->mmc3_profile) {
- case 0x13: /* DVD-RW */
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- dev_notice(ddev, "write speed %ukB/s\n", write_speed);
- break;
- default:
- ret = pkt_media_speed(pd, &media_write_speed);
- if (ret)
- media_write_speed = 16;
- write_speed = min(write_speed, media_write_speed * 177);
- dev_notice(ddev, "write speed %ux\n", write_speed / 176);
- break;
- }
- read_speed = write_speed;
-
- ret = pkt_set_speed(pd, write_speed, read_speed);
- if (ret) {
- dev_notice(ddev, "couldn't set write speed\n");
- return -EIO;
- }
- pd->write_speed = write_speed;
- pd->read_speed = read_speed;
-
- ret = pkt_perform_opc(pd);
- if (ret)
- dev_notice(ddev, "Optimum Power Calibration failed\n");
-
- return 0;
-}
-
-/*
- * called at open time.
- */
-static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- long lba;
- struct request_queue *q;
- struct file *bdev_file;
-
- /*
- * We need to re-open the cdrom device without O_NONBLOCK to be able
- * to read/write from/to it. It is already opened in O_NONBLOCK mode,
- * so the open should not fail.
- */
- bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
- BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_file)) {
- ret = PTR_ERR(bdev_file);
- goto out;
- }
- pd->f_open_bdev = bdev_file;
-
- ret = pkt_get_last_written(pd, &lba);
- if (ret) {
- dev_err(ddev, "pkt_get_last_written failed\n");
- goto out_putdev;
- }
-
- set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
-
- q = bdev_get_queue(file_bdev(pd->bdev_file));
- if (write) {
- ret = pkt_open_write(pd);
- if (ret)
- goto out_putdev;
- set_bit(PACKET_WRITABLE, &pd->flags);
- } else {
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- clear_bit(PACKET_WRITABLE, &pd->flags);
- }
-
- ret = pkt_set_segment_merging(pd, q);
- if (ret)
- goto out_putdev;
-
- if (write) {
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- dev_err(ddev, "not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_putdev;
- }
- dev_info(ddev, "%lukB available on disc\n", lba << 1);
- }
- set_blocksize(bdev_file, CD_FRAMESIZE);
-
- return 0;
-
-out_putdev:
- fput(bdev_file);
-out:
- return ret;
-}
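
The re-open trick in pkt_open_dev() has a direct userspace analogue: an O_NONBLOCK open of a CD device succeeds even when no usable medium is present, so a probing open can be followed by a blocking one. A rough illustration, not the driver's code path:

#include <fcntl.h>
#include <unistd.h>

/* userspace analogue, illustrative only: probe with O_NONBLOCK first,
 * then re-open blocking for real I/O */
static int open_cdrom(const char *path)
{
    int probe = open(path, O_RDONLY | O_NONBLOCK);

    if (probe < 0)
        return -1;
    close(probe);
    return open(path, O_RDONLY); /* may block until media is ready */
}
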
-
-/*
- * called when the device is closed. makes sure that the device flushes
- * the internal cache before we close.
- */
-static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (flush && pkt_flush_cache(pd))
- dev_notice(ddev, "not flushing cache\n");
-
- pkt_lock_door(pd, 0);
-
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- fput(pd->f_open_bdev);
- pd->f_open_bdev = NULL;
-
- pkt_shrink_pktlist(pd);
-}
-
-static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
-{
- if (dev_minor >= MAX_WRITERS)
- return NULL;
-
- dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
- return pkt_devs[dev_minor];
-}
-
-static int pkt_open(struct gendisk *disk, blk_mode_t mode)
-{
- struct pktcdvd_device *pd = NULL;
- int ret;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(disk->first_minor);
- if (!pd) {
- ret = -ENODEV;
- goto out;
- }
- BUG_ON(pd->refcnt < 0);
-
- pd->refcnt++;
- if (pd->refcnt > 1) {
- if ((mode & BLK_OPEN_WRITE) &&
- !test_bit(PACKET_WRITABLE, &pd->flags)) {
- ret = -EBUSY;
- goto out_dec;
- }
- } else {
- ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
- if (ret)
- goto out_dec;
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return 0;
-
-out_dec:
- pd->refcnt--;
-out:
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return ret;
-}
-
-static void pkt_release(struct gendisk *disk)
-{
- struct pktcdvd_device *pd = disk->private_data;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd->refcnt--;
- BUG_ON(pd->refcnt < 0);
- if (pd->refcnt == 0) {
- int flush = test_bit(PACKET_WRITABLE, &pd->flags);
- pkt_release_dev(pd, flush);
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
-}
-
-
-static void pkt_end_io_read_cloned(struct bio *bio)
-{
- struct packet_stacked_data *psd = bio->bi_private;
- struct pktcdvd_device *pd = psd->pd;
-
- psd->bio->bi_status = bio->bi_status;
- bio_put(bio);
- bio_endio(psd->bio);
- mempool_free(psd, &psd_pool);
- pkt_bio_finished(pd);
-}
-
-static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
-{
- struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
- GFP_NOIO, &pkt_bio_set);
- struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
-}
-
-static void pkt_make_request_write(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- sector_t zone;
- struct packet_data *pkt;
- int was_empty, blocked_bio;
- struct pkt_rb_node *node;
-
- zone = get_zone(bio->bi_iter.bi_sector, pd);
-
- /*
- * If we find a matching packet in state WAITING or READ_WAIT, we can
- * just append this bio to that packet.
- */
- spin_lock(&pd->cdrw.active_list_lock);
- blocked_bio = 0;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sector == zone) {
- spin_lock(&pkt->lock);
- if ((pkt->state == PACKET_WAITING_STATE) ||
- (pkt->state == PACKET_READ_WAIT_STATE)) {
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size +=
- bio->bi_iter.bi_size / CD_FRAMESIZE;
- if ((pkt->write_size >= pkt->frames) &&
- (pkt->state == PACKET_WAITING_STATE)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- spin_unlock(&pkt->lock);
- spin_unlock(&pd->cdrw.active_list_lock);
- return;
- } else {
- blocked_bio = 1;
- }
- spin_unlock(&pkt->lock);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-
- /*
- * Test if there is enough room left in the bio work queue
- * (queue size >= congestion-on mark).
- * If not, wait till the work queue size is below the congestion-off mark.
- */
- spin_lock(&pd->lock);
- if (pd->write_congestion_on > 0
- && pd->bio_queue_size >= pd->write_congestion_on) {
- struct wait_bit_queue_entry wqe;
-
- init_wait_var_entry(&wqe, &pd->congested, 0);
- for (;;) {
- prepare_to_wait_event(__var_waitqueue(&pd->congested),
- &wqe.wq_entry,
- TASK_UNINTERRUPTIBLE);
- if (pd->bio_queue_size <= pd->write_congestion_off)
- break;
- pd->congested = true;
- spin_unlock(&pd->lock);
- schedule();
- spin_lock(&pd->lock);
- }
- }
- spin_unlock(&pd->lock);
-
- /*
- * No matching packet found. Store the bio in the work queue.
- */
- node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
- node->bio = bio;
- spin_lock(&pd->lock);
- BUG_ON(pd->bio_queue_size < 0);
- was_empty = (pd->bio_queue_size == 0);
- pkt_rbtree_insert(pd, node);
- spin_unlock(&pd->lock);
-
- /*
- * Wake up the worker thread.
- */
- atomic_set(&pd->scan_queue, 1);
- if (was_empty) {
- /* This wake_up is required for correct operation */
- wake_up(&pd->wqueue);
- } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
- /*
- * This wake up is not required for correct operation,
- * but improves performance in some cases.
- */
- wake_up(&pd->wqueue);
- }
-}
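
The congestion loop above is a throttle with hysteresis: writers stall once the queue crosses the on mark and resume only after it drains below the lower off mark, which avoids thrashing right at a single threshold. The same shape in plain pthreads, as a hedged sketch with invented names:

#include <pthread.h>

struct throttle {
    pthread_mutex_t lock;
    pthread_cond_t  drained;
    int             queued, on_mark, off_mark;
};

/* block new submitters while the queue is above the "on" watermark */
static void throttle_enter(struct throttle *t)
{
    pthread_mutex_lock(&t->lock);
    if (t->on_mark > 0 && t->queued >= t->on_mark)
        while (t->queued > t->off_mark)
            pthread_cond_wait(&t->drained, &t->lock);
    t->queued++;
    pthread_mutex_unlock(&t->lock);
}

/* the draining side wakes waiters once below the "off" watermark */
static void throttle_exit(struct throttle *t)
{
    pthread_mutex_lock(&t->lock);
    if (--t->queued <= t->off_mark)
        pthread_cond_broadcast(&t->drained);
    pthread_mutex_unlock(&t->lock);
}
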
-
-static void pkt_submit_bio(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- struct bio *split;
-
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
- dev_dbg(ddev, "start = %6llx stop = %6llx\n",
- bio->bi_iter.bi_sector, bio_end_sector(bio));
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- pkt_make_request_read(pd, bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
- dev_err(ddev, "wrong bio size\n");
- goto end_io;
- }
-
- do {
- sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
- sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
-
- split = bio_split(bio, last_zone -
- bio->bi_iter.bi_sector,
- GFP_NOIO, &pkt_bio_set);
- bio_chain(split, bio);
- } else {
- split = bio;
- }
-
- pkt_make_request_write(split);
- } while (split != bio);
-
- return;
-end_io:
- bio_io_error(bio);
-}
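
The do/while in pkt_submit_bio() splits a write at every packet boundary so no piece ever spans two zones. The equivalent range-chopping arithmetic, sketched standalone and illustrative only:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* chop [start, start + len) into pieces that never cross a
 * pkt_size-aligned boundary (pkt_size in 512-byte sectors,
 * power of two), mirroring the split loop above */
static void split_by_zone(sector_t start, sector_t len, sector_t pkt_size)
{
    while (len) {
        sector_t zone_end = (start & ~(pkt_size - 1)) + pkt_size;
        sector_t chunk = zone_end - start;

        if (chunk > len)
            chunk = len;
        printf("piece: %llu +%llu\n",
               (unsigned long long)start, (unsigned long long)chunk);
        start += chunk;
        len -= chunk;
    }
}
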
-
-static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int i;
- struct file *bdev_file;
- struct scsi_device *sdev;
-
- if (pd->pkt_dev == dev) {
- dev_err(ddev, "recursive setup not allowed\n");
- return -EBUSY;
- }
- for (i = 0; i < MAX_WRITERS; i++) {
- struct pktcdvd_device *pd2 = pkt_devs[i];
- if (!pd2)
- continue;
- if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n",
- file_bdev(pd2->bdev_file));
- return -EBUSY;
- }
- if (pd2->pkt_dev == dev) {
- dev_err(ddev, "can't chain pktcdvd devices\n");
- return -EBUSY;
- }
- }
-
- bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
- NULL, NULL);
- if (IS_ERR(bdev_file))
- return PTR_ERR(bdev_file);
- sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
- if (!sdev) {
- fput(bdev_file);
- return -EINVAL;
- }
- put_device(&sdev->sdev_gendev);
-
- /* This is safe, since we have a reference from open(). */
- __module_get(THIS_MODULE);
-
- pd->bdev_file = bdev_file;
-
- atomic_set(&pd->cdrw.pending_bios, 0);
- pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
- if (IS_ERR(pd->cdrw.thread)) {
- dev_err(ddev, "can't start kernel thread\n");
- goto out_mem;
- }
-
- proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
- return 0;
-
-out_mem:
- fput(bdev_file);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return -ENOMEM;
-}
-
-static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pktcdvd_device *pd = bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
-
- dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-
- mutex_lock(&pktcdvd_mutex);
- switch (cmd) {
- case CDROMEJECT:
- /*
- * The door gets locked when the device is opened, so we
- * have to unlock it or else the eject command fails.
- */
- if (pd->refcnt == 1)
- pkt_lock_door(pd, 0);
- fallthrough;
- /*
- * forward selected CDROM ioctls to the CD-ROM driver, for UDF
- */
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case CDROM_LAST_WRITTEN:
- case CDROM_SEND_PACKET:
- case SCSI_IOCTL_SEND_COMMAND:
- if (!bdev->bd_disk->fops->ioctl)
- ret = -ENOTTY;
- else
- ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
- break;
- default:
- dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
- ret = -ENOTTY;
- }
- mutex_unlock(&pktcdvd_mutex);
-
- return ret;
-}
-
-static unsigned int pkt_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pktcdvd_device *pd = disk->private_data;
- struct gendisk *attached_disk;
-
- if (!pd)
- return 0;
- if (!pd->bdev_file)
- return 0;
- attached_disk = file_bdev(pd->bdev_file)->bd_disk;
- if (!attached_disk || !attached_disk->fops->check_events)
- return 0;
- return attached_disk->fops->check_events(attached_disk, clearing);
-}
-
-static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
-}
-
-static const struct block_device_operations pktcdvd_ops = {
- .owner = THIS_MODULE,
- .submit_bio = pkt_submit_bio,
- .open = pkt_open,
- .release = pkt_release,
- .ioctl = pkt_ioctl,
- .compat_ioctl = blkdev_compat_ptr_ioctl,
- .check_events = pkt_check_events,
- .devnode = pkt_devnode,
-};
-
-/*
- * Set up mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
-{
- struct queue_limits lim = {
- .max_hw_sectors = PACKET_MAX_SECTORS,
- .logical_block_size = CD_FRAMESIZE,
- .features = BLK_FEAT_ROTATIONAL,
- };
- int idx;
- int ret = -ENOMEM;
- struct pktcdvd_device *pd;
- struct gendisk *disk;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++)
- if (!pkt_devs[idx])
- break;
- if (idx == MAX_WRITERS) {
- pr_err("max %d writers supported\n", MAX_WRITERS);
- ret = -EBUSY;
- goto out_mutex;
- }
-
- pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
- if (!pd)
- goto out_mutex;
-
- ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
- sizeof(struct pkt_rb_node));
- if (ret)
- goto out_mem;
-
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
-
- spin_lock_init(&pd->lock);
- spin_lock_init(&pd->iosched.lock);
- bio_list_init(&pd->iosched.read_queue);
- bio_list_init(&pd->iosched.write_queue);
- init_waitqueue_head(&pd->wqueue);
- pd->bio_queue = RB_ROOT;
-
- pd->write_congestion_on = write_congestion_on;
- pd->write_congestion_off = write_congestion_off;
-
- disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_mem;
- }
- pd->disk = disk;
- disk->major = pktdev_major;
- disk->first_minor = idx;
- disk->minors = 1;
- disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
- snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
- disk->private_data = pd;
-
- pd->pkt_dev = MKDEV(pktdev_major, idx);
- ret = pkt_new_dev(pd, dev);
- if (ret)
- goto out_mem2;
-
- /* inherit events of the host device */
- disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
-
- ret = add_disk(disk);
- if (ret)
- goto out_mem2;
-
- pkt_sysfs_dev_new(pd);
- pkt_debugfs_dev_new(pd);
-
- pkt_devs[idx] = pd;
- if (pkt_dev)
- *pkt_dev = pd->pkt_dev;
-
- mutex_unlock(&ctl_mutex);
- return 0;
-
-out_mem2:
- put_disk(disk);
-out_mem:
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-out_mutex:
- mutex_unlock(&ctl_mutex);
- pr_err("setup of pktcdvd device failed\n");
- return ret;
-}
-
-/*
- * Tear down mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_remove_dev(dev_t pkt_dev)
-{
- struct pktcdvd_device *pd;
- struct device *ddev;
- int idx;
- int ret = 0;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- pd = pkt_devs[idx];
- if (pd && (pd->pkt_dev == pkt_dev))
- break;
- }
- if (idx == MAX_WRITERS) {
- pr_debug("dev not setup\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (pd->refcnt > 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ddev = disk_to_dev(pd->disk);
-
- if (!IS_ERR(pd->cdrw.thread))
- kthread_stop(pd->cdrw.thread);
-
- pkt_devs[idx] = NULL;
-
- pkt_debugfs_dev_remove(pd);
- pkt_sysfs_dev_remove(pd);
-
- fput(pd->bdev_file);
-
- remove_proc_entry(pd->disk->disk_name, pkt_proc);
- dev_notice(ddev, "writer unmapped\n");
-
- del_gendisk(pd->disk);
- put_disk(pd->disk);
-
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
-
-out:
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
-{
- struct pktcdvd_device *pd;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
- if (pd) {
- ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
- ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
- } else {
- ctrl_cmd->dev = 0;
- ctrl_cmd->pkt_dev = 0;
- }
- ctrl_cmd->num_devices = MAX_WRITERS;
-
- mutex_unlock(&ctl_mutex);
-}
-
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct pkt_ctrl_command ctrl_cmd;
- int ret = 0;
- dev_t pkt_dev = 0;
-
- if (cmd != PACKET_CTRL_CMD)
- return -ENOTTY;
-
- if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
-
- switch (ctrl_cmd.command) {
- case PKT_CTRL_CMD_SETUP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
- ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
- break;
- case PKT_CTRL_CMD_TEARDOWN:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
- break;
- case PKT_CTRL_CMD_STATUS:
- pkt_get_status(&ctrl_cmd);
- break;
- default:
- return -ENOTTY;
- }
-
- if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations pkt_ctl_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = pkt_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pkt_ctl_compat_ioctl,
-#endif
- .owner = THIS_MODULE,
-};
-
-static struct miscdevice pkt_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = DRIVER_NAME,
- .nodename = "pktcdvd/control",
- .fops = &pkt_ctl_fops
-};
-
-static int __init pkt_init(void)
-{
- int ret;
-
- mutex_init(&ctl_mutex);
-
- ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
- sizeof(struct packet_stacked_data));
- if (ret)
- return ret;
- ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
- if (ret) {
- mempool_exit(&psd_pool);
- return ret;
- }
-
- ret = register_blkdev(pktdev_major, DRIVER_NAME);
- if (ret < 0) {
- pr_err("unable to register block device\n");
- goto out2;
- }
- if (!pktdev_major)
- pktdev_major = ret;
-
- ret = pkt_sysfs_init();
- if (ret)
- goto out;
-
- pkt_debugfs_init();
-
- ret = misc_register(&pkt_misc);
- if (ret) {
- pr_err("unable to register misc device\n");
- goto out_misc;
- }
-
- pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
-
- return 0;
-
-out_misc:
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-out:
- unregister_blkdev(pktdev_major, DRIVER_NAME);
-out2:
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
- return ret;
-}
-
-static void __exit pkt_exit(void)
-{
- remove_proc_entry("driver/"DRIVER_NAME, NULL);
- misc_deregister(&pkt_misc);
-
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-
- unregister_blkdev(pktdev_major, DRIVER_NAME);
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
-}
-
-MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
-MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
-MODULE_LICENSE("GPL");
-
-module_init(pkt_init);
-module_exit(pkt_exit);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ff45ed766469..dc9e4a14b885 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c8b19a22c2a..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
@@ -7282,8 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
- blk_mq_freeze_queue(rbd_dev->disk->queue);
+ unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
blk_mark_disk_dead(rbd_dev->disk);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
}
del_gendisk(rbd_dev->disk);
@@ -7388,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c34695d2eea7..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+ sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
if (sg_cnt == 0)
sg_mark_end(&iu->sgt.sgl[0]);
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
@@ -1810,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08ce6d96d04c..2df8941a6b14 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
- if (bio_add_page(bio, virt_to_page(data), datalen,
- offset_in_page(data)) != datalen) {
- rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n");
- err = -EINVAL;
- goto bio_put;
- }
+ bio_add_virt_nofail(bio, data, datalen);
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
if (bio_has_data(bio) &&
@@ -167,7 +162,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
deleted file mode 100644
index b0227cf9ddd3..000000000000
--- a/drivers/block/rnull.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! This is a Rust implementation of the C null block driver.
-//!
-//! Supported features:
-//!
-//! - blk-mq interface
-//! - direct completion
-//! - block size 4k
-//!
-//! The driver is not configurable.
-
-use kernel::{
- alloc::flags,
- block::mq::{
- self,
- gen_disk::{self, GenDisk},
- Operations, TagSet,
- },
- error::Result,
- new_mutex, pr_info,
- prelude::*,
- sync::{Arc, Mutex},
- types::ARef,
-};
-
-module! {
- type: NullBlkModule,
- name: "rnull_mod",
- author: "Andreas Hindborg",
- license: "GPL v2",
-}
-
-struct NullBlkModule {
- _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>,
-}
-
-impl kernel::Module for NullBlkModule {
- fn init(_module: &'static ThisModule) -> Result<Self> {
- pr_info!("Rust null_blk loaded\n");
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
-
- let disk = gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)?;
-
- let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
-
- Ok(Self { _disk: disk })
- }
-}
-
-struct NullBlkDevice;
-
-#[vtable]
-impl Operations for NullBlkDevice {
- #[inline(always)]
- fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
- mq::Request::end_ok(rq)
- .map_err(|_e| kernel::error::code::EIO)
- // We take no refcounts on the request, so we expect to be able to
- // end the request. The request reference must be unique at this
- // point, and so `end_ok` cannot fail.
- .expect("Fatal error - expected to be able to end request");
-
- Ok(())
- }
-
- fn commit_rqs() {}
-}
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+ the C version, the driver allows the user to create virtual block
+ devices that can be configured via various configuration options.
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..8498e9bae6fd
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use core::fmt::{Display, Write};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs, new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl Display for IRQMode {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
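
Every store handler above refuses changes with EBUSY while the device is powered, and the power attribute is the single place where the disk is built from the staged settings or torn down. The same gate expressed in C, as an illustrative sketch with invented types and names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct dev_cfg {
    pthread_mutex_t lock;
    bool            powered;
    unsigned        block_size;
};

/* settings may only change while the device is off */
static int cfg_set_block_size(struct dev_cfg *c, unsigned bs)
{
    int ret = 0;

    pthread_mutex_lock(&c->lock);
    if (c->powered)
        ret = -EBUSY; /* live device: settings are frozen */
    else
        c->block_size = bs;
    pthread_mutex_unlock(&c->lock);
    return ret;
}
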
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..1ec694d7f1a6
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::Arc,
+ types::ARef,
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 2d38331ee667..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -485,7 +484,7 @@ static int __send_request(struct request *req)
}
sg_init_table(sg, port->ring_cookies);
- nsg = blk_rq_map_sg(req->q, req, sg);
+ nsg = blk_rq_map_sg(req, sg);
len = 0;
for (i = 0; i < nsg; i++)
@@ -829,7 +828,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
@@ -918,12 +917,12 @@ struct vdc_check_port_data {
char *type;
};
-static int vdc_device_probed(struct device *dev, void *arg)
+static int vdc_device_probed(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vdc_check_port_data *port_data;
+ const struct vdc_check_port_data *port_data;
- port_data = (struct vdc_check_port_data *)arg;
+ port_data = (const struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
@@ -957,8 +956,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
dev = device_find_child(vdev->dev.parent, &port_data,
vdc_device_probed);
- if (dev)
+ if (dev) {
+ put_device(dev);
return true;
+ }
return false;
}
@@ -1070,7 +1071,7 @@ static void vdc_port_remove(struct vio_dev *vdev)
flush_work(&port->ldc_reset_work);
cancel_delayed_work_sync(&port->ldc_reset_timer_work);
- del_timer_sync(&port->vio.timer);
+ timer_delete_sync(&port->vio.timer);
del_gendisk(port->disk);
put_disk(port->disk);
@@ -1113,6 +1114,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
+ unsigned int memflags;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,13 +1123,13 @@ static void vdc_queue_drain(struct vdc_port *port)
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
@@ -1186,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1214,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 126f151c4f2c..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
@@ -944,7 +944,7 @@ static void swim_remove(struct platform_device *dev)
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove_new = swim_remove,
+ .remove = swim_remove,
.driver = {
.name = CARDNAME,
},
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 90be1017f7bf..01f7aef3fcfb 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -362,7 +362,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
add_timer(&fs->timeout);
@@ -555,7 +555,7 @@ static void act(struct floppy_state *fs)
static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -579,7 +579,7 @@ static void scan_timeout(struct timer_list *t)
static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -598,7 +598,7 @@ static void seek_timeout(struct timer_list *t)
static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -627,7 +627,7 @@ static void settle_timeout(struct timer_list *t)
static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
@@ -677,7 +677,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
swim3_err("%s", "Seen sector but cyl=ff?\n");
@@ -706,7 +706,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
if (fs->state == seeking)
++fs->retries;
@@ -716,7 +716,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
break;
case settling:
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
act(fs);
break;
@@ -726,7 +726,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
+ unsigned int memflags;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static int fd_eject(struct floppy_state *fs)
@@ -1208,8 +1209,7 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6ba2c1dd1d87..0c74a41a6753 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -48,8 +48,15 @@
#define UBLK_MINORS (1U << MINORBITS)
+#define UBLK_INVALID_BUF_IDX ((u16)-1)
+
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
+#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
+
+#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
+#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
@@ -60,21 +67,44 @@
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
- | UBLK_F_ZONED)
+ | UBLK_F_ZONED \
+ | UBLK_F_USER_RECOVERY_FAIL_IO \
+ | UBLK_F_UPDATE_SIZE \
+ | UBLK_F_AUTO_BUF_REG \
+ | UBLK_F_QUIESCE \
+ | UBLK_F_PER_IO_DAEMON \
+ | UBLK_F_BUF_REG_OFF_DAEMON)
+
+#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
- UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
-
-struct ublk_rq_data {
- struct llist_node node;
-
- struct kref ref;
-};
+ UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
+ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_uring_cmd_pdu {
+ /*
+ * Temporarily store requests from the same batch for queueing them
+ * to the daemon context.
+ *
+ * This would naturally be stored in the request payload, but we want
+ * to avoid extra pre-allocation, and the uring_cmd payload is always
+ * free for us.
+ */
+ union {
+ struct request *req;
+ struct request *req_list;
+ };
+
+ /*
+ * The following two are valid for this cmd's whole lifetime, and are
+ * set up in the ublk uring_cmd handler.
+ */
struct ublk_queue *ubq;
+
u16 tag;
};
@@ -99,15 +129,6 @@ struct ublk_uring_cmd_pdu {
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
-/*
* UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
* get data buffer address from ublksrv.
*
@@ -116,34 +137,70 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+/*
+ * The request buffer is registered automatically, so we have to
+ * unregister it before completing this request.
+ *
+ * io_uring will unregister the buffer for us automatically on exit.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
+/*
+ * Initialize refcount to a large number to include any registered buffers.
+ * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for
+ * any buffers registered on the io daemon task.
+ */
+#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+
struct ublk_io {
/* userspace buffer address from io cmd */
- __u64 addr;
+ union {
+ __u64 addr;
+ struct ublk_auto_buf_reg buf;
+ };
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
-};
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
+
+ struct task_struct *task;
+
+ /*
+ * The number of uses of this I/O by the ublk server
+ * if user copy or zero copy are enabled:
+ * - UBLK_REFCOUNT_INIT from dispatch to the server
+ * until UBLK_IO_COMMIT_AND_FETCH_REQ
+ * - 1 for each inflight ublk_ch_{read,write}_iter() call
+ * - 1 for each io_uring registered buffer not registered on task
+ * The I/O can only be completed once all references are dropped.
+ * User copy and buffer registration operations are only permitted
+ * if the reference count is nonzero.
+ */
+ refcount_t ref;
+ /* Count of buffers registered on task and not yet unregistered */
+ unsigned task_registered_buffers;
+
+ void *buf_ctx_handle;
+} ____cacheline_aligned_in_smp;
struct ublk_queue {
int q_id;
int q_depth;
unsigned long flags;
- struct task_struct *ubq_daemon;
- char *io_cmd_buf;
-
- struct llist_head io_cmds;
+ struct ublksrv_io_desc *io_cmd_buf;
- unsigned long io_addr; /* mapped vm address */
- unsigned int max_io_sz;
bool force_abort;
- bool timeout;
bool canceling;
- unsigned short nr_io_ready; /* how many ios setup */
+ bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
spinlock_t cancel_lock;
struct ublk_device *dev;
struct ublk_io ios[];
@@ -176,11 +233,12 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
-
- struct work_struct quiesce_work;
- struct work_struct stop_work;
+ u32 nr_io_ready;
+ bool unprivileged_daemons;
+ struct mutex cancel_mutex;
+ bool canceling;
+ pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
};
/* header of ublk_params */
@@ -189,14 +247,17 @@ struct ublk_params_header {
__u32 types;
};
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
-
+static void ublk_io_release(void *priv);
+static void ublk_stop_dev_unlocked(struct ublk_device *ub);
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
+static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag);
-static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
+
+static inline struct ublksrv_io_desc *
+ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
- return ub->dev_info.flags & UBLK_F_USER_COPY;
+ return &ubq->io_cmd_buf[tag];
}
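
Typing io_cmd_buf as struct ublksrv_io_desc * (see the ublk_queue change above) replaces manual byte arithmetic with plain array indexing; both forms compute the same address, taken from the old and new code in this diff:

	/* before: io_cmd_buf was a char *, indexed by hand */
	iod = (struct ublksrv_io_desc *)
		&ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)];

	/* after: typed pointer, same address with type safety */
	iod = &ubq->io_cmd_buf[tag];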
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
@@ -350,8 +411,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
if (ret)
goto free_req;
- ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
- GFP_KERNEL);
+ ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
if (ret)
goto erase_desc;
@@ -470,8 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
-static void ublk_complete_rq(struct kref *ref);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -484,15 +544,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+
+#define UBLK_MAX_UBLKS UBLK_MINORS
+
/*
- * Max ublk devices allowed to add
+ * Max unprivileged ublk devices allowed to add
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
-#define UBLK_MAX_UBLKS UBLK_MINORS
-static unsigned int ublks_max = 64;
-static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+static unsigned int unprivileged_ublks_max = 64;
+static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -563,6 +625,28 @@ static int ublk_validate_params(const struct ublk_device *ub)
else if (ublk_dev_is_zoned(ub))
return -EINVAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
+ const struct ublk_param_dma_align *p = &ub->params.dma;
+
+ if (p->alignment >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(p->alignment + 1))
+ return -EINVAL;
+ }
+
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ const struct ublk_param_segment *p = &ub->params.seg;
+
+ if (!is_power_of_2(p->seg_boundary_mask + 1))
+ return -EINVAL;
+
+ if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ }
+
return 0;
}
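
Both new parameter checks rely on the same mask rule: dma.alignment and seg.seg_boundary_mask are masks of the form 2^n - 1, so value + 1 must be a power of two. A self-contained sketch of the rule (ublk_valid_mask is a hypothetical helper, not part of this patch):

	#include <linux/log2.h>

	static bool ublk_valid_mask(u64 mask)
	{
		/* e.g. 0x1ff + 1 == 0x200 (2^9): valid; 0x1fe + 1 == 0x1ff: not */
		return is_power_of_2(mask + 1);
	}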
@@ -574,52 +658,100 @@ static void ublk_apply_params(struct ublk_device *ub)
ublk_dev_param_zoned_apply(ub);
}
+static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_AUTO_BUF_REG;
+}
+
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
+static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
+{
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+ !ublk_support_auto_buf_reg(ubq);
+}
+
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
* read()/write() is involved in user copy, so request reference
* has to be grabbed
+ *
+ * for zero copy, the request buffer needs to be registered in the
+ * io_uring buffer table, so a reference is needed
+ *
+ * for auto buffer registration, the ublk server may still issue
+ * UBLK_IO_COMMIT_AND_FETCH_REQ before a registered buffer is used up,
+ * so a reference is required too.
*/
- return ublk_support_user_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+ ublk_support_auto_buf_reg(ubq);
}
-static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
- kref_init(&data->ref);
- }
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+ struct ublk_io *io)
+{
+ if (ublk_need_req_ref(ubq))
+ refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
}
-static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_get_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return refcount_inc_not_zero(&io->ref);
+}
- return kref_get_unless_zero(&data->ref);
- }
+static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
+{
+ if (!refcount_dec_and_test(&io->ref))
+ return;
- return true;
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
-static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_sub_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
- kref_put(&data->ref, ublk_complete_rq);
- } else {
- __ublk_complete_rq(req);
- }
+ io->task_registered_buffers = 0;
+ return refcount_sub_and_test(sub_refs, &io->ref);
}
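
The biased refcount above can be sanity-checked with simple arithmetic; a worked example under the definitions in this patch:

	/*
	 * dispatch to server:            ref = UBLK_REFCOUNT_INIT
	 * 2 buffers registered on the
	 * daemon task:                   task_registered_buffers = 2
	 * 1 inflight read()/write():     ref = UBLK_REFCOUNT_INIT + 1
	 *
	 * UBLK_IO_COMMIT_AND_FETCH_REQ calls ublk_sub_req_ref():
	 *   ref -= UBLK_REFCOUNT_INIT - 2   =>   ref == 3
	 *
	 * The two registered buffers and the inflight iter each drop one
	 * reference via ublk_put_req_ref(); the request completes when
	 * ref reaches zero.
	 */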
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
@@ -627,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -652,42 +789,73 @@ static inline bool ublk_rq_has_data(const struct request *rq)
return bio_has_data(rq->bio);
}
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag)
+static inline struct ublksrv_io_desc *
+ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
- return (struct ublksrv_io_desc *)
- &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
-static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+static inline int __ublk_queue_cmd_buf_size(int depth)
{
- return ublk_get_queue(ub, q_id)->io_cmd_buf;
+ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
+}
- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
- PAGE_SIZE);
+static int ublk_max_cmd_buf_size(void)
+{
+ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
}
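
As a worked example of the sizing helpers above (assuming 4 KiB pages and the 24-byte struct ublksrv_io_desc from the ublk UAPI):

	/* depth 128:  128 * 24  = 3072  -> round_up to 4096 (1 page)  */
	/* depth 1024: 1024 * 24 = 24576 -> already aligned (6 pages)  */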
-static inline bool ublk_queue_can_use_recovery_reissue(
- struct ublk_queue *ubq)
+/*
+ * Should I/O outstanding to the ublk server be reissued when the server exits?
+ * If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+}
+
+/*
+ * Should I/O issued while there is no ublk server be queued? If not,
+ * I/O issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
+}
+
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+ !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery(
- struct ublk_queue *ubq)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
- return ubq->flags & UBLK_F_USER_RECOVERY;
+ return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+ return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+ ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
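
Taken together, the nosrv helpers above encode one policy per recovery-flag combination; a summary of what happens when the ublk server goes away, derived from the helpers in this patch:

	/*
	 * (no recovery flags)                     -> stop the device
	 * USER_RECOVERY                           -> quiesce; queue new I/O
	 * USER_RECOVERY | USER_RECOVERY_REISSUE   -> quiesce; reissue
	 *                                            outstanding I/O
	 * USER_RECOVERY | USER_RECOVERY_FAIL_IO   -> fail I/O until a new
	 *                                            server recovers the dev
	 */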
static void ublk_free_disk(struct gendisk *disk)
@@ -863,11 +1031,11 @@ static inline bool ublk_need_unmap_req(const struct request *req)
}
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
/*
@@ -885,13 +1053,13 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static int ublk_unmap_io(bool need_map,
const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -938,13 +1106,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -979,28 +1142,16 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
-}
-
-static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
-{
- return ubq->ubq_daemon->flags & PF_EXITING;
+ return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
- /* called from ublk_abort_queue() code path */
- if (io->flags & UBLK_IO_FLAG_ABORTED) {
- res = BLK_STS_IOERR;
- goto exit;
- }
-
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -1021,7 +1172,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely unlikely, since we got the data filled in just before
@@ -1033,7 +1184,7 @@ static inline void __ublk_complete_rq(struct request *req)
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
__blk_mq_end_request(req, BLK_STS_OK);
return;
@@ -1041,37 +1192,12 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_rq(struct kref *ref)
-{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
-
- __ublk_complete_rq(req);
-}
-
-/*
- * Since __ublk_rq_task_work always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
+static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
+ struct request *req)
{
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
- if (ublk_queue_can_use_recovery_reissue(ubq))
- blk_mq_requeue_request(req, false);
- else
- ublk_put_req_ref(ubq, req);
-}
-
-static void ubq_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
-{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -1081,8 +1207,17 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+ return cmd;
+}
+
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
+{
+ struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1091,66 +1226,58 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
- if (ublk_queue_can_use_recovery(ubq))
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- int tag = req->tag;
- struct ublk_io *io = &ubq->ios[tag];
- unsigned int mapped_bytes;
+ unsigned tag = io - ubq->ios;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
- pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+}
- /*
- * Task is exiting if either:
- *
- * (1) current != ubq_daemon.
- * io_uring_cmd_complete_in_task() tries to run task_work
- * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
- *
- * (2) current->flags & PF_EXITING.
- */
- if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
- __ublk_abort_rq(ubq, req);
- return;
- }
+static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, unsigned int issue_flags)
+{
+ int ret;
- if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
- /*
- * We have not handled UBLK_IO_NEED_GET_DATA command yet,
- * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
- * and notify it.
- */
- if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
- io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
- pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
- __func__, io->cmd->cmd_op, ubq->q_id,
- req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
- return;
+ ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
+ io->buf.index, issue_flags);
+ if (ret) {
+ if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, io);
+ return true;
}
- /*
- * We have handled UBLK_IO_NEED_GET_DATA command,
- * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
- * do the copy work.
- */
- io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
- /* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
- pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ return false;
}
- mapped_bytes = ublk_map_io(ubq, req, io);
+ io->task_registered_buffers = 1;
+ io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ return true;
+}
+
+static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ ublk_init_req_ref(ubq, io);
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+ return ublk_auto_buf_reg(ubq, req, io, issue_flags);
+
+ return true;
+}
+
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io)
+{
+ unsigned mapped_bytes = ublk_map_io(ubq, req, io);
/* partially mapped, update io descriptor */
if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
@@ -1165,99 +1292,134 @@ static inline void __ublk_rq_task_work(struct request *req,
blk_mq_requeue_request(req, false);
blk_mq_delay_kick_requeue_list(req->q,
UBLK_REQUEUE_DELAY_MS);
- return;
+ return false;
}
ublk_get_iod(ubq, req->tag)->nr_sectors =
mapped_bytes >> 9;
}
- ublk_init_req_ref(ubq, req);
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ return true;
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq,
+ struct request *req,
+ unsigned int issue_flags)
{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
+ int tag = req->tag;
+ struct ublk_io *io = &ubq->ios[tag];
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
+ pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ /*
+ * Task is exiting if either:
+ *
+ * (1) current != io->task.
+ * io_uring_cmd_complete_in_task() tries to run task_work
+ * in a workqueue if cmd's task is PF_EXITING.
+ *
+ * (2) current->flags & PF_EXITING.
+ */
+ if (unlikely(current != io->task || current->flags & PF_EXITING)) {
+ __ublk_abort_rq(ubq, req);
+ return;
+ }
+
+ if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
+ /*
+ * We have not handled UBLK_IO_NEED_GET_DATA command yet,
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * and notify it.
+ */
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
+ __func__, ubq->q_id, req->tag, io->flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
+ issue_flags);
+ return;
+ }
+
+ if (!ublk_start_io(ubq, req, io))
+ return;
+
+ if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_forward_io_cmds(ubq, issue_flags);
+ ublk_dispatch_req(ubq, pdu->req, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
- }
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
}
-static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- unsigned int nr_inflight = 0;
- int i;
-
- if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- if (!ubq->timeout) {
- send_sig(SIGKILL, ubq->ubq_daemon, 0);
- ubq->timeout = true;
- }
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct request *next;
- return BLK_EH_DONE;
- }
+ do {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
+ rq = next;
+ } while (rq);
+}
- if (!ubq_daemon_is_dying(ubq))
- return BLK_EH_RESET_TIMER;
+static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
+{
+ struct io_uring_cmd *cmd = io->cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
+ pdu->req_list = rq_list_peek(l);
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
+}
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
- nr_inflight++;
- }
+static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+{
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ pid_t tgid = ubq->dev->ublksrv_tgid;
+ struct task_struct *p;
+ struct pid *pid;
- /* cancelable uring_cmd can't help us if all commands are in-flight */
- if (nr_inflight == ubq->q_depth) {
- struct ublk_device *ub = ubq->dev;
+ if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV))
+ return BLK_EH_RESET_TIMER;
- if (ublk_abort_requests(ub, ubq)) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
- }
- return BLK_EH_DONE;
- }
+ if (unlikely(!tgid))
+ return BLK_EH_RESET_TIMER;
- return BLK_EH_RESET_TIMER;
+ rcu_read_lock();
+ pid = find_vpid(tgid);
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sig(SIGKILL, p, 0);
+ rcu_read_unlock();
+ return BLK_EH_DONE;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
+ if (unlikely(READ_ONCE(ubq->fail_io)))
+ return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1268,20 +1430,83 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
+ return BLK_STS_IOERR;
+
+ if (check_cancel && unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
return BLK_STS_IOERR;
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq, false);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+ * ->canceling has to be handled after ->force_abort and ->fail_io
+ * are dealt with, otherwise this request may not be failed during
+ * recovery, causing a hang when deleting the disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
+ const struct ublk_io *io2)
+{
+ return (io_uring_cmd_ctx_handle(io->cmd) ==
+ io_uring_cmd_ctx_handle(io2->cmd)) &&
+ (io->task == io2->task);
+}
+
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_io *io = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+ struct ublk_io *this_io = &this_q->ios[req->tag];
+
+ if (ublk_prep_req(this_q, req, true) != BLK_STS_OK) {
+ rq_list_add_tail(&requeue_list, req);
+ continue;
+ }
+
+ if (io && !ublk_belong_to_same_batch(io, this_io) &&
+ !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
+ io = this_io;
+ rq_list_add_tail(&submit_list, req);
+ }
+
+ if (!rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
+ *rqlist = requeue_list;
+}
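
ublk_queue_rqs() flushes a batch whenever the (io_ring ctx, task) pair changes, as checked by ublk_belong_to_same_batch(); a small worked example:

	/*
	 * incoming: R0 (ctx A, task T), R1 (A, T), R2 (B, T)
	 * batches:  [R0, R1] -> one task-work callback on ctx A
	 *           [R2]     -> one task-work callback on ctx B
	 */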
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1294,10 +1519,42 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /*
+ * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+ * io->cmd
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+ io->addr = 0;
+
+ /*
+ * The old task is PF_EXITING, so put it now.
+ *
+ * It can be NULL when closing a quiesced
+ * device.
+ */
+ if (io->task) {
+ put_task_struct(io->task);
+ io->task = NULL;
+ }
+
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
+ }
+}
+
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1306,14 +1563,209 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
return -EBUSY;
filp->private_data = ub;
+ ub->ublksrv_tgid = current->tgid;
return 0;
}
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+ /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_io_ready = 0;
+ ub->unprivileged_daemons = false;
+ ub->ublksrv_tgid = -1;
+}
+
+static struct gendisk *ublk_get_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ if (disk)
+ get_device(disk_to_dev(disk));
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_put_disk(struct gendisk *disk)
+{
+ if (disk)
+ put_device(disk_to_dev(disk));
+}
+
+/*
+ * Use this function to ensure that ->canceling is consistently set for
+ * the device and all queues. Do not set these flags directly.
+ *
+ * Caller must ensure that:
+ * - cancel_mutex is held. This ensures that there is no concurrent
+ * access to ub->canceling and no concurrent writes to ubq->canceling.
+ * - there are no concurrent reads of ubq->canceling from the queue_rq
+ * path. This can be done by quiescing the queue, or through other
+ * means.
+ */
+static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
+ __must_hold(&ub->cancel_mutex)
+{
+ int i;
+
+ ub->canceling = canceling;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->canceling = canceling;
+}
+
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
+{
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+ /* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
+ struct gendisk *disk;
+ int i;
+
+ /*
+ * For zero-copy and auto buffer register modes, I/O references
+ * might not be dropped naturally when the daemon is killed, but
+ * io_uring guarantees that registered bvec kernel buffers are
+ * eventually unregistered when the io_uring context is freed, at
+ * which point the active references are dropped.
+ *
+ * Wait until active references are dropped to avoid use-after-free.
+ *
+ * A registered buffer may be unregistered in io_uring's release
+ * handler, so we have to wait from a scheduled work function to
+ * avoid a release-order dependency between the two files.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
+ * If no disk is attached, either the device isn't live or it has
+ * already been removed, so we needn't do anything
+ */
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto out;
+
+ /*
+ * All uring_cmds are done now, so abort any requests outstanding to
+ * the ublk server
+ *
+ * This can be done in a lockless way because the ublk server has
+ * gone
+ *
+ * More importantly, we have to provide a forward progress guarantee
+ * without holding ub->mutex, otherwise the control task grabbing
+ * ub->mutex triggers deadlock
+ *
+ * All requests may be inflight, so ->canceling may not be set; set
+ * it now.
+ */
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, true);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_abort_queue(ub, ublk_get_queue(ub, i));
+ mutex_unlock(&ub->cancel_mutex);
+ blk_mq_kick_requeue_list(disk->queue);
+
+ /*
+ * All inflight requests have been completed or requeued, and any new
+ * request will be failed or requeued via `->canceling`, so it is now
+ * fine to grab ub->mutex.
+ */
+ mutex_lock(&ub->mutex);
+
+ /* double check after grabbing lock */
+ if (!ub->ub_disk)
+ goto unlock;
+
+ /*
+ * Transition the device to the nosrv state. What exactly this
+ * means depends on the recovery flags
+ */
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ /*
+ * Allow any pending/future I/O to pass through quickly
+ * with an error. This is needed because del_gendisk
+ * waits for all pending I/O to complete
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
+
+ ublk_stop_dev_unlocked(ub);
+ } else {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ /* ->canceling is set and all requests are aborted */
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ } else {
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
+ }
+ }
+unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_disk(disk);
+
+ /* all uring_cmds are done now, reset device & ubq */
+ ublk_reset_ch_dev(ub);
+out:
+ clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
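
ublk_ch_release_work_fn() re-arms itself while buffer references remain; a minimal sketch of that self-rescheduling pattern, with my_dev and my_refs_active as hypothetical stand-ins:

	static void my_exit_work_fn(struct work_struct *work)
	{
		struct my_dev *d = container_of(work, struct my_dev,
						exit_work.work);

		if (my_refs_active(d)) {
			/* condition not met yet: retry after one jiffy */
			schedule_delayed_work(&d->exit_work, 1);
			return;
		}
		/* all references gone: safe to tear down */
	}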
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
- clear_bit(UB_STATE_OPEN, &ub->state);
+ /*
+ * Grab a ublk device reference, so it won't go away until we are
+ * really done with it in the work function.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
@@ -1322,7 +1774,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned max_sz = ublk_max_cmd_buf_size();
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
@@ -1348,41 +1800,33 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void ublk_commit_completion(struct ublk_device *ub,
- const struct ublksrv_io_cmd *ub_cmd)
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
+ struct request *req)
{
- u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
- struct ublk_queue *ubq = ublk_get_queue(ub, qid);
- struct ublk_io *io = &ubq->ios[tag];
- struct request *req;
-
- /* now this cmd slot is owned by nbd driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- /* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
- if (WARN_ON_ONCE(unlikely(!req)))
- return;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
+ if (ublk_nosrv_should_reissue_outstanding(ub))
+ blk_mq_requeue_request(req, false);
+ else {
+ io->res = -EIO;
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+ }
}
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from the ublk char device release handler, once every uring_cmd
+ * is done; meantime the request queue is "quiesced" since all inflight
+ * requests can't be completed because the ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more; simply ignore the
+ * reference, and complete the request immediately
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -1391,62 +1835,62 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
- struct request *rq;
-
- /*
- * Either we fail the request or ublk_rq_task_work_fn
- * will do it
- */
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq)) {
- io->flags |= UBLK_IO_FLAG_ABORTED;
- __ublk_fail_req(ubq, io, rq);
- }
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ub, io, io->req);
}
}
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_device *ub)
{
- struct gendisk *disk;
-
- spin_lock(&ubq->cancel_lock);
- if (ubq->canceling) {
- spin_unlock(&ubq->cancel_lock);
- return false;
- }
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
-
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- if (disk)
- get_device(disk_to_dev(disk));
- spin_unlock(&ub->lock);
+ struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
- return false;
+ return;
- /* Now we are serialized with ublk_queue_rq() */
+ mutex_lock(&ub->cancel_mutex);
+ if (ub->canceling)
+ goto out;
+ /*
+ * Now we are serialized with ublk_queue_rq()
+ *
+ * Make sure that ubq->canceling is set when queue is frozen,
+ * because ublk_queue_rq() has to rely on this flag for avoiding to
+ * touch completed uring_cmd
+ */
blk_mq_quiesce_queue(disk->queue);
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
- put_device(disk_to_dev(disk));
-
- return true;
+out:
+ mutex_unlock(&ub->cancel_mutex);
+ ublk_put_disk(disk);
}
-static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
+static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
unsigned int issue_flags)
{
+ struct ublk_io *io = &ubq->ios[tag];
+ struct ublk_device *ub = ubq->dev;
+ struct request *req;
bool done;
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
return;
+ /*
+ * Don't try to cancel this command if the request has been started,
+ * to avoid a race between io_uring_cmd_done() and
+ * io_uring_cmd_complete_in_task().
+ *
+ * Either the started request will be aborted via __ublk_abort_rq(),
+ * and this uring_cmd canceled next time, or it will be done in the
+ * task work function ublk_dispatch_req(), because io_uring guarantees
+ * that ublk_dispatch_req() is always called
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (req && blk_mq_request_started(req) && req->tag == tag)
+ return;
+
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
@@ -1454,12 +1898,23 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
* The ublk char device won't be closed when calling cancel fn, so both
* ublk device and queue are guaranteed to be live
+ *
+ * Two-stage cancel:
+ *
+ * - complete every active uring_cmd in ->cancel_fn()
+ *
+ * - abort inflight ublk IO requests in the ublk char device release
+ * handler, which depends on the 1st stage because the device can only
+ * be closed once all uring_cmds are done
+ *
+ * Do _not_ try to acquire ub->mutex before all inflight requests are
+ * aborted, otherwise deadlock may be caused.
*/
static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
unsigned int issue_flags)
@@ -1467,8 +1922,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
- struct ublk_device *ub;
- bool need_schedule;
struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
@@ -1478,27 +1931,21 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
return;
task = io_uring_cmd_get_task(cmd);
- if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
+ io = &ubq->ios[pdu->tag];
+ if (WARN_ON_ONCE(task && task != io->task))
return;
- ub = ubq->dev;
- need_schedule = ublk_abort_requests(ub, ubq);
+ ublk_start_cancel(ubq->dev);
- io = &ubq->ios[pdu->tag];
WARN_ON_ONCE(io->cmd != cmd);
- ublk_cancel_cmd(ubq, io, issue_flags);
-
- if (need_schedule) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
- }
+ ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -1506,7 +1953,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
int i;
for (i = 0; i < ubq->q_depth; i++)
- ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
+ ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -1544,66 +1991,29 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
}
}
-static void __ublk_quiesce_dev(struct ublk_device *ub)
-{
- pr_devel("%s: quiesce ub: dev_id %d state %s\n",
- __func__, ub->dev_info.dev_id,
- ub->dev_info.state == UBLK_S_DEV_LIVE ?
- "LIVE" : "QUIESCED");
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ublk_wait_tagset_rqs_idle(ub);
- ub->dev_info.state = UBLK_S_DEV_QUIESCED;
-}
-
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, quiesce_work);
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
- __ublk_quiesce_dev(ub);
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
-}
-
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
{
struct gendisk *disk;
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_DEAD)
- goto unlock;
- if (ublk_can_use_recovery(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
- del_gendisk(ub->ub_disk);
-
/* Sync with ublk_abort_queue() by holding the lock */
spin_lock(&ub->lock);
disk = ub->ub_disk;
@@ -1611,37 +2021,66 @@ static void ublk_stop_dev(struct ublk_device *ub)
ub->dev_info.ublksrv_pid = -1;
ub->ub_disk = NULL;
spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_stop_dev_unlocked(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
+{
+ struct gendisk *disk;
+
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ return;
+
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
+ del_gendisk(ub->ub_disk);
+ disk = ublk_detach_disk(ub);
put_disk(disk);
- unlock:
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
-/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
{
- mutex_lock(&ub->mutex);
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
- ubq->ubq_daemon = current;
- get_task_struct(ubq->ubq_daemon);
- ub->nr_queues_ready++;
+ int i, j;
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ for (j = 0; j < ubq->q_depth; j++)
+ ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+ ubq->fail_io = false;
}
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
- complete_all(&ub->completion);
- mutex_unlock(&ub->mutex);
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, false);
+ mutex_unlock(&ub->cancel_mutex);
}
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
+/* device can only be started after all IOs are ready */
+static void ublk_mark_io_ready(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
- ublk_queue_cmd(ubq, req);
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
+ /* now we are ready for handling ublk io request */
+ ublk_reset_io_flags(ub);
+ complete_all(&ub->completion);
+ }
}
static inline int ublk_check_cmd_op(u32 cmd_op)
@@ -1657,12 +2096,66 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
return 0;
}
-static inline void ublk_fill_io_cmd(struct ublk_io *io,
- struct io_uring_cmd *cmd, unsigned long buf_addr)
+static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (io->buf.reserved0 || io->buf.reserved1)
+ return -EINVAL;
+
+ if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ return 0;
+}
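
ublk_set_auto_buf_reg() above decodes sqe->addr into a struct ublk_auto_buf_reg and rejects unknown bits; a sketch of what a ublk server would encode, assuming the UAPI layout with index/flags/reserved fields implied by the checks above:

	struct ublk_auto_buf_reg reg = {
		.index = 7,				/* io_uring buffer table slot */
		.flags = UBLK_AUTO_BUF_REG_FALLBACK,	/* optional: fall back instead
							   of failing the request */
	};
	/* reserved0/reserved1 must stay zero or the command is rejected */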
+
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
+{
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+
+ /*
+ * `UBLK_F_AUTO_BUF_REG` only works if `UBLK_IO_FETCH_REQ`
+ * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
+ * `io_ring_ctx`.
+ *
+ * If this uring_cmd's io_ring_ctx isn't the same as the
+ * one used to register the buffer, it is the ublk server's
+ * responsibility to unregister the buffer; otherwise
+ * this ublk request gets stuck.
+ */
+ if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ *buf_idx = io->buf.index;
+ }
+
+ return ublk_set_auto_buf_reg(io, cmd);
+}
+
+/* Once we return, `io->req` can't be used any more */
+static inline struct request *
+ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
{
+ struct request *req = io->req;
+
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
+ /* now this cmd slot is owned by ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ return req;
+}
+
+static inline int
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
+ struct io_uring_cmd *cmd, unsigned long buf_addr,
+ u16 *buf_idx)
+{
+ if (ublk_dev_support_auto_buf_reg(ub))
+ return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
+
io->addr = buf_addr;
+ return 0;
}
static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
@@ -1680,39 +2173,263 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static void ublk_io_release(void *priv)
{
+ struct request *rq = priv;
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+
+ /*
+ * task_registered_buffers may be 0 if buffers were registered off task
+ * but unregistered on task, or after UBLK_IO_COMMIT_AND_FETCH_REQ.
+ */
+ if (current == io->task && io->task_registered_buffers)
+ io->task_registered_buffers--;
+ else
+ ublk_put_req_ref(io, rq);
+}
+
+static int ublk_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
+ struct ublk_io *io,
+ unsigned int index, unsigned int issue_flags)
+{
+ struct request *req;
+ int ret;
+
+ if (!ublk_dev_support_zero_copy(ub))
+ return -EINVAL;
+
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
+ if (!req)
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret) {
+ ublk_put_req_ref(io, req);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
+ unsigned index, unsigned issue_flags)
+{
+ unsigned new_registered_buffers;
+ struct request *req = io->req;
+ int ret;
+
+ /*
+ * Ensure there are still references for ublk_sub_req_ref() to release.
+ * If not, fall back on the thread-safe buffer registration.
+ */
+ new_registered_buffers = io->task_registered_buffers + 1;
+ if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
+
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret)
+ return ret;
+
+ io->task_registered_buffers = new_registered_buffers;
+ return 0;
+}
+
+static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
+ const struct ublk_device *ub,
+ unsigned int index, unsigned int issue_flags)
+{
+ if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY))
+ return -EINVAL;
+
+ return io_buffer_unregister_bvec(cmd, index, issue_flags);
+}
+
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
+{
+ if (ublk_dev_need_map_io(ub)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
+ return -EINVAL;
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ int ret = 0;
+
+ /*
+ * When handling the FETCH command that sets up the ublk uring queue,
+ * ub->mutex is the innermost lock, and we won't block while handling
+ * FETCH, so this is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ /* UBLK_IO_FETCH_REQ is only allowed before dev is setup */
+ if (ublk_dev_ready(ub)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
+ if (ret)
+ goto out;
+
+ WRITE_ONCE(io->task, get_task_struct(current));
+ ublk_mark_io_ready(ub);
+out:
+ mutex_unlock(&ub->mutex);
+ return ret;
+}
+
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ struct request *req = io->req;
+
+ if (ublk_dev_need_map_io(ub)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
+ /*
+ * User copy requires addr to be unset when command is
+ * not zone append
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool ublk_need_complete_req(const struct ublk_device *ub,
+ struct ublk_io *io)
+{
+ if (ublk_dev_need_req_ref(ub))
+ return ublk_sub_req_ref(io);
+ return true;
+}
+
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
+{
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ return ublk_start_io(ubq, req, io);
+}
+
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
- int ret = -EINVAL;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
struct request *req;
+ int ret;
+ bool compl;
+
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
- if (!ubq || ub_cmd->q_id != ubq->q_id)
- goto out;
+ /*
+ * io_buffer_unregister_bvec() doesn't access the ubq or io,
+ * so no need to validate the q_id, tag, or task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
- if (ubq->ubq_daemon && ubq->ubq_daemon != current)
+ ret = -EINVAL;
+ if (q_id >= ub->dev_info.nr_hw_queues)
goto out;
- if (tag >= ubq->q_depth)
+ ubq = ublk_get_queue(ub, q_id);
+
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
+ /* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
+ if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
+ ret = ublk_check_fetch_buf(ub, addr);
+ if (ret)
+ goto out;
+ ret = ublk_fetch(cmd, ub, io, addr);
+ if (ret)
+ goto out;
+
+ ublk_prep_cancel(cmd, issue_flags, ubq, tag);
+ return -EIOCBQUEUED;
+ }
+
+ if (READ_ONCE(io->task) != current) {
+ /*
+ * ublk_register_io_buf() accesses only the io's refcount,
+ * so it can be handled on any task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
+
+ goto out;
+ }
/* there is pending io cmd, something must be wrong */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) {
ret = -EBUSY;
goto out;
}
@@ -1725,72 +2442,43 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
- ret = ublk_check_cmd_op(cmd_op);
- if (ret)
- goto out;
-
- ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
- case UBLK_IO_FETCH_REQ:
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * The io is being handled by server, so COMMIT_RQ is expected
- * instead of FETCH_REQ
- */
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- goto out;
-
- if (!ublk_support_user_copy(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (ub_cmd->addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
- goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_mark_io_ready(ub, ubq);
- break;
+ case UBLK_IO_REGISTER_IO_BUF:
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
+ issue_flags);
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
-
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
+ if (ret)
goto out;
+ io->res = result;
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = addr;
+ if (compl)
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
- if (!ublk_support_user_copy(ubq)) {
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if
- * NEED GET DATA is not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
- req_op(req) == REQ_OP_READ))
- goto out;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
- /*
- * User copy requires addr to be unset when command is
- * not zone append
- */
- ret = -EINVAL;
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
+ /*
+ * ublk_get_data() may fail and fallback to requeue, so keep
+ * uring_cmd active first and prepare for handling new requeued
+ * request
+ */
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
+ WARN_ON_ONCE(ret);
+ if (likely(ublk_get_data(ubq, io, req))) {
+ __ublk_prep_compl_io_cmd(io, req);
+ return UBLK_IO_RES_OK;
+ }
break;
default:
goto out;
@@ -1799,25 +2487,25 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
- return -EIOCBQUEUED;
+ return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
struct request *req;
- if (!ublk_need_req_ref(ubq))
- return NULL;
-
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ /*
+ * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
+ * which would overwrite it with io->cmd
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
- if (!ublk_get_req_ref(ubq, req))
+ if (!ublk_get_req_ref(io))
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
@@ -1831,34 +2519,17 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return req;
fail_put:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
-
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- ublk_ch_uring_cmd_local(cmd, issue_flags);
+ int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
+
+ if (ret != -EIOCBQUEUED)
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -1895,7 +2566,8 @@ static inline bool ublk_check_ubuf_dir(const struct request *req,
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
- struct iov_iter *iter, size_t *off, int dir)
+ struct iov_iter *iter, size_t *off, int dir,
+ struct ublk_io **io)
{
struct ublk_device *ub = iocb->ki_filp->private_data;
struct ublk_queue *ubq;
@@ -1920,13 +2592,14 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
+ if (!ublk_dev_support_user_copy(ub))
+ return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
- req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ *io = &ubq->ios[tag];
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
@@ -1939,42 +2612,40 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
*off = buf_off;
return req;
fail:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(*io, req);
return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
@@ -1991,11 +2662,18 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
+ int size = ublk_queue_cmd_buf_size(ub);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ int i;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+ if (io->task)
+ put_task_struct(io->task);
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
+ }
- if (ubq->ubq_daemon)
- put_task_struct(ubq->ubq_daemon);
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}
@@ -2011,7 +2689,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ size = ublk_queue_cmd_buf_size(ub);
ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
if (!ptr)
@@ -2032,7 +2710,7 @@ static void ublk_deinit_queues(struct ublk_device *ub)
for (i = 0; i < nr_queues; i++)
ublk_deinit_queue(ub, i);
- kfree(ub->__queues);
+ kvfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
@@ -2043,7 +2721,7 @@ static int ublk_init_queues(struct ublk_device *ub)
int i, ret = -ENOMEM;
ub->queue_size = ubq_size;
- ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
if (!ub->__queues)
return ret;
@@ -2099,6 +2777,7 @@ static void ublk_cdev_rel(struct device *dev)
ublk_deinit_queues(ub);
ublk_free_dev_number(ub);
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
}
@@ -2123,21 +2802,14 @@ static int ublk_add_chdev(struct ublk_device *ub)
if (ret)
goto fail;
- ublks_added++;
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ unprivileged_ublks_added++;
return 0;
fail:
put_device(dev);
return ret;
}
-static void ublk_stop_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, stop_work);
-
- ublk_stop_dev(ub);
-}
-
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
@@ -2153,20 +2825,21 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
- ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
static void ublk_remove(struct ublk_device *ub)
{
+ bool unprivileged;
+
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
- ublks_added--;
+
+ if (unprivileged)
+ unprivileged_ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -2185,9 +2858,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
struct queue_limits lim = {
@@ -2229,7 +2902,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
- lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
}
if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
@@ -2241,9 +2914,21 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
lim.features |= BLK_FEAT_ROTATIONAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
+ lim.dma_alignment = ub->params.dma.alignment;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
+ lim.max_segment_size = ub->params.seg.max_segment_size;
+ lim.max_segments = ub->params.seg.max_segments;
+ }
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
test_bit(UB_STATE_USED, &ub->state)) {
@@ -2265,8 +2950,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is untrusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
@@ -2286,7 +2971,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
out_put_cdev:
if (ret) {
- ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ublk_detach_disk(ub);
ublk_put_device(ub);
}
if (ret)
@@ -2297,9 +2982,8 @@ out_unlock:
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@@ -2348,9 +3032,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
info->nr_hw_queues, info->queue_depth);
}
-static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -2367,11 +3050,33 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
+ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
+ info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
+ return -EINVAL;
+
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /* forbid nonsense combinations of recovery flags */
+ switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+ case 0:
+ case UBLK_F_USER_RECOVERY:
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+ break;
+ default:
+ pr_warn("%s: invalid recovery flags %llx\n", __func__,
+ info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
+ return -EINVAL;
+ }
+
+ if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
+ pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
+ return -EINVAL;
+ }
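
The whitelist above is small enough to check in isolation. A minimal userspace sketch of the same predicate, with the flag bits re-declared locally (the actual bit positions live in the uapi header; the values here are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define F_RECOVERY (1ULL << 3)	/* stands in for UBLK_F_USER_RECOVERY */
#define F_REISSUE  (1ULL << 4)	/* UBLK_F_USER_RECOVERY_REISSUE */
#define F_FAIL_IO  (1ULL << 9)	/* UBLK_F_USER_RECOVERY_FAIL_IO */

/* REISSUE and FAIL_IO are modifiers of RECOVERY and are mutually
 * exclusive, so exactly four combinations are accepted. */
static bool recovery_flags_valid(uint64_t flags)
{
	uint64_t r = flags & (F_RECOVERY | F_REISSUE | F_FAIL_IO);

	return r == 0 || r == F_RECOVERY ||
	       r == (F_RECOVERY | F_REISSUE) ||
	       r == (F_RECOVERY | F_FAIL_IO);
}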
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2388,8 +3093,11 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
* For USER_COPY, we depend on userspace to fill the request
* buffer by pwrite() to the ublk char device, which can't be
* used for an unprivileged device
+ *
+ * The same applies to zero copy and auto buffer registration.
*/
- if (info.flags & UBLK_F_USER_COPY)
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
return -EINVAL;
}
@@ -2415,7 +3123,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return ret;
ret = -EACCES;
- if (ublks_added >= ublks_max)
+ if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
+ unprivileged_ublks_added >= unprivileged_ublks_max)
goto out_unlock;
ret = -ENOMEM;
@@ -2424,8 +3133,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
- INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ mutex_init(&ub->cancel_mutex);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2445,22 +3153,27 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
ub->dev_info.flags &= UBLK_F_ALL;
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
- UBLK_F_URING_CMD_COMP_IN_TASK;
+ UBLK_F_URING_CMD_COMP_IN_TASK |
+ UBLK_F_PER_IO_DAEMON |
+ UBLK_F_BUF_REG_OFF_DAEMON;
- /* GET_DATA isn't needed any more with USER_COPY */
- if (ublk_dev_is_user_copy(ub))
+ /* GET_DATA isn't needed anymore with USER_COPY, ZERO_COPY or AUTO_BUF_REG */
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
- /* Zoned storage support requires user copy feature */
+ /*
+ * Zoned storage support requires reusing `ublksrv_io_cmd->addr` to
+ * return zone_append_lba, which is only possible with user copy or
+ * zero copy
+ */
if (ublk_dev_is_zoned(ub) &&
- (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
+ (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
+ (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
ret = -EINVAL;
goto out_free_dev_number;
}
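
The restriction exists because of the wire format: the same eight bytes of the io command carry the buffer address on submission and the zone append LBA on completion. Abridged from the uapi header (layout shown for illustration):

struct ublksrv_io_cmd {
	__u16	q_id;
	__u16	tag;
	__s32	result;
	union {
		__u64	addr;			/* buffer address on submission */
		__u64	zone_append_lba;	/* returned LBA on completion */
	};
};

With per-io kernel-managed buffers (neither user copy nor zero copy), addr is already taken, so zoned devices could not report the append position.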
- /* We are not ready to support zero copy */
- ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
-
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
@@ -2492,6 +3205,7 @@ out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
out_unlock:
mutex_unlock(&ublk_ctl_mutex);
@@ -2560,16 +3274,12 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
-
return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -2598,9 +3308,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@@ -2629,9 +3338,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
}
static int ublk_ctrl_set_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@@ -2648,9 +3356,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+ * Parameters can only be changed when device hasn't
+ * been started yet
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
@@ -2666,42 +3377,13 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
return ret;
}
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
- int i;
-
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
- ubq->canceling = false;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- /* forget everything now and be ready for new FETCH_REQ */
- io->flags = 0;
- io->cmd = NULL;
- io->addr = 0;
- }
-}
-
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
- int i;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
- goto out_unlock;
- if (!ub->nr_queues_ready)
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
/*
* START_RECOVERY is only allowed after:
@@ -2710,24 +3392,22 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
* and related io_uring ctx is freed so file struct of /dev/ublkcX is
* released.
*
+ * and one of the following holds
+ *
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
* (a) has quiesced the request queue
* (b) has requeued every inflight rq whose io_flags is ACTIVE
* (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
* (d) has completed/canceled all ioucmds owned by the dying process
+ *
+ * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+ * quiesced, but all I/O fails immediately
*/
- if (test_bit(UB_STATE_OPEN, &ub->state) ||
- ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
- ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
@@ -2736,48 +3416,46 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
- /* wait until new ubq_daemon sending all FETCH_REQ */
+ pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__,
+ header->dev_id);
+
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
- pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
+ header->dev_id);
+
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (!ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
blk_mq_kick_requeue_list(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
- u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
+ u64 features = UBLK_F_ALL;
if (header->len != UBLK_FEATURES_LEN || !header->addr)
return -EINVAL;
@@ -2788,6 +3466,125 @@ static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
return 0;
}
+static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+{
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
+
+ mutex_lock(&ub->mutex);
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+ mutex_unlock(&ub->mutex);
+}
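
A hypothetical control-plane sketch of driving this: the new capacity, in 512-byte sectors, travels in data[0] of the control command. Field names follow the ublk uapi; the SQE setup, ctrl_fd, and the surrounding locals are illustrative only:

/* assumes ctrl_fd is an open /dev/ublk-control and sqe came from
 * io_uring_get_sqe(); error handling omitted */
struct ublksrv_ctrl_cmd *cc = (struct ublksrv_ctrl_cmd *)sqe->cmd;

memset(sqe, 0, sizeof(*sqe));
sqe->opcode = IORING_OP_URING_CMD;
sqe->fd = ctrl_fd;
sqe->cmd_op = UBLK_U_CMD_UPDATE_SIZE;
cc->dev_id = dev_id;
cc->data[0] = new_capacity_sectors;	/* read as header->data[0] above */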
+
+struct count_busy {
+ const struct ublk_queue *ubq;
+ unsigned int nr_busy;
+};
+
+static bool ublk_count_busy_req(struct request *rq, void *data)
+{
+ struct count_busy *idle = data;
+
+ if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
+ idle->nr_busy += 1;
+ return true;
+}
+
+/* uring_cmd is guaranteed to be active if the associated request is idle */
+static bool ubq_has_idle_io(const struct ublk_queue *ubq)
+{
+ struct count_busy data = {
+ .ubq = ubq,
+ };
+
+ blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
+ return data.nr_busy < ubq->q_depth;
+}
+
+/* Wait until each hw queue has at least one idle IO */
+static int ublk_wait_for_idle_io(struct ublk_device *ub,
+ unsigned int timeout_ms)
+{
+ unsigned int elapsed = 0;
+ int ret;
+
+ while (elapsed < timeout_ms && !signal_pending(current)) {
+ unsigned int queues_cancelable = 0;
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ queues_cancelable += !!ubq_has_idle_io(ubq);
+ }
+
+ /*
+ * Each queue needs at least one active command for
+ * notifying the ublk server
+ */
+ if (queues_cancelable == ub->dev_info.nr_hw_queues)
+ break;
+
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ elapsed += UBLK_REQUEUE_DELAY_MS;
+ }
+
+ if (signal_pending(current))
+ ret = -EINTR;
+ else if (elapsed >= timeout_ms)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ return ret;
+}
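
The function is an instance of a bounded poll. Its generic shape, as a sketch with a hypothetical predicate callback (same msleep()/signal_pending() primitives):

static int poll_until(bool (*done)(void *arg), void *arg,
		      unsigned int step_ms, unsigned int timeout_ms)
{
	unsigned int elapsed = 0;

	while (elapsed < timeout_ms && !signal_pending(current)) {
		if (done(arg))
			return 0;
		msleep(step_ms);	/* back off before re-checking */
		elapsed += step_ms;
	}
	return signal_pending(current) ? -EINTR : -EBUSY;
}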
+
+static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
+{
+ /* zero means wait forever */
+ u64 timeout_ms = header->data[0];
+ struct gendisk *disk;
+ int ret = -ENODEV;
+
+ if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ub->mutex);
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto unlock;
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto put_disk;
+
+ ret = 0;
+ /* already in expected state */
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto put_disk;
+
+ /* Mark the device as canceling */
+ mutex_lock(&ub->cancel_mutex);
+ blk_mq_quiesce_queue(disk->queue);
+ ublk_set_canceling(ub, true);
+ blk_mq_unquiesce_queue(disk->queue);
+ mutex_unlock(&ub->cancel_mutex);
+
+ if (!timeout_ms)
+ timeout_ms = UINT_MAX;
+ ret = ublk_wait_for_idle_io(ub, timeout_ms);
+
+put_disk:
+ ublk_put_disk(disk);
+unlock:
+ mutex_unlock(&ub->mutex);
+
+ /* Cancel pending uring_cmd */
+ if (!ret)
+ ublk_cancel_dev(ub);
+ return ret;
+}
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -2873,6 +3670,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
+ case UBLK_CMD_UPDATE_SIZE:
+ case UBLK_CMD_QUIESCE_DEV:
mask = MAY_READ | MAY_WRITE;
break;
default:
@@ -2914,7 +3713,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
goto out;
if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
- ret = ublk_ctrl_get_features(cmd);
+ ret = ublk_ctrl_get_features(header);
goto out;
}
@@ -2931,17 +3730,17 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(ub, cmd);
+ ret = ublk_ctrl_start_dev(ub, header);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
- ret = ublk_ctrl_get_dev_info(ub, cmd);
+ ret = ublk_ctrl_get_dev_info(ub, header);
break;
case UBLK_CMD_ADD_DEV:
- ret = ublk_ctrl_add_dev(cmd);
+ ret = ublk_ctrl_add_dev(header);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(&ub, true);
@@ -2950,22 +3749,29 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, header);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(ub, cmd);
+ ret = ublk_ctrl_get_params(ub, header);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(ub, cmd);
+ ret = ublk_ctrl_set_params(ub, header);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(ub, cmd);
+ ret = ublk_ctrl_start_recovery(ub, header);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(ub, cmd);
+ ret = ublk_ctrl_end_recovery(ub, header);
+ break;
+ case UBLK_CMD_UPDATE_SIZE:
+ ublk_ctrl_set_size(ub, header);
+ ret = 0;
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);
break;
default:
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
}
@@ -2973,10 +3779,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
- return -EIOCBQUEUED;
+ return ret;
}
static const struct file_operations ublk_ctl_fops = {
@@ -2998,6 +3803,7 @@ static int __init ublk_init(void)
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+ BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
init_waitqueue_head(&ublk_idr_wq);
@@ -3040,23 +3846,26 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
+static int ublk_set_max_unprivileged_ublks(const char *buf,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
+static int ublk_get_max_unprivileged_ublks(char *buf,
+ const struct kernel_param *kp)
{
- return sysfs_emit(buf, "%u\n", ublks_max);
+ return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
}
-static const struct kernel_param_ops ublk_max_ublks_ops = {
- .set = ublk_set_max_ublks,
- .get = ublk_get_max_ublks,
+static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
+ .set = ublk_set_max_unprivileged_ublks,
+ .get = ublk_get_max_unprivileged_ublks,
};
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
+ &unprivileged_ublks_max, 0644);
+MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to be added (default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_DESCRIPTION("Userspace block device");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..f061420dfb10 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -227,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
if (unlikely(err))
return -ENOMEM;
- return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+ return blk_rq_map_sg(req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
@@ -471,18 +470,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist)
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct rq_list *rqlist)
{
+ struct request *req;
unsigned long flags;
- int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ int err;
err = virtblk_add_req(vq->vq, vbr);
if (err) {
@@ -495,37 +494,32 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
- return kick;
+ if (kick)
+ virtqueue_notify(vq->vq);
}
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
- bool kick;
-
- if (!virtblk_prep_rq_batch(req)) {
- rq_list_move(rqlist, &requeue_list, req, prev);
- req = prev;
- if (!req)
- continue;
- }
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct virtio_blk_vq *vq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
- if (!next || req->mq_hctx != next->mq_hctx) {
- req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist);
- if (kick)
- virtqueue_notify(vq->vq);
+ if (vq && vq != this_vq)
+ virtblk_add_req_batch(vq, &submit_list);
+ vq = this_vq;
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (virtblk_prep_rq_batch(req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (vq)
+ virtblk_add_req_batch(vq, &submit_list);
*rqlist = requeue_list;
}
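
The rewrite replaces the old prev/next bookkeeping with a simpler pattern: pop requests in submission order and flush the open batch whenever the grouping key (the target vq) changes. The same shape in the abstract, with hypothetical helpers (sketch only):

struct item;	/* stands in for struct request */

static void drain_grouped(struct item *(*pop)(void),
			  void *(*key_of)(struct item *),
			  void (*flush)(void *key))
{
	void *cur = NULL;
	struct item *it;

	while ((it = pop())) {
		void *key = key_of(it);

		if (cur && key != cur)
			flush(cur);	/* key changed: submit the open batch */
		cur = key;
		/* ...append it to the current (or the requeue) batch... */
	}
	if (cur)
		flush(cur);		/* submit the final batch */
}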
@@ -577,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
- err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
@@ -784,7 +778,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
wg, v);
return -ENODEV;
}
- lim->max_zone_append_sectors = v;
+ lim->max_hw_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return 0;
@@ -823,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
@@ -835,9 +829,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -859,7 +853,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -982,9 +976,8 @@ static int init_vq(struct virtio_blk *vblk)
return -EINVAL;
}
- num_vqs = min_t(unsigned int,
- min_not_zero(num_request_queues, nr_cpu_ids),
- num_vqs);
+ num_vqs = blk_mq_num_possible_queues(
+ min_not_zero(num_request_queues, num_vqs));
num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
@@ -1111,9 +1104,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
- i = queue_limits_commit_update(disk->queue, &lim);
- blk_mq_unfreeze_queue(disk->queue);
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
if (i)
return i;
return count;
@@ -1186,7 +1177,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1214,11 +1206,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
@@ -1486,7 +1479,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
@@ -1587,13 +1579,16 @@ static void virtblk_remove(struct virtio_device *vdev)
put_disk(vblk->disk);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtblk_freeze(struct virtio_device *vdev)
+static int virtblk_freeze_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ struct request_queue *q = vblk->disk->queue;
+ unsigned int memflags;
/* Ensure no requests in virtqueues before deleting vqs. */
- blk_mq_freeze_queue(vblk->disk->queue);
+ memflags = blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue_nowait(q);
+ blk_mq_unfreeze_queue(q, memflags);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
@@ -1607,7 +1602,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
return 0;
}
-static int virtblk_restore(struct virtio_device *vdev)
+static int virtblk_restore_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
@@ -1617,12 +1612,33 @@ static int virtblk_restore(struct virtio_device *vdev)
return ret;
virtio_device_ready(vdev);
+ blk_mq_unquiesce_queue(vblk->disk->queue);
- blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
#endif
+static int virtblk_reset_prepare(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_reset_done(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1658,13 +1674,15 @@ static struct virtio_driver virtio_blk = {
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
+ .reset_prepare = virtblk_reset_prepare,
+ .reset_done = virtblk_reset_done,
};
static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 838064593f62..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
- ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 59ce113b882a..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
- num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+ num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
num_grant = 0;
/* Calculate the number of grant used */
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4b7219be1bb8..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
new file mode 100644
index 000000000000..a423228e201b
--- /dev/null
+++ b/drivers/block/zloop.c
@@ -0,0 +1,1386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Christoph Hellwig.
+ * Copyright (c) 2025, Western Digital Corporation or its affiliates.
+ *
+ * Zoned Loop Device driver - exports a zoned block device using one file per
+ * zone as backing storage.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/blkzoned.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+/*
+ * Options for adding (and removing) a device.
+ */
+enum {
+ ZLOOP_OPT_ERR = 0,
+ ZLOOP_OPT_ID = (1 << 0),
+ ZLOOP_OPT_CAPACITY = (1 << 1),
+ ZLOOP_OPT_ZONE_SIZE = (1 << 2),
+ ZLOOP_OPT_ZONE_CAPACITY = (1 << 3),
+ ZLOOP_OPT_NR_CONV_ZONES = (1 << 4),
+ ZLOOP_OPT_BASE_DIR = (1 << 5),
+ ZLOOP_OPT_NR_QUEUES = (1 << 6),
+ ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
+ ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+};
+
+static const match_table_t zloop_opt_tokens = {
+ { ZLOOP_OPT_ID, "id=%d" },
+ { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" },
+ { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" },
+ { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" },
+ { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" },
+ { ZLOOP_OPT_BASE_DIR, "base_dir=%s" },
+ { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
+ { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
+ { ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ERR, NULL }
+};
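
Each "name=value" option is matched against this table with match_token() from <linux/parser.h>. A minimal sketch of consuming one token (the literal and locals are illustrative):

substring_t args[MAX_OPT_ARGS];
char buf[] = "zone_size_mb=256";
unsigned int mb;

switch (match_token(buf, zloop_opt_tokens, args)) {
case ZLOOP_OPT_ZONE_SIZE:
	if (match_uint(&args[0], &mb))	/* parses the %u capture */
		return -EINVAL;
	/* opts->zone_size = (sector_t)mb << (20 - SECTOR_SHIFT); */
	break;
default:
	return -EINVAL;
}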
+
+/* Default values for the "add" operation. */
+#define ZLOOP_DEF_ID -1
+#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT)
+#define ZLOOP_DEF_NR_ZONES 64
+#define ZLOOP_DEF_NR_CONV_ZONES 8
+#define ZLOOP_DEF_BASE_DIR "/var/local/zloop"
+#define ZLOOP_DEF_NR_QUEUES 1
+#define ZLOOP_DEF_QUEUE_DEPTH 128
+#define ZLOOP_DEF_BUFFERED_IO false
+
+/* Arbitrary limit on the zone size (16GB). */
+#define ZLOOP_MAX_ZONE_SIZE_MB 16384
+
+struct zloop_options {
+ unsigned int mask;
+ int id;
+ sector_t capacity;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_conv_zones;
+ char *base_dir;
+ unsigned int nr_queues;
+ unsigned int queue_depth;
+ bool buffered_io;
+};
+
+/*
+ * Device states.
+ */
+enum {
+ Zlo_creating = 0,
+ Zlo_live,
+ Zlo_deleting,
+};
+
+enum zloop_zone_flags {
+ ZLOOP_ZONE_CONV = 0,
+ ZLOOP_ZONE_SEQ_ERROR,
+};
+
+struct zloop_zone {
+ struct file *file;
+
+ unsigned long flags;
+ struct mutex lock;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+
+ gfp_t old_gfp_mask;
+};
+
+struct zloop_device {
+ unsigned int id;
+ unsigned int state;
+
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *disk;
+
+ struct workqueue_struct *workqueue;
+ bool buffered_io;
+
+ const char *base_dir;
+ struct file *data_dir;
+
+ unsigned int zone_shift;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int block_size;
+
+ struct zloop_zone zones[] __counted_by(nr_zones);
+};
+
+struct zloop_cmd {
+ struct work_struct work;
+ atomic_t ref;
+ sector_t sector;
+ sector_t nr_sectors;
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+};
+
+static DEFINE_IDR(zloop_index_idr);
+static DEFINE_MUTEX(zloop_ctl_mutex);
+
+static unsigned int rq_zone_no(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ return blk_rq_pos(rq) >> zlo->zone_shift;
+}
+
+static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ lockdep_assert_held(&zone->lock);
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat (err=%d)\n",
+ zone_no, ret);
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ return ret;
+ }
+
+ file_sectors = stat.size >> SECTOR_SHIFT;
+ if (file_sectors > zlo->zone_capacity) {
+ pr_err("Zone %u file too large (%llu sectors > %llu)\n",
+ zone_no, file_sectors, zlo->zone_capacity);
+ return -EINVAL;
+ }
+
+ if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone %u file size not aligned to block size %u\n",
+ zone_no, zlo->block_size);
+ return -EINVAL;
+ }
+
+ if (!file_sectors) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ } else if (file_sectors == zlo->zone_capacity) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ zone->wp = zone->start + file_sectors;
+ }
+
+ return 0;
+}
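
A worked example of the recovery rule above, with illustrative numbers:

/*
 * Assume zone_capacity = 524288 sectors (256 MiB), block_size = 4096:
 *
 *   backing file size       resulting condition / write pointer
 *   0                       EMPTY,  wp = zone->start
 *   256 MiB (== capacity)   FULL,   wp = zone->start + zone_size
 *   100 MiB (in between)    CLOSED, wp = zone->start + 204800
 *
 * A size above the capacity, or one not aligned to block_size, is
 * rejected with -EINVAL.
 */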
+
+static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, 0)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_all_zones(struct zloop_device *zlo)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) {
+ ret = zloop_reset_zone(zlo, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_FULL)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static void zloop_put_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_rw_complete(struct kiocb *iocb, long ret)
+{
+ struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb);
+
+ cmd->ret = ret;
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+ bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+ int rw = is_write ? ITER_SOURCE : ITER_DEST;
+ struct req_iterator rq_iter;
+ struct zloop_zone *zone;
+ struct iov_iter iter;
+ struct bio_vec tmp;
+ sector_t zone_end;
+ int nr_bvec = 0;
+ int ret;
+
+ atomic_set(&cmd->ref, 2);
+ cmd->sector = sector;
+ cmd->nr_sectors = nr_sectors;
+ cmd->ret = 0;
+
+ /* We should never get an I/O beyond the device capacity. */
+ if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+ zone = &zlo->zones[zone_no];
+ zone_end = zone->start + zlo->zone_capacity;
+
+ /*
+ * The block layer should never send requests that are not fully
+ * contained within the zone.
+ */
+ if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+ if (ret)
+ goto out;
+ }
+
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+ mutex_lock(&zone->lock);
+
+ if (is_append) {
+ sector = zone->wp;
+ cmd->sector = sector;
+ }
+
+ /*
+ * Write operations must be aligned to the write pointer and
+ * fully contained within the zone capacity.
+ */
+ if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+ zone_no, sector, zone->wp);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Implicitly open the target zone. */
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Advance the write pointer of sequential zones. If the write
+ * fails, the wp position will be corrected when the next I/O
+ * completes.
+ */
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end)
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+
+ rq_for_each_bvec(tmp, rq, rq_iter)
+ nr_bvec++;
+
+ if (rq->bio != rq->biotail) {
+ struct bio_vec *bvec;
+
+ cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO);
+ if (!cmd->bvec) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /*
+ * The bios of the request may start from the middle of the
+ * 'bvec' array because of bio splitting, so we can't directly
+ * copy bio->bi_io_vec to the new bvec. The rq_for_each_bvec
+ * API takes care of all the details for us.
+ */
+ bvec = cmd->bvec;
+ rq_for_each_bvec(tmp, rq, rq_iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
+ } else {
+ /*
+ * Same here: this bio may start from the middle of the
+ * 'bvec' array because of bio splitting, so the offset within
+ * the bvec must be passed to the iov iterator.
+ */
+ iov_iter_bvec(&iter, rw,
+ __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
+ nr_bvec, blk_rq_bytes(rq));
+ iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
+ }
+
+ cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+ cmd->iocb.ki_filp = zone->file;
+ cmd->iocb.ki_complete = zloop_rw_complete;
+ if (!zlo->buffered_io)
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
+ if (rw == ITER_SOURCE)
+ ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
+ else
+ ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
+unlock:
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+ mutex_unlock(&zone->lock);
+out:
+ if (ret != -EIOCBQUEUED)
+ zloop_rw_complete(&cmd->iocb, ret);
+ zloop_put_cmd(cmd);
+}
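
A worked example for the single-bio branch above, with a hypothetical split position:

/*
 * Suppose bio splitting left bi_iter.bi_idx = 1 and
 * bi_iter.bi_bvec_done = 512, i.e. this request starts 512 bytes into
 * the second bvec of the original bio.  Then:
 *
 *   __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter)
 *       -> &bi_io_vec[1]        (start at the second bvec)
 *   iter.iov_offset = 512       (skip the already-consumed bytes)
 *
 * so the iov_iter covers exactly the bytes owned by this request.
 */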
+
+static void zloop_handle_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ /*
+ * zloop_rw() always executes asynchronously or completes
+ * directly.
+ */
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+ /*
+ * Sync the entire FS containing the zone files instead of
+ * walking all files
+ */
+ cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ cmd->ret = zloop_reset_all_zones(zlo);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_OPEN:
+ cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ pr_err("Unsupported operation %d\n", req_op(rq));
+ cmd->ret = -EOPNOTSUPP;
+ break;
+ }
+
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_cmd_workfn(struct work_struct *work)
+{
+ struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
+ int orig_flags = current->flags;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ zloop_handle_cmd(cmd);
+ current->flags = orig_flags;
+}
+
+static void zloop_complete_rq(struct request *rq)
+{
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = cmd->sector >> zlo->zone_shift;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ blk_status_t sts = BLK_STS_OK;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed read sector %llu, %llu sectors\n",
+ zone_no, cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ /* short read */
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ }
+ break;
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n",
+ zone_no,
+ req_op(rq) == REQ_OP_WRITE ? "" : "append ",
+ cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ pr_err("Zone %u: partial write %ld/%u B\n",
+ zone_no, cmd->ret, blk_rq_bytes(rq));
+ cmd->ret = -EIO;
+ }
+
+ if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ /*
+ * A write to a sequential zone file failed: mark the
+ * zone as having an error. This will be corrected and
+ * cleared when the next IO is submitted.
+ */
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ break;
+ }
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ rq->__sector = cmd->sector;
+
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->ret < 0)
+ sts = errno_to_blk_status(cmd->ret);
+ blk_mq_end_request(rq, sts);
+}
+
+static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ if (zlo->state == Zlo_deleting)
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+
+ INIT_WORK(&cmd->work, zloop_cmd_workfn);
+ queue_work(zlo->workqueue, &cmd->work);
+
+ return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops zloop_mq_ops = {
+ .queue_rq = zloop_queue_rq,
+ .complete = zloop_complete_rq,
+};
+
+static int zloop_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct zloop_device *zlo = disk->private_data;
+ int ret;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ if (zlo->state != Zlo_live)
+ ret = -ENXIO;
+ mutex_unlock(&zloop_ctl_mutex);
+ return ret;
+}
+
+static int zloop_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct zloop_device *zlo = disk->private_data;
+ struct blk_zone blkz = {};
+ unsigned int first, i;
+ int ret;
+
+ first = disk_zone_no(disk, sector);
+ if (first >= zlo->nr_zones)
+ return 0;
+ nr_zones = min(nr_zones, zlo->nr_zones - first);
+
+ for (i = 0; i < nr_zones; i++) {
+ unsigned int zone_no = first + i;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret) {
+ mutex_unlock(&zone->lock);
+ return ret;
+ }
+ }
+
+ blkz.start = zone->start;
+ blkz.len = zlo->zone_size;
+ blkz.wp = zone->wp;
+ blkz.cond = zone->cond;
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ blkz.capacity = zlo->zone_size;
+ } else {
+ blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ blkz.capacity = zlo->zone_capacity;
+ }
+
+ mutex_unlock(&zone->lock);
+
+ ret = cb(&blkz, i, data);
+ if (ret)
+ return ret;
+ }
+
+ return nr_zones;
+}
+
+static void zloop_free_disk(struct gendisk *disk)
+{
+ struct zloop_device *zlo = disk->private_data;
+ unsigned int i;
+
+ blk_mq_free_tag_set(&zlo->tag_set);
+
+ for (i = 0; i < zlo->nr_zones; i++) {
+ struct zloop_zone *zone = &zlo->zones[i];
+
+ mapping_set_gfp_mask(zone->file->f_mapping,
+ zone->old_gfp_mask);
+ fput(zone->file);
+ }
+
+ fput(zlo->data_dir);
+ destroy_workqueue(zlo->workqueue);
+ kfree(zlo->base_dir);
+ kvfree(zlo);
+}
+
+static const struct block_device_operations zloop_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_open,
+ .report_zones = zloop_report_zones,
+ .free_disk = zloop_free_disk,
+};
+
+__printf(3, 4)
+static struct file *zloop_filp_open_fmt(int oflags, umode_t mode,
+ const char *fmt, ...)
+{
+ struct file *file;
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ file = filp_open(p, oflags, mode);
+ kfree(p);
+ return file;
+}
+
+static int zloop_get_block_size(struct zloop_device *zlo,
+ struct zloop_zone *zone)
+{
+ struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * If the FS block size is less than or equal to 4K, use that as the
+ * device block size. Otherwise, fall back to the FS direct IO alignment
+ * constraint if that is provided, and to the FS underlying device
+ * physical block size if the direct IO alignment is unknown.
+ */
+ if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
+ zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
+ else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ zlo->block_size = st.dio_offset_align;
+ else if (sb_bdev)
+ zlo->block_size = bdev_physical_block_size(sb_bdev);
+ else
+ zlo->block_size = SECTOR_SIZE;
+
+ if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone capacity is not aligned to block size %u\n",
+ zlo->block_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
+ unsigned int zone_no, bool restore)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int oflags = O_RDWR;
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ mutex_init(&zone->lock);
+ zone->start = (sector_t)zone_no << zlo->zone_shift;
+
+ if (!restore)
+ oflags |= O_CREAT;
+
+ if (!opts->buffered_io)
+ oflags |= O_DIRECT;
+
+ if (zone_no < zlo->nr_conv_zones) {
+ /* Conventional zone file. */
+ set_bit(ZLOOP_ZONE_CONV, &zone->flags);
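+ /* Conventional zones have no write pointer. */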
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+ zone->wp = U64_MAX;
+
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat\n", zone_no);
+ return ret;
+ }
+ file_sectors = stat.size >> SECTOR_SHIFT;
+
+ if (restore && file_sectors != zlo->zone_size) {
+ pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n",
+ zone_no, file_sectors, zlo->zone_size);
+ return -EINVAL;
+ }
+
+ ret = vfs_truncate(&zone->file->f_path,
+ zlo->zone_size << SECTOR_SHIFT);
+ if (ret < 0) {
+ pr_err("Failed to truncate zone %u file (err=%d)\n",
+ zone_no, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Sequential zone file. */
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static bool zloop_dev_exists(struct zloop_device *zlo)
+{
+ struct file *cnv, *seq;
+ bool exists;
+
+ cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, 0);
+ seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, 0);
+ exists = !IS_ERR(cnv) || !IS_ERR(seq);
+
+ if (!IS_ERR(cnv))
+ fput(cnv);
+ if (!IS_ERR(seq))
+ fput(seq);
+
+ return exists;
+}
+
+static int zloop_ctl_add(struct zloop_options *opts)
+{
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+ .features = BLK_FEAT_ZONED,
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+ int ret = -EINVAL;
+ bool restore;
+
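+ /*
+ * Pin the module for the lifetime of the device. The reference is
+ * dropped on removal, or on failure below.
+ */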
+ __module_get(THIS_MODULE);
+
+ nr_zones = opts->capacity >> ilog2(opts->zone_size);
+ if (opts->nr_conv_zones >= nr_zones) {
+ pr_err("Invalid number of conventional zones %u\n",
+ opts->nr_conv_zones);
+ goto out;
+ }
+
+ zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL);
+ if (!zlo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zlo->state = Zlo_creating;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ goto out_free_dev;
+
+ /* Allocate an id. If @opts->id >= 0, that specific id is requested. */
+ if (opts->id >= 0) {
+ ret = idr_alloc(&zloop_index_idr, zlo,
+ opts->id, opts->id + 1, GFP_KERNEL);
+ if (ret == -ENOSPC)
+ ret = -EEXIST;
+ } else {
+ ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL);
+ }
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret < 0)
+ goto out_free_dev;
+
+ zlo->id = ret;
+ zlo->zone_shift = ilog2(opts->zone_size);
+ zlo->zone_size = opts->zone_size;
+ if (opts->zone_capacity)
+ zlo->zone_capacity = opts->zone_capacity;
+ else
+ zlo->zone_capacity = zlo->zone_size;
+ zlo->nr_zones = nr_zones;
+ zlo->nr_conv_zones = opts->nr_conv_zones;
+ zlo->buffered_io = opts->buffered_io;
+
+ zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
+ opts->nr_queues * opts->queue_depth, zlo->id);
+ if (!zlo->workqueue) {
+ ret = -ENOMEM;
+ goto out_free_idr;
+ }
+
+ if (opts->base_dir)
+ zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL);
+ else
+ zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL);
+ if (!zlo->base_dir) {
+ ret = -ENOMEM;
+ goto out_destroy_workqueue;
+ }
+
+ zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u",
+ zlo->base_dir, zlo->id);
+ if (IS_ERR(zlo->data_dir)) {
+ ret = PTR_ERR(zlo->data_dir);
+ pr_warn("Failed to open directory %s/%u (err=%d)\n",
+ zlo->base_dir, zlo->id, ret);
+ goto out_free_base_dir;
+ }
+
+ /*
+ * If we already have zone files, we are restoring a device created by a
+ * previous add operation. In this case, zloop_init_zone() will check
+ * that the zone files are consistent with the zone configuration given.
+ */
+ restore = zloop_dev_exists(zlo);
+ for (i = 0; i < nr_zones; i++) {
+ ret = zloop_init_zone(zlo, opts, i, restore);
+ if (ret)
+ goto out_close_files;
+ }
+
+ lim.physical_block_size = zlo->block_size;
+ lim.logical_block_size = zlo->block_size;
+
+ zlo->tag_set.ops = &zloop_mq_ops;
+ zlo->tag_set.nr_hw_queues = opts->nr_queues;
+ zlo->tag_set.queue_depth = opts->queue_depth;
+ zlo->tag_set.numa_node = NUMA_NO_NODE;
+ zlo->tag_set.cmd_size = sizeof(struct zloop_cmd);
+ zlo->tag_set.driver_data = zlo;
+
+ ret = blk_mq_alloc_tag_set(&zlo->tag_set);
+ if (ret) {
+ pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret);
+ goto out_close_files;
+ }
+
+ zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo);
+ if (IS_ERR(zlo->disk)) {
+ pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret);
+ ret = PTR_ERR(zlo->disk);
+ goto out_cleanup_tags;
+ }
+ zlo->disk->flags = GENHD_FL_NO_PART;
+ zlo->disk->fops = &zloop_fops;
+ zlo->disk->private_data = zlo;
+ sprintf(zlo->disk->disk_name, "zloop%d", zlo->id);
+ set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);
+
+ ret = blk_revalidate_disk_zones(zlo->disk);
+ if (ret)
+ goto out_cleanup_disk;
+
+ ret = add_disk(zlo->disk);
+ if (ret) {
+ pr_err("add_disk failed (err=%d)\n", ret);
+ goto out_cleanup_disk;
+ }
+
+ mutex_lock(&zloop_ctl_mutex);
+ zlo->state = Zlo_live;
+ mutex_unlock(&zloop_ctl_mutex);
+
+ pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ zlo->id, zlo->nr_zones,
+ ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
+ zlo->block_size);
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(zlo->disk);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&zlo->tag_set);
+out_close_files:
+ for (j = 0; j < i; j++) {
+ struct zloop_zone *zone = &zlo->zones[j];
+
+ if (!IS_ERR_OR_NULL(zone->file))
+ fput(zone->file);
+ }
+ fput(zlo->data_dir);
+out_free_base_dir:
+ kfree(zlo->base_dir);
+out_destroy_workqueue:
+ destroy_workqueue(zlo->workqueue);
+out_free_idr:
+ mutex_lock(&zloop_ctl_mutex);
+ idr_remove(&zloop_index_idr, zlo->id);
+ mutex_unlock(&zloop_ctl_mutex);
+out_free_dev:
+ kvfree(zlo);
+out:
+ module_put(THIS_MODULE);
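+ /*
+ * A missing base directory or zone file means the add request
+ * itself was invalid, so report -EINVAL instead of -ENOENT.
+ */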
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ret;
+}
+
+static int zloop_ctl_remove(struct zloop_options *opts)
+{
+ struct zloop_device *zlo;
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+ pr_err("No ID specified\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ zlo = idr_find(&zloop_index_idr, opts->id);
+ if (!zlo || zlo->state == Zlo_creating) {
+ ret = -ENODEV;
+ } else if (zlo->state == Zlo_deleting) {
+ ret = -EINVAL;
+ } else {
+ idr_remove(&zloop_index_idr, zlo->id);
+ zlo->state = Zlo_deleting;
+ }
+
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ del_gendisk(zlo->disk);
+ put_disk(zlo->disk);
+
+ pr_info("Removed device %d\n", opts->id);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int zloop_parse_options(struct zloop_options *opts, const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ unsigned int token;
+ int ret = 0;
+
+ /* Set defaults. */
+ opts->mask = 0;
+ opts->id = ZLOOP_DEF_ID;
+ opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES;
+ opts->zone_size = ZLOOP_DEF_ZONE_SIZE;
+ opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES;
+ opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
+ opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
+ opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+
+ if (!buf)
+ return 0;
+
+ /* Skip leading spaces before the options. */
+ while (isspace(*buf))
+ buf++;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* Parse the options, doing only some light invalid value checks. */
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, zloop_opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case ZLOOP_OPT_ID:
+ if (match_int(args, &opts->id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case ZLOOP_OPT_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_SIZE:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB ||
+ !is_power_of_2(token)) {
+ pr_err("Invalid zone size %u\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_size =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid zone capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_NR_CONV_ZONES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_conv_zones = token;
+ break;
+ case ZLOOP_OPT_BASE_DIR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->base_dir);
+ opts->base_dir = p;
+ break;
+ case ZLOOP_OPT_NR_QUEUES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid number of queues\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_queues = min(token, num_online_cpus());
+ break;
+ case ZLOOP_OPT_QUEUE_DEPTH:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid queue depth\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_depth = token;
+ break;
+ case ZLOOP_OPT_BUFFERED_IO:
+ opts->buffered_io = true;
+ break;
+ case ZLOOP_OPT_ERR:
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (opts->capacity <= opts->zone_size) {
+ pr_err("Invalid capacity\n");
+ goto out;
+ }
+
+ if (opts->zone_capacity > opts->zone_size) {
+ pr_err("Invalid zone capacity\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(options);
+ return ret;
+}
+
+enum {
+ ZLOOP_CTL_ADD,
+ ZLOOP_CTL_REMOVE,
+};
+
+static struct zloop_ctl_op {
+ int code;
+ const char *name;
+} zloop_ctl_ops[] = {
+ { ZLOOP_CTL_ADD, "add" },
+ { ZLOOP_CTL_REMOVE, "remove" },
+ { -1, NULL },
+};
+
+static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct zloop_options opts = { };
+ struct zloop_ctl_op *op;
+ const char *buf, *opts_buf;
+ int i, ret;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) {
+ op = &zloop_ctl_ops[i];
+ if (!op->name) {
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strncmp(buf, op->name, strlen(op->name)))
+ break;
+ }
+
+ if (count <= strlen(op->name))
+ opts_buf = NULL;
+ else
+ opts_buf = buf + strlen(op->name);
+
+ ret = zloop_parse_options(&opts, opts_buf);
+ if (ret) {
+ pr_err("Failed to parse options\n");
+ goto out;
+ }
+
+ switch (op->code) {
+ case ZLOOP_CTL_ADD:
+ ret = zloop_ctl_add(&opts);
+ break;
+ case ZLOOP_CTL_REMOVE:
+ ret = zloop_ctl_remove(&opts);
+ break;
+ default:
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(opts.base_dir);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int zloop_ctl_show(struct seq_file *seq_file, void *private)
+{
+ const struct match_token *tok;
+ int i;
+
+ /* Add operation */
+ seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name);
+ for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) {
+ tok = &zloop_opt_tokens[i];
+ if (!tok->pattern)
+ break;
+ if (i)
+ seq_putc(seq_file, ',');
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_putc(seq_file, '\n');
+
+ /* Remove operation */
+ seq_puts(seq_file, zloop_ctl_ops[1].name);
+ seq_puts(seq_file, " id=%d\n");
+
+ return 0;
+}
+
+static int zloop_ctl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return single_open(file, zloop_ctl_show, NULL);
+}
+
+static int zloop_ctl_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations zloop_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_ctl_open,
+ .release = zloop_ctl_release,
+ .write = zloop_ctl_write,
+ .read = seq_read,
+};
+
+static struct miscdevice zloop_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zloop-control",
+ .fops = &zloop_ctl_fops,
+};
+
+static int __init zloop_init(void)
+{
+ int ret;
+
+ ret = misc_register(&zloop_misc);
+ if (ret) {
+ pr_err("Failed to register misc device: %d\n", ret);
+ return ret;
+ }
+ pr_info("Module loaded\n");
+
+ return 0;
+}
+
+static void __exit zloop_exit(void)
+{
+ misc_deregister(&zloop_misc);
+ idr_destroy(&zloop_index_idr);
+}
+
+module_init(zloop_init);
+module_exit(zloop_exit);
+
+MODULE_DESCRIPTION("Zoned loopback device");
+MODULE_LICENSE("GPL");
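
For illustration, a minimal userspace sketch of the control interface above. It assumes the zloop module is loaded, that the data directory for device id 0 already exists under the base directory (zloop_ctl_add() opens it but does not create it), and that the new disk appears as /dev/zloop0, per the "zloop%d" disk name; error handling is elided.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	struct blk_zone_report *rep;
	unsigned int i;
	int ctl, fd;

	/* Create device 0; every other "add" option keeps its default. */
	ctl = open("/dev/zloop-control", O_WRONLY);
	write(ctl, "add id=0", strlen("add id=0"));
	close(ctl);

	/* Report the first 8 zones; serviced by zloop_report_zones(). */
	fd = open("/dev/zloop0", O_RDONLY);
	rep = calloc(1, sizeof(*rep) + 8 * sizeof(struct blk_zone));
	rep->nr_zones = 8;
	ioctl(fd, BLKREPORTZONE, rep);

	for (i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start %llu, wp %llu, cond 0x%x\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].wp,
		       rep->zones[i].cond);
	close(fd);

	/* Tear the device down. */
	ctl = open("/dev/zloop-control", O_WRONLY);
	write(ctl, "remove id=0", strlen("remove id=0"));
	close(ctl);
	return 0;
}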
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 6aea609b795c..402b7b175863 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -94,6 +94,7 @@ endchoice
config ZRAM_DEF_COMP
string
+ depends on ZRAM
default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
default "lzo" if ZRAM_DEF_COMP_LZO
default "lz4" if ZRAM_DEF_COMP_LZ4
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
index 0f7f252c12f4..b75016e0e654 100644
--- a/drivers/block/zram/backend_deflate.c
+++ b/drivers/block/zram/backend_deflate.c
@@ -8,7 +8,7 @@
#include "backend_deflate.h"
/* Use the same value as crypto API */
-#define DEFLATE_DEF_WINBITS 11
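+/*
+ * Negative windowBits make zlib produce and expect a raw deflate
+ * stream, with no zlib header or checksum; the default selects a
+ * 2^11 byte window.
+ */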
+#define DEFLATE_DEF_WINBITS (-11)
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
@@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params)
static int deflate_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
return 0;
}
@@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
- sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
zctx->cctx.workspace = vzalloc(sz);
if (!zctx->cctx.workspace)
goto error;
ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK)
goto error;
@@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
if (!zctx->dctx.workspace)
goto error;
- ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
if (ret != Z_OK)
goto error;
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
index 847f3334eb38..daccd60857eb 100644
--- a/drivers/block/zram/backend_lz4.c
+++ b/drivers/block/zram/backend_lz4.c
@@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params)
static int lz4_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4_ACCELERATION_DEFAULT;
return 0;
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
index 5f37d5abcaeb..9e8a35dfa56d 100644
--- a/drivers/block/zram/backend_lz4hc.c
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params)
static int lz4hc_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4HC_DEFAULT_CLEVEL;
return 0;
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 1184c0036f44..81defb98ed09 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -24,19 +24,10 @@ struct zstd_params {
/*
* For C/D dictionaries we need to provide zstd with zstd_custom_mem,
* which zstd uses internally to allocate/free memory when needed.
- *
- * This means that allocator.customAlloc() can be called from zcomp_compress()
- * under local-lock (per-CPU compression stream), in which case we must use
- * GFP_ATOMIC.
- *
- * Another complication here is that we can be configured as a swap device.
*/
static void *zstd_custom_alloc(void *opaque, size_t size)
{
- if (!preemptible())
- return kvzalloc(size, GFP_ATOMIC);
-
- return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN);
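+ /*
+ * Streams are mutex-protected now, so allocations may sleep, but
+ * zram can sit in the swap/IO path: stay within GFP_NOIO.
+ */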
+ return kvzalloc(size, GFP_NOIO | __GFP_NOWARN);
}
static void zstd_custom_free(void *opaque, void *address)
@@ -67,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params)
return -ENOMEM;
params->drv_data = zp;
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index bb514403e305..b1bd1daa0060 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -6,9 +6,9 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/crypto.h>
+#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
+#include <linux/sysfs.h>
#include "zcomp.h"
@@ -46,6 +46,7 @@ static const struct zcomp_ops *backends[] = {
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
comp->ops->destroy_ctx(&zstrm->ctx);
+ vfree(zstrm->local_copy);
vfree(zstrm->buffer);
zstrm->buffer = NULL;
}
@@ -58,12 +59,13 @@ static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
if (ret)
return ret;
+ zstrm->local_copy = vzalloc(PAGE_SIZE);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
- if (!zstrm->buffer) {
+ if (!zstrm->buffer || !zstrm->local_copy) {
zcomp_strm_free(comp, zstrm);
return -ENOMEM;
}
@@ -88,34 +90,48 @@ bool zcomp_available_algorithm(const char *comp)
}
/* show available compressors */
-ssize_t zcomp_available_show(const char *comp, char *buf)
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
- ssize_t sz = 0;
int i;
for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
if (!strcmp(comp, backends[i]->name)) {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "[%s] ",
+ backends[i]->name);
} else {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
}
}
- sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
- return sz;
+ at += sysfs_emit_at(buf, at, "\n");
+ return at;
}
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- local_lock(&comp->stream->lock);
- return this_cpu_ptr(comp->stream);
+ for (;;) {
+ struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
+
+ /*
+ * Inspired by zswap
+ *
+ * stream is returned with ->mutex locked which prevents
+ * cpu_dead() from releasing this stream under us, however
+ * there is still a race window between raw_cpu_ptr() and
+ * mutex_lock(), during which we could have been migrated
+ * from a CPU that has already destroyed its stream. If
+ * so then unlock and re-try on the current CPU.
+ */
+ mutex_lock(&zstrm->lock);
+ if (likely(zstrm->buffer))
+ return zstrm;
+ mutex_unlock(&zstrm->lock);
+ }
}
-void zcomp_stream_put(struct zcomp *comp)
+void zcomp_stream_put(struct zcomp_strm *zstrm)
{
- local_unlock(&comp->stream->lock);
+ mutex_unlock(&zstrm->lock);
}
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
@@ -129,6 +145,7 @@ int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
};
int ret;
+ might_sleep();
ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
if (!ret)
*dst_len = req.dst_len;
@@ -145,18 +162,16 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
.dst_len = PAGE_SIZE,
};
+ might_sleep();
return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
int ret;
- zstrm = per_cpu_ptr(comp->stream, cpu);
- local_lock_init(&zstrm->lock);
-
ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
@@ -166,16 +181,17 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
- zstrm = per_cpu_ptr(comp->stream, cpu);
+ mutex_lock(&zstrm->lock);
zcomp_strm_free(comp, zstrm);
+ mutex_unlock(&zstrm->lock);
return 0;
}
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
- int ret;
+ int ret, cpu;
comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
@@ -186,6 +202,9 @@ static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
if (ret)
goto cleanup;
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index ad5762813842..eacfd3f7d61d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -3,9 +3,13 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/local_lock.h>
+#include <linux/mutex.h>
-#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
/*
* Immutable driver (backend) parameters. The driver may attach private
@@ -17,6 +21,9 @@ struct zcomp_params {
void *dict;
size_t dict_sz;
s32 level;
+ union {
+ struct deflate_params deflate;
+ };
void *drv_data;
};
@@ -31,9 +38,11 @@ struct zcomp_ctx {
};
struct zcomp_strm {
- local_lock_t lock;
+ struct mutex lock;
/* compression buffer */
void *buffer;
+ /* local copy of handle memory */
+ void *local_copy;
struct zcomp_ctx ctx;
};
@@ -70,14 +79,14 @@ struct zcomp {
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
-ssize_t zcomp_available_show(const char *comp, char *buf);
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at);
bool zcomp_available_algorithm(const char *comp);
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
-void zcomp_stream_put(struct zcomp *comp);
+void zcomp_stream_put(struct zcomp_strm *zstrm);
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
const void *src, unsigned int *dst_len);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ad9c9bc3ccfc..a43074657531 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
+#define ZRAM_MAX_ALGO_NAME_SZ 128
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -55,22 +57,59 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
-static int zram_slot_trylock(struct zram *zram, u32 index)
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
+{
+ static struct lock_class_key __key;
+
+ lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock",
+ &__key, 0);
+}
+
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ * - must check return value and handle locking failures
+ */
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
{
- return spin_trylock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+ mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+ return true;
+ }
+
+ return false;
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- spin_lock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+ wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- spin_unlock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_release(slot_dep_map(zram, index), _RET_IP_);
+ clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
static inline bool init_done(struct zram *zram)
@@ -93,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
zram->table[index].handle = handle;
}
-/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
@@ -112,17 +150,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -143,6 +170,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
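+ /*
+ * Lock-free max update: retry until the stored maximum is already
+ * at least @pages, or until the cmpxchg publishes @pages.
+ */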
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -178,23 +226,114 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = ktime_get_boottime();
#endif
}
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
+#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
+struct zram_pp_slot {
+ unsigned long index;
+ struct list_head entry;
+};
+
+/*
+ * A post-processing bucket is, essentially, a size class: it defines
+ * the range (in bytes) of pp-slot sizes that fall into a particular
+ * bucket.
+ */
+#define PP_BUCKET_SIZE_RANGE 64
+#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
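+/*
+ * E.g., with 4K pages there are 65 buckets, and a slot holding a
+ * 1000 byte compressed object falls into bucket 1000 / 64 = 15.
+ */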
+
+struct zram_pp_ctl {
+ struct list_head pp_buckets[NUM_PP_BUCKETS];
+};
+
+static struct zram_pp_ctl *init_pp_ctl(void)
{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+ struct zram_pp_ctl *ctl;
+ u32 idx;
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
+ ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return NULL;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
+ INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
+ return ctl;
}
+static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
+{
+ list_del_init(&pps->entry);
+
+ zram_slot_lock(zram, pps->index);
+ zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
+ zram_slot_unlock(zram, pps->index);
+
+ kfree(pps);
+}
+
+static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
+{
+ u32 idx;
+
+ if (!ctl)
+ return;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
+ while (!list_empty(&ctl->pp_buckets[idx])) {
+ struct zram_pp_slot *pps;
+
+ pps = list_first_entry(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ release_pp_slot(zram, pps);
+ }
+ }
+
+ kfree(ctl);
+}
+
+static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+ u32 index)
+{
+ struct zram_pp_slot *pps;
+ u32 bid;
+
+ pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
+ if (!pps)
+ return false;
+
+ INIT_LIST_HEAD(&pps->entry);
+ pps->index = index;
+
+ bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+ list_add(&pps->entry, &ctl->pp_buckets[bid]);
+
+ zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+ return true;
+}
+
+static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
+{
+ struct zram_pp_slot *pps = NULL;
+ s32 idx = NUM_PP_BUCKETS - 1;
+
+ /*
+ * The higher the bucket id, the larger the slots it holds and the
+ * more beneficial their post-processing is.
+ */
+ while (idx >= 0) {
+ pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ if (pps)
+ break;
+
+ idx--;
+ }
+ return pps;
+}
+#endif
+
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
@@ -234,7 +373,7 @@ static ssize_t initstate_show(struct device *dev,
val = init_done(zram);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
@@ -242,7 +381,7 @@ static ssize_t disksize_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+ return sysfs_emit(buf, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
@@ -296,19 +435,28 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
for (index = 0; index < nr_pages; index++) {
/*
- * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
- * See the comment in writeback_store.
+ * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+ * post-processing (recompress, writeback) happens to the
+ * ZRAM_SAME slot.
+ *
+ * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
*/
zram_slot_lock(zram, index);
- if (zram_allocated(zram, index) &&
- !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- is_idle = !cutoff || ktime_after(cutoff,
- zram->table[index].ac_time);
+ is_idle = !cutoff ||
+ ktime_after(cutoff, zram->table[index].ac_time);
#endif
- if (is_idle)
- zram_set_flag(zram, index, ZRAM_IDLE);
- }
+ if (is_idle)
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ else
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
@@ -384,7 +532,7 @@ static ssize_t writeback_limit_enable_show(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
@@ -419,7 +567,7 @@ static ssize_t writeback_limit_show(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
static void reset_bdev(struct zram *zram)
@@ -511,6 +659,12 @@ static ssize_t backing_dev_store(struct device *dev,
}
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ /* Refuse to use zero sized device (also prevents self reference) */
+ if (!nr_pages) {
+ err = -EINVAL;
+ goto out;
+ }
+
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
@@ -580,64 +734,21 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- unsigned long index = 0;
- struct bio bio;
- struct bio_vec bio_vec;
- struct page *page;
- ssize_t ret = len;
- int mode, err;
unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct page *page = NULL;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
+ if (!page)
+ return -ENOMEM;
- for (; nr_pages != 0; index++, nr_pages--) {
+ while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
spin_unlock(&zram->wb_limit_lock);
@@ -654,40 +765,19 @@ static ssize_t writeback_store(struct device *dev,
}
}
+ index = pps->index;
zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
/*
- * Clearing ZRAM_UNDER_WB is duty of caller.
- * IOW, zram_free_page never clear it.
+ * scan_slots_for_writeback() sets ZRAM_PP_SLOT and releases the slot
+ * lock, so slots can change in the meantime. If a slot is accessed or
+ * freed, it loses its ZRAM_PP_SLOT flag and hence we don't
+ * post-process it.
*/
- zram_set_flag(zram, index, ZRAM_UNDER_WB);
- /* Need for hugepage writeback racing */
- zram_set_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+ goto next;
+ if (zram_read_from_zspool(zram, page, index))
+ goto next;
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
- continue;
- }
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
@@ -700,10 +790,7 @@ static ssize_t writeback_store(struct device *dev,
*/
err = submit_bio_wait(&bio);
if (err) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
/*
* BIO errors are not fatal, we continue and simply
* attempt to writeback the remaining objects (pages).
@@ -717,27 +804,21 @@ static ssize_t writeback_store(struct device *dev,
}
atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
/*
- * We released zram_slot_lock so need to check if the slot was
- * changed. If there is freeing for the slot, we can catch it
- * easily by zram_allocated.
- * A subtle case is the slot is freed/reallocated/marked as
- * ZRAM_IDLE again. To close the race, idle_store doesn't
- * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
- * Thus, we could close the race by checking ZRAM_IDLE bit.
+ * Same as above: we release the slot lock during writeback, so the
+ * slot can change under us. It can be freed, or freed and then
+ * reallocated (zram_write_page()). In both cases the slot loses its
+ * ZRAM_PP_SLOT flag. No concurrent post-processing can set
+ * ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
*/
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- !zram_test_flag(zram, index, ZRAM_IDLE)) {
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- }
zram_free_page(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
+ zram_set_handle(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
@@ -746,12 +827,224 @@ static ssize_t writeback_store(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
+
+ cond_resched();
}
if (blk_idx)
free_block_bdev(zram, blk_idx);
- __free_page(page);
+ if (page)
+ __free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
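+/* Parse an inclusive "<lo>-<hi>" page index range, e.g. "5-10". */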
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
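+/*
+ * Writeback is requested by writing to the device's writeback sysfs
+ * attribute, e.g. (assuming the device is zram0):
+ *
+ *   echo "type=idle" > /sys/block/zram0/writeback
+ *   echo "page_indexes=1-100" > /sys/block/zram0/writeback
+ */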
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+ u32 mode = 0;
+ int err;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+ * The old writeback interface had a minor inconsistency: only the
+ * page_index parameter required key=value syntax, while the
+ * writeback mode was a valueless parameter.
+ *
+ * This is no longer the case: all parameters now require values.
+ * However, to keep supporting the legacy format, we check whether a
+ * valueless parameter can be recognized as a (legacy) writeback
+ * mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
release_init_lock:
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
@@ -792,7 +1085,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -923,27 +1216,6 @@ static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
-/*
- * We switched to per-cpu streams and this attr is not needed anymore.
- * However, we will keep it around for some time, because:
- * a) we may revert per-cpu streams in the future
- * b) it's visible to user space and we need to follow our 2 years
- * retirement rule; but we already have a number of 'soon to be
- * altered' attrs, so max_comp_streams need to wait for the next
- * layoff cycle.
- */
-static ssize_t max_comp_streams_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
-}
-
-static ssize_t max_comp_streams_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- return len;
-}
-
static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
/* Do not free statically defined compression algorithms */
@@ -953,24 +1225,13 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
size_t sz;
sz = strlen(buf);
- if (sz >= CRYPTO_MAX_ALG_NAME)
+ if (sz >= ZRAM_MAX_ALGO_NAME_SZ)
return -E2BIG;
compressor = kstrdup(buf, GFP_KERNEL);
@@ -1004,13 +1265,15 @@ static void comp_params_reset(struct zram *zram, u32 prio)
struct zcomp_params *params = &zram->params[prio];
vfree(params->dict);
- params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
params->dict_sz = 0;
params->dict = NULL;
}
static int comp_params_store(struct zram *zram, u32 prio, s32 level,
- const char *dict_path)
+ const char *dict_path,
+ struct deflate_params *deflate_params)
{
ssize_t sz = 0;
@@ -1028,6 +1291,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
zram->params[prio].dict_sz = sz;
zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
return 0;
}
@@ -1036,11 +1300,14 @@ static ssize_t algorithm_params_store(struct device *dev,
const char *buf,
size_t len)
{
- s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
struct zram *zram = dev_to_zram(dev);
int ret;
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1071,6 +1338,13 @@ static ssize_t algorithm_params_store(struct device *dev,
dict_path = val;
continue;
}
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
}
/* Lookup priority by algorithm name */
@@ -1092,7 +1366,7 @@ static ssize_t algorithm_params_store(struct device *dev,
if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
- ret = comp_params_store(zram, prio, level, dict_path);
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
return ret ? ret : len;
}
@@ -1101,8 +1375,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1126,14 +1404,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf + sz);
+ sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
@@ -1203,7 +1482,7 @@ static ssize_t io_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
@@ -1233,7 +1512,7 @@ static ssize_t mm_stat_show(struct device *dev,
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
@@ -1258,8 +1537,8 @@ static ssize_t bd_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "%8llu %8llu %8llu\n",
FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
@@ -1277,10 +1556,9 @@ static ssize_t debug_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "version: %d\n0 %8llu\n",
version,
- (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1299,12 +1577,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
+ if (!zram->table)
+ return;
+
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
+ zram->table = NULL;
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
@@ -1319,6 +1601,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
@@ -1326,15 +1609,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
huge_class_size = zs_huge_class_size(zram->mem_pool);
for (index = 0; index < num_pages; index++)
- spin_lock_init(&zram->table[index].lock);
+ zram_slot_lock_init(zram, index);
+
return true;
}
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
@@ -1342,22 +1621,20 @@ static void zram_free_page(struct zram *zram, size_t index)
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
- if (zram_test_flag(zram, index, ZRAM_IDLE))
- zram_clear_flag(zram, index, ZRAM_IDLE);
+
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
+ zram_set_priority(zram, index, 0);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
- if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
-
- zram_set_priority(zram, index, 0);
-
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ free_block_bdev(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1378,65 +1655,80 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(zram->mem_pool, handle);
atomic64_sub(zram_get_obj_size(zram, index),
- &zram->stats.compr_data_size);
+ &zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
- WARN_ON_ONCE(zram->table[index].flags &
- ~(1UL << ZRAM_UNDER_WB));
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
+ zstrm = zcomp_stream_get(zram->comps[prio]);
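+ /*
+ * zs_obj_read_begin() returns either a direct pointer to the
+ * object or, if the object spans physical pages, a copy made into
+ * the per-stream local_copy buffer passed here.
+ */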
+ src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+ zcomp_stream_put(zstrm);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- copy_page(dst, src);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zram->comps[prio], zstrm,
- src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
- zs_unmap_object(zram->mem_pool, handle);
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1454,7 +1746,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
*/
zram_slot_unlock(zram, index);
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
parent);
}
@@ -1492,129 +1784,122 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
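
write_same_filled_page() stores only the fill value in the handle field, so no zsmalloc object is allocated at all. A hedged sketch of the idea behind page_same_filled() (not the kernel's exact implementation, which also uses fast-reject shortcuts):

	/* Hedged sketch of the same-filled test, not the exact kernel
	 * code: a page is "same filled" when every machine word in it
	 * holds the same value, so storing that one value suffices.
	 */
	static bool page_same_filled_sketch(void *mem, unsigned long *element)
	{
		unsigned long *word = mem;
		unsigned long val = word[0];
		size_t i;

		for (i = 1; i < PAGE_SIZE / sizeof(*word); i++) {
			if (word[i] != val)
				return false;
		}
		*element = val;
		return true;
	}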
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src;
+
+ /*
+	 * This function is called from preemptible context, so we don't need
+	 * the optimistic allocation with a pessimistic fallback that we use
+	 * for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ src = kmap_local_page(page);
+ zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
+ kunmap_local(src);
+
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
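
zs_malloc() reports failure through the handle itself: the returned unsigned long carries an -errno in the pointer error range, which is why both write paths decode it with IS_ERR_VALUE() and a cast for PTR_ERR(). A short sketch of the idiom (alloc_handle_sketch() is an illustrative name, not a driver function):

	/* Sketch of the error-encoded handle idiom used above. */
	static int alloc_handle_sketch(struct zs_pool *pool, size_t size,
				       gfp_t gfp, int nid, unsigned long *out)
	{
		unsigned long handle = zs_malloc(pool, size, gfp, nid);

		if (IS_ERR_VALUE(handle))
			return PTR_ERR((void *)handle);	/* e.g. -ENOMEM */
		*out = handle;
		return 0;
	}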
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
- unsigned long handle = -ENOMEM;
- unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ unsigned long handle;
+ unsigned int comp_len;
+ void *mem;
struct zcomp_strm *zstrm;
- unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ unsigned long element;
+ bool same_filled;
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
-compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
+ mem = kmap_local_page(page);
ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
- src, &comp_len);
- kunmap_local(src);
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
pr_err("Compression failed! err=%d\n", ret);
- zs_free(zram->mem_pool, handle);
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
- /*
- * handle allocation has 2 paths:
- * a) fast path is executed with preemption disabled (for
- * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
- * since we can't sleep;
- * b) slow path enables preemption and attempts to allocate
- * the page with __GFP_DIRECT_RECLAIM bit set. we have to
- * put per-cpu compression stream and, thus, to re-do
- * the compression once handle is allocated.
- *
- * if we have a 'non-null' handle here then we are coming
- * from the slow path and handle has already been allocated.
- */
- if (IS_ERR_VALUE(handle))
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- atomic64_inc(&zram->stats.writestall);
- handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
-
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zstrm);
+ return write_incompressible_page(zram, page, index);
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle)) {
+ zcomp_stream_put(zstrm);
+ return PTR_ERR((void *)handle);
+ }
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (!zram_can_store_page(zram)) {
+ zcomp_stream_put(zstrm);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
+ zcomp_stream_put(zstrm);
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
-
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
zram_slot_lock(zram, index);
zram_free_page(zram, index);
-
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
-
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
@@ -1648,6 +1933,49 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
#ifdef CONFIG_ZRAM_MULTI_COMP
+#define RECOMPRESS_IDLE (1 << 0)
+#define RECOMPRESS_HUGE (1 << 1)
+
+static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
+ struct zram_pp_ctl *ctl)
+{
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long index;
+
+ for (index = 0; index < nr_pages; index++) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (mode & RECOMPRESS_IDLE &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+
+ if (mode & RECOMPRESS_HUGE &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+		/* Already compressed with the same or a higher priority */
+ if (zram_get_priority(zram, index) + 1 >= prio_max)
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ }
+
+ return 0;
+}
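
Post-processing is now two-phase: the scan above tags candidates with ZRAM_PP_SLOT while holding the slot lock, and the processing loop must re-check that flag under the lock because a slot can be freed or rewritten in between. The consumer side of this contract looks like the recompress_store() loop later in this patch; a hedged sketch (process_slot() is a hypothetical worker, and the pp_ctl helper bodies are not part of this diff):

	/* Hedged sketch of the two-phase post-processing idiom; the
	 * ZRAM_PP_SLOT flag is re-validated under the slot lock because
	 * the slot may have changed since the scan. process_slot() is a
	 * hypothetical worker, not a driver function.
	 */
	while ((pps = select_pp_slot(ctl))) {
		zram_slot_lock(zram, pps->index);
		if (zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
			process_slot(zram, pps->index);
		zram_slot_unlock(zram, pps->index);
		release_pp_slot(zram, pps);
	}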
+
/*
* This function will decompress (unless it's ZRAM_HUGE) the page and then
* attempt to compress it using provided compression algorithm priority
@@ -1655,7 +1983,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
*
* Corresponding ZRAM slot should be locked.
*/
-static int zram_recompress(struct zram *zram, u32 index, struct page *page,
+static int recompress_slot(struct zram *zram, u32 index, struct page *page,
u64 *num_recomp_pages, u32 threshold, u32 prio,
u32 prio_max)
{
@@ -1666,9 +1994,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
- u32 num_recomps = 0;
- void *src, *dst;
- int ret;
+ void *src;
+ int ret = 0;
handle_old = zram_get_handle(zram, index);
if (!handle_old)
@@ -1685,7 +2012,24 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (ret)
return ret;
+ /*
+ * We touched this entry so mark it as non-IDLE. This makes sure that
+ * we don't preserve IDLE flag and don't incorrectly pick this entry
+ * for different post-processing type (e.g. writeback).
+ */
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+
+ prio = max(prio, zram_get_priority(zram, index) + 1);
+ /*
+	 * The recompression slot scan should not select slots that are
+	 * already compressed with a higher-priority algorithm, but
+	 * check here just in case.
+ */
+ if (prio >= prio_max)
+ return 0;
+
/*
* Iterate the secondary comp algorithms list (in order of priority)
* and try to recompress the page.
@@ -1694,14 +2038,6 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (!zram->comps[prio])
continue;
- /*
- * Skip if the object is already re-compressed with a higher
- * priority algorithm (or same algorithm).
- */
- if (prio <= zram_get_priority(zram, index))
- continue;
-
- num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
ret = zcomp_compress(zram->comps[prio], zstrm,
@@ -1709,8 +2045,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
kunmap_local(src);
if (ret) {
- zcomp_stream_put(zram->comps[prio]);
- return ret;
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
+ break;
}
class_index_new = zs_lookup_class_index(zram->mem_pool,
@@ -1719,7 +2056,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
continue;
}
@@ -1728,14 +2066,6 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
}
/*
- * We did not try to recompress, e.g. when we have only one
- * secondary algorithm and the page is already recompressed
- * using that algorithm
- */
- if (!zstrm)
- return 0;
-
- /*
* Decrement the limit (if set) on pages we can recompress, even
* when current recompression was unsuccessful or did not compress
* the page below the threshold, because we still spent resources
@@ -1744,48 +2074,44 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (*num_recomp_pages)
*num_recomp_pages -= 1;
- if (class_index_new >= class_index_old) {
+ /* Compression error */
+ if (ret)
+ return ret;
+
+ if (!zstrm) {
/*
* Secondary algorithms failed to re-compress the page
- * in a way that would save memory, mark the object as
- * incompressible so that we will not try to compress
- * it again.
+ * in a way that would save memory.
*
- * We need to make sure that all secondary algorithms have
- * failed, so we test if the number of recompressions matches
- * the number of active secondary algorithms.
+ * Mark the object incompressible if the max-priority
+ * algorithm couldn't re-compress it.
*/
- if (num_recomps == zram->num_active_comps - 1)
- zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ if (prio < zram->num_active_comps)
+ return 0;
+ zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
return 0;
}
- /* Successful recompression but above threshold */
- if (threshold && comp_len_new >= threshold)
- return 0;
-
/*
- * No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in zram_write_bvec()) since
- * we already have stored that object in zsmalloc. If we cannot
- * alloc memory for recompressed object then we bail out and
- * simply keep the old (existing) object in zsmalloc.
+	 * We are holding the per-CPU stream mutex and the entry lock, so
+	 * it's better to avoid direct reclaim. An allocation error is not
+	 * fatal since we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
}
- dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
- memcpy(dst, zstrm->buffer, comp_len_new);
- zcomp_stream_put(zram->comps[prio]);
-
- zs_unmap_object(zram->mem_pool, handle_new);
+ zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
+ zcomp_stream_put(zstrm);
zram_free_page(zram, index);
zram_set_handle(zram, index, handle_new);
@@ -1798,23 +2124,23 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
return 0;
}
-#define RECOMPRESS_IDLE (1 << 0)
-#define RECOMPRESS_HUGE (1 << 1)
-
static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
u64 num_recomp_pages = ULLONG_MAX;
+ struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_slot *pps;
u32 mode = 0, threshold = 0;
- unsigned long index;
- struct page *page;
+ u32 prio, prio_max;
+ struct page *page = NULL;
ssize_t ret;
+ prio = ZRAM_SECONDARY_COMP;
+ prio_max = zram->num_active_comps;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1867,7 +2193,7 @@ static ssize_t recompress_store(struct device *dev,
if (prio == ZRAM_PRIMARY_COMP)
prio = ZRAM_SECONDARY_COMP;
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
continue;
}
}
@@ -1881,6 +2207,12 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
if (algo) {
bool found = false;
@@ -1889,7 +2221,7 @@ static ssize_t recompress_store(struct device *dev,
continue;
if (!strcmp(zram->comp_algs[prio], algo)) {
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
found = true;
break;
}
@@ -1901,42 +2233,44 @@ static ssize_t recompress_store(struct device *dev,
}
}
+ prio_max = min(prio_max, (u32)zram->num_active_comps);
+ if (prio >= prio_max) {
+ ret = -EINVAL;
+ goto release_init_lock;
+ }
+
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto release_init_lock;
}
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_recompress(zram, mode, prio_max, ctl);
+
ret = len;
- for (index = 0; index < nr_pages; index++) {
+ while ((pps = select_pp_slot(ctl))) {
int err = 0;
if (!num_recomp_pages)
break;
- zram_slot_lock(zram, index);
-
- if (!zram_allocated(zram, index))
- goto next;
-
- if (mode & RECOMPRESS_IDLE &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
-
- if (mode & RECOMPRESS_HUGE &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ zram_slot_lock(zram, pps->index);
+ if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
goto next;
- err = zram_recompress(zram, index, page, &num_recomp_pages,
- threshold, prio, prio_max);
+ err = recompress_slot(zram, pps->index, page,
+ &num_recomp_pages, threshold,
+ prio, prio_max);
next:
- zram_slot_unlock(zram, index);
+ zram_slot_unlock(zram, pps->index);
+ release_pp_slot(zram, pps);
+
if (err) {
ret = err;
break;
@@ -1945,9 +2279,11 @@ next:
cond_resched();
}
- __free_page(page);
-
release_init_lock:
+ if (page)
+ __free_page(page);
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
}
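
The pp_in_progress guard works because atomic_xchg() returns the previous value: exactly one caller observes the 0 -> 1 transition and proceeds, while concurrent callers see 1 and bail out with -EAGAIN. A minimal sketch of the idiom with hypothetical helper names:

	/* Minimal sketch of the pp_in_progress guard; pp_begin()/pp_end()
	 * are illustrative names, not driver functions.
	 */
	static int pp_begin(struct zram *zram)
	{
		/* only the 0 -> 1 winner proceeds */
		if (atomic_xchg(&zram->pp_in_progress, 1))
			return -EAGAIN;
		return 0;
	}

	static void pp_end(struct zram *zram)
	{
		atomic_set(&zram->pp_in_progress, 0);
	}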
@@ -2105,7 +2441,7 @@ static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
struct zcomp *comp = zram->comps[prio];
zram->comps[prio] = NULL;
@@ -2131,11 +2467,6 @@ static void zram_reset_device(struct zram *zram)
zram->limit_pages = 0;
- if (!init_done(zram)) {
- up_write(&zram->init_lock);
- return;
- }
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
@@ -2144,6 +2475,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
+ atomic_set(&zram->pp_in_progress, 0);
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
@@ -2176,7 +2508,7 @@ static ssize_t disksize_store(struct device *dev,
goto out_unlock;
}
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
@@ -2272,7 +2604,6 @@ static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
-static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
@@ -2294,7 +2625,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_idle.attr,
- &dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
@@ -2381,6 +2711,9 @@ static int zram_add(void)
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ atomic_set(&zram->pp_in_progress, 0);
+ zram_comp_params_reset(zram);
+ comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
@@ -2388,9 +2721,6 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
- zram_comp_params_reset(zram);
- comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
-
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -2472,7 +2802,7 @@ static ssize_t hot_add_show(const struct class *class,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index cfc8c059db63..6cee93f9c0d0 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -17,7 +17,6 @@
#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
-#include <linux/crypto.h>
#include "zcomp.h"
@@ -28,7 +27,6 @@
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-
/*
* ZRAM is mainly used for memory efficiency so we want to keep memory
* footprint small and thus squeeze size and zram pageflags into a flags
@@ -46,8 +44,9 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
	ZRAM_SAME = ZRAM_FLAG_SHIFT,	/* Page consists of the same element */
+ ZRAM_ENTRY_LOCK, /* entry access lock bit */
ZRAM_WB, /* page is stored on backing_device */
- ZRAM_UNDER_WB, /* page is under writeback */
+ ZRAM_PP_SLOT, /* Selected for post-processing */
ZRAM_HUGE, /* Incompressible page */
ZRAM_IDLE, /* not accessed page since last idle marking */
ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */
@@ -58,19 +57,19 @@ enum zram_pageflags {
__NR_ZRAM_PAGEFLAGS,
};
-/*-- Data structures */
-
-/* Allocated for each disk page */
+/*
+ * Allocated for each disk page. We use a bit lock (the ZRAM_ENTRY_LOCK
+ * bit of ->flags) to save memory: there can be plenty of entries, and
+ * standard locking primitives (e.g. a mutex) would significantly
+ * increase the sizeof() of each entry and hence of the meta table.
+ */
struct zram_table_entry {
- union {
- unsigned long handle;
- unsigned long element;
- };
- unsigned int flags;
- spinlock_t lock;
+ unsigned long handle;
+ unsigned long flags;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
+ struct lockdep_map dep_map;
};
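
With the per-entry spinlock gone, the entry lock is a single bit in ->flags, and the lockdep_map exists only so the new bit lock remains visible to lockdep. The locking helpers are not part of this hunk; a hedged sketch of how such a flags-bit lock can be built on the kernel's atomic bitops:

	/* Hedged sketch of a flags-bit entry lock in the spirit of
	 * ZRAM_ENTRY_LOCK; the driver's actual helpers are not shown in
	 * this hunk, so these names are illustrative only.
	 */
	static void entry_lock_sketch(struct zram_table_entry *e)
	{
		/* spin until we win the 0 -> 1 transition on the lock bit */
		while (test_and_set_bit_lock(ZRAM_ENTRY_LOCK, &e->flags))
			cpu_relax();
	}

	static void entry_unlock_sketch(struct zram_table_entry *e)
	{
		clear_bit_unlock(ZRAM_ENTRY_LOCK, &e->flags);
	}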
struct zram_stats {
@@ -83,7 +82,6 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
- atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
@@ -139,5 +137,6 @@ struct zram {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
+ atomic_t pp_in_progress;
};
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 18767b54df35..7df69ccb6600 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -312,7 +312,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -336,7 +338,7 @@ config BT_HCIBFUSB
config BT_HCIDTL1
tristate "HCI DTL1 (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI DTL1 (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
@@ -349,7 +351,7 @@ config BT_HCIDTL1
config BT_HCIBT3C
tristate "HCI BT3C (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select FW_LOADER
help
Bluetooth HCI BT3C (PC Card) driver.
@@ -363,7 +365,7 @@ config BT_HCIBT3C
config BT_HCIBLUECARD
tristate "HCI BlueCard (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI BlueCard (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
@@ -437,8 +439,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -483,7 +487,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index cab93935cc7f..8df310983bf6 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -365,9 +365,8 @@ static void bfusb_rx_complete(struct urb *urb)
buf += 3;
}
- if (count < len) {
+ if (count < len)
bt_dev_err(data->hdev, "block extends over URB buffer ranges");
- }
if ((hdr & 0xe1) == 0xc1)
bfusb_recv_block(data, hdr, buf, len);
@@ -671,7 +670,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 36eabf61717f..1e3a56e9b139 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -158,7 +158,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
static void bluecard_activity_led_timeout(struct timer_list *t)
{
- struct bluecard_info *info = from_timer(info, t, timer);
+ struct bluecard_info *info = timer_container_of(info, t, timer);
unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
@@ -638,7 +638,7 @@ static int bluecard_hci_close(struct hci_dev *hdev)
bluecard_hci_flush(hdev);
/* Stop LED timer */
- del_timer_sync(&(info->timer));
+ timer_delete_sync(&(info->timer));
/* Disable power LED */
outb(0x00, iobase + 0x30);
@@ -885,7 +885,7 @@ static void bluecard_release(struct pcmcia_device *link)
bluecard_close(info);
- del_timer_sync(&(info->timer));
+ timer_delete_sync(&(info->timer));
pcmcia_disable_device(link);
}
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1fa58c059cbf..b7ba667a3d09 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
@@ -398,7 +398,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hdev->send = bpa10x_send_frame;
hdev->set_diag = bpa10x_set_diag;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index eef00467905e..3a3a56ddbb06 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -135,7 +135,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -467,7 +467,7 @@ static int btbcm_print_controller_features(struct hci_dev *hdev)
/* Read DMI and disable broken Read LE Min/Max Tx Power */
if (dmi_first_match(disable_broken_read_transmit_power))
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
return 0;
}
@@ -541,11 +541,10 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
static const char *btbcm_get_board_name(struct device *dev)
{
#ifdef CONFIG_OF
- struct device_node *root;
+ struct device_node *root __free(device_node) = of_find_node_by_path("/");
char *board_type;
const char *tmp;
- root = of_find_node_by_path("/");
if (!root)
return NULL;
@@ -554,8 +553,10 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type)
+ return NULL;
+
strreplace(board_type, '/', '-');
- of_node_put(root);
return board_type;
#else
@@ -705,7 +706,7 @@ int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_m
btbcm_check_bdaddr(hdev);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
@@ -768,7 +769,7 @@ int btbcm_setup_apple(struct hci_dev *hdev)
kfree_skb(skb);
}
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 438b92967bc3..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/unaligned.h>
@@ -34,6 +35,11 @@ enum {
DSM_SET_RESET_METHOD = 3,
};
+#define BTINTEL_BT_DOMAIN 0x12
+#define BTINTEL_SAR_LEGACY 0
+#define BTINTEL_SAR_INC_PWR 1
+#define BTINTEL_SAR_INC_PWR_SUPPORTED 0
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -82,7 +88,7 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
bt_dev_err(hdev, "Found Intel default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
kfree_skb(skb);
@@ -477,6 +483,8 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1c: /* Gale Peak (GaP) */
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
+ case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -506,13 +514,13 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
- version->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(version->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- version->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- version->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- version->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
@@ -548,7 +556,7 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
/* Consume Command Complete Status field */
skb_pull(skb, 1);
- /* Event parameters contatin multiple TLVs. Read each of them
+ /* Event parameters contain multiple TLVs. Read each of them
	 * and only keep the required data. Also, it uses the existing legacy
* version field like hw_platform, hw_variant, and fw_variant
* to keep the existing setup flow
@@ -882,7 +890,7 @@ int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
params.boot_param = cpu_to_le32(boot_param);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params), &params,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to send Intel Reset command");
@@ -927,16 +935,16 @@ int btintel_read_boot_params(struct hci_dev *hdev,
le16_to_cpu(params->dev_revid));
bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(params->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
params->min_fw_build_nn, params->min_fw_build_cw,
@@ -1040,7 +1048,7 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev,
* as needed.
*
* Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
+ * firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
@@ -1252,6 +1260,12 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
struct intel_reset params;
struct sk_buff *skb;
+	/* The PCIe transport uses a shared hardware reset mechanism for
+	 * recovery, which gets triggered in the PCIe *setup* function on error.
+ */
+ if (hdev->bus == HCI_PCI)
+ return;
+
/* Send Intel Reset command. This will result in
* re-enumeration of BT controller.
*
@@ -1267,13 +1281,14 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
* boot_param: Boot address
*
*/
+
params.reset_type = 0x01;
params.patch_enable = 0x01;
params.ddc_reload = 0x01;
params.boot_option = 0x00;
params.boot_param = cpu_to_le32(0x00000000);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params),
&params, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "FW download error recovery failed (%ld)",
@@ -1841,6 +1856,37 @@ static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
return 0;
}
+static int btintel_boot_wait_d0(struct hci_dev *hdev, ktime_t calltime,
+ int msec)
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ bt_dev_info(hdev, "Waiting for device transition to d0");
+
+ err = btintel_wait_on_flag_timeout(hdev, INTEL_WAIT_FOR_D0,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(msec));
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Device d0 move interrupted");
+ return -EINTR;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Device d0 move timeout");
+ return -ETIMEDOUT;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device moved to D0 in %llu usecs", duration);
+
+ return 0;
+}
+
static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
{
ktime_t calltime;
@@ -1849,6 +1895,7 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
calltime = ktime_get();
btintel_set_flag(hdev, INTEL_BOOTING);
+ btintel_set_flag(hdev, INTEL_WAIT_FOR_D0);
err = btintel_send_intel_reset(hdev, boot_addr);
if (err) {
@@ -1861,13 +1908,28 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
* is done by the operational firmware sending bootup notification.
*
* Booting into operational firmware should not take longer than
- * 1 second. However if that happens, then just fail the setup
+	 * 5 seconds. However, if that happens, then just fail the setup
* since something went wrong.
*/
- err = btintel_boot_wait(hdev, calltime, 1000);
- if (err == -ETIMEDOUT)
+ err = btintel_boot_wait(hdev, calltime, 5000);
+ if (err == -ETIMEDOUT) {
btintel_reset_to_bootloader(hdev);
+ goto exit_error;
+ }
+ if (hdev->bus == HCI_PCI) {
+		/* In case of PCIe, after receiving the bootup event, the driver
+		 * performs D0 entry by writing 0 to the sleep control register
+		 * (see btintel_pcie_recv_event()).
+		 * Firmware acks with an alive interrupt indicating the host is
+		 * fully ready to perform BT operations. Wait here until the
+		 * INTEL_WAIT_FOR_D0 bit is cleared.
+ */
+ calltime = ktime_get();
+ err = btintel_boot_wait_d0(hdev, calltime, 2000);
+ }
+
+exit_error:
return err;
}
@@ -1966,7 +2028,7 @@ static int btintel_download_fw(struct hci_dev *hdev,
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
download:
@@ -2234,7 +2296,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
*/
if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -2609,7 +2671,7 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
* Distinguish ISO data packets form ACL data packets
* based on their connection handle value range.
*/
- if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
@@ -2658,7 +2720,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
} __packed data;
efi_status_t status;
- unsigned long data_size = 0;
+ unsigned long data_size = sizeof(data);
efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
@@ -2669,15 +2731,9 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
return -EOPNOTSUPP;
status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
-
- status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
&data);
- if (status != EFI_SUCCESS)
+ if (status != EFI_SUCCESS || data_size != sizeof(data))
return -ENXIO;
*dsbr_var = data.dsbr;
@@ -2693,20 +2749,37 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
struct btintel_dsbr_cmd cmd;
struct sk_buff *skb;
+ u32 dsbr, cnvi;
u8 status;
- u32 dsbr;
- bool apply_dsbr;
int err;
- /* DSBR command needs to be sent for BlazarI + B0 step product after
- * downloading IML image.
+ cnvi = ver->cnvi_top & 0xfff;
+	/* The DSBR command needs to be sent for:
+ * 1. BlazarI or BlazarIW + B0 step product in IML image.
+ * 2. Gale Peak2 or BlazarU in OP image.
+ * 3. Scorpious Peak in IML image.
*/
- apply_dsbr = (ver->img_type == BTINTEL_IMG_IML &&
- ((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) &&
- INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01);
- if (!apply_dsbr)
+ switch (cnvi) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ if (ver->img_type == BTINTEL_IMG_IML &&
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01)
+ break;
+ return 0;
+ case BTINTEL_CNVI_GAP:
+ case BTINTEL_CNVI_BLAZARU:
+ if (ver->img_type == BTINTEL_IMG_OP &&
+ hdev->bus == HCI_USB)
+ break;
+ return 0;
+ case BTINTEL_CNVI_SCP:
+ if (ver->img_type == BTINTEL_IMG_IML)
+ break;
return 0;
+ default:
+ return 0;
+ }
dsbr = 0;
err = btintel_uefi_get_dsbr(&dsbr);
@@ -2733,6 +2806,331 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
return 0;
}
+#ifdef CONFIG_ACPI
+static acpi_status btintel_evaluate_acpi_method(struct hci_dev *hdev,
+ acpi_string method,
+ union acpi_object **ptr,
+ u8 pkg_size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p;
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_dbg(hdev, "ACPI-BT: No ACPI support for Bluetooth device");
+ return AE_NOT_EXIST;
+ }
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+
+ if (ACPI_FAILURE(status)) {
+ bt_dev_dbg(hdev, "ACPI-BT: ACPI Failure: %s method: %s",
+ acpi_format_exception(status), method);
+ return status;
+ }
+
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count < pkg_size) {
+ bt_dev_warn(hdev, "ACPI-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
+ return AE_ERROR;
+ }
+
+ *ptr = buffer.pointer;
+ return 0;
+}
+
+static union acpi_object *btintel_acpi_get_bt_pkg(union acpi_object *buffer)
+{
+ union acpi_object *domain, *bt_pkg;
+ int i;
+
+ for (i = 1; i < buffer->package.count; i++) {
+ bt_pkg = &buffer->package.elements[i];
+ domain = &bt_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == BTINTEL_BT_DOMAIN)
+ return bt_pkg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static int btintel_send_sar_ddc(struct hci_dev *hdev, struct btintel_cp_ddc_write *data, u8 len)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, len, data, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send sar ddc id:0x%4.4x (%ld)",
+ le16_to_cpu(data->id), PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_send_edr(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 5;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ cmd->data[1] = sar->edr2 >> 3;
+ cmd->data[2] = sar->edr3 >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 6);
+}
+
+static int btintel_send_le(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = min3(sar->le, sar->le_lr, sar->le_2mhz) >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br_mutual(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr2(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr2;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr3(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
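
All of these helpers share one DDC write framing: cmd->len counts the bytes that follow it (the 2-byte DDC id plus the payload), so the total length handed to btintel_send_sar_ddc() is always len + 1. A worked single-byte example (ddc_write_one_byte() is an illustrative name, not a driver function):

	/* Worked example of the DDC write framing used above. */
	static int ddc_write_one_byte(struct hci_dev *hdev, u16 id, u8 value)
	{
		u8 buffer[64];
		struct btintel_cp_ddc_write *cmd = (void *)buffer;

		cmd->len = 3;			/* 2-byte id + 1 payload byte */
		cmd->id = cpu_to_le16(id);
		cmd->data[0] = value;
		/* total = 1 (len field) + cmd->len = 4 bytes */
		return btintel_send_sar_ddc(hdev, cmd, 4);
	}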
+
+static int btintel_set_legacy_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ u8 buffer[64];
+ int ret;
+
+ cmd = (void *)buffer;
+ ret = btintel_send_br(hdev, cmd, 0x0131, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_br(hdev, cmd, 0x0132, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x0133, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0137, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0138, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013b, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013c, sar);
+
+ return ret;
+}
+
+static int btintel_set_mutual_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ struct sk_buff *skb;
+ u8 buffer[64];
+ bool enable;
+ int ret;
+
+ cmd = (void *)buffer;
+
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019e);
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED)
+ cmd->data[0] = 0x01;
+ else
+ cmd->data[0] = 0x00;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) {
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019f);
+ cmd->data[0] = sar->sar_2400_chain_a;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = btintel_send_br_mutual(hdev, cmd, 0x01a0, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr2(hdev, cmd, 0x01a1, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr3(hdev, cmd, 0x01a2, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x01a3, sar);
+ if (ret)
+ return ret;
+
+ enable = true;
+ skb = __hci_cmd_sync(hdev, 0xfe25, 1, &enable, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send Intel SAR Enable (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_sar_send_to_device(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar,
+ struct intel_version_tlv *ver)
+{
+ u16 cnvi, cnvr;
+ int ret;
+
+ cnvi = ver->cnvi_top & 0xfff;
+ cnvr = ver->cnvr_top & 0xfff;
+
+ if (cnvi < BTINTEL_CNVI_BLAZARI && cnvr < BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying legacy Bluetooth SAR");
+ ret = btintel_set_legacy_sar(hdev, sar);
+ } else if (cnvi == BTINTEL_CNVI_GAP || cnvr == BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying mutual Bluetooth SAR");
+ ret = btintel_set_mutual_sar(hdev, sar);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int btintel_acpi_set_sar(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ union acpi_object *bt_pkg, *buffer = NULL;
+ struct btintel_sar_inc_pwr sar;
+ acpi_status status;
+ u8 revision;
+ int ret;
+
+ status = btintel_evaluate_acpi_method(hdev, "BRDS", &buffer, 2);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ bt_pkg = btintel_acpi_get_bt_pkg(buffer);
+
+ if (IS_ERR(bt_pkg)) {
+ ret = PTR_ERR(bt_pkg);
+ goto error;
+ }
+
+ if (!bt_pkg->package.count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ revision = buffer->package.elements[0].integer.value;
+
+ if (revision > BTINTEL_SAR_INC_PWR) {
+ bt_dev_dbg(hdev, "BT_SAR: revision: 0x%2.2x not supported", revision);
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ memset(&sar, 0, sizeof(sar));
+
+ if (revision == BTINTEL_SAR_LEGACY && bt_pkg->package.count == 8) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.br = bt_pkg->package.elements[2].integer.value;
+ sar.edr2 = bt_pkg->package.elements[3].integer.value;
+ sar.edr3 = bt_pkg->package.elements[4].integer.value;
+ sar.le = bt_pkg->package.elements[5].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[6].integer.value;
+ sar.le_lr = bt_pkg->package.elements[7].integer.value;
+
+ } else if (revision == BTINTEL_SAR_INC_PWR && bt_pkg->package.count == 10) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.inc_power_mode = bt_pkg->package.elements[2].integer.value;
+ sar.sar_2400_chain_a = bt_pkg->package.elements[3].integer.value;
+ sar.br = bt_pkg->package.elements[4].integer.value;
+ sar.edr2 = bt_pkg->package.elements[5].integer.value;
+ sar.edr3 = bt_pkg->package.elements[6].integer.value;
+ sar.le = bt_pkg->package.elements[7].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[8].integer.value;
+ sar.le_lr = bt_pkg->package.elements[9].integer.value;
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Apply only if it is enabled in BIOS */
+ if (sar.bt_sar_bios != 1) {
+ bt_dev_dbg(hdev, "Bluetooth SAR is not enabled");
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = btintel_sar_send_to_device(hdev, &sar, ver);
+error:
+ kfree(buffer);
+ return ret;
+}
+#endif /* CONFIG_ACPI */
+
+static int btintel_set_specific_absorption_rate(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+#ifdef CONFIG_ACPI
+ return btintel_acpi_set_sar(hdev, ver);
+#endif
+ return 0;
+}
+
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2749,6 +3147,13 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
boot_param = 0x00000000;
+ /* In case of PCIe, this function might get called multiple times with
+ * same hdev instance if there is any error on firmware download.
+ * Need to clear stale bits of previous firmware download attempt.
+ */
+ for (int i = 0; i < __INTEL_NUM_FLAGS; i++)
+ btintel_clear_flag(hdev, i);
+
btintel_set_flag(hdev, INTEL_BOOTLOADER);
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@@ -2803,6 +3208,9 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Send sar values to controller */
+ btintel_set_specific_absorption_rate(hdev, ver);
+
/* Set PPAG feature */
btintel_set_ppag(hdev, ver);
@@ -2835,7 +3243,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
- /* All Intel new genration controllers support the Microsoft vendor
+ /* All Intel new generation controllers support the Microsoft vendor
* extension are using 0xFC1E for VsMsftOpCode.
*/
case 0x17:
@@ -2845,6 +3253,8 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1c:
case 0x1d:
case 0x1e:
+ case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3027,9 +3437,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -3067,8 +3477,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
if (!btintel_test_flag(hdev,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
@@ -3083,11 +3493,11 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3163,10 +3573,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3184,6 +3594,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1b:
case 0x1d:
case 0x1e:
+ case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -3191,7 +3603,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
@@ -3288,13 +3700,12 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
case INTEL_TLV_TEST_EXCEPTION:
/* Generate devcoredump from exception */
if (!hci_devcd_init(hdev, skb->len)) {
- hci_devcd_append(hdev, skb);
+ hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
hci_devcd_complete(hdev);
} else {
bt_dev_err(hdev, "Failed to generate devcoredump");
- kfree_skb(skb);
}
- return 0;
+ break;
default:
bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
}
@@ -3321,7 +3732,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* indicating that the bootup completed.
*/
btintel_bootup(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
case 0x06:
/* When the firmware loading completes the
* device sends out a vendor specific event
@@ -3329,7 +3741,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* loading.
*/
btintel_secure_send_result(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
}
}
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa70e4c27416..431998049e68 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -52,7 +52,16 @@ struct intel_tlv {
u8 val[];
} __packed;
+#define BTINTEL_HCI_OP_RESET 0xfc01
+
#define BTINTEL_CNVI_BLAZARI 0x900
+#define BTINTEL_CNVI_BLAZARIW 0x901
+#define BTINTEL_CNVI_GAP 0x910
+#define BTINTEL_CNVI_BLAZARU 0x930
+#define BTINTEL_CNVI_SCP 0xA00
+
+/* CNVR */
+#define BTINTEL_CNVR_FMP2 0x910
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
@@ -161,6 +170,26 @@ struct hci_ppag_enable_cmd {
#define INTEL_TLV_DEBUG_EXCEPTION 0x02
#define INTEL_TLV_TEST_EXCEPTION 0xDE
+struct btintel_cp_ddc_write {
+ u8 len;
+ __le16 id;
+ u8 data[];
+} __packed;
+
+/* Bluetooth SAR feature (BRDS), Revision 1 */
+struct btintel_sar_inc_pwr {
+ u8 revision;
+	u32 bt_sar_bios;	/* Mode of SAR control to be used, 1: enabled in BIOS */
+	u32 inc_power_mode;	/* Increased power mode */
+	u8 sar_2400_chain_a;	/* SAR power restriction LB */
+ u8 br;
+ u8 edr2;
+ u8 edr3;
+ u8 le;
+ u8 le_2mhz;
+ u8 le_lr;
+};
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -178,6 +207,7 @@ enum {
INTEL_ROM_LEGACY,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
INTEL_ACPI_RESET_ACTIVE,
+ INTEL_WAIT_FOR_D0,
__INTEL_NUM_FLAGS,
};
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 5252125b003f..6d3963bd56a9 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -35,11 +36,25 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
+ { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
+ { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+struct btintel_pcie_dev_recovery {
+ struct list_head list;
+ u8 count;
+ time64_t last_error;
+ char name[];
+};
+
/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
#define BTINTEL_PCIE_HCI_TYPE_LEN 4
#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
@@ -48,6 +63,140 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
+#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
+
+#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
+#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
+
+#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
+#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
+
+#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
+#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+
+#define BTINTEL_PCIE_RESET_WINDOW_SECS 5
+#define BTINTEL_PCIE_FLR_MAX_RETRY 1
+
+/* Alive interrupt context */
+enum {
+ BTINTEL_PCIE_ROM,
+ BTINTEL_PCIE_FW_DL,
+ BTINTEL_PCIE_HCI_RESET,
+ BTINTEL_PCIE_INTEL_HCI_RESET1,
+ BTINTEL_PCIE_INTEL_HCI_RESET2,
+ BTINTEL_PCIE_D0,
+ BTINTEL_PCIE_D3
+};
+
+/* Structure for dbgc fragment buffer
+ * @buf_addr_lsb: LSB of the buffer's physical address
+ * @buf_addr_msb: MSB of the buffer's physical address
+ * @buf_size: Total size of the buffer
+ */
+struct btintel_pcie_dbgc_ctxt_buf {
+ u32 buf_addr_lsb;
+ u32 buf_addr_msb;
+ u32 buf_size;
+};
+
+/* Structure for dbgc fragment
+ * @magic_num: 0XA5A5A5A5
+ * @ver: For Driver-FW compatibility
+ * @total_size: Total size of the payload debug info
+ * @num_buf: Num of allocated debug bufs
+ * @bufs: All buffer's addresses and sizes
+ */
+struct btintel_pcie_dbgc_ctxt {
+ u32 magic_num;
+ u32 ver;
+ u32 total_size;
+ u32 num_buf;
+ struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
+};
+
+struct btintel_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+};
+
+static LIST_HEAD(btintel_pcie_recovery_list);
+static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
+
+static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
+{
+ switch (alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ return "rom";
+ case BTINTEL_PCIE_FW_DL:
+ return "fw_dl";
+ case BTINTEL_PCIE_D0:
+ return "d0";
+ case BTINTEL_PCIE_D3:
+ return "d3";
+ case BTINTEL_PCIE_HCI_RESET:
+ return "hci_reset";
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ return "intel_reset1";
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ return "intel_reset2";
+ default:
+ return "unknown";
+ }
+}
+
+/* This function initializes the memory for the DBGC buffers and formats
+ * the DBGC fragment, which consists of header info plus each DBGC
+ * buffer's LSB, MSB and size as the payload.
+ */
+static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
+{
+ struct btintel_pcie_dbgc_ctxt db_frag;
+ struct data_buf *buf;
+ int i;
+
+ data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
+ data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
+ sizeof(*buf), GFP_KERNEL);
+ if (!data->dbgc.bufs)
+ return -ENOMEM;
+
+ data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ data->dbgc.count *
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE,
+ &data->dbgc.buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.buf_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ sizeof(struct btintel_pcie_dbgc_ctxt),
+ &data->dbgc.frag_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.frag_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
+
+ db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
+ db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
+ db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
+ db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
+
+ for (i = 0; i < data->dbgc.count; i++) {
+ buf = &data->dbgc.bufs[i];
+ buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ }
+
+ memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
+ return 0;
+}
+
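Each entry in db_frag.bufs above stores a buffer's 64-bit DMA address as two 32-bit halves. A minimal standalone sketch of that split, using a hypothetical bus address and mirroring what the kernel's lower_32_bits()/upper_32_bits() helpers do:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 64-bit bus address of one DBGC buffer */
	uint64_t dma_addr = 0x000000012345a000ULL;

	/* same split as lower_32_bits()/upper_32_bits() */
	uint32_t lsb = (uint32_t)(dma_addr & 0xffffffffULL);
	uint32_t msb = (uint32_t)(dma_addr >> 32);

	printf("buf_addr_lsb: 0x%08x buf_addr_msb: 0x%08x\n", lsb, msb);
	return 0;
}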
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
{
@@ -64,24 +213,6 @@ static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}
-static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
- u32 bits, u32 mask, int timeout_us)
-{
- int t = 0;
- u32 reg;
-
- do {
- reg = btintel_pcie_rd_reg32(data, offset);
-
- if ((reg & mask) == (bits & mask))
- return t;
- udelay(POLL_INTERVAL_US);
- t += POLL_INTERVAL_US;
- } while (t < timeout_us);
-
- return -ETIMEDOUT;
-}
-
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
u8 queue = entry->entry;
@@ -124,11 +255,105 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
memcpy(buf->data, skb->data, tfd->size);
}
+static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 cr_hia, cr_tia;
+ u32 reg, mbox_reg;
+ struct sk_buff *skb;
+ u8 buf[80];
+
+ skb = alloc_skb(1024, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
+ snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
+ snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
+ snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+	/* Read the mailbox status and registers */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
+ snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_1_REG);
+ snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_2_REG);
+ snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_3_REG);
+ snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_4_REG);
+ snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+ snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ snprintf(buf, sizeof(buf), "--------------------------------");
+ bt_dev_dbg(hdev, "%s", buf);
+
+ hci_recv_diag(hdev, skb);
+}
+
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 pkt_type, u16 opcode)
{
int ret;
u16 tfd_index;
+ u32 old_ctxt;
+ bool wait_on_alive = false;
+ struct hci_dev *hdev = data->hdev;
+
struct txq *txq = &data->txq;
tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
@@ -136,6 +361,26 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
if (tfd_index > txq->count)
return -ERANGE;
+ /* Firmware raises alive interrupt on HCI_OP_RESET or
+ * BTINTEL_HCI_OP_RESET
+ */
+ wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
+ (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
+
+ if (wait_on_alive) {
+ data->gp0_received = false;
+ old_ctxt = data->alive_intr_ctxt;
+ data->alive_intr_ctxt =
+ (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
+ BTINTEL_PCIE_HCI_RESET);
+ bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
+ opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ }
+
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
/* Prepare for TX. It updates the TFD with the length of data and
* address of the DMA buffer, and copy the data to the DMA buffer
*/
@@ -153,9 +398,25 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
- if (!ret)
+ if (!ret) {
+ bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
+ BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
+ btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
+ }
+ if (wait_on_alive) {
+ ret = wait_event_timeout(data->gp0_wait_q,
+ data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (!ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ return -ETIME;
+ }
+ }
return 0;
}
@@ -219,8 +480,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
+
+	/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
+	 * hardware issue that leads to a race condition in the firmware.
+	 */
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -237,10 +503,249 @@ static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}
-static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 3;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
+ break;
+ usleep_range(10000, 12000);
+
+ } while (--retry > 0);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+
+	/* If the shared hardware reset succeeded, the boot stage register
+	 * will read as 0
+	 */
+ return reg == 0 ? 0 : -ENODEV;
+}
+
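The bus-master disconnect above polls a status bit with a fixed retry budget: re-read the control register, stop as soon as the bit is observed, give up after a few tries. A minimal sketch of that shape; read_reg() and sleep_ms() are hypothetical stand-ins for btintel_pcie_rd_reg32() and usleep_range():

#include <stdbool.h>
#include <stdint.h>

static bool poll_bit(uint32_t (*read_reg)(void), uint32_t bit,
		     void (*sleep_ms)(unsigned int), int retry)
{
	do {
		if (read_reg() & bit)
			return true;	/* bit observed, stop early */
		sleep_ms(10);
	} while (--retry > 0);

	return false;	/* retry budget exhausted */
}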
+static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ /* Set MAC_INIT bit to start primary bootloader */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 15;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+ if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
+ return 0;
+		/* Need a delay here for the Target Access hardware to settle down */
+ usleep_range(1000, 1200);
+
+ } while (--retry > 0);
+
+ return -ETIME;
+}
+
+static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
- btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ struct intel_tlv *tlv;
+
+ tlv = dest;
+ tlv->type = type;
+ tlv->len = size;
+ memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
+}
+
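btintel_pcie_copy_tlv() appends one type/length/value record and returns the next write position, so the dump header below is built by simply chaining calls. A self-contained sketch of that packing scheme; the 1-byte type and length fields are an assumption for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint8_t type;
	uint8_t len;
	uint8_t val[];
} __attribute__((packed));

/* Append one TLV record, return the next write position */
static void *copy_tlv(void *dest, uint8_t type, const void *data, size_t size)
{
	struct tlv *tlv = dest;

	tlv->type = type;
	tlv->len = (uint8_t)size;
	memcpy(tlv->val, data, size);
	return (uint8_t *)dest + sizeof(*tlv) + size;
}

int main(void)
{
	uint8_t buf[64];
	uint32_t cnvi = 0x12345678;	/* hypothetical field value */
	void *p = buf;

	p = copy_tlv(p, 1, &cnvi, sizeof(cnvi));
	printf("bytes used: %zu\n", (size_t)((uint8_t *)p - buf));
	return 0;
}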
+static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
+{
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
+ struct btintel_pcie_dbgc *dbgc = &data->dbgc;
+ struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
+ struct intel_tlv *tlv;
+ struct timespec64 now;
+ struct tm tm_now;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
+ wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
+ offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
+
+ buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
+ if (buf_idx > dbgc->count) {
+ bt_dev_warn(hdev, "Buffer index is invalid");
+ return -EINVAL;
+ }
+
+ prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ if (prev_size + offset >= prev_size)
+ data->dmp_hdr.write_ptr = prev_size + offset;
+ else
+ return -EINVAL;
+
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
+ ktime_get_real_ts64(&now);
+ time64_to_tm(now.tv_sec, 0, &tm_now);
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
+ tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
+
+ snprintf(fw_build, sizeof(fw_build),
+ "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
+ 2000 + (data->dmp_hdr.fw_timestamp >> 8),
+ data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
+ data->dmp_hdr.fw_build_num);
+
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
+
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
+
+ /* Add debug buffers data length to dump size */
+ dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
+
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
+
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
+
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+	/* Read the wrap counter from the device before serializing it */
+	data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
+							 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+
+	p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+				  sizeof(data->dmp_hdr.wrap_ctr));
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
+}
+
+static void btintel_pcie_dump_traces(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ int ret = 0;
+
+ ret = btintel_pcie_get_mac_access(data);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
+ return;
+ }
+
+ ret = btintel_pcie_read_dram_buffers(data);
+
+ btintel_pcie_release_mac_access(data);
+
+ if (ret)
+ bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
@@ -252,6 +757,7 @@ static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
int err;
+ u32 reg;
data->gp0_received = false;
@@ -267,22 +773,17 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
data->boot_stage_cache = 0x0;
/* Set MAC_INIT bit to start primary bootloader */
- btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
- btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
-
- /* Wait until MAC_ACCESS is granted */
- err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
- if (err < 0)
- return -ENODEV;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
/* MAC is ready. Enable BT FUNC */
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
@@ -290,8 +791,9 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
/* wait for interrupt from the device after booting up to primary
* bootloader.
*/
+ data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
- msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
if (!err)
return -ETIME;
@@ -302,12 +804,82 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
+static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
+}
+
+static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
+ !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
+}
+
+static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
+}
+
+static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
+{
+ return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
+}
+
+static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
+ u32 dxstate)
+{
+ bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
+}
+
+static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
+ void *buf, u32 dev_addr, int len)
+{
+ int err;
+ u32 *val = buf;
+
+ /* Get device mac access */
+ err = btintel_pcie_get_mac_access(data);
+ if (err) {
+ bt_dev_err(data->hdev, "Failed to get mac access %d", err);
+ return err;
+ }
+
+ for (; len > 0; len -= 4, dev_addr += 4, val++)
+ *val = btintel_pcie_rd_dev_mem(data, dev_addr);
+
+ btintel_pcie_release_mac_access(data);
+
+ return 0;
+}
+
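btintel_pcie_rd_dev_mem() (added to btintel_pcie.h later in this patch) is the classic address/data window: latch the target device address in one CSR, then read the value back through another. A self-contained sketch of the pattern with plain pointers standing in for ioread32()/iowrite32(); all names here are illustrative, not driver API:

#include <stdint.h>

struct mmio_window {
	volatile uint32_t *addr_reg;	/* stands in for the PRPH address CSR */
	volatile uint32_t *data_reg;	/* stands in for the PRPH read CSR */
};

/* Latch the device address, then fetch the word it points at */
static uint32_t window_read(struct mmio_window *w, uint32_t dev_addr)
{
	*w->addr_reg = dev_addr;
	return *w->data_reg;
}

/* Walk a range in 4-byte steps, as btintel_pcie_read_device_mem() does */
static void window_read_range(struct mmio_window *w, uint32_t *dst,
			      uint32_t dev_addr, int len)
{
	for (; len > 0; len -= 4, dev_addr += 4, dst++)
		*dst = window_read(w, dev_addr);
}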
+static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
+ (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
+}
+
+static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
+ (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+}
+
+static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
+ btintel_pcie_dump_debug_registers(data->hdev);
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
- u32 reg;
+ bool submit_rx, signal_waitq;
+ u32 reg, old_ctxt;
/* This interrupt is for three different causes and it is not easy to
* know what causes the interrupt. So, it compares each register value
@@ -317,20 +889,101 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->boot_stage_cache)
data->boot_stage_cache = reg;
+ bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
+ data->boot_stage_cache, reg);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
+ if (btintel_pcie_in_error(data)) {
+ bt_dev_err(data->hdev, "Controller in error state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
+ if (btintel_pcie_in_lockdown(data)) {
+ bt_dev_err(data->hdev, "Controller in lockdown state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
data->gp0_received = true;
- /* If the boot stage is OP or IML, reset IA and start RX again */
- if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
- data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
+ old_ctxt = data->alive_intr_ctxt;
+ submit_rx = false;
+ signal_waitq = false;
+
+ switch (data->alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ signal_waitq = true;
+ break;
+ case BTINTEL_PCIE_FW_DL:
+		/* The error case is already handled above; control should not
+		 * reach here
+		 */
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ if (btintel_pcie_in_op(data)) {
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+ submit_rx = true;
+ signal_waitq = true;
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
+ btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ }
+ break;
+ case BTINTEL_PCIE_D0:
+ if (btintel_pcie_in_d3(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D3;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_D3:
+ if (btintel_pcie_in_d0(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_HCI_RESET:
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
+ data->alive_intr_ctxt);
+ break;
+ }
+
+ if (submit_rx) {
btintel_pcie_reset_ia(data);
btintel_pcie_start_rx(data);
}
- wake_up(&data->gp0_wait_q);
+ if (signal_waitq) {
+ bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
+ wake_up(&data->gp0_wait_q);
+ }
+
+ if (old_ctxt != data->alive_intr_ctxt)
+ bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
+ btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
}
/* This function handles the MSX-X interrupt for rx queue 0 which is for TX
@@ -364,6 +1017,75 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
}
}
+static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ switch (skb->data[2]) {
+ case 0x02:
+ /* When switching to the operational firmware
+ * the device sends a vendor specific event
+ * indicating that the bootup completed.
+ */
+ btintel_bootup(hdev, ptr, len);
+
+ /* If bootup event is from operational image,
+ * driver needs to write sleep control register to
+ * move into D0 state
+ */
+ if (btintel_pcie_in_op(data)) {
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+ /* In case of IML, there is no concept
+ * of D0 transition. Just mimic as if
+ * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
+ * bit and waking up the task waiting on
+ * INTEL_WAIT_FOR_D0. This is required
+ * as intel_boot() is common function for
+ * both IML and OP image loading.
+ */
+ if (btintel_test_and_clear_flag(data->hdev,
+ INTEL_WAIT_FOR_D0))
+ btintel_wake_up_flag(data->hdev,
+ INTEL_WAIT_FOR_D0);
+ }
+ kfree_skb(skb);
+ return 0;
+ case 0x06:
+ /* When the firmware loading completes the
+ * device sends out a vendor specific event
+ * indicating the result of the firmware
+ * loading.
+ */
+ btintel_secure_send_result(hdev, ptr, len);
+ kfree_skb(skb);
+ return 0;
+ }
+ }
+
+		/* This is a debug event that comes from the IML and OP images
+		 * when they start execution. There is no need to pass this
+		 * event to the stack.
+		 */
+ if (skb->data[2] == 0x97) {
+ hci_recv_diag(hdev, skb);
+ return 0;
+ }
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
/* Process the received rx data
 * It checks the frame header to identify the data type, creates an skb,
 * and calls the HCI API
@@ -375,7 +1097,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
- struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -452,24 +1173,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
- new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
- if (!new_skb) {
- bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
- skb->len);
- ret = -ENOMEM;
- goto exit_error;
- }
-
- hci_skb_pkt_type(new_skb) = pkt_type;
- skb_put_data(new_skb, skb->data, plen);
+ hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
+ skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, skb);
else
- ret = hci_recv_frame(hdev, new_skb);
+ ret = hci_recv_frame(hdev, skb);
+ skb = NULL; /* skb is freed in the callee */
exit_error:
+ if (skb)
+ kfree_skb(skb);
+
if (ret)
hdev->stat.err_rx++;
@@ -478,21 +1195,152 @@ exit_error:
return ret;
}
+static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
+{
+ int len, err, offset, pending;
+ struct sk_buff *skb;
+ u8 *buf, prefix[64];
+ u32 addr, val;
+ u16 pkt_len;
+
+ struct tlv {
+ u8 type;
+ __le16 len;
+ u8 val[];
+ } __packed;
+
+ struct tlv *tlv;
+
+ switch (data->dmp_hdr.cnvi_top & 0xfff) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ /* only from step B0 onwards */
+ if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
+ return;
+ len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
+ addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
+ break;
+ case BTINTEL_CNVI_SCP:
+ len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
+ addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
+ return;
+ }
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto exit_on_error;
+
+ btintel_pcie_mac_init(data);
+
+ err = btintel_pcie_read_device_mem(data, buf, addr, len);
+ if (err)
+ goto exit_on_error;
+
+ val = get_unaligned_le32(buf);
+ if (val != BTINTEL_PCIE_MAGIC_NUM) {
+ bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
+ val);
+ goto exit_on_error;
+ }
+
+ snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
+
+ offset = 4;
+ do {
+ pending = len - offset;
+ if (pending < sizeof(*tlv))
+ break;
+ tlv = (struct tlv *)(buf + offset);
+
+ /* If type == 0, then there are no more TLVs to be parsed */
+ if (!tlv->type) {
+ bt_dev_dbg(data->hdev, "Invalid TLV type 0");
+ break;
+ }
+ pkt_len = le16_to_cpu(tlv->len);
+ offset += sizeof(*tlv);
+ pending = len - offset;
+ if (pkt_len > pending)
+ break;
+
+ offset += pkt_len;
+
+ /* Only TLVs of type == 1 are HCI events, no need to process other
+ * TLVs
+ */
+ if (tlv->type != 1)
+ continue;
+
+ bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
+ if (pkt_len > HCI_MAX_EVENT_SIZE)
+ break;
+ skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
+ if (!skb)
+ goto exit_on_error;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, tlv->val, pkt_len);
+
+ /* copy Intel specific pcie packet type */
+ val = BTINTEL_PCIE_HCI_EVT_PKT;
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
+ tlv->val, pkt_len, false);
+
+ btintel_pcie_recv_frame(data, skb);
+ } while (offset < len);
+
+exit_on_error:
+ kfree(buf);
+}
+
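The loop above guards every step of the walk against a truncated or malformed dump. A standalone sketch of the same bounds-checked TLV parse; it assumes a little-endian host for brevity, where the driver proper uses le16_to_cpu():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tlv16 {
	uint8_t type;
	uint16_t len;	/* little-endian on the wire */
	uint8_t val[];
} __attribute__((packed));

/* Stop on type 0, on a truncated header, or on a length overrun */
static void parse(const uint8_t *buf, size_t len)
{
	size_t offset = 0;

	while (len - offset >= sizeof(struct tlv16)) {
		const struct tlv16 *tlv = (const void *)(buf + offset);
		uint16_t pkt_len = tlv->len;	/* le16_to_cpu() in the driver */

		if (!tlv->type)
			break;
		offset += sizeof(*tlv);
		if (pkt_len > len - offset)
			break;
		printf("type %u len %u\n", tlv->type, pkt_len);
		offset += pkt_len;
	}
}

int main(void)
{
	/* one record: type 1, len 2, payload {0xaa, 0xbb}, then a stop header */
	uint8_t buf[] = { 1, 2, 0, 0xaa, 0xbb, 0, 0, 0 };

	parse(buf, sizeof(buf));
	return 0;
}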
+static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received hw exception interrupt");
+
+ if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
+ return;
+
+ /* Trigger device core dump when there is HW exception */
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+
+ queue_work(data->workqueue, &data->rx_work);
+}
+
static void btintel_pcie_rx_work(struct work_struct *work)
{
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
- int err;
- struct hci_dev *hdev = data->hdev;
+
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
+ if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
+		/* Unlike USB products, the controller does not send a hardware
+		 * exception event on exception. Instead, the controller writes
+		 * the hardware event to device memory along with optional
+		 * debug events, raises an MSI-X interrupt and halts. The
+		 * driver reads the exception event from device memory and
+		 * passes it to the stack for further processing.
+		 */
+ btintel_pcie_read_hwexp(data);
+ clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
+ }
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
- err = btintel_pcie_recv_frame(data, skb);
- if (err)
- bt_dev_err(hdev, "Failed to send received frame: %d",
- err);
- kfree_skb(skb);
+ btintel_pcie_recv_frame(data, skb);
}
}
@@ -516,10 +1364,8 @@ static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status
buf += sizeof(*rfh_hdr);
skb = alloc_skb(len, GFP_ATOMIC);
- if (!skb) {
- ret = -ENOMEM;
+ if (!skb)
goto resubmit;
- }
skb_put_data(skb, buf, len);
skb_queue_tail(&data->rx_skb_q, skb);
@@ -547,10 +1393,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
- if (cr_tia == cr_hia) {
- bt_dev_warn(hdev, "RXQ: no new CD found");
+ if (cr_tia == cr_hia)
return;
- }
rxq = &data->rxq;
@@ -586,6 +1430,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -606,6 +1460,13 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+	/* This interrupt is raised when there is a hardware exception */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
+ btintel_pcie_msix_hw_exp_handler(data);
+
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
+ btintel_pcie_msix_gp1_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -613,12 +1474,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
+ if (!btintel_pcie_is_rxq_empty(data))
+ btintel_pcie_msix_rx_handle(data);
+ }
/* For RX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
+ if (!btintel_pcie_is_txackq_empty(data))
+ btintel_pcie_msix_tx_handle(data);
+ }
/*
* Before sending the interrupt the HW disables it to prevent a nested
@@ -686,7 +1553,8 @@ struct btintel_pcie_causes_list {
static struct btintel_pcie_causes_list causes_list[] = {
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
- { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
};
/* This function configures the interrupt masks for both HW_INT_CAUSES and
@@ -734,13 +1602,9 @@ static int btintel_pcie_config_pcie(struct pci_dev *pdev,
return err;
}
- err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
- if (err)
- return err;
-
- data->base_addr = pcim_iomap_table(pdev)[0];
- if (!data->base_addr)
- return -ENODEV;
+ data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ if (IS_ERR(data->base_addr))
+ return PTR_ERR(data->base_addr);
err = btintel_pcie_setup_irq(data);
if (err)
@@ -777,6 +1641,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
ci->num_urbdq1 = data->rxq.count;
ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+
+ ci->dbg_output_mode = 0x01;
+ ci->dbgc_addr = data->dbgc.frag_p_addr;
+ ci->dbgc_size = data->dbgc.frag_size;
+ ci->dbg_preset = 0x00;
}
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
@@ -919,8 +1788,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
@@ -945,36 +1814,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
@@ -989,6 +1858,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
/* Setup Index Array */
btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
+ /* Setup data buffers for dbgc */
+ err = btintel_pcie_setup_dbgc(data);
+ if (err)
+ goto exit_error_txq;
+
/* Setup Context Information */
p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
@@ -1053,9 +1927,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ struct hci_command_hdr *cmd;
+ __u16 opcode = ~0;
int ret;
u32 type;
+ if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return -ENODEV;
+
/* Due to the fw limitation, the type header of the packet should be
* 4 bytes unlike 1 byte for UART. In UART, the firmware can read
* the first byte to get the packet type and redirect the rest of data
@@ -1073,18 +1952,21 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
type = BTINTEL_PCIE_HCI_CMD_PKT;
+ cmd = (void *)skb->data;
+ opcode = le16_to_cpu(cmd->opcode);
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
-		struct hci_command_hdr *cmd = (void *)skb->data;
-		__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
+
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
@@ -1102,15 +1984,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
bt_dev_err(hdev, "Unknown HCI packet type");
return -EILSEQ;
}
- memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
- BTINTEL_PCIE_HCI_TYPE_LEN);
- ret = btintel_pcie_send_sync(data, skb);
+ ret = btintel_pcie_send_sync(data, skb, type, opcode);
if (ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
+
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@@ -1128,8 +2009,31 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
-static int btintel_pcie_setup(struct hci_dev *hdev)
+static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
+ for (int i = 0; i < data->alloc_vecs; i++)
+ synchronize_irq(data->msix_entries[i].vector);
+}
+
+static int btintel_pcie_setup_internal(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
struct sk_buff *skb;
@@ -1153,9 +2057,9 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -1187,6 +2091,8 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
*/
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
+ case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -1194,7 +2100,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
@@ -1212,6 +2118,16 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
break;
}
+ data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
+ data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
+ data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
+ data->dmp_hdr.fw_build_type = ver_tlv.build_type;
+ data->dmp_hdr.fw_build_num = ver_tlv.build_num;
+ data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
+
+ if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
+ data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
+
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -1219,6 +2135,225 @@ exit_error:
return err;
}
+static int btintel_pcie_setup(struct hci_dev *hdev)
+{
+ int err, fw_dl_retry = 0;
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
+ bt_dev_err(hdev, "Firmware download retry count: %d",
+ fw_dl_retry);
+ btintel_pcie_dump_debug_registers(hdev);
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+ err = btintel_pcie_reset_bt(data);
+ if (err) {
+			bt_dev_err(hdev, "Failed to do shared hardware reset: %d", err);
+ break;
+ }
+ usleep_range(10000, 12000);
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ bt_dev_err(hdev, "Failed to enable hardware: %d", err);
+ break;
+ }
+ btintel_pcie_start_rx(data);
+ }
+
+ if (!err)
+ set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
+ return err;
+}
+
+static struct btintel_pcie_dev_recovery *
+btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *tmp, *data = NULL;
+ const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
+ struct hci_dev *hdev = to_hci_dev(dev);
+
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ if (data) {
+ bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
+ return data;
+ }
+
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strscpy(data->name, name, name_len);
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_add_tail(&data->list, &btintel_pcie_recovery_list);
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ return data;
+}
+
+static void btintel_pcie_free_restart_list(void)
+{
+ struct btintel_pcie_dev_recovery *tmp;
+
+ while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
+ struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *data;
+ time64_t retry_window;
+
+ data = btintel_pcie_get_recovery(pdev, dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+ if (data->count == 0) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->count++;
+ } else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ data->count++;
+ } else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
+ data->last_error = 0;
+ data->count = 0;
+ }
+}
+
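Together, btintel_pcie_inc_recovery_count() and btintel_pcie_hw_error() (below) form a sliding-window retry budget: at most BTINTEL_PCIE_FLR_MAX_RETRY recoveries within BTINTEL_PCIE_RESET_WINDOW_SECS of the first error, with the counters cleared once the window has passed. A standalone sketch of that policy; it condenses the two functions into one and is illustrative, not the driver's exact control flow:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESET_WINDOW_SECS 5
#define FLR_MAX_RETRY     1

struct recovery {
	unsigned int count;
	int64_t last_error;	/* boottime seconds, 0 when idle */
};

static bool recovery_allowed(struct recovery *r, int64_t now)
{
	int64_t window = now - r->last_error;

	if (r->count && window < RESET_WINDOW_SECS &&
	    r->count >= FLR_MAX_RETRY)
		return false;	/* budget exhausted inside the window */

	if (r->count && window > RESET_WINDOW_SECS) {
		r->count = 0;	/* window expired, start over */
		r->last_error = 0;
	}

	if (!r->count)
		r->last_error = now;	/* window opens at the first error */
	r->count++;
	return true;
}

int main(void)
{
	struct recovery r = { 0, 0 };
	bool first = recovery_allowed(&r, 100);
	bool second = recovery_allowed(&r, 102);

	printf("%d %d\n", first, second);	/* prints "1 0" */
	return 0;
}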
+static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
+
+static void btintel_pcie_removal_work(struct work_struct *wk)
+{
+ struct btintel_pcie_removal *removal =
+ container_of(wk, struct btintel_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ struct btintel_pcie_data *data;
+ int err;
+
+ pci_lock_rescan_remove();
+
+ if (!pdev->bus)
+ goto error;
+
+ data = pci_get_drvdata(pdev);
+
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
+ bt_dev_dbg(data->hdev, "Release bluetooth interface");
+ btintel_pcie_release_hdev(data);
+
+ err = pci_reset_function(pdev);
+ if (err) {
+ BT_ERR("Failed resetting the pcie device (%d)", err);
+ goto error;
+ }
+
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
+ err);
+ goto error;
+ }
+
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_start_rx(data);
+ data->flags = 0;
+
+ err = btintel_pcie_setup_hdev(data);
+ if (err) {
+ BT_ERR("Failed registering hdev (%d)", err);
+ goto error;
+ }
+error:
+ pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
+ kfree(removal);
+}
+
+static void btintel_pcie_reset(struct hci_dev *hdev)
+{
+ struct btintel_pcie_removal *removal;
+ struct btintel_pcie_data *data;
+
+ data = hci_get_drvdata(hdev);
+
+ if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
+ return;
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal)
+ return;
+
+ removal->pdev = data->pdev;
+ INIT_WORK(&removal->work, btintel_pcie_removal_work);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+
+static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct btintel_pcie_dev_recovery *data;
+ struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
+ struct pci_dev *pdev = dev_data->pdev;
+ time64_t retry_window;
+
+ if (code == 0x13) {
+ bt_dev_err(hdev, "Encountered top exception");
+ return;
+ }
+
+ data = btintel_pcie_get_recovery(pdev, &hdev->dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+
+ if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
+		bt_dev_err(hdev, "Exhausted maximum of %d recovery attempts (count: %d)",
+			   BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
+ bt_dev_dbg(hdev, "Boot time: %lld seconds",
+ ktime_get_boottime_seconds());
+ bt_dev_dbg(hdev, "last error at: %lld seconds",
+ data->last_error);
+ return;
+ }
+ btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
+ btintel_pcie_reset(hdev);
+}
+
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -1240,9 +2375,11 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->send = btintel_pcie_send_frame;
hdev->setup = btintel_pcie_setup;
hdev->shutdown = btintel_shutdown_combined;
- hdev->hw_error = btintel_hw_error;
+ hdev->hw_error = btintel_pcie_hw_error;
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -1250,6 +2387,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
goto exit_error;
}
+ data->dmp_hdr.driver_name = KBUILD_MODNAME;
return 0;
exit_error:
@@ -1339,6 +2477,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
+ btintel_pcie_disable_interrupts(data);
+
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
btintel_pcie_reset_bt(data);
for (int i = 0; i < data->alloc_vecs; i++) {
struct msix_entry *msix_entry;
@@ -1351,8 +2495,6 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
btintel_pcie_release_hdev(data);
- flush_work(&data->rx_work);
-
destroy_workqueue(data->workqueue);
btintel_pcie_free(data);
@@ -1362,13 +2504,132 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+#ifdef CONFIG_DEV_COREDUMP
+static void btintel_pcie_coredump(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data = pci_get_drvdata(pdev);
+
+ if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ return;
+
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
+ queue_work(data->workqueue, &data->rx_work);
+}
+#endif
+
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D3 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+
+ return 0;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D0 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return 0;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
+#ifdef CONFIG_DEV_COREDUMP
+ .driver.coredump = btintel_pcie_coredump
+#endif
};
-module_pci_driver(btintel_pcie_driver);
+
+static int __init btintel_pcie_init(void)
+{
+ return pci_register_driver(&btintel_pcie_driver);
+}
+
+static void __exit btintel_pcie_exit(void)
+{
+ pci_unregister_driver(&btintel_pcie_driver);
+ btintel_pcie_free_restart_list();
+}
+
+module_init(btintel_pcie_init);
+module_exit(btintel_pcie_exit);
MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index baaff70420f5..04b21f968ad3 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,9 +12,19 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C)
+#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110)
+#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170)
+#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174)
+#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178)
+#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C)
+#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180)
+#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
+#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
/* BTINTEL_PCIE_CSR Function Control Register */
@@ -22,6 +32,14 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ (BIT(21))
+/* Stop MAC Access disconnection request */
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS (BIT(22))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ (BIT(23))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */
@@ -30,8 +48,12 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
/* Registers for MSI-X */
#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000)
@@ -44,6 +66,30 @@
#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
+/* IOSF Debug Register */
+#define BTINTEL_PCIE_DBGC_BASE_ADDR (0xf3800300)
+#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C)
+#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C)
+
+#define BTINTEL_PCIE_DBG_IDX_BIT_MASK 0x0F
+#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data) (((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK)
+#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK 0xFFFFFF
+
+/* The DRAM buffer count, the size of each buffer, and the
+ * fragment buffer size
+ */
+#define BTINTEL_PCIE_DBGC_BUFFER_COUNT 16
+#define BTINTEL_PCIE_DBGC_BUFFER_SIZE (256 * 1024) /* 256 KB */
+
+#define BTINTEL_PCIE_DBGC_FRAG_VERSION 1
+#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT BTINTEL_PCIE_DBGC_BUFFER_COUNT
+
+/* Magic number(4), version(4), size of payload length(4) */
+#define BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE 12
+
+/* Number of allocated debug buffers (4) + (LSB(4), MSB(4), size(4)) per buffer */
+#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE 196
+
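The 196 above follows directly from the layout: a 4-byte buffer count plus 16 buffers at 12 bytes (LSB, MSB, size) each. A compile-time sketch of that arithmetic in standalone C11; the kernel itself would reach for static_assert() or BUILD_BUG_ON():

#define DBGC_BUFFER_COUNT 16

/* 4-byte count + 16 x (4-byte LSB + 4-byte MSB + 4-byte size) = 196 */
_Static_assert(4 + DBGC_BUFFER_COUNT * (4 + 4 + 4) == 196,
	       "DBGC fragment payload size mismatch");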
/* Causes for the FH register interrupts */
enum msix_fh_int_causes {
BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */
@@ -53,6 +99,49 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
+};
+
+/* PCIe device states
+ * D0: Host-Device interface is active
+ * D3 hot: Host-Device interface is inactive (as reflected by
+ * IPC_SLEEP_CONTROL_CSR_AD)
+ * D3 cold: Host-Device interface is inactive (as reflected by
+ * IPC_SLEEP_CONTROL_CSR_AD)
+ */
+enum {
+ BTINTEL_PCIE_STATE_D0 = 0,
+ BTINTEL_PCIE_STATE_D3_HOT = 2,
+ BTINTEL_PCIE_STATE_D3_COLD = 3,
+};
+
+enum {
+ BTINTEL_PCIE_CORE_HALTED,
+ BTINTEL_PCIE_HWEXP_INPROGRESS,
+ BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ BTINTEL_PCIE_RECOVERY_IN_PROGRESS,
+ BTINTEL_PCIE_SETUP_DONE
+};
+
+enum btintel_pcie_tlv_type {
+ BTINTEL_CNVI_BT,
+ BTINTEL_WRITE_PTR,
+ BTINTEL_WRAP_CTR,
+ BTINTEL_TRIGGER_REASON,
+ BTINTEL_FW_SHA,
+ BTINTEL_CNVR_TOP,
+ BTINTEL_CNVI_TOP,
+ BTINTEL_DUMP_TIME,
+ BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
+};
+
+/* Causes for the MBOX interrupts */
+enum msix_mbox_int_causes {
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */
};
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
@@ -67,10 +156,13 @@ enum msix_hw_int_causes {
#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000
/* Default interrupt timeout in msec */
-#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000
+#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
+
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of queues for TX and RX
 * It indicates the index of the IA (Index Array)
@@ -92,9 +184,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
@@ -311,6 +400,37 @@ struct rxq {
struct data_buf *bufs;
};
+/* Structure for DRAM debug buffers
+ * @count: Number of debug buffers
+ * @frag_v_addr, @frag_p_addr, @frag_size: TLV fragment address and size
+ * @buf_v_addr, @buf_p_addr: Virtual and DMA address of the buffer area
+ * @bufs: Array of data_buf structures
+ */
+struct btintel_pcie_dbgc {
+ u16 count;
+
+ void *frag_v_addr;
+ dma_addr_t frag_p_addr;
+ u16 frag_size;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+struct btintel_pcie_dump_header {
+ const char *driver_name;
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u16 fw_timestamp;
+ u8 fw_build_type;
+ u32 fw_build_num;
+ u32 fw_git_sha1;
+ u32 cnvi_bt;
+ u32 write_ptr;
+ u32 wrap_ctr;
+ u16 trigger_reason;
+ int state;
+};
+
/* struct btintel_pcie_data
* @pdev: pci device
* @hdev: hdev device
@@ -343,6 +463,7 @@ struct rxq {
* @ia: Index Array struct
* @txq: TX Queue struct
* @rxq: RX Queue struct
+ * @alive_intr_ctxt: Alive interrupt context
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@@ -389,6 +510,9 @@ struct btintel_pcie_data {
struct ia ia;
struct txq txq;
struct rxq rxq;
+ u32 alive_intr_ctxt;
+ struct btintel_pcie_dbgc dbgc;
+ struct btintel_pcie_dump_header dmp_hdr;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
@@ -428,3 +552,11 @@ static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
r &= ~bits;
iowrite32(r, data->base_addr + offset);
}
+
+static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data,
+ u32 addr)
+{
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr);
+ return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG);
+}
+
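btintel_pcie_rd_dev_mem() above is an indirect read: the device-side address is latched into the PRPH address register and the value is then fetched from the PRPH read register. A hedged usage sketch (the address constant is a hypothetical placeholder, not a define from this patch):

/* Illustrative caller; BTINTEL_EXAMPLE_DEV_ADDR is a hypothetical
 * placeholder for a real device-memory address.
 */
static void btintel_pcie_example_read(struct btintel_pcie_data *data)
{
	u32 val = btintel_pcie_rd_dev_mem(data, BTINTEL_EXAMPLE_DEV_ADDR);

	bt_dev_dbg(data->hdev, "dev mem 0x%8.8x: 0x%8.8x",
		   BTINTEL_EXAMPLE_DEV_ADDR, val);
}
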
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 18f34998a120..e26b07a9387d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/mmc/sdio_func.h>
@@ -88,7 +89,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
else
adapter->psmode = 0;
BT_DBG("PS Mode:%s",
- (adapter->psmode) ? "Enable" : "Disable");
+ str_enable_disable(adapter->psmode));
} else {
BT_DBG("PS Mode command failed");
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 07cd308f7abf..93932a0d8625 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -100,7 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
}
/* Configure wakeup (enabled by default) */
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
}
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 9bbf20502163..a8c520dc09e1 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -324,7 +324,7 @@ int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_params.data = NULL;
wmt_params.status = NULL;
- /* Activate funciton the firmware providing to */
+ /* Activate function the firmware providing to */
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
@@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtk_data *data = hci_get_priv(hdev);
int err;
+ bool complete = false;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
kfree_skb(skb);
@@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
fallthrough;
case HCI_DEVCOREDUMP_ACTIVE:
default:
+ /* MediaTek coredumps consist of at least MTK_COREDUMP_NUM packets */
+ if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
+ skb->len > MTK_COREDUMP_END_LEN)
+ if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+ MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
+ complete = true;
+
err = hci_devcd_append(hdev, skb);
if (err < 0)
break;
data->cd_info.cnt++;
- /* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
- if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
- skb->len > MTK_COREDUMP_END_LEN)
- if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
- MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
- bt_dev_info(hdev, "Mediatek coredump end");
- hci_devcd_complete(hdev);
- }
+ if (complete) {
+ bt_dev_info(hdev, "Mediatek coredump end");
+ hci_devcd_complete(hdev);
+ }
break;
}
@@ -638,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
@@ -1215,7 +1214,6 @@ static int btmtk_usb_isointf_init(struct hci_dev *hdev)
struct sk_buff *skb;
int err;
- init_usb_anchor(&btmtk_data->isopkt_anchor);
spin_lock_init(&btmtk_data->isorxlock);
__set_mtk_intr_interface(hdev);
@@ -1326,15 +1324,8 @@ int btmtk_usb_setup(struct hci_dev *hdev)
fwname = FIRMWARE_MT7668;
break;
case 0x7922:
- case 0x7961:
case 0x7925:
- /* Reset the device to ensure it's in the initial state before
- * downloading the firmware to ensure.
- */
-
- if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
- btmtk_usb_subsys_reset(hdev, dev_id);
-
+ case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
@@ -1342,12 +1333,9 @@ int btmtk_usb_setup(struct hci_dev *hdev)
btmtk_usb_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
return err;
}
- set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
-
/* It's Device EndPoint Reset Option Register */
err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
MTK_EP_RST_IN_OUT_OPT);
@@ -1469,10 +1457,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
int btmtk_usb_shutdown(struct hci_dev *hdev)
{
+ struct btmtk_data *data = hci_get_priv(hdev);
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0;
int err;
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
/* Disable the device */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
@@ -1483,9 +1476,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ usb_autopm_put_interface(data->intf);
return err;
}
+ usb_autopm_put_interface(data->intf);
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 11d33cd7b08f..50abefba6d04 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -29,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
@@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ if (bdev->func->irq_handler)
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
@@ -681,7 +682,7 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0)
goto err_release_irq;
- /* Explitly set write-1-clear method */
+ /* Explicitly set write-1-clear method */
val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@@ -722,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1136,7 +1141,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Enable WBS with mSBC codec */
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Enable GPIO reset mechanism */
if (bdev->reset) {
@@ -1249,7 +1254,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+static void btmtksdio_reset(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status;
@@ -1328,6 +1333,8 @@ static int btmtksdio_probe(struct sdio_func *func,
{
struct btmtksdio_dev *bdev;
struct hci_dev *hdev;
+ struct device_node *old_node;
+ bool restore_node;
int err;
bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1358,7 +1365,7 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
- hdev->cmd_timeout = btmtksdio_cmd_timeout;
+ hdev->reset = btmtksdio_reset;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
@@ -1377,7 +1384,7 @@ static int btmtksdio_probe(struct sdio_func *func,
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
sdio_set_drvdata(func, bdev);
@@ -1396,7 +1403,7 @@ static int btmtksdio_probe(struct sdio_func *func,
if (pm_runtime_enabled(bdev->dev))
pm_runtime_disable(bdev->dev);
- /* As explaination in drivers/mmc/core/sdio_bus.c tells us:
+ /* As explanation in drivers/mmc/core/sdio_bus.c tells us:
* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -1407,17 +1414,28 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- err = device_init_wakeup(bdev->dev, true);
+ err = devm_device_init_wakeup(bdev->dev);
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
- bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
- "mediatek,mt7921s-bluetooth");
+ restore_node = false;
+ if (!of_device_is_compatible(bdev->dev->of_node, "mediatek,mt7921s-bluetooth")) {
+ restore_node = true;
+ old_node = bdev->dev->of_node;
+ bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mt7921s-bluetooth");
+ }
+
bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(bdev->reset))
err = PTR_ERR(bdev->reset);
+ if (restore_node) {
+ of_node_put(bdev->dev->of_node);
+ bdev->dev->of_node = old_node;
+ }
+
return err;
}
@@ -1429,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
+ hdev = bdev->hdev;
+
+ /* Make sure to call btmtksdio_close before removing sdio card */
+ if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ btmtksdio_close(hdev);
+
/* Keep the state consistent with btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev;
-
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 64e4d835af52..d9b90ea2ad38 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -27,7 +27,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
@@ -316,7 +316,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
/* Resync STP when unexpected data is being read */
if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
- bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
shdr->prefix, bdev->stp_dlen);
bdev->stp_cursor = 2;
bdev->stp_dlen = 0;
@@ -327,7 +327,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
if (count <= 0)
return NULL;
- /* Tranlate to how much the size of data H4 can handle so far */
+ /* Translate to how much the size of data H4 can handle so far */
*sz_h4 = min_t(int, count, bdev->stp_dlen);
/* Update the remaining size of STP packet */
@@ -872,7 +872,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
SET_HCIDEV_DEV(hdev, &serdev->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 5ea0d23e88c0..d5153fed0518 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP Bluetooth driver
- * Copyright 2023 NXP
+ * Copyright 2023-2025 NXP
*/
#include <linux/module.h>
@@ -16,11 +16,15 @@
#include <linux/crc8.h>
#include <linux/crc32.h>
#include <linux/string_helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
@@ -30,20 +34,22 @@
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
#define BTNXPUART_FW_DOWNLOAD_ABORT 6
+#define BTNXPUART_FW_DUMP_IN_PROGRESS 7
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
-#define FIRMWARE_W8987 "uart8987_bt_v0.bin"
+#define FIRMWARE_W8987 "uart8987_bt.bin"
#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
-#define FIRMWARE_IW416 "uartiw416_bt_v0.bin"
+#define FIRMWARE_IW416 "uartiw416_bt.bin"
+#define FIRMWARE_IW416_OLD "uartiw416_bt_v0.bin"
#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
-#define FIRMWARE_IW615 "uartspi_iw610_v0.bin"
-#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se"
+#define FIRMWARE_IW610 "uartspi_iw610.bin"
+#define FIRMWARE_SECURE_IW610 "uartspi_iw610.bin.se"
#define FIRMWARE_IW624 "uartiw624_bt.bin"
#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
#define FIRMWARE_AW693 "uartaw693_bt.bin"
@@ -59,8 +65,8 @@
#define CHIP_ID_IW624c 0x8001
#define CHIP_ID_AW693a0 0x8200
#define CHIP_ID_AW693a1 0x8201
-#define CHIP_ID_IW615a0 0x8800
-#define CHIP_ID_IW615a1 0x8801
+#define CHIP_ID_IW610a0 0x8800
+#define CHIP_ID_IW610a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@@ -69,7 +75,8 @@
#define FW_AUTH_ENC 0xc0
#define HCI_NXP_PRI_BAUDRATE 115200
-#define HCI_NXP_SEC_BAUDRATE 3000000
+#define HCI_NXP_SEC_BAUDRATE_3M 3000000
+#define HCI_NXP_SEC_BAUDRATE_4M 4000000
#define MAX_FW_FILE_NAME_LEN 50
@@ -81,6 +88,7 @@
#define WAKEUP_METHOD_BREAK 1
#define WAKEUP_METHOD_EXT_BREAK 2
#define WAKEUP_METHOD_RTS 3
+#define WAKEUP_METHOD_GPIO 4
#define WAKEUP_METHOD_INVALID 0xff
/* power save mode status */
@@ -95,14 +103,19 @@
#define PS_STATE_AWAKE 0
#define PS_STATE_SLEEP 1
-/* Bluetooth vendor command : Sleep mode */
+/* NXP vendor commands. Refer to user manual UM11628 on nxp.com */
+/* Set custom BD Address */
+#define HCI_NXP_SET_BD_ADDR 0xfc22
+/* Set Auto-Sleep mode */
#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23
-/* Bluetooth vendor command : Wakeup method */
+/* Set Wakeup method */
#define HCI_NXP_WAKEUP_METHOD 0xfc53
-/* Bluetooth vendor command : Set operational baudrate */
+/* Set operational baudrate */
#define HCI_NXP_SET_OPER_SPEED 0xfc09
-/* Bluetooth vendor command: Independent Reset */
+/* Independent Reset (Soft Reset) */
#define HCI_NXP_IND_RESET 0xfcfc
+/* Trigger FW dump */
+#define HCI_NXP_TRIGGER_DUMP 0xfe91
/* Bluetooth Power State : Vendor cmd params */
#define BT_PS_ENABLE 0x02
@@ -134,6 +147,9 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ bool wakeup_source;
+ struct gpio_desc *h2c_ps_gpio;
+ s32 irq_handler;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -158,6 +174,12 @@ struct btnxpuart_data {
const char *fw_name_old;
};
+enum bootloader_param_change {
+ not_changed,
+ cmd_sent,
+ changed
+};
+
struct btnxpuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
@@ -173,6 +195,7 @@ struct btnxpuart_dev {
u32 fw_v1_sent_bytes;
u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
+ u32 fw_v3_prev_sent;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
wait_queue_head_t fw_dnld_done_wait_q;
@@ -181,12 +204,14 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
- bool timeout_changed;
- bool baudrate_changed;
+ u32 secondary_baudrate;
+ enum bootloader_param_change timeout_changed;
+ enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
+ struct reset_control *pdn;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -200,10 +225,11 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
-/* Bootloader signature error codes */
-#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
-#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
-#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
+/* Bootloader signature error codes: refer to AN12820 from nxp.com */
+#define NXP_CRC_RX_ERROR BIT(0) /* CRC error in previous packet */
+#define NXP_ACK_RX_TIMEOUT BIT(2) /* ACK not received from host */
+#define NXP_HDR_RX_TIMEOUT BIT(3) /* FW Header chunk not received */
+#define NXP_DATA_RX_TIMEOUT BIT(4) /* FW Data chunk not received */
#define HDR_LEN 16
@@ -306,6 +332,35 @@ union nxp_v3_rx_timeout_nak_u {
u8 buf[6];
};
+struct nxp_v3_crc_nak {
+ u8 nak;
+ u8 crc;
+} __packed;
+
+union nxp_v3_crc_nak_u {
+ struct nxp_v3_crc_nak pkt;
+ u8 buf[2];
+};
+
+/* FW dump */
+#define NXP_FW_DUMP_SIZE (1024 * 1000)
+
+struct nxp_fw_dump_hdr {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 buf_type;
+ __le16 buf_len;
+};
+
+union nxp_set_bd_addr_payload {
+ struct {
+ u8 param_id;
+ u8 param_len;
+ u8 param[6];
+ } __packed data;
+ u8 buf[8];
+};
+
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
@@ -315,17 +370,26 @@ static u8 crc8_table[CRC8_TABLE_SIZE];
static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
u32 plen,
- void *param)
+ void *param,
+ bool resp)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
/* set flag to prevent nxp_enqueue from parsing values from this command and
* calling hci_cmd_sync_queue() again.
*/
psdata->driver_sent_cmd = true;
- skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ if (resp) {
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ } else {
+ __hci_cmd_send(hdev, opcode, plen, param);
+ /* Allow command to be sent before tx_work is cancelled
+ * by btnxpuart_flush()
+ */
+ msleep(20);
+ }
psdata->driver_sent_cmd = false;
return skb;
@@ -364,7 +428,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- int status;
+ int status = 0;
if (psdata->ps_state == ps_state ||
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
@@ -372,6 +436,14 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ if (ps_state == PS_STATE_AWAKE)
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ else
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 1);
+ bt_dev_dbg(hdev, "Set h2c_ps_gpio: %s",
+ str_high_low(ps_state == PS_STATE_SLEEP));
+ break;
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
@@ -409,7 +481,7 @@ static void ps_work_func(struct work_struct *work)
static void ps_timeout_func(struct timer_list *t)
{
- struct ps_data *data = from_timer(data, t, ps_timer);
+ struct ps_data *data = timer_container_of(data, t, ps_timer);
struct hci_dev *hdev = data->hdev;
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -421,15 +493,72 @@ static void ps_timeout_func(struct timer_list *t)
}
}
-static void ps_setup(struct hci_dev *hdev)
+static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv;
+
+ bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt");
+ return IRQ_HANDLED;
+}
+
+static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ int ret;
+
+ /* Out-Of-Band Device Wakeup */
+ psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(psdata->h2c_ps_gpio)) {
+ bt_dev_err(hdev, "Error fetching device-wakeup-gpios: %ld",
+ PTR_ERR(psdata->h2c_ps_gpio));
+ return PTR_ERR(psdata->h2c_ps_gpio);
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
+ psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
+ } else if (!psdata->h2c_ps_gpio) {
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios");
+ psdata->h2c_wakeup_gpio = 0xff;
+ }
+
+ /* Out-Of-Band Host Wakeup */
+ if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) {
+ psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup");
+ bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler);
+ if (psdata->irq_handler > 0)
+ psdata->wakeup_source = true;
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) {
+ psdata->c2h_wakeup_gpio = 0xff;
+ if (psdata->wakeup_source) {
+ bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin");
+ psdata->wakeup_source = false;
+ }
+ } else if (!psdata->wakeup_source) {
+ bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt");
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
+
+ if (psdata->wakeup_source) {
+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+ NULL, ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&serdev->dev), nxpdev);
+ if (ret)
+ bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring");
+ disable_irq(psdata->irq_handler);
+ device_init_wakeup(&serdev->dev, true);
+ }
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
+
+ return 0;
}
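For context, the optional properties read above map onto the device node roughly as sketched below. This is an assumption drawn from this reading of the code, not a binding reference; all values are illustrative.

/* Assumed devicetree sketch, for illustration only:
 *
 *	bluetooth {
 *		device-wakeup-gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
 *		nxp,wakein-pin = /bits/ 8 <18>;
 *		wakeup-source;
 *		interrupts-extended = <&gpio 6 IRQ_TYPE_EDGE_FALLING>;
 *		interrupt-names = "wakeup";
 *		nxp,wakeout-pin = /bits/ 8 <19>;
 *	};
 */
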
static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
@@ -480,7 +609,8 @@ static int send_ps_cmd(struct hci_dev *hdev, void *data)
pcmd.ps_cmd = BT_PS_DISABLE;
pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval);
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -514,7 +644,12 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
+ pcmd.h2c_wakeup_gpio = 0xff;
switch (psdata->h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
+ pcmd.h2c_wakeup_gpio = psdata->h2c_wakeup_gpio;
+ break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
break;
@@ -523,9 +658,9 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK;
break;
}
- pcmd.h2c_wakeup_gpio = 0xff;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -549,6 +684,7 @@ static void ps_init(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
+ u8 default_h2c_wakeup_mode = DEFAULT_H2C_WAKEUP_MODE;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS);
usleep_range(5000, 10000);
@@ -556,12 +692,24 @@ static void ps_init(struct hci_dev *hdev)
usleep_range(5000, 10000);
psdata->ps_state = PS_STATE_AWAKE;
- psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
+
+ if (psdata->c2h_wakeup_gpio != 0xff)
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
+ else
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
+ if (psdata->h2c_ps_gpio)
+ default_h2c_wakeup_mode = WAKEUP_METHOD_GPIO;
+
psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS;
- switch (DEFAULT_H2C_WAKEUP_MODE) {
+
+ switch (default_h2c_wakeup_mode) {
+ case WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ usleep_range(5000, 10000);
+ break;
case WAKEUP_METHOD_DTR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
@@ -579,11 +727,6 @@ static void ps_init(struct hci_dev *hdev)
psdata->cur_psmode = PS_MODE_DISABLE;
psdata->target_ps_mode = DEFAULT_PS_MODE;
-
- if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
- hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
- if (psdata->cur_psmode != psdata->target_ps_mode)
- hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
}
/* NXP Firmware Download Feature */
@@ -598,8 +741,8 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->boot_reg_offset = 0;
nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
- nxpdev->baudrate_changed = false;
- nxpdev->timeout_changed = false;
+ nxpdev->baudrate_changed = not_changed;
+ nxpdev->timeout_changed = not_changed;
nxpdev->helper_downloaded = false;
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
@@ -612,8 +755,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
&nxpdev->tx_state),
msecs_to_jiffies(60000));
- release_firmware(nxpdev->fw);
- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ if (nxpdev->fw && strlen(nxpdev->fw_name)) {
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ }
if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout. offset: %d",
@@ -673,7 +818,10 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len)
nxpdev->fw_v3_offset_correction += req_len;
} else if (req_len == sizeof(uart_config)) {
uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr);
- uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
+ if (nxpdev->new_baudrate == HCI_NXP_SEC_BAUDRATE_4M)
+ uart_config.clkdiv.value = __cpu_to_le32(0x01000000);
+ else
+ uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr);
uart_config.uartdiv.value = __cpu_to_le32(1);
uart_config.mcr.address = __cpu_to_le32(uartmcraddr);
@@ -728,6 +876,16 @@ static bool is_fw_downloading(struct btnxpuart_dev *nxpdev)
return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+static bool ind_reset_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state);
+}
+
+static bool fw_dump_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+}
+
static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
{
if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) {
@@ -821,19 +979,19 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
len = __le16_to_cpu(req->len);
if (!nxp_data->helper_fw_name) {
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev,
- len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = changed;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev,
- len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -854,7 +1012,7 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->helper_downloaded = true;
serdev_device_wait_until_sent(nxpdev->serdev, 0);
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ HCI_NXP_SEC_BAUDRATE_3M);
serdev_device_set_flow_control(nxpdev->serdev, true);
} else {
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -946,12 +1104,12 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
- case CHIP_ID_IW615a0:
- case CHIP_ID_IW615a1:
+ case CHIP_ID_IW610a0:
+ case CHIP_ID_IW610a1:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
- fw_name = FIRMWARE_IW615;
+ fw_name = FIRMWARE_IW610;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
- fw_name = FIRMWARE_SECURE_IW615;
+ fw_name = FIRMWARE_SECURE_IW610;
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
@@ -971,6 +1129,9 @@ static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
case CHIP_ID_W9098:
fw_name_old = FIRMWARE_W9098_OLD;
break;
+ case CHIP_ID_IW416:
+ fw_name_old = FIRMWARE_IW416_OLD;
+ break;
}
return fw_name_old;
}
@@ -1005,32 +1166,35 @@ static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_re
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
__u32 offset = __le32_to_cpu(req->offset);
__u16 err = __le16_to_cpu(req->error);
- union nxp_v3_rx_timeout_nak_u nak_tx_buf;
-
- switch (err) {
- case NXP_ACK_RX_TIMEOUT:
- case NXP_HDR_RX_TIMEOUT:
- case NXP_DATA_RX_TIMEOUT:
- nak_tx_buf.pkt.nak = NXP_NAK_V3;
- nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
- nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
- sizeof(nak_tx_buf) - 1, 0xff);
- serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
- sizeof(nak_tx_buf));
- break;
- default:
- bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
- break;
-
+ union nxp_v3_rx_timeout_nak_u timeout_nak_buf;
+ union nxp_v3_crc_nak_u crc_nak_buf;
+
+ if (err & NXP_CRC_RX_ERROR) {
+ crc_nak_buf.pkt.nak = NXP_CRC_ERROR_V3;
+ crc_nak_buf.pkt.crc = crc8(crc8_table, crc_nak_buf.buf,
+ sizeof(crc_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, crc_nak_buf.buf,
+ sizeof(crc_nak_buf));
+ } else if (err & NXP_ACK_RX_TIMEOUT ||
+ err & NXP_HDR_RX_TIMEOUT ||
+ err & NXP_DATA_RX_TIMEOUT) {
+ timeout_nak_buf.pkt.nak = NXP_NAK_V3;
+ timeout_nak_buf.pkt.offset = __cpu_to_le32(offset);
+ timeout_nak_buf.pkt.crc = crc8(crc8_table, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf));
+ } else {
+ bt_dev_err(hdev, "Unknown bootloader error code: %d", err);
}
-
}
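Both NAK paths above seal the packet with a CRC-8 computed over every byte except the CRC field itself, using the MSB-first table the driver fills once at probe time; a sketch of the shared pattern:

/* Sketch: CRC-8 sealing of a bootloader NAK packet (crc8_table is
 * populated once via crc8_populate_msb(crc8_table, POLYNOMIAL8)).
 */
static u8 nxp_nak_crc(const u8 *pkt, size_t pkt_size)
{
	return crc8(crc8_table, pkt, pkt_size - 1, 0xff);
}
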
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct v3_data_req *req;
- __u16 len;
+ __u16 len = 0;
+ __u16 err = 0;
__u32 offset;
if (!process_boot_signature(nxpdev))
@@ -1040,27 +1204,45 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
- if (!req->error) {
+ err = __le16_to_cpu(req->error);
+
+ if (!err) {
nxp_send_ack(NXP_ACK_V3, hdev);
+ if (nxpdev->timeout_changed == cmd_sent)
+ nxpdev->timeout_changed = changed;
+ if (nxpdev->baudrate_changed == cmd_sent)
+ nxpdev->baudrate_changed = changed;
} else {
nxp_handle_fw_download_error(hdev, req);
+ if (nxpdev->timeout_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->timeout_changed = not_changed;
+ }
+ if (nxpdev->baudrate_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->baudrate_changed = not_changed;
+ }
goto free_skb;
}
len = __le16_to_cpu(req->len);
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = cmd_sent;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -1088,6 +1270,7 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->fw_dnld_v3_offset, len);
free_skb:
+ nxpdev->fw_v3_prev_sent = len;
kfree_skb(skb);
return 0;
}
@@ -1103,7 +1286,8 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
if (!psdata)
return 0;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4,
+ (u8 *)&new_baudrate, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -1126,7 +1310,7 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev)
{
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
+ if (ind_reset_in_progress(nxpdev))
serdev_device_set_flow_control(nxpdev->serdev, false);
else
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1155,10 +1339,111 @@ static int nxp_set_ind_reset(struct hci_dev *hdev, void *data)
return hci_recv_frame(hdev, skb);
}
+/* Firmware dump */
+static void nxp_coredump(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 pcmd = 2;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd, true);
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
+ else
+ kfree_skb(skb);
+}
+
+static void nxp_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Nothing to be added in FW dump header */
+}
+
+static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_acl_hdr *acl_hdr = (struct hci_acl_hdr *)skb_pull_data(skb,
+ sizeof(*acl_hdr));
+ struct nxp_fw_dump_hdr *fw_dump_hdr = (struct nxp_fw_dump_hdr *)skb->data;
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u16 seq_num = __le16_to_cpu(fw_dump_hdr->seq_num);
+ __u16 buf_len = __le16_to_cpu(fw_dump_hdr->buf_len);
+ int err;
+
+ if (seq_num == 0x0001) {
+ if (test_and_set_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW dump already in progress");
+ goto free_skb;
+ }
+ bt_dev_warn(hdev, "==== Start FW dump ===");
+ err = hci_devcd_init(hdev, NXP_FW_DUMP_SIZE);
+ if (err < 0)
+ goto free_skb;
+
+ schedule_delayed_work(&hdev->dump.dump_timeout,
+ msecs_to_jiffies(20000));
+ }
+
+ err = hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
+ if (err < 0)
+ goto free_skb;
+
+ if (buf_len == 0) {
+ bt_dev_warn(hdev, "==== FW dump complete ===");
+ hci_devcd_complete(hdev);
+ nxp_set_ind_reset(hdev, NULL);
+ }
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int nxp_recv_acl_pkt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ /* FW dump chunks are ACL packets with conn handle 0xfff */
+ if ((handle & 0x0FFF) == 0xFFF)
+ return nxp_process_fw_dump(hdev, skb);
+ else
+ return hci_recv_frame(hdev, skb);
+}
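The 0x0FFF mask strips the packet-boundary and broadcast flags that share the 16-bit ACL handle field, leaving the 12-bit connection handle; equivalently, using the existing hci_handle() helper:

/* Sketch: bits 12-15 of the ACL handle field carry PB/BC flags. */
u16 raw = __le16_to_cpu(hci_acl_hdr(skb)->handle);
u16 conn_handle = hci_handle(raw);	/* same as (raw & 0x0fff) */
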
+
+static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ union nxp_set_bd_addr_payload pcmd;
+ int err;
+
+ pcmd.data.param_id = 0xfe;
+ pcmd.data.param_len = 6;
+ memcpy(pcmd.data.param, bdaddr, 6);
+
+ /* BD address can be assigned only after first reset command. */
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev,
+ "Reset before setting local-bd-addr failed (%d)",
+ err);
+ return err;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_NXP_SET_BD_ADDR, sizeof(pcmd),
+ pcmd.buf, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Changing device address failed (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
/* NXP protocol */
static int nxp_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
int err = 0;
if (nxp_check_boot_sign(nxpdev)) {
@@ -1171,14 +1456,15 @@ static int nxp_setup(struct hci_dev *hdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_READY");
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
- hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
- }
-
ps_init(hdev);
if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
@@ -1187,6 +1473,22 @@ static int nxp_setup(struct hci_dev *hdev)
return 0;
}
+static int nxp_post_init(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (nxpdev->current_baudrate != nxpdev->secondary_baudrate) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
+ send_wakeup_method_cmd(hdev, NULL);
+ if (psdata->cur_psmode != psdata->target_ps_mode)
+ send_ps_cmd(hdev, NULL);
+ return 0;
+}
+
static void nxp_hw_err(struct hci_dev *hdev, u8 code)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1205,25 +1507,47 @@ static int nxp_shutdown(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct sk_buff *skb;
- u8 *status;
u8 pcmd = 0;
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) {
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- status = skb_pull_data(skb, 1);
- if (status) {
- serdev_device_set_flow_control(nxpdev->serdev, false);
- set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
- }
- kfree_skb(skb);
+ if (ind_reset_in_progress(nxpdev)) {
+ if (test_and_clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS,
+ &nxpdev->tx_state))
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, false);
+ else
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, true);
+ serdev_device_set_flow_control(nxpdev->serdev, false);
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ /* The HCI_NXP_IND_RESET command may not return any response */
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
}
return 0;
}
+static bool nxp_wakeup(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->c2h_wakeupmode != BT_HOST_WAKEUP_METHOD_NONE)
+ return true;
+
+ return false;
+}
+
+static void nxp_reset(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ if (!ind_reset_in_progress(nxpdev) && !fw_dump_in_progress(nxpdev)) {
+ bt_dev_dbg(hdev, "CMD Timeout detected. Resetting.");
+ nxp_set_ind_reset(hdev, NULL);
+ }
+}
+
static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1244,6 +1568,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
struct wakeup_cmd_payload wakeup_parm;
__le32 baudrate_parm;
+ if (fw_dump_in_progress(nxpdev))
+ return -EBUSY;
+
/* if vendor commands are received from user space (e.g. hcitool), update
* driver flags accordingly and ask driver to re-send the command to FW.
* In case the payload for any command does not match expected payload
@@ -1275,6 +1602,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio;
psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio;
switch (wakeup_parm.h2c_wakeupmode) {
+ case BT_CTRL_WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ break;
case BT_CTRL_WAKEUP_METHOD_DSR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
break;
@@ -1341,7 +1671,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
skb_pull(skb, len);
if (skb->len > 0) {
skb_queue_head(&nxpdev->txq, skb);
- break;
+ continue;
}
switch (hci_skb_pkt_type(skb)) {
@@ -1409,7 +1739,7 @@ static int btnxpuart_flush(struct hci_dev *hdev)
}
static const struct h4_recv_pkt nxp_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = nxp_recv_acl_pkt },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ H4_RECV_ISO, .recv = hci_recv_frame },
@@ -1431,11 +1761,13 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
/* Safe to ignore out-of-sync bootloader signatures */
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
return count;
}
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
nxpdev->hdev->stat.byte_rx += count;
return count;
}
@@ -1450,10 +1782,41 @@ static const struct serdev_device_ops btnxpuart_client_ops = {
.write_wakeup = btnxpuart_write_wakeup,
};
+static void nxp_coredump_notify(struct hci_dev *hdev, int state)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
+
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ switch (state) {
+ case HCI_DEVCOREDUMP_ACTIVE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_ACTIVE");
+ break;
+ case HCI_DEVCOREDUMP_DONE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_DONE");
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_TIMEOUT");
+ break;
+ default:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_STATE_%d",
+ state);
+ break;
+ }
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
+ bdaddr_t ba = {0};
+ int err;
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1477,10 +1840,31 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (!nxpdev->fw_init_baudrate)
nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE;
+ device_property_read_u32(&nxpdev->serdev->dev, "max-speed",
+ &nxpdev->secondary_baudrate);
+ if (!nxpdev->secondary_baudrate ||
+ (nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_3M &&
+ nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_4M)) {
+ if (nxpdev->secondary_baudrate)
+ dev_err(&serdev->dev,
+ "Invalid max-speed. Using default 3000000.");
+ nxpdev->secondary_baudrate = HCI_NXP_SEC_BAUDRATE_3M;
+ }
+
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
crc8_populate_msb(crc8_table, POLYNOMIAL8);
+ nxpdev->pdn = devm_reset_control_get_optional_shared(&serdev->dev, NULL);
+ if (IS_ERR(nxpdev->pdn))
+ return PTR_ERR(nxpdev->pdn);
+
+ err = devm_regulator_get_enable(&serdev->dev, "vcc");
+ if (err) {
+ dev_err(&serdev->dev, "Failed to enable vcc regulator\n");
+ return err;
+ }
+
/* Initialize and register HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
@@ -1488,6 +1872,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
}
+ reset_control_deassert(nxpdev->pdn);
+
nxpdev->hdev = hdev;
hdev->bus = HCI_UART;
@@ -1498,20 +1884,38 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
hdev->close = btnxpuart_close;
hdev->flush = btnxpuart_flush;
hdev->setup = nxp_setup;
+ hdev->post_init = nxp_post_init;
hdev->send = nxp_enqueue;
hdev->hw_error = nxp_hw_err;
hdev->shutdown = nxp_shutdown;
+ hdev->wakeup = nxp_wakeup;
+ hdev->reset = nxp_reset;
+ hdev->set_bdaddr = nxp_set_bdaddr;
SET_HCIDEV_DEV(hdev, &serdev->dev);
+ device_property_read_u8_array(&nxpdev->serdev->dev,
+ "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (bacmp(&ba, BDADDR_ANY))
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
+
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
- hci_free_dev(hdev);
- return -ENODEV;
+ goto probe_fail;
}
- ps_setup(hdev);
+ if (ps_setup(hdev))
+ goto probe_fail;
+
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr,
+ nxp_coredump_notify);
return 0;
+
+probe_fail:
+ reset_control_assert(nxpdev->pdn);
+ hci_free_dev(hdev);
+ return -ENODEV;
}
static void nxp_serdev_remove(struct serdev_device *serdev)
@@ -1534,8 +1938,10 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
nxp_set_baudrate_cmd(hdev, NULL);
}
}
+
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
}
@@ -1546,6 +1952,11 @@ static int nxp_serdev_suspend(struct device *dev)
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_SLEEP);
+
+ if (psdata->wakeup_source) {
+ enable_irq_wake(psdata->irq_handler);
+ enable_irq(psdata->irq_handler);
+ }
return 0;
}
@@ -1554,11 +1965,27 @@ static int nxp_serdev_resume(struct device *dev)
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
+ if (psdata->wakeup_source) {
+ disable_irq(psdata->irq_handler);
+ disable_irq_wake(psdata->irq_handler);
+ }
+
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
#endif
+#ifdef CONFIG_DEV_COREDUMP
+static void nxp_serdev_coredump(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
+
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
@@ -1589,6 +2016,9 @@ static struct serdev_device_driver nxp_serdev_driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
.pm = &nxp_pm_ops,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump = nxp_serdev_coredump,
+#endif
},
};
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dfbbac92242a..7c958d6065be 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return false;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* NVM file name has an extension; replace it with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
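A worked example of the fallback (file names assumed for illustration): a board-specific NVM name is rewritten to its generic .bin sibling, while a name already ending in .bin makes the function return false so the retry is skipped:

char fwname[64] = "qca/hpnv21g.b8c";	/* assumed board-specific name */

if (qca_get_alt_nvm_file(fwname, sizeof(fwname))) {
	/* fwname is now "qca/hpnv21g.bin"; a second call would
	 * return false since the name is already the default.
	 */
}
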
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -693,48 +739,53 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bacmp(&bda->bdaddr, &config->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set the default values for variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
-}
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
- const char *stem, u8 rom_ver, u16 bid)
-{
- if (bid == 0x0)
- snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
- else if (bid & 0xff00)
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%x", stem, rom_ver, bid);
- else
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
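Worked examples of the generated names (all input values assumed for illustration):

/* stem "hpnv", rom_ver 0x21, GF silicon (variant "g"), board id 0x8c:
 *   QCA_WCN6855 (prefix "b") -> "qca/hpnv21g.b8c"
 *   QCA_QCA2066 (prefix "")  -> "qca/hpnv21g.8c"
 * board id 0x0 or 0xffff     -> "qca/hpnv21g.bin"
 */
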
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
+ const char *variant = "";
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -761,44 +812,52 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- switch (soc_type) {
- case QCA_WCN3990:
- case QCA_WCN3991:
- case QCA_WCN3998:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN3988:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/apbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA2066:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA6390:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/htbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN6750:
- /* Choose mbn file by default.If mbn file is not found
- * then choose tlv file
- */
- config.type = ELF_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/msbtfw%02x.mbn", rom_ver);
- break;
- case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtbtfw%02x.tlv", rom_ver);
- break;
- default:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/rampatch_%08x.bin", soc_ver);
+ if (rampatch_name) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
+ } else {
+ switch (soc_type) {
+ case QCA_WCN3950:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3988:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN6750:
+ /* Choose the mbn file by default. If the mbn file is not found,
+ * then choose the tlv file
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
+ break;
+ case QCA_WCN6855:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN7850:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hmtbtfw%02x.tlv", rom_ver);
+ break;
+ default:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
@@ -816,28 +875,42 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
+ variant = "t";
+ else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
+ variant = "s";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmnv%02x%s.bin", rom_ver, variant);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02xu.bin", rom_ver);
- } else {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02x.bin", rom_ver);
- }
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x%s.bin", rom_ver, variant);
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -848,13 +921,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
@@ -886,6 +960,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
* VsMsftOpCode.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
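For reference, the board-aware naming in qca_get_nvm_name_by_board() composes
paths as sketched below. Only the format strings are taken from the hunk
above; the rom_ver gate and the "b" board-ID prefix are assumptions about the
elided top of the helper.

	#include <stdio.h>

	/* Sketch of the visible naming branches; the rom_ver gate and the
	 * prefix value are assumed, not taken from the patch.
	 */
	static void nvm_name(char *buf, size_t len, const char *stem,
			     unsigned char rom_ver, const char *variant,
			     const char *prefix, unsigned short bid)
	{
		if (rom_ver)
			snprintf(buf, len, "qca/%s%02x%s.%s%02x",
				 stem, rom_ver, variant, prefix, bid);
		else if (bid == 0x0 || bid == 0xffff)
			snprintf(buf, len, "qca/%s%s.bin", stem, variant);
		else
			snprintf(buf, len, "qca/%s%s.%s%02x",
				 stem, variant, prefix, bid);
	}

	int main(void)
	{
		char name[64];

		/* hypothetical WCN6855 part: stem "hpnv", ROM 0x21, board 0x0201 */
		nvm_name(name, sizeof(name), "hpnv", 0x21, "", "b", 0x0201);
		printf("%s\n", name);	/* qca/hpnv21.b201 */
		return 0;
	}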
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index bb5207d7a8c7..8f3c1b1c77b3 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,6 +41,9 @@
#define QCA_WCN3991_SOC_ID 0x40014320
+#define QCA_WCN3950_SOC_ID_T 0x40074130
+#define QCA_WCN3950_SOC_ID_S 0x40075130
+
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
@@ -145,6 +148,7 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
+ QCA_WCN3950,
QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
@@ -161,7 +165,7 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name);
+ const char *firmware_name, const char *rampatch_name);
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
@@ -176,7 +180,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name,
+ const char *rampatch_name)
{
return -EOPNOTSUPP;
}
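The widened prototype keeps both overrides optional. A usage sketch (hci_qca.c
is the real caller; the names below are illustrative only):

	/* NULL for both names keeps the per-SoC default rampatch/NVM choices */
	err = qca_uart_setup(hdev, baudrate, QCA_WCN6855, ver, NULL, NULL);

	/* A rampatch_name is used verbatim under qca/, while an extensionless
	 * firmware_name goes through the board-ID lookup path added above.
	 */
	err = qca_uart_setup(hdev, baudrate, QCA_WCN6855, ver,
			     "hpnv21", "hpbtfw21.tlv");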
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 88dbb2f3fabf..d2e13fcb6bab 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -117,7 +117,7 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
/* Devices do not have persistent storage for BD address. Retrieve
* it from the firmware node property.
*/
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
return 0;
}
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
static struct platform_driver btqcomsmd_driver = {
.probe = btqcomsmd_probe,
- .remove_new = btqcomsmd_remove,
+ .remove = btqcomsmd_remove,
.driver = {
.name = "btqcomsmd",
.of_match_table = btqcomsmd_of_match,
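On the hci_set_quirk() conversions that recur throughout this series: judging
by the call sites, the helpers are thin wrappers over the bit ops previously
open-coded on hdev->quirks. A sketch of the assumed shape (the authoritative
definitions live in include/net/bluetooth/hci_core.h; the quirk_flags field
name is this sketch's guess):

	static inline void hci_set_quirk(struct hci_dev *hdev, int nr)
	{
		set_bit(nr, hdev->quirk_flags);	/* was: set_bit(nr, &hdev->quirks) */
	}

	static inline bool hci_test_quirk(struct hci_dev *hdev, int nr)
	{
		return test_bit(nr, hdev->quirk_flags);
	}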
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 0bcb44cf7b31..6abd962502e3 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -693,7 +693,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
- * hardware supported by this firwmare file.
+ * hardware supported by this firmware file.
* Once we have that, we double-check that project_id is suitable
* for the hardware we are working with.
*/
@@ -1215,6 +1215,8 @@ next:
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
+ if (!ret)
+ ret = -EINVAL;
goto err_free;
}
}
@@ -1285,7 +1287,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Enable central-peripheral role (able to create new connections with
* an existing connection in slave role).
@@ -1299,7 +1301,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8851B:
case CHIP_ID_8922A:
case CHIP_ID_8852BT:
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* RTL8852C needs to transmit mSBC data continuously without
* the zero length of USB packets for the ALT 6 supported chips
@@ -1310,7 +1312,8 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
if (btrtl_dev->project_id == CHIP_ID_8852A ||
btrtl_dev->project_id == CHIP_ID_8852B ||
btrtl_dev->project_id == CHIP_ID_8852C)
- set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER);
hci_set_aosp_capable(hdev);
break;
@@ -1329,8 +1332,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
* but it doesn't support any features from page 2 -
* it either responds with garbage or with error status
*/
- set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
- &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2);
break;
default:
break;
@@ -1351,12 +1353,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
- hci_set_hw_info(hdev,
+ if (btrtl_dev->ic_info) {
+ hci_set_hw_info(hdev,
"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
btrtl_dev->ic_info->lmp_subver,
btrtl_dev->ic_info->hci_rev,
btrtl_dev->ic_info->hci_ver,
btrtl_dev->ic_info->hci_bus);
+ }
btrtl_free(btrtl_dev);
return ret;
@@ -1371,7 +1375,7 @@ int btrtl_shutdown_realtek(struct hci_dev *hdev)
/* According to the vendor driver, BT must be reset on close to avoid
* firmware crash.
*/
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "HCI reset during shutdown failed");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index a69feb08486a..8325655ce6aa 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -327,7 +327,7 @@ static int btsdio_probe(struct sdio_func *func,
hdev->send = btsdio_send_frame;
if (func->vendor == 0x0104 && func->device == 0x00c5)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e9534fbc92e3..5e9ebf0c5312 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -65,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -297,19 +299,49 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN6855 chipset */
- { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
@@ -321,8 +353,12 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
@@ -333,43 +369,47 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x28de, 0x1401), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
+
+ /* QCA WCN785x chipset */
+ { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
-
- /* QCA WCN785x chipset */
- { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
@@ -475,6 +515,17 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3601), .driver_info = BTUSB_REALTEK },
+
+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -524,6 +575,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -563,6 +618,16 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Additional MediaTek MT7920 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -582,6 +647,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3576), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
@@ -592,6 +659,8 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -612,12 +681,18 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
@@ -626,16 +701,32 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
@@ -644,6 +735,16 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x7009), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -716,6 +817,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -846,9 +951,9 @@ struct btusb_data {
int (*suspend)(struct hci_dev *hdev);
int (*resume)(struct hci_dev *hdev);
+ int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
- unsigned cmd_timeout_cnt;
struct qca_dump_info qca_dump;
};
@@ -858,11 +963,6 @@ static void btusb_reset(struct hci_dev *hdev)
struct btusb_data *data;
int err;
- if (hdev->reset) {
- hdev->reset(hdev);
- return;
- }
-
data = hci_get_drvdata(hdev);
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -875,15 +975,12 @@ static void btusb_reset(struct hci_dev *hdev)
usb_queue_reset_device(data->intf);
}
-static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+static void btusb_intel_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
struct btintel_data *intel_data = hci_get_priv(hdev);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (intel_data->acpi_reset_method) {
if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
@@ -956,7 +1053,7 @@ static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
}
}
-static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
+static void btusb_rtl_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
@@ -966,9 +1063,6 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -1003,19 +1097,16 @@ static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}
-static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+static void btusb_qca_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
- bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ bt_dev_info(hdev, "Ramdump in progress, defer reset");
return;
}
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (reset_gpio) {
bt_dev_err(hdev, "Reset qca device via bt_en gpio");
@@ -1061,7 +1152,7 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
- /* Trigger dequeue immediatelly if an event is received */
+ /* Trigger dequeue immediately if an event is received */
schedule_delayed_work(&data->rx_work, 0);
}
@@ -1112,6 +1203,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+ /* Each chunk should correspond to one or more complete
+ * events, so if there are bytes left over that don't
+ * constitute a new event, this is likely a bug in the
+ * controller.
+ */
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
@@ -2075,7 +2178,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2404,16 +2508,18 @@ static int btusb_setup_csr(struct hci_dev *hdev)
* Probably will need to be expanded in the future;
* without these the controller will lock up.
*/
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_VOICE_SETTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
*/
- clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_clear_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
+ hci_clear_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/*
* Special workaround for these BT 4.0 chip clones, and potentially more:
@@ -2524,12 +2630,12 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
else
urb = alloc_ctrl_urb(hdev, skb);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hdev, opcode);
} else {
urb = alloc_ctrl_urb(hdev, skb);
@@ -2549,7 +2655,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2607,22 +2714,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
+ /*
+ * The function usb_driver_claim_interface() is documented to need
+ * locks held if it's not called from a probe routine. The code here
+ * is called from the hci_power_on workqueue, so grab the lock.
+ */
+ device_lock(&btmtk_data->isopkt_intf->dev);
err = usb_driver_claim_interface(&btusb_driver,
btmtk_data->isopkt_intf, data);
+ device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
- bt_dev_err(data->hdev, "Failed to claim iso interface");
+ bt_dev_err(data->hdev, "Failed to claim iso interface: %d", err);
return;
}
set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
+ init_usb_anchor(&btmtk_data->isopkt_anchor);
}
-static void btusb_mtk_release_iso_intf(struct btusb_data *data)
+static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
- if (btmtk_data->isopkt_intf) {
+ if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
@@ -2636,6 +2751,16 @@ static void btusb_mtk_release_iso_intf(struct btusb_data *data)
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
+static int btusb_mtk_disconnect(struct hci_dev *hdev)
+{
+ /* This function describes the additional steps MediaTek takes
+ * when the Bluetooth USB driver's disconnect function is called.
+ */
+ btusb_mtk_release_iso_intf(hdev);
+
+ return 0;
+}
+
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2652,8 +2777,8 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
- if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
- btusb_mtk_release_iso_intf(data);
+ /* Release MediaTek ISO data interface */
+ btusb_mtk_release_iso_intf(hdev);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@@ -2698,22 +2823,24 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
btmtk_data->reset_sync = btusb_mtk_reset;
/* Claim ISO data interface and endpoint */
- btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
- if (btmtk_data->isopkt_intf)
+ if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
btusb_mtk_claim_iso_intf(data);
+ }
return btmtk_usb_setup(hdev);
}
static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ int ret;
- if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
- btusb_mtk_release_iso_intf(data);
+ ret = btmtk_usb_shutdown(hdev);
- return btmtk_usb_shutdown(hdev);
+ /* Release MediaTek iso interface after shutdown */
+ btusb_mtk_release_iso_intf(hdev);
+
+ return ret;
}
#ifdef CONFIG_PM
@@ -2886,55 +3013,27 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- int ret = 1;
+ int ret = 0;
+ unsigned int skip = 0;
u8 pkt_type;
- u8 *sk_ptr;
- unsigned int sk_len;
u16 seqno;
u32 dump_size;
- struct hci_event_hdr *event_hdr;
- struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
- sk_ptr = skb->data;
- sk_len = skb->len;
+ skip = sizeof(struct hci_event_hdr);
+ if (pkt_type == HCI_ACLDATA_PKT)
+ skip += sizeof(struct hci_acl_hdr);
- if (pkt_type == HCI_ACLDATA_PKT) {
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
- return 0;
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
- } else {
- event_hdr = hci_event_hdr(skb);
- }
-
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return 0;
-
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
-
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data))
- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return 0;
+ skb_pull(skb, skip);
+ dump_hdr = (struct qca_dump_hdr *)skb->data;
- /*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -2954,16 +3053,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
- sk_ptr += offsetof(struct qca_dump_hdr, data0);
- sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
- sk_ptr += offsetof(struct qca_dump_hdr, data);
- sk_len -= offsetof(struct qca_dump_hdr, data);
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@@ -2983,7 +3081,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
- skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3008,17 +3105,74 @@ out:
return ret;
}
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
+ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
+ goto out;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (acl_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (evt_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
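The refactor above separates classification from consumption: the is-dump
probes parse a clone, so skb_pull_data() never disturbs an skb that may still
have to reach hci_recv_frame() unmodified. The pattern, reduced to a sketch:

	/* Parse-on-a-clone: header checks without mutating the original skb */
	static bool pkt_has_vendor_evt(struct sk_buff *skb)
	{
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
		struct hci_event_hdr *hdr;
		bool match = false;

		if (!clone)
			return false;

		hdr = skb_pull_data(clone, sizeof(*hdr));	/* NULL if too short */
		if (hdr && hdr->evt == HCI_VENDOR_PKT)
			match = true;

		consume_skb(clone);	/* the original skb is untouched */
		return match;
	}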
@@ -3061,6 +3215,12 @@ struct qca_device_info {
u8 ver_offset; /* offset of version structure in rampatch */
};
+struct qca_custom_firmware {
+ u32 rom_version;
+ u16 board_id;
+ const char *subdirectory;
+};
+
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
@@ -3074,6 +3234,57 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
};
+static const struct qca_custom_firmware qca_custom_btfws[] = {
+ { 0x00130201, 0x030A, "QCA2066" },
+ { },
+};
+
+static u16 qca_extract_board_id(const struct qca_version *ver)
+{
+ u16 flag = le16_to_cpu(ver->flag);
+ u16 board_id = 0;
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+ /* The board_id is composed of two bytes: the upper byte
+ * is the chip ID and the lower byte is the platform ID,
+ * matching the (chip_id << 8) + platform_id computation
+ * below. For example, board ID 0x010A has chip ID 0x01
+ * and platform ID 0x0A. There are several platforms, and
+ * platform IDs are continuously added.
+ * Platform ID:
+ * 0x00 is for Mobile
+ * 0x01 is for X86
+ * 0x02 is for Automotive
+ * 0x03 is for Consumer electronic
+ */
+ board_id = (ver->chip_id << 8) + ver->platform_id;
+ }
+
+ /* Take 0xffff as invalid board ID */
+ if (board_id == 0xffff)
+ board_id = 0;
+
+ return board_id;
+}
+
+static const char *qca_get_fw_subdirectory(const struct qca_version *ver)
+{
+ const struct qca_custom_firmware *ptr;
+ u32 rom_ver;
+ u16 board_id;
+
+ rom_ver = le32_to_cpu(ver->rom_version);
+ board_id = qca_extract_board_id(ver);
+ if (!board_id)
+ return NULL;
+
+ for (ptr = qca_custom_btfws; ptr->rom_version; ptr++) {
+ if (ptr->rom_version == rom_ver &&
+ ptr->board_id == board_id)
+ return ptr->subdirectory;
+ }
+
+ return NULL;
+}
+
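A worked example from the table above: a QCA2066 that reports chip_id 0x03 and
platform_id 0x0A under QCA_FLAG_MULTI_NVM yields

	u16 board_id = (0x03 << 8) + 0x0A;	/* 0x030A */

which, together with rom_version 0x00130201, matches qca_custom_btfws[0], so
qca_get_fw_subdirectory() returns "QCA2066" and the firmware is fetched from
qca/QCA2066/ instead of qca/.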
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
void *data, u16 size)
{
@@ -3178,15 +3389,22 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
+ const char *fw_subdir;
u32 ver_rom, ver_patch, rver_rom;
u16 rver_rom_low, rver_rom_high, rver_patch;
- char fwname[64];
+ char fwname[80];
int err;
ver_rom = le32_to_cpu(ver->rom_version);
ver_patch = le32_to_cpu(ver->patch_version);
- snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ if (fw_subdir)
+ snprintf(fwname, sizeof(fwname), "qca/%s/rampatch_usb_%08x.bin",
+ fw_subdir, ver_rom);
+ else
+ snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin",
+ ver_rom);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3230,44 +3448,34 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
- u16 flag = le16_to_cpu(ver->flag);
+ const char *variant, *fw_subdir;
+ int len;
+ u16 board_id;
- if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* The board_id should be split into two bytes
- * The 1st byte is chip ID, and the 2nd byte is platform ID
- * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
- * we have several platforms, and platform IDs are continuously added
- * Platform ID:
- * 0x00 is for Mobile
- * 0x01 is for X86
- * 0x02 is for Automotive
- * 0x03 is for Consumer electronic
- */
- u16 board_id = (ver->chip_id << 8) + ver->platform_id;
- const char *variant;
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ board_id = qca_extract_board_id(ver);
- switch (le32_to_cpu(ver->ram_version)) {
- case WCN6855_2_0_RAM_VERSION_GF:
- case WCN6855_2_1_RAM_VERSION_GF:
- variant = "_gf";
- break;
- default:
- variant = "";
- break;
- }
-
- if (board_id == 0) {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
- rom_version, variant);
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
- rom_version, variant, board_id);
- }
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
- rom_version);
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = NULL;
+ break;
}
+ if (fw_subdir)
+ len = snprintf(fwname, max_size, "qca/%s/nvm_usb_%08x",
+ fw_subdir, rom_version);
+ else
+ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x",
+ rom_version);
+ if (variant)
+ len += snprintf(fwname + len, max_size - len, "%s", variant);
+ if (board_id)
+ len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
+ len += snprintf(fwname + len, max_size - len, ".bin");
}
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
@@ -3275,7 +3483,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
const struct qca_device_info *info)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[80];
int err;
btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
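Following the piecewise assembly in btusb_generate_qca_nvm_name() above, the
same QCA2066 example (rom 0x00130201, board 0x030A, no RAM variant) produces:

	len  = snprintf(fwname, sizeof(fwname), "qca/%s/nvm_usb_%08x",
			"QCA2066", 0x00130201);	/* subdirectory branch */
	len += snprintf(fwname + len, sizeof(fwname) - len, "_%04x", 0x030A);
	len += snprintf(fwname + len, sizeof(fwname) - len, ".bin");
	/* fwname == "qca/QCA2066/nvm_usb_00130201_030a.bin" */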
@@ -3376,7 +3584,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
/* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to
* work with the likes of HSP/HFP mSBC.
*/
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
return 0;
}
@@ -3595,12 +3803,143 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
.llseek = default_llseek,
};
+#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
+#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0
+struct btusb_hci_drv_rp_supported_altsettings {
+ __u8 num;
+ __u8 altsettings[];
+} __packed;
+
+#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
+#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1
+struct btusb_hci_drv_cmd_switch_altsetting {
+ __u8 altsetting;
+} __packed;
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btusb_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+
+ /* Driver specific commands */
+ { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
+ { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
+};
+static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btusb_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, btusb_driver.name);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btusb_hci_drv_supported_commands[i].opcode;
+ bt_dev_info(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btusb_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_data *drvdata = hci_get_drvdata(hdev);
+ struct btusb_hci_drv_rp_supported_altsettings *rp;
+ size_t rp_size;
+ int err;
+ u8 i;
+
+ /* There are at most 7 altsettings (0 - 6) */
+ rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ rp->num = 0;
+ if (!drvdata->isoc)
+ goto done;
+
+ for (i = 0; i <= 6; i++) {
+ if (btusb_find_altsetting(drvdata, i))
+ rp->altsettings[rp->num++] = i;
+ }
+
+done:
+ rp_size = sizeof(*rp) + rp->num;
+
+ err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
+ u8 status;
+
+ if (cmd->altsetting > 6) {
+ status = HCI_DRV_STATUS_INVALID_PARAMETERS;
+ } else {
+ if (btusb_switch_alt_setting(hdev, cmd->altsetting))
+ status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
+ else
+ status = HCI_DRV_STATUS_SUCCESS;
+ }
+
+ return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
+ status);
+}
+
+static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
+ { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
+ { btusb_hci_drv_supported_altsettings,
+ BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
+ { btusb_hci_drv_switch_altsetting,
+ BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
+};
+
+static struct hci_drv btusb_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
+ .common_handlers = btusb_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
+ .specific_handlers = btusb_hci_drv_specific_handlers,
+};
+
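Two reading aids for the block above, both derived from the call sites rather
than from hci_drv.h (which is not part of this hunk): hci_opcode_pack() is the
standard (ogf << 10) | ocf encoding, and the Read Info response is a fixed
header followed by one __le16 per advertised opcode, which is where the
num_supported_commands * 2 sizing comes from. Implied reply layout (sketch;
the name-length constant is a placeholder):

	struct hci_drv_rp_read_info {
		char	driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH];
		__le16	num_supported_commands;
		__le16	supported_commands[];
	} __packed;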
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3740,12 +4079,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->notify = btusb_notify;
- hdev->wakeup = btusb_wakeup;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
+ hdev->wakeup = btusb_wakeup;
+ hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3760,10 +4100,10 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
if (id->driver_info & BTUSB_CW6622)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM2045)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
@@ -3797,7 +4137,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Transport specific configuration */
hdev->send = btusb_send_frame_intel;
- hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ hdev->reset = btusb_intel_reset;
if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
@@ -3817,39 +4157,40 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btmtk_reset_sync;
+ hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
+ data->disconnect = btusb_mtk_disconnect;
}
if (id->driver_info & BTUSB_SWAVE) {
- set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
hdev->manufacturer = 2;
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_ATH3012) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
}
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hdev->reset = btusb_qca_reset;
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
btusb_check_needs_reset_resume(intf);
}
@@ -3862,8 +4203,8 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hdev->reset = btusb_qca_reset;
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -3881,7 +4222,7 @@ static int btusb_probe(struct usb_interface *intf,
btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
- hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->reset = btusb_rtl_reset;
hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
@@ -3891,33 +4232,35 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_ACTIONS_SEMI) {
/* Support is advertised, but not implemented */
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_CREATE_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT);
}
if (!reset)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
- set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE);
}
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (id->driver_info & BTUSB_INVALID_LE_STATES)
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
}
if (id->driver_info & BTUSB_CSR) {
@@ -3926,10 +4269,10 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (bcdDevice < 0x117)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* This must be set first in case we disable it for fakes */
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Fake CSR devices with broken commands */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 &&
@@ -3942,7 +4285,7 @@ static int btusb_probe(struct usb_interface *intf,
/* New sniffer firmware has crippled HCI interface */
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
@@ -4013,6 +4356,9 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->diag)
usb_set_intfdata(data->diag, NULL);
+ if (data->disconnect)
+ data->disconnect(hdev);
+
hci_unregister_dev(hdev);
if (intf == data->intf) {
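The cmd_timeout -> reset rename running through this file also drops each
vendor's private five-strikes counter (the removed ++data->cmd_timeout_cnt < 5
checks), so the callbacks now act on the first invocation; the escalation
policy presumably moves to the HCI core that invokes hdev->reset. The
converted callbacks share this shape (a sketch based on the qca/rtl variants
above, not a new callback in the patch):

	static void example_vendor_reset(struct hci_dev *hdev)
	{
		struct btusb_data *data = hci_get_drvdata(hdev);

		if (!data->reset_gpio) {
			btusb_reset(hdev);	/* generic USB port reset */
			return;
		}
		/* otherwise pulse the vendor bt_en GPIO, as btusb_qca_reset()
		 * and btusb_rtl_reset() do above
		 */
	}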
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 28cf2d8c2d48..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <linux/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-#define H4_RECV_ISO \
- .type = HCI_ISODATA_PKT, \
- .hlen = HCI_ISO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index dc9541e76d81..707e90f80130 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -313,8 +313,7 @@ static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
goto exit;
exit:
- if (firmware)
- release_firmware(firmware);
+ release_firmware(firmware);
return ret;
}
@@ -425,7 +424,7 @@ static int aml_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) {
bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
exit:
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 89d4c2224546..f96617b85d87 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,8 +643,8 @@ static int bcm_setup(struct hci_uart *hu)
* Allow the bootloader to set a valid address through the
* device tree.
*/
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+ if (hci_test_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR))
+ hci_set_quirk(hu->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -1068,17 +1068,17 @@ static struct clk *bcm_get_txco(struct device *dev)
struct clk *clk;
/* New explicit name */
- clk = devm_clk_get(dev, "txco");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "txco");
+ if (clk)
return clk;
/* Deprecated name */
- clk = devm_clk_get(dev, "extclk");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "extclk");
+ if (clk)
return clk;
/* Original code used no name at all */
- return devm_clk_get(dev, NULL);
+ return devm_clk_get_optional(dev, NULL);
}
static int bcm_get_resources(struct bcm_device *dev)
@@ -1093,21 +1093,12 @@ static int bcm_get_resources(struct bcm_device *dev)
return 0;
dev->txco_clk = bcm_get_txco(dev->dev);
-
- /* Handle deferred probing */
- if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->txco_clk);
-
- /* Ignore all other errors as before */
if (IS_ERR(dev->txco_clk))
- dev->txco_clk = NULL;
-
- dev->lpo_clk = devm_clk_get(dev->dev, "lpo");
- if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->lpo_clk);
+ return PTR_ERR(dev->txco_clk);
+ dev->lpo_clk = devm_clk_get_optional(dev->dev, "lpo");
if (IS_ERR(dev->lpo_clk))
- dev->lpo_clk = NULL;
+ return PTR_ERR(dev->lpo_clk);
/* Check if we accidentally fetched the lpo clock twice */
if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) {
@@ -1507,7 +1498,7 @@ static const struct dev_pm_ops bcm_pm_ops = {
static struct platform_driver bcm_driver = {
.probe = bcm_probe,
- .remove_new = bcm_remove,
+ .remove = bcm_remove,
.driver = {
.name = "hci_bcm",
.acpi_match_table = ACPI_PTR(bcm_acpi_match),
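
The bcm_get_txco()/bcm_get_resources() rework relies on devm_clk_get_optional() returning NULL, rather than -ENOENT, when the clock is simply not described, so a plain 'if (clk)' separates "found" from "absent" and only genuine errors (including -EPROBE_DEFER) propagate. A minimal consumer sketch under those semantics (the helper name is hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static int demo_get_lpo(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, "lpo");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real error, incl. -EPROBE_DEFER */

	*out = clk;	/* may be NULL: clock not wired up, which is fine */
	return 0;
}
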
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 9bce53e49cfa..45e6d84224ee 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -420,7 +420,7 @@ struct bcm4377_ring_state {
* payloads_dma: DMA address for payload buffer
* events: pointer to array of completions if waiting is allowed
* msgids: bitmap to keep track of used message ids
- * lock: Spinlock to protect access to ring structurs used in the irq handler
+ * lock: Spinlock to protect access to ring structures used in the irq handler
*/
struct bcm4377_transfer_ring {
enum bcm4377_transfer_ring_id ring_id;
@@ -1435,7 +1435,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
+ hci_set_quirk(bcm4377->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
@@ -2389,13 +2389,13 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->setup = bcm4377_hci_setup;
if (bcm4377->hw->broken_mws_transport_config)
- set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG);
if (bcm4377->hw->broken_ext_scan)
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
if (bcm4377->hw->broken_le_coded)
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
if (bcm4377->hw->broken_le_ext_adv_report_phy)
- set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
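
All of the set_bit()/test_bit() conversions in this series funnel through the new hci_set_quirk()/hci_test_quirk() accessors. A sketch of what they presumably wrap, assuming the quirks moved from a plain unsigned long into a bitmap field; the quirk_flags field name is an assumption, not confirmed by this diff:

#include <linux/bitops.h>
#include <net/bluetooth/hci_core.h>

/* demo_* names are hypothetical stand-ins for the hci_core.h inlines */
static inline void demo_set_quirk(struct hci_dev *hdev, int nr)
{
	set_bit(nr, hdev->quirk_flags);		/* assumed field name */
}

static inline bool demo_test_quirk(const struct hci_dev *hdev, int nr)
{
	return test_bit(nr, hdev->quirk_flags);
}
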
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 76878119d910..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -382,7 +382,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
}
if (skb_queue_empty(&bcsp->unack))
- del_timer(&bcsp->tbcsp);
+ timer_delete(&bcsp->tbcsp);
spin_unlock_irqrestore(&bcsp->unack.lock, flags);
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
@@ -688,7 +691,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
/* Arrange to retransmit all messages in the relq. */
static void bcsp_timed_event(struct timer_list *t)
{
- struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp);
+ struct bcsp_struct *bcsp = timer_container_of(bcsp, t, tbcsp);
struct hci_uart *hu = bcsp->hu;
struct sk_buff *skb;
unsigned long flags;
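
The del_timer()/from_timer() edits here (and in hci_h5.c below) are the tree-wide rename to timer_delete(), timer_delete_sync() and timer_container_of(); behaviour is unchanged. A self-contained sketch of the new spellings:

#include <linux/timer.h>

struct demo {
	struct timer_list timer;
};

static void demo_timeout(struct timer_list *t)
{
	/* recovers the wrapper struct, exactly as from_timer() did */
	struct demo *d = timer_container_of(d, t, timer);

	(void)d;
}

static void demo_stop(struct demo *d)
{
	timer_delete_sync(&d->timer);	/* was del_timer_sync() */
}
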
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index c0436881a533..d0d4420c1a0f 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -149,7 +149,7 @@ static void h5_timed_event(struct timer_list *t)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[3] = { 0x03, 0xfc };
- struct h5 *h5 = from_timer(h5, t, timer);
+ struct h5 *h5 = timer_container_of(h5, t, timer);
struct hci_uart *hu = h5->hu;
struct sk_buff *skb;
unsigned long flags;
@@ -197,7 +197,7 @@ static void h5_peer_reset(struct hci_uart *hu)
h5->state = H5_UNINITIALIZED;
- del_timer(&h5->timer);
+ timer_delete(&h5->timer);
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
@@ -254,7 +254,7 @@ static int h5_close(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
- del_timer_sync(&h5->timer);
+ timer_delete_sync(&h5->timer);
skb_queue_purge(&h5->unack);
skb_queue_purge(&h5->rel);
@@ -318,7 +318,7 @@ static void h5_pkt_cull(struct h5 *h5)
}
if (skb_queue_empty(&h5->unack))
- del_timer(&h5->timer);
+ timer_delete(&h5->timer);
unlock:
spin_unlock_irqrestore(&h5->unack.lock, flags);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 999ccd5bb4f2..9b353c3d6442 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -660,7 +660,7 @@ static int intel_setup(struct hci_uart *hu)
*/
if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* With this Intel bootloader only the hardware variant and device
@@ -1029,12 +1029,12 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
- * control working inject that event here.
+ /* When the BTINTEL_HCI_OP_RESET command is issued to boot into
+ * the operational firmware, it will actually not send a command
+ * complete event. To keep the flow control working inject that
+ * event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hu->hdev, opcode);
}
@@ -1206,7 +1206,7 @@ static void intel_remove(struct platform_device *pdev)
static struct platform_driver intel_driver = {
.probe = intel_probe,
- .remove_new = intel_remove,
+ .remove = intel_remove,
.driver = {
.name = "hci_intel",
.acpi_match_table = ACPI_PTR(intel_acpi_match),
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 395d66e32a2e..d0adae3267b4 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -594,7 +598,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is
* available.
*
- * Arguments: tty pointer to tty isntance data
+ * Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
@@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -662,13 +667,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
@@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
+ set_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
return 0;
}
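
The new HCI_UART_PROTO_INIT flag covers the window inside hci_uart_set_proto() where hu->proto is already set but HCI_UART_PROTO_READY is not, so traffic triggered during hci_uart_register_dev() (for example by a timer that proto->open() starts) is no longer dropped. The repeated two-flag test could be captured in one helper, sketched below assuming the drivers/bluetooth/hci_uart.h definitions are in scope; the helper itself is hypothetical:

static bool demo_proto_usable(struct hci_uart *hu)
{
	return test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
	       test_bit(HCI_UART_PROTO_INIT, &hu->flags);
}
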
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 4a0b5c3160c2..7044c86325ce 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -305,7 +305,7 @@ static void ll_device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
/* may be called from two simultaneous tasklets */
static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
@@ -649,11 +649,11 @@ static int ll_setup(struct hci_uart *hu)
/* This means that there was an error getting the BD address
* during probe, so mark the device as having a bad address.
*/
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
if (err)
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* Operational speed if any */
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 49bbe4975be4..cd7575c20f65 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -439,7 +439,7 @@ static int nokia_setup(struct hci_uart *hu)
if (btdev->man_id == NOKIA_ID_BCM2048) {
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
dev_dbg(dev, "bcm2048 has invalid bluetooth address!");
}
@@ -501,7 +501,7 @@ static int nokia_close(struct hci_uart *hu)
return 0;
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct nokia_bt_dev *btdev = hu->priv;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 37fddf6055be..4cff4d9be313 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -31,6 +31,7 @@
#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
@@ -228,7 +229,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
- const char *firmware_name;
+ const char *firmware_name[2];
};
static int qca_regulator_enable(struct qca_serdev *qcadev);
@@ -258,7 +259,18 @@ static const char *qca_get_firmware_name(struct hci_uart *hu)
if (hu->serdev) {
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
- return qsd->firmware_name;
+ return qsd->firmware_name[0];
+ } else {
+ return NULL;
+ }
+}
+
+static const char *qca_get_rampatch_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name[1];
} else {
return NULL;
}
@@ -332,8 +344,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
else
__serial_clock_off(hu->tty);
- BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
- vote ? "true" : "false");
+ BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
+ str_true_false(vote));
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
@@ -462,7 +474,7 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct qca_data *qca = timer_container_of(qca, t, tx_idle_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags;
@@ -495,7 +507,7 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
+ struct qca_data *qca = timer_container_of(qca, t, wake_retrans_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags, retrans_delay;
bool retransmit = false;
@@ -611,6 +623,7 @@ static int qca_open(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -854,7 +867,7 @@ static void device_woke_up(struct hci_uart *hu)
skb_queue_tail(&qca->txq, skb);
/* Switch timers and change state to HCI_IBS_TX_AWAKE */
- del_timer(&qca->wake_retrans_timer);
+ timer_delete(&qca->wake_retrans_timer);
idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
@@ -873,7 +886,7 @@ static void device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) may be called from
+/* Enqueue frame for transmission (padding, crc, etc) may be called from
* two simultaneous tasklets.
*/
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
@@ -1059,7 +1072,7 @@ static void qca_controller_memdump(struct work_struct *work)
if (!seq_no) {
/* This is the first frame of memdump packet from
- * the controller, Disable IBS to recevie dump
+ * the controller, Disable IBS to receive dump
* without any interruption; ideally the time required for
* the controller to send the dump is 8 seconds. Let us
* start a timer to handle this asynchronous activity.
@@ -1251,6 +1264,7 @@ static const struct h4_recv_pkt qca_recv_pkts[] = {
{ H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = qca_recv_event },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
@@ -1354,6 +1368,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
/* Give the controller time to process the request */
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1440,6 +1455,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1482,6 +1498,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
* changing the baudrate of chip and host.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1516,6 +1533,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1638,7 +1656,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
-static void qca_cmd_timeout(struct hci_dev *hdev)
+static void qca_reset(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1734,6 +1752,7 @@ static int qca_regulator_init(struct hci_uart *hu)
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1764,6 +1783,7 @@ static int qca_regulator_init(struct hci_uart *hu)
qca_set_speed(hu, QCA_INIT_SPEED);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1795,6 +1815,7 @@ static int qca_power_on(struct hci_dev *hdev)
return 0;
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1855,6 +1876,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
+ const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
@@ -1871,13 +1893,14 @@ static int qca_setup(struct hci_uart *hu)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
switch (soc_type) {
case QCA_QCA2066:
soc_name = "qca2066";
break;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1912,6 +1935,7 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1921,7 +1945,7 @@ retry:
case QCA_WCN7850:
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
- set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);
hci_set_aosp_capable(hdev);
@@ -1945,6 +1969,7 @@ retry:
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1963,12 +1988,12 @@ retry:
/* Setup patch / NVM configurations */
ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
- firmware_name);
+ firmware_name, rampatch_name);
if (!ret) {
clear_bit(QCA_IBS_DISABLED, &qca->flags);
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
- hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->reset = qca_reset;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
@@ -2033,6 +2058,17 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
+ .soc_type = QCA_WCN3950,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 60000 },
+ { "vddrf", 155000 },
+ { "vddch0", 585000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
.soc_type = QCA_WCN3988,
.vregs = (struct qca_vreg []) {
@@ -2202,10 +2238,10 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
+ hu->hdev->reset = NULL;
- del_timer_sync(&qca->wake_retrans_timer);
- del_timer_sync(&qca->tx_idle_timer);
+ timer_delete_sync(&qca->wake_retrans_timer);
+ timer_delete_sync(&qca->tx_idle_timer);
/* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME
@@ -2294,13 +2330,6 @@ static int qca_init_regulators(struct qca_power *qca,
return 0;
}
-static void qca_clk_disable_unprepare(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
@@ -2316,8 +2345,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- device_property_read_string(&serdev->dev, "firmware-name",
- &qcadev->firmware_name);
+ device_property_read_string_array(&serdev->dev, "firmware-name",
+ qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
@@ -2332,6 +2361,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->btsoc_type = QCA_ROME;
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2353,26 +2383,34 @@ static int qca_serdev_probe(struct serdev_device *serdev)
switch (qcadev->btsoc_type) {
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_WCN6750:
if (!device_property_present(&serdev->dev, "enable-gpios")) {
/*
* Backward compatibility with old DT sources. If the
* node doesn't have the 'enable-gpios' property then
* let's use the power sequencer. Otherwise, let's
- * drive everything outselves.
+ * drive everything ourselves.
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
- if (IS_ERR(qcadev->bt_power->pwrseq))
- return PTR_ERR(qcadev->bt_power->pwrseq);
- break;
+ /*
+ * Some modules have BT_EN enabled via a hardware pull-up,
+ * meaning it is not defined in the DTS and is not controlled
+ * through the power sequence. In such cases, fall through
+ * to follow the legacy flow.
+ */
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ qcadev->bt_power->pwrseq = NULL;
+ else
+ break;
}
fallthrough;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- case QCA_WCN6750:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
@@ -2385,14 +2423,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en) &&
- (data->soc_type == QCA_WCN6750 ||
- data->soc_type == QCA_WCN6855)) {
- dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ if (IS_ERR(qcadev->bt_en))
+ return dev_err_probe(&serdev->dev,
+ PTR_ERR(qcadev->bt_en),
+ "failed to acquire BT_EN gpio\n");
- if (!qcadev->bt_en)
+ if (!qcadev->bt_en &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
@@ -2433,25 +2471,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->bt_en)
power_ctrl_enabled = false;
- qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
+ &serdev->dev, NULL, SUSCLK_RATE_32KHZ);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
-
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(&serdev->dev,
- qca_clk_disable_unprepare,
- qcadev->susclk);
- if (err)
- return err;
-
}
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
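
The susclk hunk above collapses devm_clk_get_optional() + clk_set_rate() + clk_prepare_enable() + a custom devm unwind action into one managed call. A minimal sketch of the consolidated idiom; the helper name is hypothetical and the 32768 Hz rate mirrors SUSCLK_RATE_32KHZ from the context:

#include <linux/clk.h>
#include <linux/device.h>

static int demo_get_susclk(struct device *dev, struct clk **out)
{
	/* gets, sets the rate, enables, and registers cleanup in one call */
	struct clk *clk = devm_clk_get_optional_enabled_with_rate(dev, NULL,
								  32768);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;	/* NULL when the clock is not described */
	return 0;
}
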
@@ -2463,7 +2488,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
hdev = qcadev->serdev_hu.hdev;
if (power_ctrl_enabled) {
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
hdev->shutdown = qca_power_off;
}
@@ -2472,11 +2497,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* be queried via hci. Same with the valid le states quirk.
*/
if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
}
return 0;
@@ -2526,11 +2551,11 @@ static void qca_serdev_shutdown(struct device *dev)
* invoked and the SOC is already in the initial state, so
* don't also need to send the VSC.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
hci_dev_test_flag(hdev, HCI_SETUP))
return;
- /* The serdev must be in open state when conrol logic arrives
+ /* The serdev must be in open state when control logic arrives
* here, so also fix the use-after-free issue caused when the
* serdev is flushed or written to after it is closed.
*/
@@ -2612,10 +2637,10 @@ static int __maybe_unused qca_suspend(struct device *dev)
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
- del_timer(&qca->wake_retrans_timer);
+ timer_delete(&qca->wake_retrans_timer);
fallthrough;
case HCI_IBS_TX_AWAKE:
- del_timer(&qca->tx_idle_timer);
+ timer_delete(&qca->tx_idle_timer);
serdev_device_write_flush(hu->serdev);
cmd = HCI_IBS_SLEEP_IND;
@@ -2690,6 +2715,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
+ { .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 89a22e9b3253..593d9cefbbf9 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -152,7 +152,7 @@ static int hci_uart_close(struct hci_dev *hdev)
* BT SOC is completely powered OFF during BT OFF, holding port
* open may drain the battery.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
@@ -358,13 +358,13 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags))
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fbf3079b92a5..cbbe79b241ce 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -90,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
+#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1
@@ -120,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -161,6 +158,10 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7651321d351c..2fef08254d78 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- char buf[80];
+ const char *buf;
- snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
+ buf = "Controller Name: vhci_ctrl\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
+ buf = "Firmware Version: vhci_fw\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
+ buf = "Driver: vhci_drv\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Vendor: vhci\n");
+ buf = "Vendor: vhci\n";
skb_put_data(skb, buf, strlen(buf));
}
@@ -316,7 +316,7 @@ static inline void force_devcd_timeout(struct hci_dev *hdev,
unsigned int timeout)
{
#ifdef CONFIG_DEV_COREDUMP
- hdev->dump.timeout = msecs_to_jiffies(timeout * 1000);
+ hdev->dump.timeout = secs_to_jiffies(timeout);
#endif
}
@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
.write = force_devcd_write,
};
+static void vhci_debugfs_init(struct vhci_data *data)
+{
+ struct hci_dev *hdev = data->hdev;
+
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
+ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+ &force_devcoredump_fops);
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -415,15 +437,16 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
+ hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED);
/* bit 6 is for external configuration */
if (opcode & 0x40)
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
/* bit 7 is for raw device */
if (opcode & 0x80)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
@@ -433,22 +456,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
- debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
- &force_suspend_fops);
-
- debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
- &force_wakeup_fops);
-
- if (IS_ENABLED(CONFIG_BT_MSFTEXT))
- debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
- &msft_opcode_fops);
-
- if (IS_ENABLED(CONFIG_BT_AOSPEXT))
- debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
- &aosp_capable_fops);
-
- debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
- &force_devcoredump_fops);
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_init(data);
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
@@ -645,11 +654,26 @@ static int vhci_open(struct inode *inode, struct file *file)
file->private_data = data;
nonseekable_open(inode, file);
- schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+ schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1));
return 0;
}
+static void vhci_debugfs_remove(struct hci_dev *hdev)
+{
+ debugfs_lookup_and_remove("force_suspend", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
+}
+
static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;
@@ -661,6 +685,8 @@ static int vhci_release(struct inode *inode, struct file *file)
hdev = data->hdev;
if (hdev) {
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_remove(hdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
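
Two helper swaps in hci_vhci.c are worth noting: secs_to_jiffies() replaces the manual msecs_to_jiffies(n * 1000) scaling, and teardown uses debugfs_lookup_and_remove(), which both finds the entry and drops the dentry reference a bare debugfs_lookup() would leave behind. A small combined sketch (the function name is hypothetical):

#include <linux/debugfs.h>
#include <linux/jiffies.h>

static void demo_cleanup(struct dentry *parent)
{
	/* equivalent to msecs_to_jiffies(1 * 1000), with the unit stated */
	unsigned long timeout = secs_to_jiffies(1);

	debugfs_lookup_and_remove("force_suspend", parent);
	(void)timeout;
}
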
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 756f292df9e8..6f1a37e85c6a 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -327,17 +327,17 @@ static int virtbt_probe(struct virtio_device *vdev)
hdev->setup = virtbt_setup_intel;
hdev->shutdown = virtbt_shutdown_generic;
hdev->set_bdaddr = virtbt_set_bdaddr_intel;
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
case VIRTIO_BT_CONFIG_VENDOR_REALTEK:
hdev->manufacturer = 93;
hdev->setup = virtbt_setup_realtek;
hdev->shutdown = virtbt_shutdown_generic;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
}
}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index ff669a8ccad9..fe7600283e70 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -87,6 +87,12 @@ config HISILICON_LPC
Driver to enable I/O access to devices attached to the Low Pin
Count bus on the HiSilicon Hip06/7 SoC.
+config IMX_AIPSTZ
+ tristate "Support for IMX Secure AHB to IP Slave bus (AIPSTZ) bridge"
+ depends on ARCH_MXC
+ help
+	  Enable support for the i.MX AIPSTZ bridge.
+
config IMX_WEIM
bool "Freescale EIM DRIVER"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index cddd4984d6af..8e693fe8a03a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_BT1_APB) += bt1-apb.o
obj-$(CONFIG_BT1_AXI) += bt1-axi.o
+obj-$(CONFIG_IMX_AIPSTZ) += imx-aipstz.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_INTEL_IXP4XX_EB) += intel-ixp4xx-eb.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
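
The brcmstb_gisb change swaps a hand-rolled sysfs_create_group() call for the ATTRIBUTE_GROUPS() helper plus .dev_groups, letting the driver core create and remove the attributes around probe/remove with no error unwinding left in the driver. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static struct attribute *demo_attrs[] = {
	/* &dev_attr_foo.attr, ... */
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* defines demo_groups[] */

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.dev_groups = demo_groups,	/* core manages the files */
	},
};
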
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
index 5fbd0dbde24a..7816c0a728ef 100644
--- a/drivers/bus/fsl-mc/dpmcp.c
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -75,25 +75,3 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index b5e8c021fa1f..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
@@ -656,8 +635,3 @@ int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-
-void fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 930d8a3ba722..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- if (!ret && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -175,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
@@ -202,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
@@ -320,90 +321,90 @@ const struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
@@ -905,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
@@ -940,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
+ struct fsl_mc_bus *mc_bus;
int state, err = 0;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -963,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ if (endpoint)
+ return endpoint;
/*
* We know that the device has an endpoint because we verified by
@@ -970,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
* yet discovered by the fsl-mc bus, thus the lookup returned NULL.
* Force a rescan of the devices in this container and retry the lookup.
*/
- if (!endpoint) {
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- if (mutex_trylock(&mc_bus->scan_mutex)) {
- err = dprc_scan_objects(mc_bus_dev, true);
- mutex_unlock(&mc_bus->scan_mutex);
- }
-
- if (err < 0)
- return ERR_PTR(err);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
}
+ if (err < 0)
+ return ERR_PTR(err);
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
@@ -1102,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
@@ -1210,7 +1215,7 @@ static struct platform_driver fsl_mc_bus_driver = {
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
- .remove_new = fsl_mc_bus_remove,
+ .remove = fsl_mc_bus_remove,
.shutdown = fsl_mc_bus_remove,
};
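
The fsl_mc_dma_configure() fix matters because .dma_configure can be invoked from the IOMMU layer before any driver is bound, so dev->driver is sampled once with READ_ONCE() and NULL-checked instead of being dereferenced unconditionally. Sketched below, assuming the fsl-mc bus headers are in scope:

static bool demo_driver_managed_dma(struct device *dev)
{
	const struct device_driver *drv = READ_ONCE(dev->driver);

	/* no driver bound yet, e.g. when called from the IOMMU layer */
	return drv && to_fsl_mc_driver(drv)->driver_managed_dma;
}
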
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index b3520ea1b9f4..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -66,10 +66,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
/*
* Data Path Resource Container (DPRC) API
*/
@@ -631,12 +627,8 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
int __init fsl_mc_allocator_driver_init(void);
-void fsl_mc_allocator_driver_exit(void);
-
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 95b10a6cf307..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
@@ -263,23 +270,3 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 09340adbacc2..53dd1573e323 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -689,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
- .remove_new = hisi_lpc_remove,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-aipstz.c b/drivers/bus/imx-aipstz.c
new file mode 100644
index 000000000000..5fdf377f5d06
--- /dev/null
+++ b/drivers/bus/imx-aipstz.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define IMX_AIPSTZ_MPR0 0x0
+
+struct imx_aipstz_config {
+ u32 mpr0;
+};
+
+struct imx_aipstz_data {
+ void __iomem *base;
+ const struct imx_aipstz_config *default_cfg;
+};
+
+static void imx_aipstz_apply_default(struct imx_aipstz_data *data)
+{
+ writel(data->default_cfg->mpr0, data->base + IMX_AIPSTZ_MPR0);
+}
+
+static const struct of_device_id imx_aipstz_match_table[] = {
+ { .compatible = "simple-bus", },
+ { }
+};
+
+static int imx_aipstz_probe(struct platform_device *pdev)
+{
+ struct imx_aipstz_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to allocate data memory\n");
+
+ data->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(data->base))
+		return dev_err_probe(&pdev->dev, PTR_ERR(data->base),
+ "failed to get/ioremap AC memory\n");
+
+ data->default_cfg = of_device_get_match_data(&pdev->dev);
+
+ imx_aipstz_apply_default(data);
+
+ dev_set_drvdata(&pdev->dev, data);
+
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
+ return of_platform_populate(pdev->dev.of_node, imx_aipstz_match_table,
+ NULL, &pdev->dev);
+}
+
+static void imx_aipstz_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+}
+
+static int imx_aipstz_runtime_resume(struct device *dev)
+{
+ struct imx_aipstz_data *data = dev_get_drvdata(dev);
+
+ /* restore potentially lost configuration during domain power-off */
+ imx_aipstz_apply_default(data);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_aipstz_pm_ops = {
+ RUNTIME_PM_OPS(NULL, imx_aipstz_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+/*
+ * The following configuration is equivalent to:
+ * masters 0-7 => trusted for R/W + use AHB's HPROT[1] to determine privilege
+ */
+static const struct imx_aipstz_config imx8mp_aipstz_default_cfg = {
+ .mpr0 = 0x77777777,
+};
+
+static const struct of_device_id imx_aipstz_of_ids[] = {
+ { .compatible = "fsl,imx8mp-aipstz", .data = &imx8mp_aipstz_default_cfg },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_aipstz_of_ids);
+
+static struct platform_driver imx_aipstz_of_driver = {
+ .probe = imx_aipstz_probe,
+ .remove = imx_aipstz_remove,
+ .driver = {
+ .name = "imx-aipstz",
+ .of_match_table = imx_aipstz_of_ids,
+ .pm = pm_ptr(&imx_aipstz_pm_ops),
+ },
+};
+module_platform_driver(imx_aipstz_of_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IMX secure AHB to IP Slave bus (AIPSTZ) bridge driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
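
For the new AIPSTZ driver, the 0x77777777 default packs one 4-bit access-control field per bus master into MPR0. A hedged sketch of the encoding follows; the exact layout (master 0 in the most significant nibble, 0x7 meaning trusted for reads and writes with HPROT[1]-derived privilege) is an assumption based on the comment in the driver, not confirmed by this diff:

#include <linux/types.h>

/* hypothetical helper: place a 4-bit MPROT value for the given master */
#define DEMO_AIPSTZ_MPROT(master, val)	((u32)(val) << (28 - 4 * (master)))

static u32 demo_default_mpr0(void)
{
	u32 mpr0 = 0;
	int m;

	for (m = 0; m < 8; m++)
		mpr0 |= DEMO_AIPSTZ_MPROT(m, 0x7);	/* yields 0x77777777 */

	return mpr0;
}
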
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index b3eafcf2a2c5..cdea24e92919 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t tr_len, read_offset, write_offset;
+ size_t tr_len, read_offset;
struct mhi_ep_buf_info buf_info = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
- bool tr_done = false;
void *buf_addr;
- u32 buf_left;
int ret;
- buf_left = len;
-
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
- tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ tr_len = min(len, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
- tr_len = min(buf_left, mhi_chan->tre_size);
+ tr_len = min(len, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
- write_offset = len - buf_left;
buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr;
buf_info.size = tr_len;
buf_info.cb = mhi_ep_read_completion;
buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_buf_addr;
}
- buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- if (!mhi_chan->tre_bytes_left) {
- if (MHI_TRE_DATA_GET_IEOT(el))
- tr_done = true;
-
+ if (!mhi_chan->tre_bytes_left)
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- }
- } while (buf_left && !tr_done);
+	/* Keep reading elements until the transfer ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
return 0;
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- return ret;
- }
-
- /* Read until the ring becomes empty */
- } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ return ret;
+ }
}
return 0;
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index aeb53b2c34a8..26357ee68dee 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
}
old_offset = ring->rd_offset;
- mhi_ep_ring_inc_index(ring);
dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
+
+ ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ if (ret)
+ return ret;
+
+ mhi_ep_ring_inc_index(ring);
/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
- buf_info.dev_addr = el;
- buf_info.size = sizeof(*el);
-
- return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ return ret;
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index dedd29ca8db3..205d83ac069f 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = mhi_buf->len;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(mhi_buf->len);
}
dev_dbg(dev, "BHIe programming for RDDM\n");
@@ -82,9 +82,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
* other cores to shutdown while we're collecting RDDM buffer. After
* returning from this function, we expect the device to reset.
*
- * Normaly, we read/write pm_state only after grabbing the
+ * Normally, we read/write pm_state only after grabbing the
* pm_lock, since we're in a panic, skipping it. Also there is no
- * gurantee that this state change would take effect since
+ * guarantee that this state change would take effect since
* we're setting it w/o grabbing pm_lock
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
@@ -177,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ int ret, i;
+ u32 val;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+}
+
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
@@ -226,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
}
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
- dma_addr_t dma_addr,
- size_t size)
+ const struct mhi_buf *mhi_buf)
{
- u32 tx_status, val, session_id;
- int i, ret;
- void __iomem *base = mhi_cntrl->bhi;
- rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct {
- char *name;
- u32 offset;
- } error_reg[] = {
- { "ERROR_CODE", BHI_ERRCODE },
- { "ERROR_DBG1", BHI_ERRDBG1 },
- { "ERROR_DBG2", BHI_ERRDBG2 },
- { "ERROR_DBG3", BHI_ERRDBG3 },
- { NULL },
- };
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ u32 tx_status, session_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -255,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
- upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
- lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -274,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
- read_lock_bh(pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- for (i = 0; error_reg[i].name; i++) {
- ret = mhi_read_reg(mhi_cntrl, base,
- error_reg[i].offset, &val);
- if (ret)
- break;
- dev_err(dev, "Reg: %s value: 0x%x\n",
- error_reg[i].name, val);
- }
- }
- read_unlock_bh(pm_lock);
+ mhi_fw_load_error_dump(mhi_cntrl);
goto invalid_pm_state;
}
@@ -296,6 +302,16 @@ invalid_pm_state:
return -EIO;
}
+static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
@@ -310,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
kfree(image_info);
}
+static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entry */
+ img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+
+ mhi_buf->len = alloc_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ img_info->bhi_vec = NULL;
+ img_info->entries = 1;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ kfree(mhi_buf);
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
@@ -357,6 +412,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
@@ -364,9 +420,9 @@ error_alloc_mhi_buf:
return -ENOMEM;
}
-static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const u8 *buf, size_t remainder,
- struct image_info *img_info)
+static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
@@ -375,8 +431,8 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
while (remainder) {
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = to_cpy;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(to_cpy);
buf += to_cpy;
remainder -= to_cpy;
@@ -385,15 +441,61 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
+static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_download) {
+ return MHI_FW_LOAD_FBC;
+ } else {
+ if (mhi_cntrl->seg_len)
+ return MHI_FW_LOAD_BHIE;
+ else
+ return MHI_FW_LOAD_BHI;
+ }
+}
+
+static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ /* Load the firmware into BHI vec table */
+ memcpy(image->mhi_buf->buf, fw_data, size);
+
+ ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhi_buffer(mhi_cntrl, image);
+
+ return ret;
+}
+
+static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);
+
+ ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhie_table(mhi_cntrl, image);
+
+ return ret;
+}
+
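For illustration, the load type selected above is derived from two existing struct mhi_controller fields; a hypothetical controller configuration that would take the new BHIe-only path might look like the following sketch (the values and firmware path are assumptions, not taken from any real driver):

	/* Hypothetical sketch: opt a controller into MHI_FW_LOAD_BHIE.
	 * fbc_download and seg_len are existing struct mhi_controller
	 * fields; the values below are illustrative only.
	 */
	mhi_cntrl->fbc_download = false;   /* not FBC (BHI in PBL + BHIe in SBL) */
	mhi_cntrl->seg_len = SZ_512K;      /* non-zero seg_len selects pure BHIe */
	mhi_cntrl->fw_image = "qcom/example/firmware.mbn"; /* placeholder path */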
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_fw_load_type fw_load_type;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
- void *buf;
- dma_addr_t dma_addr;
size_t size, fw_sz;
int ret;
@@ -452,21 +554,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_sz = firmware->size;
skip_req_fw:
- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
- GFP_KERNEL);
- if (!buf) {
- release_firmware(firmware);
- goto error_fw_load;
- }
-
- /* Download image using BHI */
- memcpy(buf, fw_data, size);
- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+ fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
+ if (fw_load_type == MHI_FW_LOAD_BHIE)
+ ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
+ else
+ ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);
/* Error or in EDL mode, we're done */
if (ret) {
- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
+ fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
+ ret);
release_firmware(firmware);
goto error_fw_load;
}
@@ -485,7 +583,7 @@ skip_req_fw:
* If we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
@@ -493,7 +591,7 @@ skip_req_fw:
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
@@ -510,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (mhi_cntrl->fbc_download) {
+ if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
index cfec7811dfbb..39e45748a24c 100644
--- a/drivers/bus/mhi/host/debugfs.c
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include "internal.h"
static int mhi_debugfs_states_show(struct seq_file *m, void *d)
@@ -22,7 +23,7 @@ static int mhi_debugfs_states_show(struct seq_file *m, void *d)
mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_cntrl->wake_set ? "true" : "false");
+ str_true_false(mhi_cntrl->wake_set));
/* counters */
seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index a9b1f8beee7b..099be8dd1900 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -176,7 +176,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
return 0;
}
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
@@ -191,10 +191,9 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
@@ -221,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
- dev_err(dev, "irq %d not available for event ring\n",
+ dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
@@ -232,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
irq_flags,
"mhi", mhi_event);
if (ret) {
- dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
@@ -254,7 +253,7 @@ error_request:
return ret;
}
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
@@ -299,7 +298,7 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
mhi_cntrl->mhi_ctxt = NULL;
}
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
@@ -1144,7 +1143,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index d057e877932e..7937bb1f742c 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -25,8 +25,15 @@ struct mhi_ctxt {
};
struct bhi_vec_entry {
- u64 dma_addr;
- u64 size;
+ __le64 dma_addr;
+ __le64 size;
+};
+
+enum mhi_fw_load_type {
+ MHI_FW_LOAD_BHI, /* BHI only in PBL */
+ MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
+ MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
+ MHI_FW_LOAD_MAX,
};
enum mhi_ch_state_type {
@@ -163,6 +170,8 @@ enum mhi_pm_state {
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
+#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
+ (pm_state >= MHI_PM_SYS_ERR_FAIL))
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
@@ -255,7 +264,7 @@ struct mhi_chan {
/*
* Important: When consuming, increment tre_ring first and when
* releasing, decrement buf_ring first. If tre_ring has space, buf_ring
- * is guranteed to have space so we do not need to check both rings.
+ * is guaranteed to have space so we do not need to check both rings.
*/
struct mhi_ring buf_ring;
struct mhi_ring tre_ring;
@@ -376,19 +385,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan, unsigned int flags);
-
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -403,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 4de75674f193..861551274319 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
wake_up_all(&mhi_cntrl->state_event);
}
break;
@@ -602,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
struct mhi_ring_element *local_rp, *ev_tre;
- void *dev_rp;
+ void *dev_rp, *next_rp;
struct mhi_buf_info *buf_info;
u16 xfer_len;
@@ -621,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
result.dir = mhi_chan->dir;
local_rp = tre_ring->rp;
+
+ next_rp = local_rp + 1;
+ if (next_rp >= tre_ring->base + tre_ring->len)
+ next_rp = tre_ring->base;
+ if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points to an unexpected TRE\n");
+ break;
+ }
+
while (local_rp != dev_rp) {
buf_info = buf_ring->rp;
/* If it's the last TRE, get length from the event */
@@ -1181,25 +1192,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
-{
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
-
- buf_info.p_addr = mhi_buf->dma_addr;
- buf_info.cb_buf = mhi_buf;
- buf_info.pre_mapped = true;
- buf_info.len = len;
-
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
-
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
-}
-EXPORT_SYMBOL_GPL(mhi_queue_dma);
-
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_buf_info *info, enum mhi_flags flags)
{
@@ -1207,11 +1199,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
- int ret;
+ int ret = 0;
/* Protect accesses for reading and incrementing WP */
write_lock_bh(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1229,10 +1226,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret) {
- write_unlock_bh(&mhi_chan->lock);
- return ret;
- }
+ if (ret)
+ goto out;
}
eob = !!(flags & MHI_EOB);
@@ -1250,9 +1245,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+out:
write_unlock_bh(&mhi_chan->lock);
- return 0;
+ return ret;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
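For callers, the new state check surfaces as a fast -ENODEV from the queueing APIs; a minimal client sketch, assuming a typical mhi_queue_buf() user (the error handling shown is illustrative):

	/* Hypothetical client sketch: with the ENABLED-state check in
	 * mhi_gen_tre(), a queue attempt on a torn-down channel now fails
	 * fast instead of writing a TRE into a dying ring.
	 */
	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
	if (ret == -ENODEV) {
		/* Channel left MHI_CH_STATE_ENABLED under us; drop the
		 * buffer or requeue after the channel is prepared again.
		 */
		dev_dbg(&mhi_dev->dev, "channel disabled, dropping buffer\n");
	}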
@@ -1450,7 +1446,7 @@ exit_unprepare_channel:
mutex_unlock(&mhi_chan->mutex);
}
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9938bb034c1c..b188bbf7de04 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -34,26 +34,34 @@
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
+ * @vf_config: MHI controller configuration for virtual functions (optional)
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
* @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @vf_dma_data_width: DMA transfer word size for VFs (optional)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
+ * @no_m3: M3 not supported
+ * @reset_on_remove: Set true for devices that require a SoC reset during driver removal
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
+ const struct mhi_controller_config *vf_config;
const char *name;
const char *fw;
const char *edl;
bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int vf_dma_data_width;
unsigned int mru_default;
bool sideband_wake;
+ bool no_m3;
+ bool reset_on_remove;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -245,6 +253,74 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .vf_dma_data_width = 40,
+ .sideband_wake = false,
+ .no_m3 = true,
+ .reset_on_remove = true,
+};
+
+static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 2048, 1),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 2048, 2),
+};
+
+static struct mhi_event_config mhi_qcom_sa8775p_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 64),
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -275,6 +351,15 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config mhi_qcom_sa8775p_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_sa8775p_channels),
+ .ch_cfg = mhi_qcom_sa8775p_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_sa8775p_events),
+ .event_cfg = mhi_qcom_sa8775p_events,
+};
+
static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -294,6 +379,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sa8775p_info = {
+ .name = "qcom-sa8775p",
+ .edl_trigger = false,
+ .config = &mhi_qcom_sa8775p_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
.name = "qcom-sdx75m",
.fw = "qcom/sdx75m/xbl.elf",
@@ -406,6 +501,23 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
+static const struct mhi_channel_config mhi_foxconn_sdx61_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(50, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(51, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
@@ -422,6 +534,15 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
+static const struct mhi_controller_config modem_foxconn_sdx61_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx61_channels),
+ .ch_cfg = mhi_foxconn_sdx61_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
static const struct mhi_controller_config modem_foxconn_sdx72_config = {
.max_channels = 128,
.timeout_ms = 20000,
@@ -509,8 +630,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
- .name = "foxconn-t99w515",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w640_info = {
+ .name = "foxconn-t99w640",
.edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
@@ -531,6 +652,17 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w696_info = {
+ .name = "foxconn-t99w696",
+ .edl = "qcom/sdx61/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -611,6 +743,7 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.config = &modem_sierra_em919x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -698,6 +831,52 @@ static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
.mru_default = 32768,
};
+static const struct mhi_channel_config mhi_telit_fn920c04_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+};
+
+static const struct mhi_controller_config modem_telit_fn920c04_config = {
+ .max_channels = 128,
+ .timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn920c04_channels),
+ .ch_cfg = mhi_telit_fn920c04_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = {
+ .name = "telit-fn920c04",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990b40_info = {
+ .name = "telit-fn990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
.name = "netprisma-lcur57",
.edl = "qcom/prog_firehose_sdx24.mbn",
@@ -720,6 +899,11 @@ static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info },
+ /* Telit FN920C04 (sdx35) */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
@@ -727,6 +911,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* EM929x (sdx65), use the same configuration as EM919x */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x18d7, 0x0301),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
/* Telit FN980 hardware revision v1 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
@@ -738,10 +925,19 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
+ /* Foxconn T99W696, all variants */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ /* Telit FN990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -792,9 +988,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
- /* T99W515 (sdx72) */
+ /* T99W640 (sdx72) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w640_info },
/* DW5934e(sdx72), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
@@ -837,6 +1033,7 @@ struct mhi_pci_device {
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
+ bool reset_on_remove;
};
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
@@ -892,7 +1089,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
@@ -907,22 +1104,18 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
- err = pci_assign_resource(pdev, bar_num);
- if (err)
- return err;
-
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
return err;
}
- err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
- if (err) {
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
+ if (IS_ERR(mhi_cntrl->regs)) {
+ err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
return err;
}
- mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
@@ -949,7 +1142,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
@@ -1007,7 +1200,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -1034,19 +1229,24 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
{
- struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_pci_device *mhi_pdev = timer_container_of(mhi_pdev, t,
+ health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1103,6 +1303,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
+ unsigned int dma_data_width;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
@@ -1113,14 +1314,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
- timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
- mhi_cntrl_config = info->config;
+ if (pdev->is_virtfn && info->vf_config)
+ mhi_cntrl_config = info->vf_config;
+ else
+ mhi_cntrl_config = info->config;
+
+	/* Initialize the health check monitor only for physical functions */
+ if (pdev->is_physfn)
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ?
+ info->vf_dma_data_width : info->dma_data_width;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
@@ -1132,6 +1343,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->mru = info->mru_default;
mhi_cntrl->name = info->name;
+ if (pdev->is_physfn)
+ mhi_pdev->reset_on_remove = info->reset_on_remove;
+
if (info->edl_trigger)
mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
@@ -1141,7 +1355,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
- err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width));
if (err)
return err;
@@ -1178,10 +1392,11 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
- /* Only allow runtime-suspend if PME capable (for wakeup) */
- if (pci_pme_capable(pdev, PCI_D3hot)) {
+ /* Allow runtime suspend only if both PME from D3Hot and M3 are supported */
+ if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) {
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -1203,7 +1418,10 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer_sync(&mhi_pdev->health_check_timer);
+ pci_disable_sriov(pdev);
+
+ if (pdev->is_physfn)
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1215,6 +1433,9 @@ static void mhi_pci_remove(struct pci_dev *pdev)
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
+ if (mhi_pdev->reset_on_remove)
+ mhi_soc_reset(mhi_cntrl);
+
mhi_unregister_controller(mhi_cntrl);
}
@@ -1231,7 +1452,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1276,7 +1498,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev)
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
@@ -1341,7 +1564,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1392,7 +1617,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
}
/* Resume health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
@@ -1478,7 +1704,8 @@ static struct pci_driver mhi_pci_driver = {
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
- .driver.pm = &mhi_pci_pm_ops
+ .driver.pm = &mhi_pci_pm_ops,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(mhi_pci_driver);
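With pci_sriov_configure_simple() wired up as .sriov_configure, virtual functions are created through the standard SR-IOV sysfs attribute; a minimal userspace sketch, assuming a placeholder PCI address:

	/* Hypothetical userspace sketch: enable one virtual function via
	 * sysfs. "0000:01:00.0" is a placeholder, not a real device.
	 */
	#include <fcntl.h>
	#include <unistd.h>

	static int enable_one_vf(void)
	{
		int fd = open("/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}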
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 11c0e751f223..b4ef115189b5 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -418,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@@ -602,6 +603,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ bool reset_device = false;
int ret, i;
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
@@ -630,8 +632,25 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
- /* Trigger MHI RESET so that the device will not access host memory */
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (MHI_REG_ACCESS_VALID(prev_state)) {
+ /*
+ * If the device is in PBL or SBL, it will only respond to
+ * RESET if the device is in SYSERR state. SYSERR might
+ * already be cleared at this point.
+ */
+ enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
+ enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (cur_state == MHI_STATE_SYS_ERR)
+ reset_device = true;
+ else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
+ reset_device = true;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (reset_device) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
@@ -813,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
+
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
@@ -822,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
@@ -1224,6 +1246,8 @@ static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (destroy_device)
mhi_queue_state_transition(mhi_cntrl,
DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
@@ -1263,7 +1287,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
@@ -1296,20 +1320,6 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
-void mhi_device_get(struct mhi_device *mhi_dev)
-{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-
- mhi_dev->dev_wake++;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- mhi_cntrl->wake_get(mhi_cntrl, true);
- read_unlock_bh(&mhi_cntrl->pm_lock);
-}
-EXPORT_SYMBOL_GPL(mhi_device_get);
-
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1336,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
+
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ char *buf[2];
+ int ret;
+
+ buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
+ buf[1] = NULL;
+
+ if (!buf[0])
+ return;
+
+ ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
+ if (ret)
+ dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
+
+ kfree(buf[0]);
+}
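Userspace can pick up these KOBJ_CHANGE events through the normal uevent path; a minimal libudev monitor sketch, assuming only the EXEC_ENV property introduced above (no subsystem filtering, for brevity):

	/* Hypothetical userspace sketch: watch for the EXEC_ENV uevents
	 * emitted by mhi_uevent_notify(). Build with -ludev.
	 */
	#include <libudev.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct udev *udev = udev_new();
		struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
		struct pollfd pfd;

		udev_monitor_enable_receiving(mon);
		pfd.fd = udev_monitor_get_fd(mon);
		pfd.events = POLLIN;

		while (poll(&pfd, 1, -1) > 0) {
			struct udev_device *dev = udev_monitor_receive_device(mon);
			const char *ee;

			if (!dev)
				continue;
			ee = udev_device_get_property_value(dev, "EXEC_ENV");
			if (ee)
				printf("MHI exec env: %s\n", ee);
			udev_device_unref(dev);
		}
		udev_monitor_unref(mon);
		udev_unref(udev);
		return 0;
	}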
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
index 95613c8ebe06..3e0c41777429 100644
--- a/drivers/bus/mhi/host/trace.h
+++ b/drivers/bus/mhi/host/trace.h
@@ -9,6 +9,7 @@
#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENT_MHI_HOST_H
+#include <linux/byteorder/generic.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "../common.h"
@@ -97,18 +98,18 @@ TRACE_EVENT(mhi_gen_tre,
__string(name, mhi_cntrl->mhi_dev->name)
__field(int, ch_num)
__field(void *, wp)
- __field(__le64, tre_ptr)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint64_t, tre_ptr)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
),
TP_fast_assign(
__assign_str(name);
__entry->ch_num = mhi_chan->chan;
__entry->wp = mhi_tre;
- __entry->tre_ptr = mhi_tre->ptr;
- __entry->dword0 = mhi_tre->dword[0];
- __entry->dword1 = mhi_tre->dword[1];
+ __entry->tre_ptr = le64_to_cpu(mhi_tre->ptr);
+ __entry->dword0 = le32_to_cpu(mhi_tre->dword[0]);
+ __entry->dword1 = le32_to_cpu(mhi_tre->dword[1]);
),
TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
@@ -176,19 +177,19 @@ DECLARE_EVENT_CLASS(mhi_process_event_ring,
TP_STRUCT__entry(
__string(name, mhi_cntrl->mhi_dev->name)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
__field(int, state)
- __field(__le64, ptr)
+ __field(uint64_t, ptr)
__field(void *, rp)
),
TP_fast_assign(
__assign_str(name);
__entry->rp = rp;
- __entry->ptr = rp->ptr;
- __entry->dword0 = rp->dword[0];
- __entry->dword1 = rp->dword[1];
+ __entry->ptr = le64_to_cpu(rp->ptr);
+ __entry->dword0 = le32_to_cpu(rp->dword[0]);
+ __entry->dword1 = le32_to_cpu(rp->dword[1]);
__entry->state = MHI_TRE_GET_EV_STATE(rp);
),
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..7ce61d629a87 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
@@ -737,9 +737,8 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(dev_fwnode(moxtet->dev), MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 7d7479ba0a75..e4dfda7b3b10 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove_new = omap_ocp2scp_remove,
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index ee6d29925e4d..7f0a8f8b3f4c 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -273,7 +273,7 @@ static void omap3_l3_remove(struct platform_device *pdev)
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
- .remove_new = omap3_l3_remove,
+ .remove = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 5931974a21fa..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
@@ -373,7 +377,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
- .remove_new = qcom_ssc_block_bus_remove,
+ .remove = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 50870c827889..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
@@ -128,7 +148,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
- .remove_new = simple_pm_bus_remove,
+ .remove = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 3339311ce068..dfe588179aca 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -36,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = {
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
- .remove_new = sun50i_de2_bus_remove,
+ .remove = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index a89d78925637..7a33c3b31d1e 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -832,7 +832,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
- .remove_new = sunxi_rsb_remove,
+ .remove = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index de80008bff92..90e3b0a10816 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
- .remove_new = tegra_aconnect_remove,
+ .remove = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index f5d6414df9f2..9c09141961d8 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -303,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
- .remove_new = tegra_gmi_remove,
+ .remove = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
index 4969c556e752..1f2cab91e438 100644
--- a/drivers/bus/ti-pwmss.c
+++ b/drivers/bus/ti-pwmss.c
@@ -44,7 +44,7 @@ static struct platform_driver pwmss_driver = {
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
- .remove_new = pwmss_remove,
+ .remove = pwmss_remove,
};
module_platform_driver(pwmss_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 270a94a06e05..5566ad11399e 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+ * Clearing the SYSC_PRUSS_STANDBY_INIT bit - Updates OCP master
+ * port configuration to enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -2202,9 +2170,8 @@ static int sysc_reset(struct sysc *ddata)
static int sysc_init_module(struct sysc *ddata)
{
bool rstctrl_deasserted = false;
- int error = 0;
+ int error = sysc_clockdomain_init(ddata);
- error = sysc_clockdomain_init(ddata);
if (error)
return error;
@@ -3345,7 +3312,7 @@ MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
- .remove_new = sysc_remove,
+ .remove = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
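The PRUSS hunk above pairs a module_enable_quirk with the existing disable quirk: enable clears SYSC_PRUSS_STANDBY_INIT so the OCP master port can reach memory outside the PRU-ICSS, and disable sets it back. A hedged sketch of how such hook pairs are typically driven (names illustrative, not the file's exact call sites):

/* Sketch of paired enable/disable quirk hooks, modeled on the
 * pointers set in sysc_init_module_quirks() above. */
struct sysc_like {
	void (*module_enable_quirk)(struct sysc_like *ddata);
	void (*module_disable_quirk)(struct sysc_like *ddata);
};

static void enable_module(struct sysc_like *d)
{
	/* ...program SYSCONFIG for the enabled state... */
	if (d->module_enable_quirk)
		d->module_enable_quirk(d);	/* e.g. clear STANDBY_INIT */
}

static void disable_module(struct sysc_like *d)
{
	if (d->module_disable_quirk)
		d->module_disable_quirk(d);	/* e.g. set STANDBY_INIT again */
	/* ...program SYSCONFIG for the idle state... */
}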
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index b8af44c5cdbd..2328c48b9b12 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -336,7 +336,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
- .remove_new = ts_nbus_remove,
+ .remove = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
@@ -149,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
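The ccache_flush_range() change above replaces a full barrier per flush write with one barrier on each side of the loop and relaxed MMIO stores in between; only the ordering of the whole sequence against surrounding memory accesses matters, not store-to-store ordering within it. A sketch of the pattern, assuming a 64-bit build for writeq_relaxed() and illustrative register/stride parameters:

#include <linux/io.h>
#include <linux/types.h>

static void flush_lines(void __iomem *flush_reg, phys_addr_t start,
			phys_addr_t end, unsigned long stride)
{
	phys_addr_t line;

	mb();	/* complete earlier accesses before the flush sequence */
	for (line = start; line < end; line += stride)
		writeq_relaxed(line, flush_reg);	/* no per-store barrier */
	mb();	/* make later accesses observe the completed flush */
}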
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 6a99a459b80b..31ba1f8c1f78 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (check_media_type == 1)
cdi->options |= (int) CDO_CHECK_TYPE;
- if (CDROM_CAN(CDC_MRW_W))
- cdi->exit = cdrom_mrw_exit;
-
if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
@@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
- if (cdi->exit)
- cdi->exit(cdi);
-
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
EXPORT_SYMBOL(unregister_cdrom);
@@ -1106,7 +1100,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
- cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+ cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
@@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi)
cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n",
cdi->name);
cdrom_dvd_rw_close_write(cdi);
+ if (CDROM_CAN(CDC_MRW_W))
+ cdrom_mrw_exit(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cd_dbg(CD_CLOSE, "Unlocking door!\n");
@@ -3612,7 +3608,7 @@ static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
}
/* Place files in /proc/sys/dev/cdrom */
-static struct ctl_table cdrom_table[] = {
+static const struct ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
@@ -3677,8 +3673,7 @@ static void cdrom_sysctl_register(void)
static void cdrom_sysctl_unregister(void)
{
- if (cdrom_sysctl_header)
- unregister_sysctl_table(cdrom_sysctl_header);
+ unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
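The cdrom_sysctl_unregister() hunk drops a redundant guard: unregister_sysctl_table() already returns early when passed a NULL header, so callers need not check. A minimal sketch, with an assumed header pointer:

#include <linux/sysctl.h>

static struct ctl_table_header *example_sysctl_header;

static void example_sysctl_unregister(void)
{
	/* before: if (example_sysctl_header) unregister_sysctl_table(...); */
	unregister_sysctl_table(example_sysctl_header);	/* NULL-safe */
}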
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 71cfe7a85913..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
@@ -847,7 +847,7 @@ static void remove_gdrom(struct platform_device *devptr)
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove_new = remove_gdrom,
+ .remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index a08958485e31..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -7,7 +7,7 @@
config CDX_BUS
bool "CDX Bus driver"
- depends on OF && ARM64
+ depends on OF && ARM64 || COMPILE_TEST
help
Driver to enable Composable DMA Transfer (CDX) Bus. CDX bus
exposes Fabric devices which use composable DMA IP to the
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 749a3295c2bd..3ca7068a3052 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -5,7 +5,7 @@
# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
#
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
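The extra quoting makes the macro expand to a C string literal, matching the kernel-wide switch of symbol namespaces from bare identifiers to strings; the EXPORT_SYMBOL_NS_GPL() and MODULE_IMPORT_NS() hunks below follow the same convention. A sketch of both sides (names illustrative):

#include <linux/module.h>

int example_helper(void);
int example_helper(void)
{
	return 0;
}
/* old: EXPORT_SYMBOL_NS_GPL(example_helper, EXAMPLE_NS); */
EXPORT_SYMBOL_NS_GPL(example_helper, "EXAMPLE_NS");

/* ...and in a consumer module: */
MODULE_IMPORT_NS("EXAMPLE_NS");	/* old: MODULE_IMPORT_NS(EXAMPLE_NS) */
MODULE_LICENSE("GPL");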
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 07371cb653d3..3d50f8cd9c0b 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -338,7 +338,10 @@ static void cdx_shutdown(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
if (cdx_drv && cdx_drv->shutdown)
cdx_drv->shutdown(cdx_dev);
}
@@ -357,7 +360,8 @@ static int cdx_dma_configure(struct device *dev)
return ret;
}
- if (!ret && !cdx_drv->driver_managed_dma) {
+ /* @cdx_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -470,8 +474,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
@@ -707,7 +715,7 @@ static const struct vm_operations_struct cdx_phys_vm_ops = {
* Return: true on success, false otherwise.
*/
static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
@@ -825,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
@@ -868,7 +876,7 @@ fail:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cdx_device_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_device_add, "CDX_BUS_CONTROLLER");
struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num)
{
@@ -915,7 +923,7 @@ device_add_fail:
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cdx_bus_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_bus_add, "CDX_BUS_CONTROLLER");
int cdx_register_controller(struct cdx_controller *cdx)
{
@@ -940,7 +948,7 @@ int cdx_register_controller(struct cdx_controller *cdx)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cdx_register_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_register_controller, "CDX_BUS_CONTROLLER");
void cdx_unregister_controller(struct cdx_controller *cdx)
{
@@ -955,7 +963,7 @@ void cdx_unregister_controller(struct cdx_controller *cdx)
mutex_unlock(&cdx_controller_lock);
}
-EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, "CDX_BUS_CONTROLLER");
static int __init cdx_bus_init(void)
{
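The driver_override_show() hunk above takes the device lock around sysfs_emit() because the store path replaces and frees the override string under that same lock. A sketch of the store side this guards against, assuming the common driver_set_override() helper (the cdx.c store path is analogous but not quoted in this hunk):

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_dev {
	struct device dev;
	const char *driver_override;
};
#define to_example_dev(d) container_of(d, struct example_dev, dev)

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct example_dev *edev = to_example_dev(dev);
	int ret;

	/* driver_set_override() swaps the string under the device lock... */
	ret = driver_set_override(dev, &edev->driver_override, buf, count);
	return ret ? : count;
}
/* ...so show must hold the same lock across sysfs_emit(), as the
 * hunk above now does, or it can read a string being freed. */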
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index e55f1716cfcb..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
struct device_node *parent_node;
struct irq_domain *parent;
- fwnode_handle = of_node_to_fwnode(np);
+ fwnode_handle = of_fwnode_handle(np);
parent_node = of_parse_phandle(np, "msi-map", 1);
if (!parent_node) {
@@ -173,7 +173,8 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return NULL;
}
- parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
@@ -189,4 +190,4 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return cdx_msi_domain;
}
-EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, "CDX_BUS_CONTROLLER");
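The added of_node_put() fixes a reference leak: of_parse_phandle() returns the node with its refcount elevated, and the caller must drop it once done. The general shape, with illustrative names:

#include <linux/errno.h>
#include <linux/of.h>

static int example_lookup(struct device_node *np)
{
	struct device_node *parent;

	parent = of_parse_phandle(np, "msi-map", 1);	/* takes a reference */
	if (!parent)
		return -ENOENT;

	/* ...use @parent, e.g. irq_find_matching_fwnode()... */

	of_node_put(parent);	/* drop the reference taken above */
	return 0;
}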
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index f8e729761aee..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -9,7 +9,7 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
- select GENERIC_MSI_IRQ
+ depends on HAS_DMA
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 201f9a6fbde7..280f207735da 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
@@ -193,21 +193,19 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
- dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
- ret = -ENODEV;
+ ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
}
ret = cdx_setup_rpmsg(pdev);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to register CDX RPMsg transport\n");
+ dev_err_probe(&pdev->dev, ret, "Failed to register CDX RPMsg transport\n");
goto cdx_rpmsg_fail;
}
- dev_info(&pdev->dev, "Successfully registered CDX controller with RPMsg as transport\n");
return 0;
cdx_rpmsg_fail:
@@ -246,33 +244,15 @@ MODULE_DEVICE_TABLE(of, cdx_match_table);
static struct platform_driver cdx_pdriver = {
.driver = {
.name = "cdx-controller",
- .pm = NULL,
.of_match_table = cdx_match_table,
},
.probe = xlnx_cdx_probe,
- .remove_new = xlnx_cdx_remove,
+ .remove = xlnx_cdx_remove,
};
-static int __init cdx_controller_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&cdx_pdriver);
- if (ret)
- pr_err("platform_driver_register() failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit cdx_controller_exit(void)
-{
- platform_driver_unregister(&cdx_pdriver);
-}
-
-module_init(cdx_controller_init);
-module_exit(cdx_controller_exit);
+module_platform_driver(cdx_pdriver);
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CDX_BUS_CONTROLLER);
+MODULE_IMPORT_NS("CDX_BUS_CONTROLLER");
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 04b578a0be17..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpdev->dst;
- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
- strlen(cdx_rpmsg_id_table[0].name));
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
if (!cdx_mcdi->ept) {
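The strscpy() fix above matters because the size argument is the destination capacity: passing strlen(src) bounds the copy by the source and, since strscpy() reserves a byte for the NUL terminator, silently drops the last character. A worked sketch with an assumed 32-byte name field:

#include <linux/string.h>

struct chinfo_like { char name[32]; };

static void copy_name(struct chinfo_like *ci, const char *src)
{
	/* wrong: strscpy(ci->name, src, strlen(src));
	 *   bounds by the source, so "mcdi_ipc" (8 chars) copies only
	 *   7 chars plus NUL and the result is truncated;
	 * right: bound by the destination capacity: */
	strscpy(ci->name, src, sizeof(ci->name));
}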
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index e760f8d347cc..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,9 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -99,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -128,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function is responsible for cleaning up the MCDI (Management Controller Driver Interface)
+ * resources associated with a cdx_mcdi structure. Also destroys the mcdi workqueue.
+ *
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -143,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -553,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -590,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int le
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -757,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
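With cdx_mcdi_init(), cdx_mcdi_rpc(), cdx_mcdi_process_cmd() and cdx_mcdi_finish() now exported and documented, the MCDI core becomes callable from other modules. A hypothetical consumer, using only the documented entry points (the command number and error handling are placeholders, not from this series):

#include <linux/cdx/mcdi.h>

static int example_attach(struct cdx_mcdi *cdx)
{
	int ret;

	ret = cdx_mcdi_init(cdx);	/* allocate iface, workqueue, locks */
	if (ret)
		return ret;		/* -ENOMEM per the kernel-doc above */

	/* Synchronous RPC: no input payload, no output expected. */
	ret = cdx_mcdi_rpc(cdx, 0 /* MC_CMD_... */, NULL, 0, NULL, 0, NULL);
	if (ret)
		cdx_mcdi_finish(cdx);	/* tear down on failure */

	return ret;
}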
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
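As the retained comment says, 64-bit MCDI fields are only 32-bit aligned, which is why MCDI_SET_QWORD()/MCDI_QWORD() split a value across two consecutive dwords rather than issuing a single 64-bit access. A worked example of the split, with an illustrative value:

#include <linux/types.h>

/* value = 0x1122334455667788 (host-endian u64) */
static void split_qword(u64 value, u32 out[2])
{
	out[0] = (u32)value;		/* low dword:  0x55667788 */
	out[1] = (u32)(value >> 32);	/* high dword: 0x11223344 */
}

static u64 join_qword(const u32 in[2])
{
	return (u64)in[1] << 32 | in[0];	/* reassemble, as MCDI_QWORD does */
}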
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7c8dd0abcfdf..d2cfc584e202 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -237,7 +237,8 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
- depends on X86_32 && PCI && INPUT
+ depends on X86_32 && PCI && INPUT && HAS_IOPORT
+ depends on ACPI_EC || !ACPI
help
This driver enables access to the Sony Programmable I/O Control
Device which can be found in many (all ?) Sony Vaio laptops.
@@ -403,7 +404,7 @@ config TELCLOCK
configuration of the telecom clock configuration settings. This
device is used for hardware synchronization across the ATCA backplane
fabric. Upon loading, the driver exports a sysfs directory,
- /sys/devices/platform/telco_clock, with a number of files for
+ /sys/devices/faux/telco_clock, with a number of files for
controlling the behavior of this hardware.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index f9bec10a6064..4312b0cc391c 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -131,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 8e41731d3642..2505df1f4e69 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,7 +16,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820/api.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/gart.h>
#include "agp.h"
@@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
-static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
- { PCI_DEVICE_CLASS(0, 0) },
- { }
-};
-
static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume);
static struct pci_driver agp_amd64_pci_driver = {
@@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = {
/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
+ struct pci_dev *pdev = NULL;
int err = 0;
if (agp_off)
@@ -767,9 +763,13 @@ int __init agp_amd64_init(void)
}
/* Look for any AGP bridge */
- agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
- err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0) {
+ for_each_pci_dev(pdev)
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
+ pci_add_dynid(&agp_amd64_pci_driver,
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor,
+ pdev->subsystem_device, 0, 0, 0);
+ if (agp_bridges_found == 0) {
pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
}
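The agp_amd64_init() rework above stops registering a promiscuous id table and attaching to every PCI device; instead it walks the PCI devices itself, checks for an AGP capability, and feeds only matching devices to the driver as dynamic IDs. A sketch of that scan pattern (the real code feeds agp_amd64_pci_driver):

#include <linux/pci.h>

static void add_agp_bridges(struct pci_driver *drv)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {	/* iterates with proper refcounting */
		if (!pci_find_capability(pdev, PCI_CAP_ID_AGP))
			continue;
		/* Dynamic ID: probe() will be called for this device. */
		pci_add_dynid(drv, pdev->vendor, pdev->device,
			      pdev->subsystem_vendor, pdev->subsystem_device,
			      0, 0, 0);
	}
}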
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ef30445527a2..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
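The new read_entry() callbacks decode a PTE back into an address plus present/local status. The i965 variant also recovers high address bits, which that generation stores in PTE bits 7:4. A worked example of the decode:

#include <linux/types.h>

static u64 decode_i965_pte(u32 pte)
{
	u64 addr = pte & ~0xfffULL;		/* page address, bits 31:12 */

	addr |= ((u64)pte & 0xf0) << 28;	/* bits 7:4 -> addr bits 35:32 */
	return addr;	/* e.g. pte 0x000010f1 -> 0xf00001000, valid bit set */
}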
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index e424360fb4a1..4787391bb6b4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
+#include <asm/msr.h>
#include "agp.h"
/* NVIDIA registers */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index e795390b070f..53ce352f7197 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -141,9 +141,6 @@ static struct apm_queue kapmd_queue;
static DEFINE_MUTEX(state_lock);
-static const char driver_version[] = "1.13"; /* no spaces */
-
-
/*
* Compatibility cruft until the IPAQ people move over to the new
@@ -435,6 +432,8 @@ static struct miscdevice apm_device = {
*/
static int proc_apm_show(struct seq_file *m, void *v)
{
+ static const char driver_version[] = "1.13"; /* no spaces */
+
struct apm_power_info info;
char *units;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 27f5f9d19531..16618079298a 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -243,11 +243,11 @@ static __poll_t dtlk_poll(struct file *file, poll_table * wait)
poll_wait(file, &dtlk_process_list, wait);
if (dtlk_has_indexing && dtlk_readable()) {
- del_timer(&dtlk_timer);
+ timer_delete(&dtlk_timer);
mask = EPOLLIN | EPOLLRDNORM;
}
if (dtlk_writeable()) {
- del_timer(&dtlk_timer);
+ timer_delete(&dtlk_timer);
mask |= EPOLLOUT | EPOLLWRNORM;
}
/* there are no exception conditions */
@@ -322,7 +322,7 @@ static int dtlk_release(struct inode *inode, struct file *file)
}
TRACE_RET;
- del_timer_sync(&dtlk_timer);
+ timer_delete_sync(&dtlk_timer);
return 0;
}
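The del_timer()/del_timer_sync() calls above become timer_delete()/timer_delete_sync(), a pure rename from the timer API cleanup; the _sync variant still waits for a running callback to finish, so it must not be called while holding a lock the callback takes. A sketch, assuming a timer set up elsewhere with timer_setup():

#include <linux/timer.h>

static struct timer_list example_timer;

static void example_stop(void)
{
	timer_delete(&example_timer);	/* was del_timer(): no wait */
}

static void example_teardown(void)
{
	/* was del_timer_sync(): also waits for a running callback */
	timer_delete_sync(&example_timer);
}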
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 4181bcc1c796..497fc167cb8c 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -167,7 +167,7 @@ static int __init hangcheck_init(void)
static void __exit hangcheck_exit(void)
{
- del_timer_sync(&hangcheck_ticktock);
+ timer_delete_sync(&hangcheck_ticktock);
printk("Hangcheck: Stopped hangcheck timer.\n");
}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e904e476e49a..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -162,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
unsigned long v;
int irq, gsi;
struct hpet_timer __iomem *timer;
@@ -723,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-static struct ctl_table hpet_table[] = {
+static const struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
@@ -866,7 +867,7 @@ int hpet_alloc(struct hpet_data *hdp)
printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
@@ -1022,8 +1023,7 @@ static int __init hpet_init(void)
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
- if (sysctl_header)
- unregister_sysctl_table(sysctl_header);
+ unregister_sysctl_table(sysctl_header);
misc_deregister(&hpet_misc);
return result;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b51d9e243f35..492a2a61a65b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -50,7 +50,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
- depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on (X86 || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
@@ -62,9 +62,22 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_AIROHA
+ tristate "Airoha True HW Random Number Generator support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on Airoha SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called airoha-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_BCM74110
+ tristate "Broadcom BCM74110 Random Number Generator support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM74110 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm74110-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
@@ -286,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -508,10 +535,10 @@ config HW_RANDOM_NPCM
If unsure, say Y.
config HW_RANDOM_KEYSTONE
+ tristate "TI Keystone NETCP SA Hardware random number generator"
depends on ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM && OF
default HW_RANDOM
- tristate "TI Keystone NETCP SA Hardware random number generator"
help
This option enables Keystone's hardware random generator.
@@ -553,15 +580,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG
module will be called arm_smccc_trng.
config HW_RANDOM_CN10K
- tristate "Marvell CN10K Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
- default HW_RANDOM if ARCH_THUNDER
- help
- This driver provides support for the True Random Number
- generator available in Marvell CN10K SoCs.
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM if ARCH_THUNDER
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
- To compile this driver as a module, choose M here.
- The module will be called cn10k_rng. If unsure, say Y.
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
config HW_RANDOM_JH7110
tristate "StarFive JH7110 Random Number Generator support"
@@ -580,7 +607,8 @@ config HW_RANDOM_ROCKCHIP
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number
- Generator hardware found on some Rockchip SoC like RK3566 or RK3568.
+ Generator hardware found on some Rockchip SoCs like RK3566, RK3568
+ or RK3588.
To compile this driver as a module, choose M here: the
module will be called rockchip-rng.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 01f012eab440..b9132b3f5d21 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c
new file mode 100644
index 000000000000..1dbfa9505c21
--- /dev/null
+++ b/drivers/char/hw_random/airoha-trng.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Christian Marangi */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#define TRNG_IP_RDY 0x800
+#define CNT_TRANS GENMASK(15, 8)
+#define SAMPLE_RDY BIT(0)
+#define TRNG_NS_SEK_AND_DAT_EN 0x804
+#define RNG_EN BIT(31) /* referenced as ring_en */
+#define RAW_DATA_EN BIT(16)
+#define TRNG_HEALTH_TEST_SW_RST 0x808
+#define SW_RST BIT(0) /* Active High */
+#define TRNG_INTR_EN 0x818
+#define INTR_MASK BIT(16)
+#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
+#define SW_STARTUP_INITR_EN BIT(1)
+#define RST_STARTUP_INITR_EN BIT(0)
+/* Notice that Health Tests are done only out of Reset and with RNG_EN */
+#define TRNG_HEALTH_TEST_STATUS 0x824
+#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
+#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
+#define SW_STARTUP_TEST_DONE BIT(21)
+#define SW_STARTUP_AP_TEST_FAIL BIT(20)
+#define SW_STARTUP_RC_TEST_FAIL BIT(19)
+#define RST_STARTUP_TEST_DONE BIT(18)
+#define RST_STARTUP_AP_TEST_FAIL BIT(17)
+#define RST_STARTUP_RC_TEST_FAIL BIT(16)
+#define RAW_DATA_VALID BIT(7)
+
+#define TRNG_RAW_DATA_OUT 0x828
+
+#define TRNG_CNT_TRANS_VALID 0x80
+#define BUSY_LOOP_SLEEP 10
+#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
+
+struct airoha_trng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct device *dev;
+
+ struct completion rng_op_done;
+};
+
+static int airoha_trng_irq_mask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_irq_unmask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val &= ~INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_init(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ int ret;
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Set out of SW Reset */
+ airoha_trng_irq_unmask(trng);
+ writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
+ if (ret <= 0) {
+ dev_err(trng->dev, "Timeout waiting for Health Check\n");
+ airoha_trng_irq_mask(trng);
+ return -ENODEV;
+ }
+
+ /* Check if Health Test Failed */
+ val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
+ if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
+ dev_err(trng->dev, "Health Check fail: %s test fail\n",
+ val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
+ return -ENODEV;
+ }
+
+ /* Check if IP is ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ val & SAMPLE_RDY, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ /* CNT_TRANS must be 0x80 for IP to be considered ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
+ 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void airoha_trng_cleanup(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val &= ~RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+}
+
+static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
+ status & RAW_DATA_VALID, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_RAW_DATA_OUT);
+
+ return 4;
+}
+
+static irqreturn_t airoha_trng_irq(int irq, void *priv)
+{
+ struct airoha_trng *trng = (struct airoha_trng *)priv;
+
+ airoha_trng_irq_mask(trng);
+ /* Just complete the task, we will read the value later */
+ complete(&trng->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_trng *trng;
+ int irq, ret;
+ u32 val;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ airoha_trng_irq_mask(trng);
+ ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
+ pdev->name, (void *)trng);
+ if (ret) {
+ dev_err(dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
+ init_completion(&trng->rng_op_done);
+
+ /* Enable interrupt for SW reset Health Check */
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= RST_STARTUP_INITR_EN;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ /* Set output to raw data */
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RAW_DATA_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ trng->dev = dev;
+ trng->rng.name = pdev->name;
+ trng->rng.init = airoha_trng_init;
+ trng->rng.cleanup = airoha_trng_cleanup;
+ trng->rng.read = airoha_trng_read;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id airoha_trng_of_match[] = {
+ { .compatible = "airoha,en7581-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
+
+static struct platform_driver airoha_trng_driver = {
+ .driver = {
+ .name = "airoha-trng",
+ .of_match_table = airoha_trng_of_match,
+ },
+ .probe = airoha_trng_probe,
+};
+
+module_platform_driver(airoha_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e9157255f851..6ed24be3481d 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -37,6 +37,7 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
bool has_half_rate;
};
@@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
int ret;
- ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ ret = pm_runtime_get_sync(trng->dev);
if (ret < 0) {
- pm_runtime_put_sync((struct device *)trng->rng.priv);
+ pm_runtime_put_sync(trng->dev);
return ret;
}
@@ -79,8 +80,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
@@ -134,9 +134,9 @@ static int atmel_trng_probe(struct platform_device *pdev)
return -ENODEV;
trng->has_half_rate = data->has_half_rate;
+ trng->dev = &pdev->dev;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
- trng->rng.priv = (unsigned long)&pdev->dev;
platform_set_drvdata(pdev, trng);
#ifndef CONFIG_PM
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove_new = atmel_trng_remove,
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),
diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c
new file mode 100644
index 000000000000..5c64148e91f1
--- /dev/null
+++ b/drivers/char/hw_random/bcm74110-rng.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Broadcom
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+#define HOST_REV_ID 0x00
+#define HOST_FIFO_DEPTH 0x04
+#define HOST_FIFO_COUNT 0x08
+#define HOST_FIFO_THRESHOLD 0x0c
+#define HOST_FIFO_DATA 0x10
+
+#define HOST_FIFO_COUNT_MASK 0xffff
+
+/* Delay range in microseconds */
+#define FIFO_DELAY_MIN_US 3
+#define FIFO_DELAY_MAX_US 7
+#define FIFO_DELAY_MAX_COUNT 10
+
+struct bcm74110_priv {
+ void __iomem *base;
+};
+
+static inline int bcm74110_rng_fifo_count(void __iomem *mem)
+{
+ return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK;
+}
+
+static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv;
+ void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT;
+ void __iomem *fd_addr = priv->base + HOST_FIFO_DATA;
+	unsigned int underrun_count = 0;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words;
+	unsigned int i;
+
+ /*
+ * We need to check how many words are available in the RNG FIFO. If
+ * there aren't any, we need to wait for some to become available.
+ */
+ while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) {
+ if (!wait)
+ return 0;
+ /*
+ * As a precaution, limit how long we wait. If the FIFO doesn't
+ * refill within the allotted time, return 0 (=no data) to the
+ * caller.
+ */
+ if (likely(underrun_count < FIFO_DELAY_MAX_COUNT))
+ usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US);
+ else
+ return 0;
+ underrun_count++;
+ }
+ if (num_words > max_words)
+ num_words = max_words;
+
+ /* Bail early if we run out of random numbers unexpectedly */
+ for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++)
+ ((u32 *)buf)[i] = readl_relaxed(fd_addr);
+
+ return i * sizeof(u32);
+}
+
+static struct hwrng bcm74110_hwrng = {
+ .read = bcm74110_rng_read,
+};
+
+static int bcm74110_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_priv *priv;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bcm74110_hwrng.name = pdev->name;
+ bcm74110_hwrng.priv = (unsigned long)priv;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rc = devm_hwrng_register(dev, &bcm74110_hwrng);
+ if (rc)
+ dev_err(dev, "hwrng registration failed (%d)\n", rc);
+ else
+ dev_info(dev, "hwrng registered\n");
+
+ return rc;
+}
+
+static const struct of_device_id bcm74110_rng_match[] = {
+ { .compatible = "brcm,bcm74110-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm74110_rng_match);
+
+static struct platform_driver bcm74110_rng_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bcm74110_rng_match,
+ },
+ .probe = bcm74110_rng_probe,
+};
+module_platform_driver(bcm74110_rng_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 4c50efc46483..a5be9258037f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -98,7 +98,6 @@ static void cc_trng_pm_put_suspend(struct device *dev)
{
int rc = 0;
- pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
if (rc)
dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
@@ -653,7 +652,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
- .remove_new = cctrng_remove,
+ .remove = cctrng_remove,
};
module_platform_driver(cctrng_driver);
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 31935316a160..3b4e78182e14 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 57c51efa5613..018316f54621 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -181,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int present;
BUG_ON(!mutex_is_locked(&reading_mutex));
- if (rng->read)
- return rng->read(rng, (void *)buffer, size, wait);
+ if (rng->read) {
+ int err;
+
+ err = rng->read(rng, buffer, size, wait);
+ if (WARN_ON_ONCE(err > 0 && err > size))
+ err = size;
+
+ return err;
+ }
if (rng->data_present)
present = rng->data_present(rng, wait);
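The core.c hunk above hardens rng_get_data() against misbehaving drivers: a .read callback must return at most size bytes (or a negative errno), and the core now clamps and warns once if a driver overshoots. On the driver side the same contract is honoured by clamping before the copy, roughly (FOO_RNG_* names are hypothetical):

	/* Compliant read: never report more than the caller asked for. */
	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
	{
		size_t n = min_t(size_t, max, FOO_RNG_FIFO_BYTES);

		memcpy_fromio(buf, foo_base + FOO_RNG_DATA, n);
		return n;
	}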
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 9f039fddaee3..02e207c09e81 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -335,7 +335,7 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
- .remove_new = exynos_trng_remove,
+ .remove = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);
diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c
index f652e1135e4b..1b91e88cc4c0 100644
--- a/drivers/char/hw_random/histb-rng.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf)
struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_rng_get_depth(base));
+ return sprintf(buf, "%u\n", histb_rng_get_depth(base));
}
static ssize_t
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 118a72acb99b..241664a9b5d9 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/hw_random.h>
#include <linux/completion.h>
@@ -53,6 +55,7 @@
#define RNGC_SELFTEST_TIMEOUT 2500 /* us */
#define RNGC_SEED_TIMEOUT 200 /* ms */
+#define RNGC_PM_TIMEOUT 500 /* ms */
static bool self_test = true;
module_param(self_test, bool, 0);
@@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
unsigned int status;
- int retval = 0;
+ int err, retval = 0;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
while (max >= sizeof(u32)) {
status = readl(rngc->base + RNGC_STATUS);
@@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
}
+ pm_runtime_mark_last_busy(rngc->dev);
+ pm_runtime_put(rngc->dev);
return retval ? retval : -EIO;
}
@@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
u32 cmd, ctrl;
- int ret;
+ int ret, err;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
@@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng)
ret = wait_for_completion_timeout(&rngc->rng_op_done,
msecs_to_jiffies(RNGC_SEED_TIMEOUT));
if (!ret) {
- ret = -ETIMEDOUT;
- goto err;
+ err = -ETIMEDOUT;
+ goto out;
}
} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
if (rngc->err_reg) {
- ret = -EIO;
- goto err;
+ err = -EIO;
+ goto out;
}
/*
@@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng)
ctrl |= RNGC_CTRL_AUTO_SEED;
writel(ctrl, rngc->base + RNGC_CONTROL);
+out:
/*
* if initialisation was successful, we keep the interrupt
* unmasked until imx_rngc_cleanup is called
* we mask the interrupt ourselves if we return an error
*/
- return 0;
+ if (err)
+ imx_rngc_irq_mask_clear(rngc);
-err:
- imx_rngc_irq_mask_clear(rngc);
- return ret;
+ pm_runtime_put(rngc->dev);
+ return err;
}
static void imx_rngc_cleanup(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ int err;
- imx_rngc_irq_mask_clear(rngc);
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (!err) {
+ imx_rngc_irq_mask_clear(rngc);
+ pm_runtime_put(rngc->dev);
+ }
}
static int __init imx_rngc_probe(struct platform_device *pdev)
@@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rngc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
@@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+	ret = clk_prepare_enable(rngc->clk);
+	if (ret)
+		return ret;
+
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = FIELD_GET(RNG_TYPE, ver_id);
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
+ clk_disable_unprepare(rngc->clk);
return -ENODEV;
+ }
init_completion(&rngc->rng_op_done);
@@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
+ }
if (self_test) {
ret = imx_rngc_self_test(rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "self test failed\n");
+ }
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
@@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL)
+};
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb" },
@@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
+ .pm = pm_ptr(&imx_rngc_pm_ops),
.of_match_table = imx_rngc_dt_ids,
},
};
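The imx-rngc conversion shows the standard runtime-PM shape: probe declares an autosuspend policy, and every path that touches registers is bracketed by a resume/put pair. Condensed (the delay mirrors RNGC_PM_TIMEOUT above):

	/* probe */
	pm_runtime_set_autosuspend_delay(dev, RNGC_PM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	devm_pm_runtime_enable(dev);

	/* around any register access */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;
	/* ... touch hardware ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);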
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 2f9b6483c4a1..bbfd662d25a6 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
- .remove_new = ingenic_rng_remove,
+ .remove = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 36c34252b4f6..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
@@ -261,7 +265,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
- .remove_new = ks_sa_rng_remove,
+ .remove = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 1e3048f2bb38..5808d09d12c4 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -36,6 +36,7 @@ struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
+ struct device *dev;
};
static int mtk_rng_init(struct hwrng *rng)
@@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
@@ -97,8 +98,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -112,13 +112,13 @@ static int mtk_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
@@ -142,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index f01eb95bee31..e3fcb8bcc29b 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -188,7 +188,7 @@ static struct platform_driver mxc_rnga_driver = {
.of_match_table = mxc_rnga_of_match,
},
.probe = mxc_rnga_probe,
- .remove_new = mxc_rnga_remove,
+ .remove = mxc_rnga_remove,
};
module_platform_driver(mxc_rnga_driver);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1b49e3a86d57..ea6d5599242f 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove_new = n2rng_remove,
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 9a870f5dc371..7612f15a261f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -48,7 +48,7 @@
#define HV_RNG_NUM_CONTROL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
@@ -147,6 +147,6 @@ struct n2rng {
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* _N2RNG_H */
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index bce8c4829a1f..40d6e29dea03 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -32,6 +32,7 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
u32 clkp;
};
@@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
int retval = 0;
int ready;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max) {
if (wait) {
@@ -79,8 +80,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -109,7 +109,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
#endif
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->dev = &pdev->dev;
priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
@@ -176,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
- .remove_new = npcm_rng_remove,
+ .remove = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 4914a8720e58..5e8b50f15db7 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -558,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove_new = omap_rng_remove,
+ .remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 8064c792caf0..aa71f61c3dc9 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -56,7 +56,6 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
else
r = 4;
- pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
return r;
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index 289b385bbf05..6e3ed4b85605 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC
+ * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
*
* Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
* Copyright (c) 2022, Aurelien Jarno
+ * Copyright (c) 2025, Collabora Ltd.
* Authors:
* Lin Jinhan <troy.lin@rock-chips.com>
* Aurelien Jarno <aurelien@aurel32.net>
+ * Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
*/
#include <linux/clk.h>
#include <linux/hw_random.h>
@@ -32,6 +34,9 @@
*/
#define RK_RNG_SAMPLE_CNT 1000
+/* after how many bytes of output TRNGv1 implementations should be reseeded */
+#define RK_TRNG_V1_AUTO_RESEED_CNT 16000
+
/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL 0x0004
#define TRNG_RNG_CTL 0x0400
@@ -49,11 +54,88 @@
#define TRNG_RNG_SAMPLE_CNT 0x0404
#define TRNG_RNG_DOUT 0x0410
+/*
+ * TRNG V1 register definitions
+ * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3588 SoC
+ */
+#define TRNG_V1_CTRL 0x0000
+#define TRNG_V1_CTRL_NOP 0x00
+#define TRNG_V1_CTRL_RAND 0x01
+#define TRNG_V1_CTRL_SEED 0x02
+
+#define TRNG_V1_STAT 0x0004
+#define TRNG_V1_STAT_SEEDED BIT(9)
+#define TRNG_V1_STAT_GENERATING BIT(30)
+#define TRNG_V1_STAT_RESEEDING BIT(31)
+
+#define TRNG_V1_MODE 0x0008
+#define TRNG_V1_MODE_128_BIT (0x00 << 3)
+#define TRNG_V1_MODE_256_BIT (0x01 << 3)
+
+/* Interrupt Enable register; unused because polling is faster */
+#define TRNG_V1_IE 0x0010
+#define TRNG_V1_IE_GLBL_EN BIT(31)
+#define TRNG_V1_IE_SEED_DONE_EN BIT(1)
+#define TRNG_V1_IE_RAND_RDY_EN BIT(0)
+
+#define TRNG_V1_ISTAT 0x0014
+#define TRNG_V1_ISTAT_RAND_RDY BIT(0)
+
+/* RAND0 ~ RAND7 */
+#define TRNG_V1_RAND0 0x0020
+#define TRNG_V1_RAND7 0x003C
+
+/* Auto Reseed Register */
+#define TRNG_V1_AUTO_RQSTS 0x0060
+
+#define TRNG_V1_VERSION 0x00F0
+#define TRNG_v1_VERSION_CODE 0x46bc
+/* end of TRNG_V1 register definitions */
+
+/*
+ * RKRNG register definitions
+ * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
+ * SoCs. It can either output true randomness (TRNG) or "deterministic"
+ * randomness derived from hashing the true entropy (DRNG). This driver
+ * implementation uses just the true entropy, and leaves stretching the entropy
+ * up to Linux.
+ */
+#define RKRNG_CFG 0x0000
+#define RKRNG_CTRL 0x0010
+#define RKRNG_CTRL_REQ_TRNG BIT(4)
+#define RKRNG_STATE 0x0014
+#define RKRNG_STATE_TRNG_RDY BIT(4)
+#define RKRNG_TRNG_DATA0 0x0050
+#define RKRNG_TRNG_DATA1 0x0054
+#define RKRNG_TRNG_DATA2 0x0058
+#define RKRNG_TRNG_DATA3 0x005C
+#define RKRNG_TRNG_DATA4 0x0060
+#define RKRNG_TRNG_DATA5 0x0064
+#define RKRNG_TRNG_DATA6 0x0068
+#define RKRNG_TRNG_DATA7 0x006C
+#define RKRNG_READ_LEN 32
+
+/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
+static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
+ "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
+
struct rk_rng {
struct hwrng rng;
void __iomem *base;
int clk_num;
struct clk_bulk_data *clk_bulks;
+ const struct rk_rng_soc_data *soc_data;
+ struct device *dev;
+};
+
+struct rk_rng_soc_data {
+ int (*rk_rng_init)(struct hwrng *rng);
+ int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
+ void (*rk_rng_cleanup)(struct hwrng *rng);
+ unsigned short quality;
+ bool reset_optional;
};
/* The mask in the upper 16 bits determines the bits that are updated */
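The static_assert above encodes a simple bound: TRNG_V1_RAND0 through TRNG_V1_RAND7 are eight consecutive 32-bit registers, so the window readable by memcpy_fromio() is 0x003C + 4 - 0x0020 = 0x20 = 32 bytes, and RK_RNG_MAX_BYTE must not exceed that. The same check restated as a standalone snippet (RK_RNG_MAX_BYTE assumed to be 32, as in the existing driver):

	#define TRNG_V1_RAND0	0x0020
	#define TRNG_V1_RAND7	0x003C
	#define RK_RNG_MAX_BYTE	32

	_Static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
		       "reads would overrun the RAND0..RAND7 register window");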
@@ -62,19 +144,38 @@ static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}
-static int rk_rng_init(struct hwrng *rng)
+static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
{
- struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
- int ret;
+ writel(val, rng->base + offset);
+}
+
+static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
+{
+ return readl(rng->base + offset);
+}
+static int rk_rng_enable_clks(struct rk_rng *rk_rng)
+{
+ int ret;
/* start clocks */
ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
if (ret < 0) {
- dev_err((struct device *) rk_rng->rng.priv,
- "Failed to enable clks %d\n", ret);
+ dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
return ret;
}
+ return 0;
+}
+
+static int rk3568_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
/* set the sample period */
writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
@@ -87,7 +188,7 @@ static int rk_rng_init(struct hwrng *rng)
return 0;
}
-static void rk_rng_cleanup(struct hwrng *rng)
+static void rk3568_rng_cleanup(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
@@ -98,14 +199,14 @@ static void rk_rng_cleanup(struct hwrng *rng)
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}
-static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
u32 reg;
int ret = 0;
- ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv);
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
if (ret < 0)
return ret;
@@ -122,12 +223,165 @@ static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
/* Read random data stored in the registers */
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
- pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
}
+static int rk3576_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ return rk_rng_enable_clks(rk_rng);
+}
+
+static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
+ int ret = 0;
+ u32 val;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
+ RKRNG_CTRL);
+
+ if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
+ (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US)) {
+ dev_err(rk_rng->dev, "timed out waiting for data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
+
+ memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
+
+out:
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static int rk3588_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ u32 version, status, mask, istat;
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
+ version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
+ if (version != TRNG_v1_VERSION_CODE) {
+ dev_err(rk_rng->dev,
+ "wrong trng version, expected = %08x, actual = %08x\n",
+			TRNG_v1_VERSION_CODE, version);
+ ret = -EFAULT;
+ goto err_disable_clk;
+ }
+
+ mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
+ TRNG_V1_STAT_RESEEDING;
+ if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
+ (status & mask) == TRNG_V1_STAT_SEEDED,
+ RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
+ dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
+ ret = -ETIMEDOUT;
+ goto err_disable_clk;
+ }
+
+ /*
+ * clear ISTAT flag, downstream advises to do this to avoid
+ * auto-reseeding "on power on"
+ */
+ istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);
+
+ /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
+ rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);
+
+ return 0;
+err_disable_clk:
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+ return ret;
+}
+
+static void rk3588_rng_cleanup(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+}
+
+static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
+ int ret = 0;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear ISTAT, even without interrupts enabled, this will be updated */
+ reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+
+ /* generate 256 bits of random data */
+ rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);
+
+ ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
+ (reg & TRNG_V1_ISTAT_RAND_RDY), 0,
+ RK_RNG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ goto out;
+
+ /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
+ memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);
+
+out:
+ /* Clear ISTAT */
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+ /* close the TRNG */
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
+
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static const struct rk_rng_soc_data rk3568_soc_data = {
+ .rk_rng_init = rk3568_rng_init,
+ .rk_rng_read = rk3568_rng_read,
+ .rk_rng_cleanup = rk3568_rng_cleanup,
+ .quality = 900,
+ .reset_optional = false,
+};
+
+static const struct rk_rng_soc_data rk3576_soc_data = {
+ .rk_rng_init = rk3576_rng_init,
+ .rk_rng_read = rk3576_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
+static const struct rk_rng_soc_data rk3588_soc_data = {
+ .rk_rng_init = rk3588_rng_init,
+ .rk_rng_read = rk3588_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
static int rk_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -139,6 +393,7 @@ static int rk_rng_probe(struct platform_device *pdev)
if (!rk_rng)
return -ENOMEM;
+ rk_rng->soc_data = of_device_get_match_data(dev);
rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rk_rng->base))
return PTR_ERR(rk_rng->base);
@@ -148,34 +403,40 @@ static int rk_rng_probe(struct platform_device *pdev)
return dev_err_probe(dev, rk_rng->clk_num,
"Failed to get clks property\n");
- rst = devm_reset_control_array_get_exclusive(&pdev->dev);
- if (IS_ERR(rst))
- return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
+ if (rk_rng->soc_data->reset_optional)
+ rst = devm_reset_control_array_get_optional_exclusive(dev);
+ else
+ rst = devm_reset_control_array_get_exclusive(dev);
+
+ if (rst) {
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
- reset_control_assert(rst);
- udelay(2);
- reset_control_deassert(rst);
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
platform_set_drvdata(pdev, rk_rng);
rk_rng->rng.name = dev_driver_string(dev);
if (!IS_ENABLED(CONFIG_PM)) {
- rk_rng->rng.init = rk_rng_init;
- rk_rng->rng.cleanup = rk_rng_cleanup;
+ rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
+ rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
}
- rk_rng->rng.read = rk_rng_read;
- rk_rng->rng.priv = (unsigned long) dev;
- rk_rng->rng.quality = 900;
+ rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
+ rk_rng->dev = dev;
+ rk_rng->rng.quality = rk_rng->soc_data->quality;
pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n");
+ return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");
ret = devm_hwrng_register(dev, &rk_rng->rng);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n");
+ return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");
return 0;
}
@@ -184,7 +445,7 @@ static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
- rk_rng_cleanup(&rk_rng->rng);
+ rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);
return 0;
}
@@ -193,7 +454,7 @@ static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
- return rk_rng_init(&rk_rng->rng);
+ return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
}
static const struct dev_pm_ops rk_rng_pm_ops = {
@@ -204,7 +465,9 @@ static const struct dev_pm_ops rk_rng_pm_ops = {
};
static const struct of_device_id rk_rng_dt_match[] = {
- { .compatible = "rockchip,rk3568-rng", },
+ { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
+ { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
{ /* sentinel */ },
};
@@ -221,8 +484,9 @@ static struct platform_driver rk_rng_driver = {
module_platform_driver(rk_rng_driver);
-MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver");
+MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 9d041a67c295..9a8c00586ab0 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@@ -49,6 +50,7 @@
struct stm32_rng_data {
uint max_clock_rate;
+ uint nb_clock;
u32 cr;
u32 nscr;
u32 htcr;
@@ -72,7 +74,7 @@ struct stm32_rng_private {
struct hwrng rng;
struct device *dev;
void __iomem *base;
- struct clk *clk;
+ struct clk_bulk_data *clk_bulk;
struct reset_control *rst;
struct stm32_rng_config pm_conf;
const struct stm32_rng_data *data;
@@ -253,7 +255,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
exit_rpm:
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
@@ -266,7 +267,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
unsigned long clock_rate = 0;
uint clock_div = 0;
- clock_rate = clk_get_rate(priv->clk);
+ clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
/*
* Get the exponent to apply on the CLKDIV field in RNG_CR register
@@ -276,7 +277,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
clock_div++;
- pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
+ pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
return clock_div;
}
@@ -288,7 +289,7 @@ static int stm32_rng_init(struct hwrng *rng)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -328,7 +329,7 @@ static int stm32_rng_init(struct hwrng *rng)
(!(reg & RNG_CR_CONDRST)),
10, 50000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
@@ -356,12 +357,13 @@ static int stm32_rng_init(struct hwrng *rng)
reg & RNG_SR_DRDY,
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+
return -EINVAL;
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -379,7 +381,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
reg = readl_relaxed(priv->base + RNG_CR);
reg &= ~RNG_CR_RNGEN;
writel_relaxed(reg, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -389,7 +392,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
struct stm32_rng_private *priv = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -403,7 +406,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -414,7 +417,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -434,7 +437,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -462,7 +465,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
reg & ~RNG_CR_CONDRST, 10, 100000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
@@ -472,7 +475,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -484,9 +487,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
stm32_rng_resume)
};
+static const struct stm32_rng_data stm32mp25_rng_data = {
+ .has_cond_reset = true,
+ .max_clock_rate = 48000000,
+ .nb_clock = 2,
+ .cr = 0x00F00D00,
+ .nscr = 0x2B5BB,
+ .htcr = 0x969D,
+};
+
static const struct stm32_rng_data stm32mp13_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
+ .nb_clock = 1,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
@@ -494,11 +507,16 @@ static const struct stm32_rng_data stm32mp13_rng_data = {
static const struct stm32_rng_data stm32_rng_data = {
.has_cond_reset = false,
- .max_clock_rate = 3000000,
+ .max_clock_rate = 48000000,
+ .nb_clock = 1,
};
static const struct of_device_id stm32_rng_match[] = {
{
+ .compatible = "st,stm32mp25-rng",
+ .data = &stm32mp25_rng_data,
+ },
+ {
.compatible = "st,stm32mp13-rng",
.data = &stm32mp13_rng_data,
},
@@ -516,6 +534,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
struct resource *res;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -525,10 +544,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&ofdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -551,6 +566,28 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.read = stm32_rng_read;
priv->rng.quality = 900;
+ if (!priv->data->nb_clock || priv->data->nb_clock > 2)
+ return -EINVAL;
+
+ ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
+ if (ret != priv->data->nb_clock)
+ return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);
+
+ if (priv->data->nb_clock == 2) {
+ const char *id = priv->clk_bulk[1].id;
+ struct clk *clk = priv->clk_bulk[1].clk;
+
+ if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
+ return dev_err_probe(dev, -EINVAL, "Missing clock name\n");
+
+ if (strcmp(priv->clk_bulk[0].id, "core")) {
+ priv->clk_bulk[1].id = priv->clk_bulk[0].id;
+ priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
+ priv->clk_bulk[0].id = id;
+ priv->clk_bulk[0].clk = clk;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
@@ -565,7 +602,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
- .remove_new = stm32_rng_remove,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
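The stm32 probe now takes whatever clocks the device tree provides via devm_clk_bulk_get_all(), validates the count against nb_clock, and reorders the array so the "core" clock is guaranteed to sit at index 0; that keeps clk_get_rate(priv->clk_bulk[0].clk) correct for the CLKDIV computation. The basic bulk-clock shape, hedged to the generic API:

	struct clk_bulk_data *clks;
	int n, ret;

	n = devm_clk_bulk_get_all(dev, &clks);
	if (n < 1)
		return dev_err_probe(dev, n < 0 ? n : -EINVAL, "no clocks\n");

	ret = clk_bulk_prepare_enable(n, clks);
	if (ret)
		return ret;
	/* ... clks[0].clk is usable here ... */
	clk_bulk_disable_unprepare(n, clks);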
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 65b8260339f5..e61f06393209 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -150,10 +150,9 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.quality = pdata->quality;
}
- priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ priv->period = us_to_ktime(period);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
@@ -193,7 +192,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
- .remove_new = timeriomem_rng_remove,
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
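hrtimer_setup() merges the old two-step dance (hrtimer_init() followed by assigning .function by hand) into one call, so a timer can never be started with its callback unset. Minimal usage, assuming a priv struct that embeds the timer:

	static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
	{
		/* ... signal whoever is waiting ... */
		return HRTIMER_NORESTART;
	}

	hrtimer_setup(&priv->timer, foo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&priv->timer, us_to_ktime(period), HRTIMER_MODE_REL);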
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 642d13519464..709a36507145 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -88,12 +88,12 @@ struct xgene_rng_dev {
static void xgene_rng_expired_timer(struct timer_list *t)
{
- struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
+ struct xgene_rng_dev *ctx = timer_container_of(ctx, t, failure_timer);
/* Clear failure counter as timer expired */
disable_irq(ctx->irq);
ctx->failure_cnt = 0;
- del_timer(&ctx->failure_timer);
+ timer_delete(&ctx->failure_timer);
enable_irq(ctx->irq);
}
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
- .remove_new = xgene_rng_remove,
+ .remove = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
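The xgene and bt-bmc hunks apply the tree-wide timer API renames: from_timer() becomes timer_container_of(), and del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(), with identical semantics. The resulting shape:

	struct foo {
		struct timer_list timer;
	};

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo *foo = timer_container_of(foo, t, timer);
		/* ... */
	}

	timer_setup(&foo->timer, foo_timer_fn, 0);
	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(500));
	timer_delete_sync(&foo->timer);	/* on teardown */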
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index e0944547c9d0..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_mem_io.o
ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b8b9c07d3b5d..a179d4797011 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -347,7 +347,7 @@ static const struct file_operations bt_bmc_fops = {
static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+ struct bt_bmc *bt_bmc = timer_container_of(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
@@ -465,7 +465,7 @@ static void bt_bmc_remove(struct platform_device *pdev)
misc_deregister(&bt_bmc->miscdev);
if (bt_bmc->irq < 0)
- del_timer_sync(&bt_bmc->poll_timer);
+ timer_delete_sync(&bt_bmc->poll_timer);
}
static const struct of_device_id bt_bmc_match[] = {
@@ -481,7 +481,7 @@ static struct platform_driver bt_bmc_driver = {
.of_match_table = bt_bmc_match,
},
.probe = bt_bmc_probe,
- .remove_new = bt_bmc_remove,
+ .remove = bt_bmc_remove,
};
module_platform_driver(bt_bmc_driver);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 7296127181ec..ee2bdc7ed0da 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s%d", "ipmb-",
client->adapter->nr);
+ if (!ipmb_dev->miscdev.name)
+ return -ENOMEM;
+
ipmb_dev->miscdev.fops = &ipmb_fops;
ipmb_dev->miscdev.parent = &client->dev;
ret = misc_register(&ipmb_dev->miscdev);
@@ -355,11 +358,13 @@ static const struct i2c_device_id ipmb_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_ipmb_id[] = {
{ "IPMB0001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+#endif
static struct i2c_driver ipmb_driver = {
.driver = {
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 332082e02ea5..e6ba35b71f10 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -122,12 +122,9 @@ out:
static int ipmi_release(struct inode *inode, struct file *file)
{
struct ipmi_file_private *priv = file->private_data;
- int rv;
struct ipmi_recv_msg *msg, *next;
- rv = ipmi_destroy_user(priv->user);
- if (rv)
- return rv;
+ ipmi_destroy_user(priv->user);
list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
ipmi_free_recv_msg(msg);
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 6a4f279c7c1f..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e12b531f5c2f..a0b67a35a5f0 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -39,17 +38,22 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
-static void smi_recv_work(struct work_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -180,14 +184,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +198,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +303,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,14 +429,14 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -491,20 +469,27 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The workqueue is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+	 * of memory for instance), they will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct work_struct recv_work;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +507,9 @@ struct ipmi_smi {
* Events that were queues because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -560,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -613,6 +601,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
/*
* The driver model view of the IPMI messaging driver.
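With SRCU removed, an ipmi_user now lives and dies by a plain kref: lookups succeed only while the refcount is nonzero (kref_get_unless_zero()), and the final put runs free_ipmi_user(), which also drops the interface reference and the module reference it pinned. The acquire/release idiom in general form:

	/* Sketch: safe lookup of a kref-counted object found via some list. */
	static struct obj *obj_acquire(struct obj *o)
	{
		return kref_get_unless_zero(&o->ref) ? o : NULL;
	}

	static void obj_release(struct obj *o)
	{
		kref_put(&o->ref, obj_free);	/* obj_free() runs at refcount 0 */
	}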
@@ -630,9 +640,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +705,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +726,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +750,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+ /*
+ * Build an array of ipmi interfaces and fill it in, and
+ * another array of the devices. We can't call the callback
+ * with ipmi_interfaces_mutex held. smi_watchers_mutex will
+ * keep things in order for the user.
+ */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
+
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
@@ -779,22 +809,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -939,20 +964,15 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it from smi_work. The message holds a
+ * reference on the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
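
Instead of calling the user's handler inline, deliver_response() now appends to intf->user_msgs and lets smi_work() drain the list in order. A minimal userspace sketch of that mutex-protected producer/worker queue (illustrative, not the driver's code):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct msg {
	struct msg *next;
	int payload;
};

static pthread_mutex_t user_msgs_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct msg *head, **tail = &head;

/* Producer side: append in order under the mutex. */
static void queue_msg(int payload)
{
	struct msg *m = malloc(sizeof(*m));

	if (!m)
		return;
	m->payload = payload;
	m->next = NULL;
	pthread_mutex_lock(&user_msgs_mutex);
	*tail = m;
	tail = &m->next;
	pthread_mutex_unlock(&user_msgs_mutex);
	/* in the driver: queue_work(system_wq, &intf->smi_work); */
}

/* Worker side, the smi_work() analogue: drain in FIFO order. */
static void drain(void)
{
	pthread_mutex_lock(&user_msgs_mutex);
	while (head) {
		struct msg *m = head;

		head = m->next;
		if (!head)
			tail = &head;
		printf("deliver %d\n", m->payload);
		free(m);
	}
	pthread_mutex_unlock(&user_msgs_mutex);
}

int main(void)
{
	queue_msg(1);
	queue_msg(2);
	drain();
	return 0;
}
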
@@ -1104,12 +1124,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1122,7 +1141,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1133,14 +1152,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1151,7 +1169,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1162,7 +1180,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1170,7 +1187,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1184,7 +1201,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1192,23 +1209,13 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1237,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,86 +1273,68 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_lock(&intf->users_mutex);
+ mutex_lock(&intf->seq_lock);
+ list_add(&new_user->link, &intf->users);
+ mutex_unlock(&intf->seq_lock);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
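
The max_users cap uses the optimistic bump-then-back-out style of atomic_add_return(): increment first, undo if over the limit. A userspace sketch of the same admission check; MAX_USERS stands in for the module's max_users parameter:

#include <stdatomic.h>
#include <errno.h>
#include <stdio.h>

#define MAX_USERS 30

static atomic_int nr_users;

static int try_add_user(void)
{
	/* Optimistically bump, then back out if over the cap. */
	if (atomic_fetch_add(&nr_users, 1) + 1 > MAX_USERS) {
		atomic_fetch_add(&nr_users, -1);
		return -EBUSY;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", try_add_user());
	return 0;
}
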
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in workqueue context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1345,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1368,13 +1358,13 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,25 +1376,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
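
The refcount_dec_if_one() test at the top is what makes destruction single-shot: only the caller that wins the 1 -> 0 transition runs the teardown; everyone else returns immediately. A C11 analogue of that gate (simplified, not the kernel's refcount_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int destroyed = 1;	/* set to 1 at create time */

static bool refcount_dec_if_one(atomic_int *r)
{
	int one = 1;

	/* Only succeed if the count is exactly 1. */
	return atomic_compare_exchange_strong(r, &one, 0);
}

int main(void)
{
	if (refcount_dec_if_one(&destroyed))
		puts("first caller: doing teardown");
	if (refcount_dec_if_one(&destroyed))
		puts("never printed: teardown already claimed");
	return 0;
}
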
-int ipmi_destroy_user(struct ipmi_user *user)
+void ipmi_destroy_user(struct ipmi_user *user)
{
- _ipmi_destroy_user(user);
+ struct ipmi_smi *intf = user->intf;
- kref_put(&user->refcount, free_user);
+ mutex_lock(&intf->users_mutex);
+ _ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- return 0;
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
@@ -1413,9 +1411,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1424,7 +1422,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1434,9 +1432,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1446,7 +1444,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1456,9 +1454,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1468,7 +1466,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1478,9 +1476,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1490,7 +1488,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1500,9 +1498,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1512,7 +1510,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1520,17 +1518,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1539,17 +1537,24 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1557,16 +1562,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1579,7 +1585,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
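
The tri-state that replaces the old maintenance_mode_enable bool is only ever escalated while commands are outstanding. The sketch below shows the ordering this code implies; the numeric order of the enum values is inferred from the "state < newst" comparison and the truth tests elsewhere in the patch, since the enum definition itself is not part of this diff:

enum maintenance_state {
	IPMI_MAINTENANCE_MODE_STATE_OFF,	/* 0: normal operation */
	IPMI_MAINTENANCE_MODE_STATE_FIRMWARE,	/* firmware transfer in progress */
	IPMI_MAINTENANCE_MODE_STATE_RESET,	/* BMC reset: all commands blocked */
};

/* Escalate only, mirroring "maintenance_mode_state < newst" below. */
static inline void maybe_escalate(enum maintenance_state *state,
				  enum maintenance_state newst)
{
	if (*state < newst)
		*state = newst;
}
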
@@ -1587,19 +1593,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1612,13 +1616,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1629,22 +1626,15 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
@@ -1689,9 +1679,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1721,7 +1711,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1735,9 +1725,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1760,7 +1750,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1884,13 +1874,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
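
run_to_completion can now change while these paths run, so each function takes one READ_ONCE() snapshot and uses it for both the lock and the matching unlock. A C11 fragment showing the analogous stable-snapshot read (illustrative):

#include <stdatomic.h>

static _Atomic int run_to_completion;

/*
 * One stable snapshot per function: a plain read could be reloaded
 * by the compiler between the lock and unlock decisions, unpairing
 * them if the flag flips mid-function.
 */
static int snapshot_run_to_completion(void)
{
	return atomic_load_explicit(&run_to_completion,
				    memory_order_relaxed);
}

int main(void)
{
	return snapshot_run_to_completion();
}
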
@@ -1943,14 +1932,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1964,7 +1959,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2045,12 +2040,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2108,7 +2100,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2161,7 +2153,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2224,12 +2216,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2278,7 +2267,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2306,24 +2295,18 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
- }
+ recv_msg->user = user;
+ if (user) {
+ atomic_inc(&user->nr_msgs);
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
+ }
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2334,21 +2317,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2377,19 +2361,19 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2416,12 +2400,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2440,7 +2424,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2455,12 +2439,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2479,7 +2463,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -2640,6 +2624,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -3066,7 +3056,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3522,20 +3512,32 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3571,12 +3573,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3593,11 +3589,14 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3605,12 +3604,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- INIT_WORK(&intf->recv_work, smi_recv_work);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3623,12 +3622,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Hold the watchers mutex so the new interface is announced
+ * to watchers without racing with watcher registration.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3637,9 +3640,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3671,18 +3674,22 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3691,10 +3698,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3760,20 +3766,30 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3786,24 +3802,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3881,7 +3892,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3902,9 +3913,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3928,58 +3938,47 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -3992,7 +3991,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -4001,9 +4000,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4019,55 +4017,44 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4079,7 +4066,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4181,7 +4168,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4202,63 +4189,76 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
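
The hand-built error reply above has to carry valid IPMB checksums in data[6] and data[11]. For reference, IPMI's checksum is a two's complement sum, so that adding the covered bytes and the checksum gives zero mod 256; a self-contained sketch matching how ipmb_checksum() behaves:

#include <stdio.h>

static unsigned char ipmb_checksum(const unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;
	return -csum;	/* covered bytes + checksum == 0 (mod 256) */
}

int main(void)
{
	/* rsSA 0x20, netfn/lun 0x18 -> header checksum 0xc8 */
	unsigned char hdr[2] = { 0x20, 0x18 };

	printf("checksum: 0x%02x\n", ipmb_checksum(hdr, 2));
	return 0;
}
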
@@ -4280,7 +4280,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4309,9 +4309,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4324,48 +4323,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4395,8 +4388,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4411,7 +4403,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4419,18 +4411,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- rcu_read_unlock();
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
 list_del(&recv_msg->link);
 ipmi_free_recv_msg(recv_msg);
}
/*
* We couldn't allocate memory for the
@@ -4444,11 +4438,9 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4461,8 +4453,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4486,7 +4478,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4497,7 +4489,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4538,9 +4530,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4570,14 +4563,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4616,10 +4609,10 @@ return_unspecified:
* The NetFN and Command in the response is not even
* marginally correct.
*/
- dev_warn(intf->si_dev,
- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified;
}
@@ -4637,13 +4630,16 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4651,7 +4647,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4666,6 +4662,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4740,6 +4739,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4755,10 +4757,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4792,31 +4794,16 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_work(struct work_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_work(intf, t, recv_work);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4825,9 +4812,7 @@ static void smi_recv_work(struct work_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
- rcu_read_lock();
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4845,15 +4830,64 @@ static void smi_recv_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+ * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
+ if (refcount_read(&user->destroyed) == 0)
+ ipmi_free_recv_msg(msg);
+ else
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
/* Handle a new message from the lower layer. */
@@ -4861,7 +4895,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
@@ -4886,9 +4920,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_work(&intf->recv_work);
+ smi_work(&intf->smi_work);
else
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4898,7 +4932,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -4927,8 +4961,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -4980,7 +5013,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5001,7 +5034,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
@@ -5028,7 +5061,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5038,8 +5071,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5059,7 +5092,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5067,7 +5102,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5075,28 +5110,28 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
if (atomic_read(&stop_operation))
return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5105,15 +5140,27 @@ static void ipmi_timeout(struct timer_list *unused)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
+
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
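
Note the shape of the conversion above: the timer callback still runs in atomic context, so it now does nothing but check the stop flag and queue work, while the interface scan moved into process context where it may take ipmi_interfaces_mutex. A userspace model of that timer/work split, with pthreads standing in for the timer and the workqueue (hypothetical names, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stop_operation;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/* Analogue of ipmi_timeout(): runs in "interrupt" context, so it only
 * checks the stop flag and queues work; it never does the heavy scan. */
static void timer_tick(void)
{
	if (atomic_load(&stop_operation))
		return;
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* Analogue of ipmi_timeout_work(): runs where it may sleep, so it can
 * take mutexes while walking the interface list. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!work_pending && !atomic_load(&stop_operation))
			pthread_cond_wait(&kick, &lock);
		if (atomic_load(&stop_operation))
			break;
		work_pending = false;
		pthread_mutex_unlock(&lock);
		puts("scan interfaces, handle timeouts");	/* may sleep */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	timer_tick();	/* simulate one timer expiry */
	/* Shut down the way cleanup_ipmi() does: stop flag first, then
	 * make sure no queued work is still in flight. */
	atomic_store(&stop_operation, true);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}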
@@ -5138,7 +5185,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5154,27 +5201,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
+ kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
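
The allocator above charges the message to the user before trying the allocation, rolls the counter back on any failure, and reports errors through the pointer itself (the kernel's ERR_PTR convention) instead of NULL. A compact userspace model of that quota-then-allocate order, with a hypothetical struct user in place of the kernel's:

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_MSGS_PER_USER 100

struct user {
	atomic_int refcount;
	atomic_int nr_msgs;
};

struct msg {
	struct user *user;
};

/* Mirror ERR_PTR()/IS_ERR(): small negative errnos encoded in the
 * pointer value rather than returning NULL. */
static inline void *err_ptr(intptr_t err) { return (void *)err; }
static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static struct msg *alloc_msg(struct user *user)
{
	struct msg *m;

	/* Charge the quota first so a racing allocator cannot overshoot;
	 * fetch_add returns the old value, so old + 1 is the new count. */
	if (user && atomic_fetch_add(&user->nr_msgs, 1) + 1 > MAX_MSGS_PER_USER) {
		atomic_fetch_sub(&user->nr_msgs, 1);
		return err_ptr(-EBUSY);
	}
	m = malloc(sizeof(*m));
	if (!m) {
		if (user)
			atomic_fetch_sub(&user->nr_msgs, 1);	/* roll back */
		return err_ptr(-ENOMEM);
	}
	m->user = user;
	if (user)
		atomic_fetch_add(&user->refcount, 1);	/* like kref_get() */
	return m;
}

int main(void)
{
	struct user u = { 0, 0 };
	struct msg *m = alloc_msg(&u);

	if (is_err(m))
		return 1;
	free(m);
	return 0;
}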
@@ -5190,9 +5261,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5222,6 +5293,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5290,7 +5370,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
 	 * On every interface, dump a bunch of OEM events holding the
@@ -5326,7 +5406,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5335,7 +5415,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5387,7 +5467,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5405,7 +5485,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5435,7 +5515,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5480,15 +5560,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5498,9 +5574,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5524,7 +5597,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5540,7 +5613,8 @@ static void __exit cleanup_ipmi(void)
* here.
*/
atomic_set(&stop_operation, 1);
- del_timer_sync(&ipmi_timer);
+ timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5551,8 +5625,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index c59a86eb58c7..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
@@ -302,7 +303,7 @@ static struct platform_driver powernv_ipmi_driver = {
.of_match_table = ipmi_powernv_match,
},
.probe = ipmi_powernv_probe,
- .remove_new = ipmi_powernv_remove,
+ .remove = ipmi_powernv_remove,
};
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 941d2dcc8c9d..e63c316d8aaa 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static struct ctl_table ipmi_table[] = {
+static const struct ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
@@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void)
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
- int rv;
-
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
@@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void)
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
- rv = ipmi_destroy_user(ipmi_user);
- if (rv)
- pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
@@ -93,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index eea23a3b966e..70e55f5ff85e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -73,6 +75,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -309,7 +315,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -386,6 +392,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -692,7 +709,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -738,6 +755,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -752,6 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case, hosed state is only left from the timeout. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -775,18 +798,20 @@ restart:
/*
 	 * Do this before return_hosed_msg, because that
- * releases the lock.
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -794,8 +819,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -813,11 +836,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
@@ -859,7 +878,7 @@ restart:
if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
 		/* OK, if it fails, the timer will just go off. */
- if (del_timer(&smi_info->si_timer))
+ if (timer_delete(&smi_info->si_timer))
smi_info->timer_running = false;
}
@@ -890,27 +909,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -925,6 +946,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
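
Taken together, these hunks turn a wedged BMC from an immediate-retry loop into fail-fast behavior: sender() refuses new messages while hosed, and only the slow one-second timer probes the hardware (via start_get_flags()) to find a way back to normal operation. A loose userspace model of that two-state backoff, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

enum state { NORMAL, HOSED };

struct sm {
	enum state st;
};

static int sm_send(struct sm *sm)
{
	if (sm->st == HOSED)
		return -1;	/* like sender() returning IPMI_BUS_ERR */
	/* ... hand the message to the hardware ... */
	return 0;
}

static void sm_fatal_error(struct sm *sm)
{
	sm->st = HOSED;	/* stop touching the hardware for a while */
}

/* Called from the slow (1s) timer while hosed: issue a cheap probe such
 * as Get Message Flags; in this simplified model a successful probe
 * returns the machine to NORMAL. */
static void sm_timer(struct sm *sm, bool probe_ok)
{
	if (sm->st == HOSED && probe_ok)
		sm->st = NORMAL;
}

int main(void)
{
	struct sm sm = { NORMAL };

	sm_fatal_error(&sm);
	printf("send while hosed: %d\n", sm_send(&sm));	/* fails fast */
	sm_timer(&sm, true);				/* probe succeeded */
	printf("send after recovery: %d\n", sm_send(&sm));
	return 0;
}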
@@ -1072,7 +1094,8 @@ static void set_need_watch(void *send_info, unsigned int watch_mask)
static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
+ struct smi_info *smi_info = timer_container_of(smi_info, t,
+ si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1082,6 +1105,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1091,14 +1118,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1106,7 +1130,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -1119,7 +1142,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1187,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1258,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1266,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1637,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1672,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1826,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1839,7 +1862,7 @@ static inline void stop_timer_and_thread(struct smi_info *smi_info)
}
smi_info->timer_can_start = false;
- del_timer_sync(&smi_info->si_timer);
+ timer_delete_sync(&smi_info->si_timer);
}
static struct smi_info *find_dup_si(struct smi_info *info)
@@ -1907,13 +1930,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1922,7 +1945,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1945,12 +1968,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2073,7 +2096,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2091,10 +2114,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
- enum ipmi_addr_src type = SI_INVALID;
+ struct smi_info *e, *e2;
if (initialized)
return 0;
@@ -2107,45 +2138,77 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
- if (type)
- return 0;
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
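
The rewritten scan replaces the old first-successful-type heuristic with an explicit duplicate check: IRQ-capable entries register first, and an IRQ-less entry registers only if no device at the same address has already been seen. The same two passes over a plain array, as a userspace sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	int addr_space;
	unsigned long addr;
	bool has_irq;
	bool registered;
};

static bool same_dev(const struct dev *a, const struct dev *b)
{
	return a->addr_space == b->addr_space && a->addr == b->addr;
}

static void scan(struct dev *devs, int n)
{
	/* Pass 1: devices with interrupts win; dedup against earlier
	 * IRQ-capable entries only. */
	for (int i = 0; i < n; i++) {
		bool dup = false;

		if (!devs[i].has_irq)
			continue;
		for (int j = 0; j < i; j++)
			if (devs[j].has_irq && same_dev(&devs[i], &devs[j]))
				dup = true;
		if (!dup)
			devs[i].registered = true;
	}
	/* Pass 2: IRQ-less devices register only if no IRQ variant exists
	 * anywhere at the same address and no earlier IRQ-less duplicate
	 * was seen. */
	for (int i = 0; i < n; i++) {
		bool dup = false;

		if (devs[i].has_irq)
			continue;
		for (int j = 0; j < n; j++)
			if (devs[j].has_irq && same_dev(&devs[i], &devs[j]))
				dup = true;
		for (int j = 0; j < i; j++)
			if (!devs[j].has_irq && same_dev(&devs[i], &devs[j]))
				dup = true;
		if (!dup)
			devs[i].registered = true;
	}
}

int main(void)
{
	struct dev devs[] = {
		{ 0, 0xca2, true,  false },
		{ 0, 0xca2, false, false },	/* dup of the IRQ device */
		{ 1, 0xe4,  false, false },
	};

	scan(devs, 3);
	for (int i = 0; i < 3; i++)
		printf("dev %d registered=%d\n", i, devs[i].registered);
	return 0;
}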
@@ -2267,7 +2330,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
@@ -2288,6 +2351,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
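
The v1 layout splits each direction's buffer-full flag into a head byte and a tail byte: the producer writes head = !tail to mark data pending, the consumer copies head into tail to clear it, and head ^ tail recreates the OBF/IBF bit the KCS state machine expects. A tiny userspace model of that encoding, with hypothetical helper names:

#include <assert.h>
#include <stdint.h>

struct ls2k_fifo_flags {
	uint8_t head;	/* toggled by the producer */
	uint8_t tail;	/* toggled by the consumer */
};

static int pending(const struct ls2k_fifo_flags *f)
{
	return f->head ^ f->tail;	/* nonzero while a byte is in flight */
}

static void produce(struct ls2k_fifo_flags *f)
{
	f->head = !f->tail;	/* mark full, as ls2k_mem_outb_v1() does */
}

static void consume(struct ls2k_fifo_flags *f)
{
	f->tail = f->head;	/* mark empty, as ls2k_mem_inb_v1() does */
}

int main(void)
{
	struct ls2k_fifo_flags f = { 0, 0 };

	assert(!pending(&f));
	produce(&f);
	assert(pending(&f));
	consume(&f);
	assert(!pending(&f));
	return 0;
}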
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index b83d55685b22..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
@@ -118,7 +120,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n",
&pdev->resource[0], io.regsize, io.regspacing, io.irq);
return ipmi_si_add_smi(&io);
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 96ba85648120..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
@@ -445,7 +446,7 @@ struct platform_driver ipmi_platform_driver = {
.acpi_match_table = ACPI_PTR(acpi_ipmi_match),
},
.probe = ipmi_probe,
- .remove_new = ipmi_remove,
+ .remove = ipmi_remove,
.id_table = si_plat_ids
};
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index d04b391048fb..1b63f7d2fcda 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -541,7 +539,8 @@ static void start_resend(struct ssif_info *ssif_info);
static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ retry_timer);
unsigned long oflags, *flags;
bool waiting, resend;
@@ -565,7 +564,8 @@ static void retry_timeout(struct timer_list *t)
static void watch_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ watch_timer);
unsigned long oflags, *flags;
if (ssif_info->stopping)
@@ -599,7 +599,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
if (ssif_info->waiting_alert) {
ssif_info->waiting_alert = false;
- del_timer(&ssif_info->retry_timer);
+ timer_delete(&ssif_info->retry_timer);
do_get = true;
} else if (ssif_info->curr_msg) {
ssif_info->got_alert = true;
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1089,6 +1088,7 @@ static void sender(void *send_info,
msg->data[0], msg->data[1],
(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
@@ -1268,12 +1268,10 @@ static void shutdown_ssif(void *send_info)
schedule_timeout(1);
ssif_info->stopping = true;
- del_timer_sync(&ssif_info->watch_timer);
- del_timer_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ timer_delete_sync(&ssif_info->watch_timer);
+ timer_delete_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
@@ -2114,7 +2112,7 @@ static struct platform_driver ipmi_driver = {
.name = DEVICE_NAME,
},
.probe = ssif_platform_probe,
- .remove_new = ssif_platform_remove,
+ .remove = ssif_platform_remove,
.id_table = ssif_plat_ids
};
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 335eea80054e..a013ddbf1466 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
@@ -932,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
@@ -1064,7 +1024,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
static void ipmi_unregister_watchdog(int ipmi_intf)
{
- int rv;
struct ipmi_user *loc_user = watchdog_user;
if (!loc_user)
@@ -1089,9 +1048,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
mutex_lock(&ipmi_watchdog_mutex);
/* Disconnect from IPMI. */
- rv = ipmi_destroy_user(loc_user);
- if (rv)
- pr_warn("error unlinking from IPMI: %d\n", rv);
+ ipmi_destroy_user(loc_user);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
@@ -1189,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = {
.smi_gone = ipmi_smi_gone
};
-static int action_op(const char *inval, char *outval)
+static int action_op_set_val(const char *inval)
{
- if (outval)
- strcpy(outval, action);
-
- if (!inval)
- return 0;
-
if (strcmp(inval, "reset") == 0)
action_val = WDOG_TIMEOUT_RESET;
else if (strcmp(inval, "none") == 0)
@@ -1207,18 +1158,26 @@ static int action_op(const char *inval, char *outval)
action_val = WDOG_TIMEOUT_POWER_DOWN;
else
return -EINVAL;
- strcpy(action, inval);
return 0;
}
-static int preaction_op(const char *inval, char *outval)
+static int action_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preaction);
+ strcpy(outval, action);
if (!inval)
return 0;
+ rv = action_op_set_val(inval);
+ if (!rv)
+ strcpy(action, inval);
+ return rv;
+}
+static int preaction_op_set_val(const char *inval)
+{
if (strcmp(inval, "pre_none") == 0)
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
@@ -1231,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval)
preaction_val = WDOG_PRETIMEOUT_MSG_INT;
else
return -EINVAL;
- strcpy(preaction, inval);
return 0;
}
-static int preop_op(const char *inval, char *outval)
+static int preaction_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preop);
+ strcpy(outval, preaction);
if (!inval)
return 0;
+ rv = preaction_op_set_val(inval);
+ if (!rv)
+ strcpy(preaction, inval);
+ return 0;
+	return rv;
+static int preop_op_set_val(const char *inval)
+{
if (strcmp(inval, "preop_none") == 0)
preop_val = WDOG_PREOP_NONE;
else if (strcmp(inval, "preop_panic") == 0)
@@ -1251,7 +1218,22 @@ static int preop_op(const char *inval, char *outval)
preop_val = WDOG_PREOP_GIVE_DATA;
else
return -EINVAL;
- strcpy(preop, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ int rv;
+
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ rv = preop_op_set_val(inval);
+ if (!rv)
+ strcpy(preop, inval);
-	return 0;
+	return rv;
}
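
The pattern repeated for all three options above: a *_op_set_val() helper that only parses and assigns the value, and a wrapper that handles the string bookkeeping, committing the string (and reporting success) only on a clean parse. That lets module init validate the built-in defaults without round-tripping through the parameter path. A self-contained sketch of the same split, using made-up option values:

#include <errno.h>
#include <stdio.h>
#include <string.h>

enum act { ACT_RESET, ACT_NONE, ACT_POWER_CYCLE };

static enum act action_val;
static char action[16] = "reset";

/* Parse-and-set only; no string bookkeeping. */
static int action_set_val(const char *inval)
{
	if (!strcmp(inval, "reset"))
		action_val = ACT_RESET;
	else if (!strcmp(inval, "none"))
		action_val = ACT_NONE;
	else if (!strcmp(inval, "power_cycle"))
		action_val = ACT_POWER_CYCLE;
	else
		return -EINVAL;
	return 0;
}

/* Module-parameter style wrapper: report the current string, and only
 * commit the new string once the value has parsed cleanly. */
static int action_op(const char *inval, char *outval)
{
	int rv;

	if (outval)
		strcpy(outval, action);
	if (!inval)
		return 0;
	rv = action_set_val(inval);
	if (!rv)
		strcpy(action, inval);
	return rv;
}

int main(void)
{
	char cur[16];

	printf("set bogus: %d\n", action_op("bogus", NULL));	/* -EINVAL */
	printf("set none:  %d\n", action_op("none", NULL));	/* 0 */
	action_op(NULL, cur);
	printf("current: %s (val %d)\n", cur, action_val);
	return 0;
}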
@@ -1288,18 +1270,18 @@ static int __init ipmi_wdog_init(void)
{
int rv;
- if (action_op(action, NULL)) {
+ if (action_op_set_val(action)) {
action_op("reset", NULL);
pr_info("Unknown action '%s', defaulting to reset\n", action);
}
- if (preaction_op(preaction, NULL)) {
+ if (preaction_op_set_val(preaction)) {
preaction_op("pre_none", NULL);
pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
- if (preop_op(preop, NULL)) {
+ if (preop_op_set_val(preop)) {
preop_op("preop_none", NULL);
pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 227bf06c7ca4..a13a3470c17a 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -428,7 +428,7 @@ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask,
if (rc == -ETIMEDOUT)
mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
} else {
- del_timer(&priv->obe.timer);
+ timer_delete(&priv->obe.timer);
}
}
@@ -655,7 +655,7 @@ static void aspeed_kcs_remove(struct platform_device *pdev)
spin_lock_irq(&priv->obe.lock);
priv->obe.remove = true;
spin_unlock_irq(&priv->obe.lock);
- del_timer_sync(&priv->obe.timer);
+ timer_delete_sync(&priv->obe.timer);
}
static const struct of_device_id ast_kcs_bmc_match[] = {
@@ -672,7 +672,7 @@ static struct platform_driver ast_kcs_bmc_driver = {
.of_match_table = ast_kcs_bmc_match,
},
.probe = aspeed_kcs_probe,
- .remove_new = aspeed_kcs_remove,
+ .remove = aspeed_kcs_remove,
};
module_platform_driver(ast_kcs_bmc_driver);
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
index 07710198233a..4808a61bf273 100644
--- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -241,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = {
.of_match_table = npcm_kcs_bmc_match,
},
.probe = npcm7xx_kcs_probe,
- .remove_new = npcm7xx_kcs_remove,
+ .remove = npcm7xx_kcs_remove,
};
module_platform_driver(npcm_kcs_bmc_driver);
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index a14fafc583d4..7a52e3ea49ed 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -209,7 +209,7 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t
if (ret)
goto exit;
- del_timer(&ssif_bmc->response_timer);
+ timer_delete(&ssif_bmc->response_timer);
ssif_bmc->response_timer_inited = false;
memcpy(&ssif_bmc->response, &msg, count);
@@ -292,13 +292,13 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
ssif_bmc->nbytes_processed = 0;
ssif_bmc->remain_len = 0;
ssif_bmc->busy = false;
- memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
wake_up_all(&ssif_bmc->wait_queue);
}
static void response_timeout(struct timer_list *t)
{
- struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer);
+ struct ssif_bmc_ctx *ssif_bmc = timer_container_of(ssif_bmc, t,
+ response_timer);
unsigned long flags;
spin_lock_irqsave(&ssif_bmc->lock, flags);
@@ -744,9 +744,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
ssif_bmc->aborting = true;
}
} else if (ssif_bmc->state == SSIF_RES_SENDING) {
- if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
/* Invalidate response buffer to denote it is sent */
complete_response(ssif_bmc);
+ }
ssif_bmc->state = SSIF_READY;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 169eed162a7f..34b815901b20 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
-}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- return 1;
-}
#endif
static inline bool should_stop_iteration(void)
@@ -530,11 +512,18 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
return 0;
}
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
* mmap_zero() will call shmem_zero_setup() to create a file,
@@ -545,12 +534,18 @@ static unsigned long get_unmapped_area_zero(struct file *file,
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
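
The new comment describes the private-mapping path: with CONFIG_TRANSPARENT_HUGEPAGE the kernel tries to hand back a huge-page-aligned address so later faults can be served with huge pages. A userspace probe of that effect (this assumes /dev/zero and a THP-enabled kernel; the alignment is best-effort, not guaranteed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4u << 20;	/* 4 MiB, enough to hold one 2 MiB unit */
	int fd = open("/dev/zero", O_RDONLY);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* On THP kernels the offset within 2 MiB is often 0. */
	printf("addr %p, offset in 2MiB: 0x%lx\n", p,
	       (unsigned long)((uintptr_t)p & ((2u << 20) - 1)));
	munmap(p, len);
	close(fd);
	return 0;
}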
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 541edc26ec89..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -58,31 +58,27 @@ static LIST_HEAD(misc_list);
static DEFINE_MUTEX(misc_mtx);
/*
- * Assigned numbers, used for dynamic minors
+ * Assigned numbers.
*/
-#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
-static int misc_minor_alloc(void)
+static int misc_minor_alloc(int minor)
{
- int ret;
+ int ret = 0;
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
- } else {
+ if (minor == MISC_DYNAMIC_MINOR) {
+ /* allocate free id */
ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
MINORMASK, GFP_KERNEL);
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
}
return ret;
}
static void misc_minor_free(int minor)
{
- if (minor < DYNAMIC_MINORS)
- ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1);
- else if (minor > MISC_DYNAMIC_MINOR)
- ida_free(&misc_minors_ida, minor);
+ ida_free(&misc_minors_ida, minor);
}
#ifdef CONFIG_PROC_FS
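
After this change every minor, fixed or dynamic, comes out of the single IDA: a fixed request reserves exactly its own number, and a dynamic request takes the first free ID strictly above MISC_DYNAMIC_MINOR. The same policy expressed over a plain bitmap, as a userspace sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MISC_DYNAMIC_MINOR 255
#define MAX_MINOR 1023		/* demo-sized ID space, not MINORMASK */

static bool used[MAX_MINOR + 1];

/* Fixed request: reserve exactly 'minor'. Dynamic request: first free
 * ID strictly above MISC_DYNAMIC_MINOR, mirroring ida_alloc_range(). */
static int minor_alloc(int minor)
{
	if (minor != MISC_DYNAMIC_MINOR) {
		if (used[minor])
			return -EBUSY;
		used[minor] = true;
		return minor;
	}
	for (int i = MISC_DYNAMIC_MINOR + 1; i <= MAX_MINOR; i++) {
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	}
	return -ENOSPC;
}

static void minor_free(int minor)
{
	used[minor] = false;
}

int main(void)
{
	printf("fixed 10 -> %d\n", minor_alloc(10));	/* reserves 10 */
	printf("fixed 10 -> %d\n", minor_alloc(10));	/* -EBUSY now */
	printf("dynamic  -> %d\n", minor_alloc(MISC_DYNAMIC_MINOR));
	minor_free(10);
	return 0;
}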
@@ -136,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+	/* Only request a module for devices with a fixed minor */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -148,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -214,12 +212,18 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
if (is_dynamic) {
- int i = misc_minor_alloc();
+ int i = misc_minor_alloc(misc->minor);
if (i < 0) {
err = -EBUSY;
@@ -228,6 +232,7 @@ int misc_register(struct miscdevice *misc)
misc->minor = i;
} else {
struct miscdevice *c;
+ int i;
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
@@ -235,6 +240,12 @@ int misc_register(struct miscdevice *misc)
goto out;
}
}
+
+ i = misc_minor_alloc(misc->minor);
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
@@ -243,8 +254,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
@@ -272,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
@@ -286,15 +296,15 @@ EXPORT_SYMBOL(misc_deregister);
static int __init misc_init(void)
{
int err;
- struct proc_dir_entry *ret;
+ struct proc_dir_entry *misc_proc_file;
- ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
+ misc_proc_file = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
err = class_register(&misc_class);
if (err)
goto fail_remove;
- err = -EIO;
- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops);
+ if (err < 0)
goto fail_printk;
return 0;
@@ -302,7 +312,7 @@ fail_printk:
pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
class_unregister(&misc_class);
fail_remove:
- if (ret)
+ if (misc_proc_file)
remove_proc_entry("misc", NULL);
return err;
}
diff --git a/drivers/char/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
new file mode 100644
index 000000000000..6fc8b05169c5
--- /dev/null
+++ b/drivers/char/misc_minor_kunit.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init_syscalls.h>
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+struct miscdev_test_case {
+ const char *str;
+ int minor;
+};
+
+static struct miscdev_test_case miscdev_test_ranges[] = {
+ {
+ .str = "lower static range, top",
+ .minor = 15,
+ },
+ {
+ .str = "upper static range, bottom",
+ .minor = 130,
+ },
+ {
+ .str = "lower static range, bottom",
+ .minor = 0,
+ },
+ {
+ .str = "upper static range, top",
+ .minor = MISC_DYNAMIC_MINOR - 1,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(miscdev, miscdev_test_ranges, str);
+
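+/*
+ * suite_init: probe each static minor range for a free minor and record
+ * it in miscdev_test_ranges, so the parameterized tests only request
+ * minors that are actually available on this system.
+ */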
+static int miscdev_find_minors(struct kunit_suite *suite)
+{
+ int ret;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ };
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[0].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 128; i < MISC_DYNAMIC_MINOR; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[1].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 0; i < miscdev_test_ranges[0].minor; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[2].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = MISC_DYNAMIC_MINOR - 1; i > miscdev_test_ranges[1].minor; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[3].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ }
+
+ return ret;
+}
+
+static bool is_valid_dynamic_minor(int minor)
+{
+ if (minor < 0)
+ return false;
+ return minor > MISC_DYNAMIC_MINOR;
+}
+
+static int miscdev_test_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations miscdev_test_fops = {
+ .open = miscdev_test_open,
+};
+
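+/*
+ * Create a device node for @misc, open it to verify the misc core routes
+ * the open to the registered fops, then remove the node again.
+ */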
+static void __init miscdev_test_can_open(struct kunit *test, struct miscdevice *misc)
+{
+ int ret;
+ struct file *filp;
+ char *devname;
+
+ devname = kasprintf(GFP_KERNEL, "/dev/%s", misc->name);
+ ret = init_mknod(devname, S_IFCHR | 0600,
+ new_encode_dev(MKDEV(MISC_MAJOR, misc->minor)));
+ if (ret != 0)
+ KUNIT_FAIL(test, "failed to create node\n");
+
+ filp = filp_open(devname, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(filp))
+ KUNIT_FAIL(test, "failed to open misc device: %ld\n", PTR_ERR(filp));
+ else
+ fput(filp);
+
+ init_unlink(devname);
+ kfree(devname);
+}
+
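+/* Register a device on a static minor from each probed range and open it. */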
+static void __init miscdev_test_static_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
+static void __init miscdev_test_dynamic_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc_test.minor));
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
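+/* The same miscdevice must be registrable again after deregistration. */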
+static void miscdev_test_twice(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
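+/* A second device requesting an already taken static minor must get -EBUSY. */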
+static void miscdev_test_duplicate_minor(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .name = "misc2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc1.minor = params->minor;
+ misc2.minor = params->minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc1.minor, params->minor);
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
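+/* A second device registering under an already taken name must get -EEXIST. */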
+static void miscdev_test_duplicate_name(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Test that after a duplicate name failure, the reserved minor number is
+ * freed to be allocated next.
+ */
+static void miscdev_test_duplicate_name_leak(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc3 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc3",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ int dyn_minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+ /*
+ * Find out which minor number will be allocated next.
+ */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ dyn_minor = misc3.minor;
+ misc_deregister(&misc3);
+ misc3.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ /*
+ * Now check that we can still get the same minor we found before.
+ */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ KUNIT_EXPECT_EQ(test, misc3.minor, dyn_minor);
+ misc_deregister(&misc3);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Try to register a static minor under a duplicate name. A failed
+ * registration must still release the reserved minor so it can be
+ * used again.
+ */
+static void miscdev_test_duplicate_error(struct kunit *test)
+{
+ struct miscdevice miscdyn = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscnew = {
+ .name = "name2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ miscstat.minor = params->minor;
+ miscnew.minor = params->minor;
+
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ ret = misc_register(&miscnew);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscnew.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&miscnew);
+
+ misc_deregister(&miscdyn);
+}
+
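+/*
+ * Allocate a large batch of dynamic minors and check that none of them
+ * is handed out from the static range 0-15.
+ */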
+static void __init miscdev_test_dynamic_only_range(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ /*
+ * This is the bug we are looking for!
+ * We asked for a dynamic minor but got one in the static range.
+ */
+ if (miscdev[i].minor >= 0 && miscdev[i].minor <= 15) {
+ KUNIT_FAIL(test, "misc_register allocated minor %d\n", miscdev[i].minor);
+ i++;
+ break;
+ }
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
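+/* Dynamic allocation must skip a minor that is already registered statically. */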
+static void __init miscdev_test_collision(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ misc_deregister(&miscstat);
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
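+/*
+ * As miscdev_test_collision, but register the static minor only after
+ * the batch of dynamic minors.
+ */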
+static void __init miscdev_test_collision_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+}
+
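+/* Requesting a static minor that is already in use dynamically must fail. */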
+static void __init miscdev_test_conflict(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+ /*
+ * Try to register a static device with the same minor as the
+ * dynamic one.
+ */
+ miscstat.minor = miscdyn.minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ miscdev_test_can_open(test, &miscdyn);
+
+ misc_deregister(&miscdyn);
+}
+
+static void __init miscdev_test_conflict_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+ /*
+ * Find the first available dynamic minor so that it can be
+ * requested as a static minor later on.
+ */
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ miscstat.minor = miscdyn.minor;
+ misc_deregister(&miscdyn);
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ /*
+ * Try to register a dynamic minor after registering a static minor
+ * within the dynamic range. The failed static registration never
+ * claimed the minor, so the dynamic allocation should get the same
+ * minor back.
+ */
+ miscdyn.minor = MISC_DYNAMIC_MINOR;
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ if (ret == 0)
+ misc_deregister(&miscdyn);
+}
+
+/* Minors above MISC_DYNAMIC_MINOR are invalid when registering a miscdevice. */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can be registered again without
+ * reinitialization even after the minor it previously owned has been
+ * handed out to another miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
+ KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_duplicate_name),
+ KUNIT_CASE(miscdev_test_duplicate_name_leak),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "miscdev",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+static struct kunit_case __refdata test_init_cases[] = {
+ KUNIT_CASE_PARAM(miscdev_test_static_basic, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_basic),
+ KUNIT_CASE(miscdev_test_dynamic_only_range),
+ KUNIT_CASE(miscdev_test_collision),
+ KUNIT_CASE(miscdev_test_collision_reverse),
+ KUNIT_CASE(miscdev_test_conflict),
+ KUNIT_CASE(miscdev_test_conflict_reverse),
+ {}
+};
+
+static struct kunit_suite test_init_suite = {
+ .name = "miscdev_init",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_init_cases,
+};
+kunit_test_init_section_suite(test_init_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_AUTHOR("Thadeu Lima de Souza Cascardo <cascardo@igalia.com>");
+MODULE_DESCRIPTION("Test module for misc character devices");
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index f2cff1a6fed5..53467b0a6187 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -213,7 +213,7 @@ static struct platform_driver oppanel_driver = {
.of_match_table = oppanel_match,
},
.probe = oppanel_probe,
- .remove_new = oppanel_remove,
+ .remove = oppanel_remove,
};
module_platform_driver(oppanel_driver);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23ee76bbb4aa..b8b24b6ed3fe 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -278,7 +278,7 @@ static void crng_reseed(struct work_struct *work)
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
- * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
@@ -290,7 +290,7 @@ static void crng_reseed(struct work_struct *work)
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
- smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work)
* key value, at index 4, so the state should always be zeroed out
* immediately after using in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
- u32 chacha_state[CHACHA_STATE_WORDS],
+ struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
@@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
- memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
- memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
@@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
@@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
static void _get_random_bytes(void *buf, size_t len)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
@@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len)
return;
first_block_len = min_t(size_t, 32, len);
- crng_make_state(chacha_state, buf, first_block_len);
+ crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
- chacha20_block(chacha_state, tmp);
+ chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
- chacha20_block(chacha_state, buf);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, buf);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
}
/*
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
@@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
- crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+ CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
- ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, block);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, block);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
@@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
memzero_explicit(block, sizeof(block));
out_zero_chacha:
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
@@ -726,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -743,14 +745,14 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -1311,9 +1313,9 @@ static void __cold try_to_generate_entropy(void)
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
- * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ * executing by checking timer_delete_sync_try(), before queueing the next one.
*/
- if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
@@ -1352,8 +1354,8 @@ static void __cold try_to_generate_entropy(void)
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
- del_timer_sync(&stack->timer);
- destroy_timer_on_stack(&stack->timer);
+ timer_delete_sync(&stack->timer);
+ timer_destroy_on_stack(&stack->timer);
}
@@ -1466,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
@@ -1665,7 +1667,7 @@ static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 0f8185e541ed..677bb5ac950a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -1268,12 +1269,12 @@ static void sonypi_display_info(void)
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
+ str_on_off(fnkeyinit),
+ str_on_off(camera),
+ str_on_off(compat),
mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
+ str_on_off(useinput),
+ str_on_off(SONYPI_ACPI_ACTIVE));
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
@@ -1467,7 +1468,7 @@ static struct platform_driver sonypi_driver = {
.pm = SONYPI_PM,
},
.probe = sonypi_probe,
- .remove_new = sonypi_remove,
+ .remove = sonypi_remove,
.shutdown = sonypi_shutdown,
};
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 377bebf6c925..b381ea7e85d2 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -42,7 +42,7 @@
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <asm/io.h> /* inb/outb */
#include <linux/uaccess.h>
@@ -742,7 +742,7 @@ static ssize_t store_reset (struct device *d,
static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
-static struct attribute *tlclk_sysfs_entries[] = {
+static struct attribute *tlclk_attrs[] = {
&dev_attr_current_ref.attr,
&dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
@@ -766,13 +766,9 @@ static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_reset.attr,
NULL
};
+ATTRIBUTE_GROUPS(tlclk);
-static const struct attribute_group tlclk_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = tlclk_sysfs_entries,
-};
-
-static struct platform_device *tlclk_device;
+static struct faux_device *tlclk_device;
static int __init tlclk_init(void)
{
@@ -817,24 +813,13 @@ static int __init tlclk_init(void)
goto out3;
}
- tlclk_device = platform_device_register_simple("telco_clock",
- -1, NULL, 0);
- if (IS_ERR(tlclk_device)) {
- printk(KERN_ERR "tlclk: platform_device_register failed.\n");
- ret = PTR_ERR(tlclk_device);
+ tlclk_device = faux_device_create_with_groups("telco_clock", NULL, NULL, tlclk_groups);
+ if (!tlclk_device) {
+ ret = -ENODEV;
goto out4;
}
- ret = sysfs_create_group(&tlclk_device->dev.kobj,
- &tlclk_attribute_group);
- if (ret) {
- printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
- goto out5;
- }
-
return 0;
-out5:
- platform_device_unregister(tlclk_device);
out4:
misc_deregister(&tlclk_miscdev);
out3:
@@ -848,13 +833,12 @@ out1:
static void __exit tlclk_cleanup(void)
{
- sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
- platform_device_unregister(tlclk_device);
+ faux_device_destroy(tlclk_device);
misc_deregister(&tlclk_miscdev);
unregister_chrdev(tlclk_major, "telco_clock");
release_region(TLCLK_BASE, 8);
- del_timer_sync(&switchover_timer);
+ timer_delete_sync(&switchover_timer);
kfree(alarm_events);
}
@@ -872,7 +856,7 @@ static void switchover_timeout(struct timer_list *unused)
}
/* Alarm processing is done, wake up read task */
- del_timer(&switchover_timer);
+ timer_delete(&switchover_timer);
got_event = 1;
wake_up(&wq);
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index cf0be8a7939d..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
@@ -162,7 +163,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
- depends on PPC64 || HAS_IOPORT_MAP
+ depends on HAS_IOPORT_MAP
depends on HAS_IOPORT
help
If you have a TPM security chip from Atmel say Yes and it
@@ -189,6 +190,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_LOONGSON
+ tristate "Loongson TPM Interface"
+ depends on MFD_LOONGSON_SE
+ help
+ If you want to make Loongson TPM support available, say Yes and
+ it will be accessible from within Linux. To compile this
+ driver as a module, choose M here; the module will be called
+ tpm_loongson.
+
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
@@ -210,6 +220,15 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+config TCG_ARM_CRB_FFA
+ tristate "TPM CRB over Arm FF-A Transport"
+ depends on ARM_FFA_TRANSPORT && TCG_CRB
+ default TCG_CRB
+ help
+ If the Arm FF-A transport is used to access the TPM, say Yes.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_crb_ffa.
+
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
@@ -225,5 +244,15 @@ config TCG_FTPM_TEE
help
This driver proxies for firmware TPM running in TEE.
+config TCG_SVSM
+ tristate "SNP SVSM vTPM interface"
+ depends on AMD_MEM_ENCRYPT
+ help
+ This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest
+ OS can use to discover and talk to a vTPM emulated by the Secure VM
+ Service Module (SVSM) in the guest context, but at a more privileged
+ level (usually VMPL0). To compile this driver as a module, choose M
+ here; the module will be called tpm_svsm.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9bb142c75243..5b5cdc0d32e4 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -42,5 +42,8 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
+obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
+obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 4c0bbba64ee5..691813d2a5a2 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode,
struct tpm_chip *chip;
inode_lock(inode);
- if (!inode->i_private) {
+ if (!inode->i_nlink) {
inode_unlock(inode);
return -ENODEV;
}
@@ -105,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip)
void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
- unsigned int cnt;
+ struct dentry *dentry;
int log_version;
int rc = 0;
@@ -117,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
return;
log_version = rc;
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ chip->bios_dir = securityfs_create_dir(name, NULL);
/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
* compiled out. The caller should ignore the ENODEV return code.
*/
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
+ if (IS_ERR(chip->bios_dir))
+ return;
chip->bin_log_seqops.chip = chip;
if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
@@ -135,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
&tpm1_binary_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->bin_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
@@ -150,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
chip->ascii_log_seqops.seqops =
&tpm1_ascii_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->ascii_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
}
return;
err:
- chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
+ securityfs_remove(chip->bios_dir);
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index 930fe43d5daf..92cec9722ee4 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -24,16 +24,10 @@
static int tpm_read_log_memory_region(struct tpm_chip *chip)
{
- struct device_node *node;
struct resource res;
int rc;
- node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
- if (!node)
- return -ENODEV;
-
- rc = of_address_to_resource(node, 0, &res);
- of_node_put(node);
+ rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res);
if (rc)
return rc;
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 12ee42a31c71..e7913b2853d5 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
(unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -EFAULT;
- }
+ if (!eventname)
+ return -ENOMEM;
/* 1st: PCR */
seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index c0771980bc2f..2ed7815e4899 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
* send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
- size_t len)
+ size_t bufsiz, size_t len)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u32 status, i, size, ordinal;
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index cad0048bcc3c..dc882fc9fa9e 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -147,6 +147,26 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
/**
+ * tpm_buf_append_handle() - Add a handle
+ * @chip: &tpm_chip instance
+ * @buf: &tpm_buf instance
+ * @handle: a TPM object handle
+ *
+ * Add a handle to the buffer, and increase the count tracking the number of
+ * handles in the command buffer. Works only for command buffers.
+ */
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle)
+{
+ if (buf->flags & TPM_BUF_TPM2B) {
+ dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n");
+ return;
+ }
+
+ tpm_buf_append_u32(buf, handle);
+ buf->handles++;
+}
+
+/**
* tpm_buf_read() - Read from a TPM buffer
* @buf: &tpm_buf instance
* @offset: offset within the buffer
@@ -181,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
*/
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
{
- u8 value;
+ u8 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -198,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
*/
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
{
- u16 value;
+ u16 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -215,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
*/
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
{
- u32 value;
+ u32 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 854546000c92..e25daf2396d3 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
goto out_ops;
mutex_lock(&chip->tpm_mutex);
+
+ /* tpm_chip_start() may issue IO that is denied while suspended */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ goto out_lock;
+
rc = tpm_chip_start(chip);
if (rc)
goto out_lock;
@@ -300,6 +305,7 @@ int tpm_class_shutdown(struct device *dev)
down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
if (!tpm_chip_start(chip)) {
+ tpm2_end_auth_session(chip);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
@@ -525,10 +531,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
- return 0;
-
return tpm_get_random(chip, data, max);
}
@@ -674,6 +676,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ tpm2_end_auth_session(chip);
+ tpm_put_ops(chip);
+ }
+#endif
+
tpm_del_legacy_sysfs(chip);
if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c3fbbf4d3db7..f2a5e09257dd 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
struct tpm_header *header = (void *)buf;
ssize_t ret, len;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_end_auth_session(chip);
+
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/* If the command is not implemented by the TPM, synthesize a
* response with a TPM2_RC_COMMAND_CODE return for user-space.
@@ -88,7 +91,7 @@ out:
static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = from_timer(priv, t, user_read_timer);
+ struct file_priv *priv = timer_container_of(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
@@ -157,7 +160,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
out:
if (!priv->response_length) {
*off = 0;
- del_timer_sync(&priv->user_read_timer);
+ timer_delete_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
}
mutex_unlock(&priv->buffer_mutex);
@@ -264,7 +267,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
void tpm_common_release(struct file *file, struct file_priv *priv)
{
flush_work(&priv->async_work);
- del_timer_sync(&priv->user_read_timer);
+ timer_delete_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->response_length = 0;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 5da134f12c9a..c9f173001d0e 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,12 +52,43 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+static void tpm_chip_cancel(struct tpm_chip *chip)
+{
+ if (!chip->ops->cancel)
+ return;
+
+ chip->ops->cancel(chip);
+}
+
+static u8 tpm_chip_status(struct tpm_chip *chip)
+{
+ if (!chip->ops->status)
+ return 0;
+
+ return chip->ops->status(chip);
+}
+
+static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ if (!chip->ops->req_canceled)
+ return false;
+
+ return chip->ops->req_canceled(chip, status);
+}
+
+static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip)
+{
+ u8 status_masked = status & chip->ops->req_complete_mask;
+
+ return status_masked == chip->ops->req_complete_val;
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -82,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return -E2BIG;
}
- rc = chip->ops->send(chip, buf, count);
+ rc = chip->ops->send(chip, buf, bufsiz, count);
if (rc < 0) {
if (rc != -EPIPE)
dev_err(&chip->dev,
@@ -90,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return rc;
}
- /* A sanity check. send() should just return zero on success e.g.
- * not the command length.
+ /*
+ * Synchronous devices return the response directly during the send()
+ * call in the same buffer.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_SYNC) {
+ len = rc;
+ rc = 0;
+ goto out_sync;
+ }
+
+ /*
+ * A sanity check. send() of asynchronous devices should just return
+ * zero on success, i.e. not the command length.
*/
if (rc > 0) {
dev_warn(&chip->dev,
@@ -104,12 +146,11 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
- u8 status = chip->ops->status(chip);
- if ((status & chip->ops->req_complete_mask) ==
- chip->ops->req_complete_val)
+ u8 status = tpm_chip_status(chip);
+ if (tpm_transmit_completed(status, chip))
goto out_recv;
- if (chip->ops->req_canceled(chip, status)) {
+ if (tpm_chip_req_canceled(chip, status)) {
dev_err(&chip->dev, "Operation Canceled\n");
return -ECANCELED;
}
@@ -118,7 +159,14 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
- chip->ops->cancel(chip);
+ /*
+ * Check for completion one more time, just in case the device reported
+ * it while the driver was sleeping in the busy loop above.
+ */
+ if (tpm_transmit_completed(tpm_chip_status(chip), chip))
+ goto out_recv;
+
+ tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -127,7 +175,10 @@ out_recv:
if (len < 0) {
rc = len;
dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
- } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ return rc;
+ }
+out_sync:
+ if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
rc = -EFAULT;
return rc ? rc : len;
@@ -370,6 +421,13 @@ int tpm_pm_suspend(struct device *dev)
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc) {
+ /* Can be safely set outside the locks, as no action can race: */
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
goto suspended;
@@ -377,19 +435,19 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- rc = tpm_try_get_ops(chip);
- if (!rc) {
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_STATE);
- else
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
-
- tpm_put_ops(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ tpm2_end_auth_session(chip);
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ goto suspended;
}
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
suspended:
chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ tpm_put_ops(chip);
+out:
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7bb87fa5f7a1..2726bd38e5ac 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 1e856259219e..7d77f6fbc152 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -14,6 +14,10 @@
#include "tpm.h"
#include <crypto/hash_info.h>
+static bool disable_pcr_integrity;
+module_param(disable_pcr_integrity, bool, 0444);
+MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
+
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
@@ -24,120 +28,57 @@ static struct tpm2_hash tpm2_hash_map[] = {
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Returns the maximum amount of time, in jiffies, that the kernel
+ * expects the chip to take for the given ordinal.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
@@ -232,18 +173,26 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- rc = tpm2_start_auth_session(chip);
- if (rc)
- return rc;
+ if (!disable_pcr_integrity) {
+ rc = tpm2_start_auth_session(chip);
+ if (rc)
+ return rc;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
if (rc) {
- tpm2_end_auth_session(chip);
+ if (!disable_pcr_integrity)
+ tpm2_end_auth_session(chip);
return rc;
}
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
- tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ if (!disable_pcr_integrity) {
+ tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ } else {
+ tpm_buf_append_handle(chip, &buf, pcr_idx);
+ tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ }
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -253,9 +202,11 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity)
+ tpm_buf_fill_hmac_session(chip, &buf);
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
- rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ if (!disable_pcr_integrity)
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
@@ -345,7 +296,6 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
- tpm2_end_auth_session(chip);
return total ? total : -EIO;
out:
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 511c67061728..6d03c224e6b2 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
- * tpm2_start_auth_session() which allocates the opaque auth structure
- * and gets a session from the TPM. This must be called before
- * any of the following functions. The session is protected by a
- * session_key which is derived from a random salt value
- * encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -74,8 +69,8 @@
#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
@@ -237,9 +232,7 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_u32(buf, handle);
- /* count the number of handles in the upper bits of flags */
- buf->handles++;
+ tpm_buf_append_handle(chip, buf, handle);
return;
}
@@ -272,6 +265,31 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
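+/*
+ * Append a plain TPM2_RS_PW password authorization area: the auth handle,
+ * an empty nonce, empty attributes and the passphrase itself.
+ */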
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase, int passphrase_len)
+{
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphrase_len;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphrase_len);
+ tpm_buf_append(buf, passphrase, passphrase_len);
+}
+
/**
* tpm_buf_append_hmac_session() - Append a TPM session element
* @chip: the TPM chip structure
@@ -309,30 +327,15 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- /* offset tells us where the sessions area begins */
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- u32 len = 9 + passphrase_len;
-
- if (tpm_buf_length(buf) != offset) {
- /* not the first session so update the existing length */
- len += get_unaligned_be32(&buf->data[offset]);
- put_unaligned_be32(len, &buf->data[offset]);
- } else {
- tpm_buf_append_u32(buf, len);
- }
- /* auth handle */
- tpm_buf_append_u32(buf, TPM2_RS_PW);
- /* nonce */
- tpm_buf_append_u16(buf, 0);
- /* attributes */
- tpm_buf_append_u8(buf, 0);
- /* passphrase */
- tpm_buf_append_u16(buf, passphrase_len);
- tpm_buf_append(buf, passphrase, passphrase_len);
+ tpm_buf_append_auth(chip, buf, attributes, passphrase,
+ passphrase_len);
return;
}
#ifdef CONFIG_TCG_TPM2_HMAC
+ /* The first write to /dev/tpm{rm0} will flush the session. */
+ attributes |= TPM2_SA_CONTINUE_SESSION;
+
/*
* The Architecture Guide requires us to strip trailing zeros
* before computing the HMAC
@@ -382,51 +385,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
- */
-static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
- }
- sha256_update(sctx, pad, sizeof(pad));
-}
-
-static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
- u8 *out)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
- }
-
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
-
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
-}
-
-/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
* otherwise standard tpm2_KDFa. Note output is in bytes not bits.
*/
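Spelled out, each iteration of the rewritten loop below produces one KDFa block per TPM 2.0 Part 1; a sketch of the per-block computation, assuming SHA-256 throughout:

    block(i) = HMAC-SHA256(key, be32(i) || label || 0x00 || u || v || be32(bytes * 8))

The 0x00 terminator comes from hashing strlen(label) + 1 bytes, and the trailing argument is the requested output length in bits, which is why the comment above stresses that the function itself counts bytes.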
@@ -437,16 +395,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_state sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
@@ -464,7 +422,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
u8 *out)
{
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
/*
* this should be an iterative counter, but because we know
* we're only taking 32 bytes for the point using a sha256
@@ -484,7 +442,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
sha256_final(&sctx, out);
}
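Since only EC_PT_SZ (32) bytes are ever taken and SHA-256 emits exactly 32, the KDFe counter stays at 1 and the derivation collapses to a single digest; a sketch of what the function computes, per the TPM 2.0 Part 1 KDFe construction:

    out = SHA-256(be32(1) || z || str || 0x00 || pt_u || pt_v)

with z the shared secret and pt_u/pt_v the party U and party V public-point x-coordinates, each EC_PT_SZ bytes.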
-static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
+ struct tpm2_auth *auth)
{
struct crypto_kpp *kpp;
struct kpp_request *req;
@@ -543,7 +502,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
kpp_request_set_input(req, s, EC_PT_SZ*2);
- sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ sg_init_one(d, auth->salt, EC_PT_SZ);
kpp_request_set_output(req, d, EC_PT_SZ);
crypto_kpp_compute_shared_secret(req);
kpp_request_free(req);
@@ -554,8 +513,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
* This works because KDFe fully consumes the secret before it
* writes the salt
*/
- tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
- chip->auth->salt);
+ tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
out:
crypto_free_kpp(kpp);
@@ -589,7 +547,8 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 *hmac = NULL;
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
if (!auth)
return;
@@ -701,14 +660,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
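The rewritten tail computes the same command authHMAC as before, now through the library HMAC API; as an equation, per the TPM 2.0 session scheme this file implements:

    authHMAC = HMAC-SHA256(session_key || passphrase,
                           cpHash || nonceCaller || nonceTPM || sessionAttributes)

where cpHash is the SHA-256 accumulated in sctx over the command code, handle names and parameters, and the two-part key works because the passphrase is stored contiguously after session_key in struct tpm2_auth.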
@@ -747,7 +706,8 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
off_t offset_s, offset_p;
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
@@ -817,21 +777,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
@@ -853,7 +812,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
if (rc)
/* manually close the session if it wasn't consumed */
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+
+ kfree_sensitive(auth);
+ chip->auth = NULL;
} else {
/* reset for next use */
auth->session = TPM_HEADER_SIZE;
@@ -881,7 +842,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip)
return;
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+ kfree_sensitive(auth);
+ chip->auth = NULL;
}
EXPORT_SYMBOL(tpm2_end_auth_session);
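The move from memzero_explicit() to kfree_sensitive() pairs with chip->auth now being allocated per session: the helper zeroizes the allocation before freeing it, so the session key, salt and passphrase do not linger in freed slab memory. Approximately, for a slab pointer p:

    /* rough semantics of kfree_sensitive(p) */
    if (p) {
            memzero_explicit((void *)p, ksize((void *)p));
            kfree(p);
    }

Note that the full allocated size reported by ksize() is wiped, not just sizeof(*auth), and the real helper also tolerates a NULL pointer.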
@@ -915,59 +877,67 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
{
- int rc;
unsigned int offset = 0; /* dummy offset for null seed context */
u8 name[SHA256_DIGEST_SIZE + 2];
+ u32 tmp_null_key;
+ int rc;
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
- null_key);
- if (rc != -EINVAL)
- return rc;
+ &tmp_null_key);
+ if (rc != -EINVAL) {
+ if (!rc)
+ *null_key = tmp_null_key;
+ goto err;
+ }
- /* an integrity failure may mean the TPM has been reset */
- dev_err(&chip->dev, "NULL key integrity failure!\n");
- /* check the null name against what we know */
- tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
- if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
- /* name unchanged, assume transient integrity failure */
- return rc;
- /*
- * Fatal TPM failure: the NULL seed has actually changed, so
- * the TPM must have been illegally reset. All in-kernel TPM
- * operations will fail because the NULL primary can't be
- * loaded to salt the sessions, but disable the TPM anyway so
- * userspace programmes can't be compromised by it.
- */
- dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
- chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ /* Try to re-create null key, given the integrity failure: */
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
+ if (rc)
+ goto err;
+
+ /* Return null key if the name has not been changed: */
+ if (!memcmp(name, chip->null_key_name, sizeof(name))) {
+ *null_key = tmp_null_key;
+ return 0;
+ }
+
+ /* Deduce TPM interference from the name change: */
+ dev_err(&chip->dev, "null key integrity check failed\n");
+ tpm2_flush_context(chip, tmp_null_key);
+err:
+ if (rc) {
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ rc = -ENODEV;
+ }
return rc;
}
/**
- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
- * @chip: the TPM chip structure to create the session with
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
*
- * This function loads the NULL seed from its saved context and starts
- * an authentication session on the null seed, fills in the
- * @chip->auth structure to contain all the session details necessary
- * for performing the HMAC, encrypt and decrypt operations and
- * returns. The NULL seed is flushed before this function returns.
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the return.
*
- * Return: zero on success or actual error encountered.
+ * Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
+ struct tpm2_auth *auth;
struct tpm_buf buf;
- struct tpm2_auth *auth = chip->auth;
- int rc;
u32 null_key;
+ int rc;
- if (!auth) {
- dev_warn_once(&chip->dev, "auth session is not active\n");
+ if (chip->auth) {
+ dev_dbg_once(&chip->dev, "auth session is active\n");
return 0;
}
+ auth = kzalloc(sizeof(*auth), GFP_KERNEL);
+ if (!auth)
+ return -ENOMEM;
+
rc = tpm2_load_null(chip, &null_key);
if (rc)
goto out;
@@ -988,7 +958,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
/* append encrypted salt and squirrel away unencrypted in auth */
- tpm_buf_append_salt(&buf, chip);
+ tpm_buf_append_salt(&buf, chip, auth);
/* session type (HMAC, audit or policy) */
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
@@ -1002,7 +972,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
- rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);
if (rc == TPM2_RC_SUCCESS)
@@ -1010,10 +980,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_destroy(&buf);
- if (rc)
- goto out;
+ if (rc == TPM2_RC_SUCCESS) {
+ chip->auth = auth;
+ return 0;
+ }
- out:
+out:
+ kfree_sensitive(auth);
return rc;
}
EXPORT_SYMBOL(tpm2_start_auth_session);
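For orientation, a sketch of how an in-kernel caller drives the session API end to end after this change (helpers as in this file; error handling elided, command details illustrative):

    rc = tpm2_start_auth_session(chip);        /* allocates chip->auth */
    tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
    tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT, NULL, 0);
    tpm_buf_append_u16(&buf, num_bytes);
    tpm_buf_fill_hmac_session(chip, &buf);     /* cpHash + authHMAC */
    rc = tpm_transmit_cmd(chip, &buf, offset, "get random");
    rc = tpm_buf_check_hmac_response(chip, &buf, rc);

The final call verifies the response HMAC and, when the session is not continued, now releases chip->auth with kfree_sensitive() rather than merely wiping it in place.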
@@ -1347,20 +1320,22 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
*
* Derive and context save the null primary and allocate memory in the
* struct tpm_chip for the authorizations.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
*/
int tpm2_sessions_init(struct tpm_chip *chip)
{
int rc;
rc = tpm2_create_null_primary(chip);
- if (rc)
- dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
-
- chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
- if (!chip->auth)
- return -ENOMEM;
+ if (rc) {
+ dev_err(&chip->dev, "null key creation failed with %d\n", rc);
+ return rc;
+ }
return rc;
}
-EXPORT_SYMBOL(tpm2_sessions_init);
#endif /* CONFIG_TCG_TPM2_HMAC */
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 9fb2defa9dc4..f25faf468bba 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -15,7 +15,66 @@
*/
#include "tpm.h"
-#include "tpm_atmel.h"
+
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset))
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + (offset))
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base + 1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+/* Determine where to talk to device */
+static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
/* write status bits */
enum tpm_atmel_write_status {
@@ -89,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
int i;
@@ -142,7 +202,6 @@ static void atml_plat_remove(void)
tpm_chip_unregister(chip);
if (priv->have_region)
atmel_release_region(priv->base, priv->region_size);
- atmel_put_base_addr(priv->iobase);
platform_device_unregister(pdev);
}
@@ -211,7 +270,6 @@ static int __init init_atmel(void)
err_unreg_dev:
platform_device_unregister(pdev);
err_rel_reg:
- atmel_put_base_addr(iobase);
if (have_region)
atmel_release_region(base,
region_size);
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
deleted file mode 100644
index 7ac3f69dcf0f..000000000000
--- a/drivers/char/tpm/tpm_atmel.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2005 IBM Corporation
- *
- * Authors:
- * Kylene Hall <kjhall@us.ibm.com>
- *
- * Maintained by: <tpmdd-devel@lists.sourceforge.net>
- *
- * Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
- *
- * These difference are required on power because the device must be
- * discovered through the device tree and iomap must be used to get
- * around the need for holes in the io_page_mask. This does not happen
- * automatically because the tpm is not a normal pci device and lives
- * under the root node.
- */
-
-struct tpm_atmel_priv {
- int region_size;
- int have_region;
- unsigned long base;
- void __iomem *iobase;
-};
-
-#ifdef CONFIG_PPC64
-
-#include <linux/of.h>
-
-#define atmel_getb(priv, offset) readb(priv->iobase + offset)
-#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
-#define atmel_request_region request_mem_region
-#define atmel_release_region release_mem_region
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
- iounmap(iobase);
-}
-
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- struct device_node *dn;
- unsigned long address, size;
- const unsigned int *reg;
- int reglen;
- int naddrc;
- int nsizec;
-
- dn = of_find_node_by_name(NULL, "tpm");
-
- if (!dn)
- return NULL;
-
- if (!of_device_is_compatible(dn, "AT97SC3201")) {
- of_node_put(dn);
- return NULL;
- }
-
- reg = of_get_property(dn, "reg", &reglen);
- naddrc = of_n_addr_cells(dn);
- nsizec = of_n_size_cells(dn);
-
- of_node_put(dn);
-
-
- if (naddrc == 2)
- address = ((unsigned long) reg[0] << 32) | reg[1];
- else
- address = reg[0];
-
- if (nsizec == 2)
- size =
- ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
- else
- size = reg[naddrc];
-
- *base = address;
- *region_size = size;
- return ioremap(*base, *region_size);
-}
-#else
-#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
-#define atmel_putb(val, chip, offset) \
- outb(val, atmel_get_priv(chip)->base + offset)
-#define atmel_request_region request_region
-#define atmel_release_region release_region
-/* Atmel definitions */
-enum tpm_atmel_addr {
- TPM_ATMEL_BASE_ADDR_LO = 0x08,
- TPM_ATMEL_BASE_ADDR_HI = 0x09
-};
-
-static inline int tpm_read_index(int base, int index)
-{
- outb(index, base);
- return inb(base+1) & 0xFF;
-}
-
-/* Verify this is a 1.1 Atmel TPM */
-static int atmel_verify_tpm11(void)
-{
-
- /* verify that it is an Atmel part */
- if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
- tpm_read_index(TPM_ADDR, 5) != 'T' ||
- tpm_read_index(TPM_ADDR, 6) != 'M' ||
- tpm_read_index(TPM_ADDR, 7) != 'L')
- return 1;
-
- /* query chip for its version number */
- if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
- tpm_read_index(TPM_ADDR, 0x01) != 1)
- return 1;
-
- /* This is an atmel supported part */
- return 0;
-}
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
-}
-
-/* Determine where to talk to device */
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- int lo, hi;
-
- if (atmel_verify_tpm11() != 0)
- return NULL;
-
- lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
- hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
-
- *base = (hi << 8) | lo;
- *region_size = 2;
-
- return ioport_map(*base, *region_size);
-}
-#endif
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ea085b14ab7c..ed97344f2324 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#ifdef CONFIG_ARM64
#include <linux/arm-smccc.h>
#endif
+#include "tpm_crb_ffa.h"
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -100,6 +101,8 @@ struct crb_priv {
u32 smc_func_id;
u32 __iomem *pluton_start_addr;
u32 __iomem *pluton_reply_addr;
+ u8 ffa_flags;
+ u8 ffa_attributes;
};
struct tpm2_crb_smc {
@@ -110,11 +113,30 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+/* CRB over FFA start method parameters in TCG2 ACPI table */
+struct tpm2_crb_ffa {
+ u8 flags;
+ u8 attributes;
+ u16 partition_id;
+ u8 reserved[8];
+};
+
struct tpm2_crb_pluton {
u64 start_addr;
u64 reply_addr;
};
+/*
+ * Returns true if the start method supports idle.
+ */
+static inline bool tpm_crb_has_idle(u32 start_method)
+{
+ return !(start_method == ACPI_TPM2_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
+ start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+}
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -173,9 +195,7 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -222,9 +242,7 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -255,13 +273,20 @@ static int crb_cmd_ready(struct tpm_chip *chip)
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
@@ -281,14 +306,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
static int __crb_relinquish_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
@@ -394,7 +426,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
}
#endif
-static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
@@ -423,13 +455,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
* report only ACPI start but in practice seems to require both
* CRB start, hence invoking CRB start method if hid == MSFT0101.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
- (!strcmp(priv->hid, "MSFT0101")))
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED ||
+ !strcmp(priv->hid, "MSFT0101"))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ if (priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
rc = crb_do_acpi_start(chip);
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
@@ -437,6 +469,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ }
+
if (rc)
return rc;
@@ -446,13 +483,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
static void crb_cancel(struct tpm_chip *chip)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc;
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if (((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ if ((priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) &&
crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ if (rc)
+ dev_err(&chip->dev, "FF-A Start failed\n");
+ }
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
@@ -609,8 +653,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED) {
if (iores &&
buf->control_address == iores->start +
sizeof(*priv->regs_h))
@@ -731,6 +776,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_ffa *crb_ffa;
struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
@@ -769,6 +815,27 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_CRB_WITH_ARM_FFA);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf));
+ priv->ffa_flags = crb_ffa->flags;
+ priv->ffa_attributes = crb_ffa->attributes;
+ rc = tpm_crb_ffa_init();
+ if (rc) {
+ /* If FF-A driver is not available yet, request probe retry */
+ if (rc == -ENOENT)
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+ }
+
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
dev_err(dev,
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
new file mode 100644
index 000000000000..755b77b32ea4
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+
+#define pr_fmt(fmt) "CRB_FFA: " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include "tpm_crb_ffa.h"
+
+static unsigned int busy_timeout_ms = 2000;
+
+module_param(busy_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(busy_timeout_ms,
+ "Maximum time in ms to retry before giving up on busy");
+
+/* TPM service function status codes */
+#define CRB_FFA_OK 0x05000001
+#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
+#define CRB_FFA_NOFUNC 0x8e000001
+#define CRB_FFA_NOTSUP 0x8e000002
+#define CRB_FFA_INVARG 0x8e000005
+#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006
+#define CRB_FFA_ALREADY 0x8e000009
+#define CRB_FFA_DENIED 0x8e00000a
+#define CRB_FFA_NOMEM 0x8e00000b
+
+#define CRB_FFA_VERSION_MAJOR 1
+#define CRB_FFA_VERSION_MINOR 0
+
+/* version encoding */
+#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x))))
+#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x))))
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 or AArch64 SMC calling convention with register usage
+ * as defined in FF-A specification:
+ * w0: Function ID
+ * -for 32-bit: 0x8400006F or 0x84000070
+ * -for 64-bit: 0xC400006F or 0xC4000070
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+/*
+ * Returns the version of the interface that is available
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION
+ * w5-w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5: TPM service interface version
+ * Bits[31:16]: major version
+ * Bits[15:0]: minor version
+ * w6-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been
+ * returned.
+ */
+#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
+
+/*
+ * Notifies the TPM service that a TPM command or TPM locality request is
+ * ready to be processed, and allows the TPM service to process it.
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_START
+ * w5: Start function qualifier
+ * Bits[31:8] (MBZ)
+ * Bits[7:0]
+ * 0: Notifies TPM that a command is ready to be processed
+ * 1: Notifies TPM that a locality request is ready to be processed
+ * w6: TPM locality, one of 0..4
+ * -If the start function qualifier is 0, identifies the locality
+ * from where the command originated.
+ * -If the start function qualifier is 1, identifies the locality
+ * of the locality request
+ * w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK: the TPM service has been notified successfully
+ * CRB_FFA_INVARG: one or more arguments are not valid
+ * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control
+ * data at the given TPM locality is not valid
+ * CRB_FFA_DENIED: the TPM has previously disabled locality requests and
+ * command processing at the given locality
+ */
+#define CRB_FFA_START 0x0f000201
+
+struct tpm_crb_ffa {
+ struct ffa_device *ffa_dev;
+ u16 major_version;
+ u16 minor_version;
+ /* lock to protect sending of FF-A messages: */
+ struct mutex msg_data_lock;
+ union {
+ struct ffa_send_direct_data direct_msg_data;
+ struct ffa_send_direct_data2 direct_msg_data2;
+ };
+};
+
+static struct tpm_crb_ffa *tpm_crb_ffa;
+static struct ffa_driver tpm_crb_ffa_driver;
+
+static int tpm_crb_ffa_to_linux_errno(int errno)
+{
+ int rc;
+
+ switch (errno) {
+ case CRB_FFA_OK:
+ rc = 0;
+ break;
+ case CRB_FFA_OK_RESULTS_RETURNED:
+ rc = 0;
+ break;
+ case CRB_FFA_NOFUNC:
+ rc = -ENOENT;
+ break;
+ case CRB_FFA_NOTSUP:
+ rc = -EPERM;
+ break;
+ case CRB_FFA_INVARG:
+ rc = -EINVAL;
+ break;
+ case CRB_FFA_INV_CRB_CTRL_DATA:
+ rc = -ENOEXEC;
+ break;
+ case CRB_FFA_ALREADY:
+ rc = -EEXIST;
+ break;
+ case CRB_FFA_DENIED:
+ rc = -EACCES;
+ break;
+ case CRB_FFA_NOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm_crb_ffa_init - called by the CRB driver to do any needed initialization
+ *
+ * This function is called by the tpm_crb driver during the tpm_crb
+ * driver's initialization. If the tpm_crb_ffa driver has not been
+ * probed yet, returns -ENOENT in order to force a retry. If the
+ * tpm_crb_ffa driver had been probed but failed with an error,
+ * returns -ENODEV in order to prevent further retries.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_init(void)
+{
+ int ret = 0;
+
+ if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) {
+ ret = ffa_register(&tpm_crb_ffa_driver);
+ if (ret) {
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return ret;
+ }
+ }
+
+ if (!tpm_crb_ffa)
+ ret = -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ ret = -ENODEV;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
+
+static int __tpm_crb_ffa_try_send_receive(unsigned long func_id,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
+{
+ const struct ffa_msg_ops *msg_ops;
+ int ret;
+
+ msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
+
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){
+ .data = { func_id, a0, a1, a2 },
+ };
+
+ ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data2);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+ } else {
+ tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){
+ .data1 = func_id,
+ .data2 = a0,
+ .data3 = a1,
+ .data4 = a2,
+ };
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+ }
+
+ return ret;
+}
+
+static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0,
+ unsigned long a1, unsigned long a2)
+{
+ ktime_t start, stop;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(busy_timeout_ms));
+
+ for (;;) {
+ ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2);
+ if (ret != -EBUSY)
+ break;
+
+ usleep_range(50, 100);
+ if (ktime_after(ktime_get(), stop)) {
+ dev_warn(&tpm_crb_ffa->ffa_dev->dev,
+ "Busy retry timed out\n");
+ break;
+ }
+ }
+
+ return ret;
+}
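As a worked bound on this retry loop: with the default busy_timeout_ms of 2000 and a 50-100 us sleep per attempt, a persistently busy TPM service is retried roughly 20,000-40,000 times before the deadline passes and -EBUSY is returned to the caller.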
+
+/**
+ * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service
+ * @major: Pointer to caller-allocated buffer to hold the major version
+ * number of the ABI
+ * @minor: Pointer to caller-allocated buffer to hold the minor version
+ * number of the ABI
+ *
+ * Returns the major and minor version of the ABI of the FF-A based TPM.
+ * Allows the caller to evaluate its compatibility with the version of
+ * the ABI.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+{
+ int rc;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ if (!rc) {
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ } else {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
+ }
+
+ return rc;
+}
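As a worked example of the decoding above: a version word of 0x00010000 in w5 yields CRB_FFA_MAJOR_VERSION() = 1 and CRB_FFA_MINOR_VERSION() = 0, i.e. ABI 1.0, which is what the probe path checks against CRB_FFA_VERSION_MAJOR and CRB_FFA_VERSION_MINOR.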
+
+/**
+ * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
+ * @request_type: Identifies whether the change to the CRB is in the command
+ * fields or locality fields.
+ * @locality: Specifies the locality number.
+ *
+ * Used by the CRB driver to notify the TPM service that a TPM command
+ * or a TPM locality request is ready to be processed.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_start(int request_type, int locality)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00);
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
+
+static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
+{
+ struct tpm_crb_ffa *p;
+ int rc;
+
+ /* only one instance of a TPM partition is supported */
+ if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa))
+ return -EEXIST;
+
+ tpm_crb_ffa = ERR_PTR(-ENODEV); /* set tpm_crb_ffa so we can detect probe failure */
+
+ if (!ffa_partition_supports_direct_recv(ffa_dev) &&
+ !ffa_partition_supports_direct_req2_recv(ffa_dev)) {
+ dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n");
+ return -EINVAL;
+ }
+
+ p = kzalloc(sizeof(*tpm_crb_ffa), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ tpm_crb_ffa = p;
+
+ mutex_init(&tpm_crb_ffa->msg_data_lock);
+ tpm_crb_ffa->ffa_dev = ffa_dev;
+ ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa);
+
+ /* if TPM is aarch32 use 32-bit SMCs */
+ if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC))
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ /* verify compatibility of TPM service version number */
+ rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
+ &tpm_crb_ffa->minor_version);
+ if (rc) {
+ dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc);
+ goto out;
+ }
+
+ dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version,
+ tpm_crb_ffa->minor_version);
+
+ if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
+ (tpm_crb_ffa->minor_version > 0 &&
+ tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
+ dev_warn(&ffa_dev->dev, "Incompatible ABI version\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return -EINVAL;
+}
+
+static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev)
+{
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = NULL;
+}
+
+static const struct ffa_device_id tpm_crb_ffa_device_id[] = {
+ /* 17b862a4-1806-4faf-86b3-089a58353861 */
+ { UUID_INIT(0x17b862a4, 0x1806, 0x4faf,
+ 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) },
+ {}
+};
+
+static struct ffa_driver tpm_crb_ffa_driver = {
+ .name = "ffa-crb",
+ .probe = tpm_crb_ffa_probe,
+ .remove = tpm_crb_ffa_remove,
+ .id_table = tpm_crb_ffa_device_id,
+};
+
+#ifdef MODULE
+module_ffa_driver(tpm_crb_ffa_driver);
+#endif
+
+MODULE_AUTHOR("Arm");
+MODULE_DESCRIPTION("TPM CRB FFA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
new file mode 100644
index 000000000000..d7e1344ea003
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+#ifndef _TPM_CRB_FFA_H
+#define _TPM_CRB_FFA_H
+
+#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
+int tpm_crb_ffa_init(void);
+int tpm_crb_ffa_start(int request_type, int locality);
+#else
+static inline int tpm_crb_ffa_init(void) { return 0; }
+static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
+#endif
+
+#define CRB_FFA_START_TYPE_COMMAND 0
+#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1
+
+#endif
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2ea4882251cf..4e63c30aeaf1 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
- * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
- * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @buf: the buffer to store data.
- * @count: the number of bytes to read.
- *
- * Return:
- * In case of success the number of bytes received.
- * On failure, -errno.
- */
-static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
- size_t len;
-
- len = pvt_data->resp_len;
- if (count < len) {
- dev_err(&chip->dev,
- "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
- __func__, count, len);
- return -EIO;
- }
-
- memcpy(buf, pvt_data->resp_buf, len);
- pvt_data->resp_len = 0;
-
- return len;
-}
-
-/**
- * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory
+ * and retrieve the response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @buf: the buffer to send.
- * @len: the number of bytes to send.
+ * @buf: the buffer to send and to store the response.
+ * @bufsiz: the size of the buffer.
+ * @cmd_len: the number of bytes to send.
*
* Return:
- * In case of success, returns 0.
+ * In case of success, returns the number of bytes received.
* On failure, -errno
*/
-static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
size_t resp_len;
@@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct tee_param command_params[4];
struct tee_shm *shm = pvt_data->shm;
- if (len > MAX_COMMAND_SIZE) {
+ if (cmd_len > MAX_COMMAND_SIZE) {
dev_err(&chip->dev,
"%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
- __func__, len);
+ __func__, cmd_len);
return -EIO;
}
memset(&transceive_args, 0, sizeof(transceive_args));
memset(command_params, 0, sizeof(command_params));
- pvt_data->resp_len = 0;
/* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
transceive_args = (struct tee_ioctl_invoke_arg) {
@@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
.u.memref = {
.shm = shm,
- .size = len,
+ .size = cmd_len,
.shm_offs = 0,
},
};
@@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return PTR_ERR(temp_buf);
}
memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
- memcpy(temp_buf, buf, len);
+ memcpy(temp_buf, buf, cmd_len);
command_params[1] = (struct tee_param) {
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
@@ -156,38 +129,21 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
__func__, resp_len);
return -EIO;
}
+ if (resp_len > bufsiz) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds bufsiz=%zd\n",
+ __func__, resp_len, bufsiz);
+ return -EIO;
+ }
- /* sanity checks look good, cache the response */
- memcpy(pvt_data->resp_buf, temp_buf, resp_len);
- pvt_data->resp_len = resp_len;
-
- return 0;
-}
-
-static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
-{
- /* not supported */
-}
-
-static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
-{
- return 0;
-}
+ memcpy(buf, temp_buf, resp_len);
-static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
-{
- return false;
+ return resp_len;
}
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
- .cancel = ftpm_tee_tpm_op_cancel,
- .status = ftpm_tee_tpm_op_status,
- .req_complete_mask = 0,
- .req_complete_val = 0,
- .req_canceled = ftpm_tee_tpm_req_canceled,
};
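With .recv and the status callbacks dropped, the driver leans on the synchronous-send contract implied by TPM_CHIP_FLAG_SYNC (set in the probe hunk below): the core hands .send the full buffer and expects the response written back into the same buffer, with the response length as the return value. A minimal sketch of that shape, where do_transport() is a hypothetical stand-in for the TEE invocation above:

    static int example_sync_send(struct tpm_chip *chip, u8 *buf,
                                 size_t bufsiz, size_t cmd_len)
    {
            /* hypothetical transport: consumes cmd_len bytes, returns response length */
            size_t resp_len = do_transport(buf, cmd_len);

            if (resp_len > bufsiz)
                    return -EIO;
            /* buf now holds the response */
            return resp_len;
    }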
/*
@@ -271,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev)
}
pvt_data->chip = chip;
- pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC;
/* Create a character device for the fTPM */
rc = tpm_chip_register(pvt_data->chip);
@@ -362,11 +318,11 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
- .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ .of_match_table = of_ftpm_tee_ids,
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
- .remove_new = ftpm_plat_tee_remove,
+ .remove = ftpm_plat_tee_remove,
};
/* UUID of the fTPM TA */
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index f98daa7bf68c..8d5c3f0d2879 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -21,18 +21,13 @@
/**
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
- * @state: internal state
* @session: fTPM TA session identifier.
- * @resp_len: cached response buffer length.
- * @resp_buf: cached response buffer.
* @ctx: TEE context handler.
* @shm: Memory pool shared with fTPM TA in TEE.
*/
struct ftpm_tee_private {
struct tpm_chip *chip;
u32 session;
- size_t resp_len;
- u8 resp_buf[MAX_RESPONSE_SIZE];
struct tee_context *ctx;
struct tee_shm *shm;
};
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d1d27fdfe523..4f229656a8e2 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -37,7 +37,8 @@ struct priv_data {
u8 buffer[sizeof(struct tpm_header) + 25];
};
-static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 81d8a78dc655..bdf1f329a679 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -514,7 +514,8 @@ out:
return size;
}
-static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, status;
ssize_t burstcnt;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 3c3ee5f551db..d44903b29929 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -350,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct device *dev = chip->dev.parent;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 1e5b107d1f3b..4734a69406ce 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev)
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
- * @count: size of buffer
+ * @bufsiz: size of the buffer
+ * @count: length of the command
*
* Return:
* 0 on success,
* -errno on error
*/
-static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
bool retry = true;
@@ -450,6 +452,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops tpm_ibmvtpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.recv = tpm_ibmvtpm_recv,
.send = tpm_ibmvtpm_send,
.cancel = tpm_ibmvtpm_cancel,
@@ -690,20 +693,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (!strcmp(id->compat, "IBM,vtpm20"))
chip->flags |= TPM_CHIP_FLAG_TPM2;
- rc = tpm_get_timeouts(chip);
- if (rc)
- goto init_irq_cleanup;
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = tpm2_get_cc_attrs_tbl(chip);
- if (rc)
- goto init_irq_cleanup;
-
- rc = tpm2_sessions_init(chip);
- if (rc)
- goto init_irq_cleanup;
- }
-
return tpm_chip_register(chip);
init_irq_cleanup:
do {
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2d2ae37153ba..7638b65b851b 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -312,7 +312,8 @@ recv_begin:
return -EIO;
}
-static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
int i;
int ret;
diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c
new file mode 100644
index 000000000000..9e50250763d1
--- /dev/null
+++ b/drivers/char/tpm/tpm_loongson.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/device.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "tpm.h"
+
+struct tpm_loongson_cmd {
+ u32 cmd_id;
+ u32 data_off;
+ u32 data_len;
+ u32 pad[5];
+};
+
+static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret;
+
+ if (cmd_ret->data_len > count)
+ return -EIO;
+
+ memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len);
+
+ return cmd_ret->data_len;
+}
+
+static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd = tpm_engine->command;
+
+ if (count > tpm_engine->buffer_size)
+ return -E2BIG;
+
+ cmd->data_len = count;
+ memcpy(tpm_engine->data_buffer, buf, count);
+
+ return loongson_se_send_engine_cmd(tpm_engine);
+}
+
+static const struct tpm_class_ops tpm_loongson_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_loongson_recv,
+ .send = tpm_loongson_send,
+};
+
+static int tpm_loongson_probe(struct platform_device *pdev)
+{
+ struct loongson_se_engine *tpm_engine;
+ struct device *dev = &pdev->dev;
+ struct tpm_loongson_cmd *cmd;
+ struct tpm_chip *chip;
+
+ tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM);
+ if (!tpm_engine)
+ return -ENODEV;
+ cmd = tpm_engine->command;
+ cmd->cmd_id = SE_CMD_TPM;
+ cmd->data_off = tpm_engine->buffer_off;
+
+ chip = tpmm_chip_alloc(dev, &tpm_loongson_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ;
+ dev_set_drvdata(&chip->dev, tpm_engine);
+
+ return tpm_chip_register(chip);
+}
+
+static struct platform_driver tpm_loongson = {
+ .probe = tpm_loongson_probe,
+ .driver = {
+ .name = "tpm_loongson",
+ },
+};
+module_platform_driver(tpm_loongson);
+
+MODULE_ALIAS("platform:tpm_loongson");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Loongson TPM driver");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 0f62bbc940da..879ac88f5783 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return size;
}
-static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 data;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index bc7b1b4501b3..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A spinlock to protect access to the cache from concurrent reads */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -52,7 +66,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev,
{
struct tpm_chip *chip = to_tpm_chip(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+ return sysfs_emit(buf, "%s\n", chip->ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
@@ -87,12 +101,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
else {
req = obj->package.elements[1].integer.value;
if (tpm_ppi_req_has_parameter(req))
- size = scnprintf(buf, PAGE_SIZE,
- "%llu %llu\n", req,
- obj->package.elements[2].integer.value);
+ size = sysfs_emit(buf, "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
else
- size = scnprintf(buf, PAGE_SIZE,
- "%llu\n", req);
+ size = sysfs_emit(buf, "%llu\n", req);
}
} else if (obj->package.count == 2 &&
obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
@@ -100,8 +112,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
if (obj->package.elements[0].integer.value)
size = -EFAULT;
else
- size = scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[1].integer.value);
+ size = sysfs_emit(buf, "%llu\n",
+ obj->package.elements[1].integer.value);
}
ACPI_FREE(obj);
@@ -211,10 +223,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
}
if (ret < ARRAY_SIZE(info) - 1)
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]);
else
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
- info[ARRAY_SIZE(info)-1]);
+ status = sysfs_emit(buf, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info) - 1]);
return status;
}
@@ -255,23 +267,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
res = ret_obj[2].integer.value;
if (req) {
if (res == 0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0: Success");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0: Success");
else if (res == 0xFFFFFFF0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF0: User Abort");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
else if (res == 0xFFFFFFF1)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF1: BIOS Failure");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
else if (res >= 1 && res <= 0x00000FFF)
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Corresponding TPM error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
else
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Error");
} else {
- status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- req, "No Recent Request");
+ status = sysfs_emit(buf, "%llu: %s\n",
+ req, "No Recent Request");
}
cleanup:
@@ -279,46 +291,33 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
- char *str = buf;
+ int len = 0;
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
- return str - buf;
+ return len;
}
static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
@@ -326,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -336,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
new file mode 100644
index 000000000000..f5ba0f64850b
--- /dev/null
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Driver for the vTPM defined by the AMD SVSM spec [1].
+ *
+ * The specification defines a protocol that a SEV-SNP guest OS can use to
+ * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM)
+ * in the guest context, but at a more privileged level (usually VMPL0).
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/tpm_svsm.h>
+
+#include <asm/sev.h>
+
+#include "tpm.h"
+
+struct tpm_svsm_priv {
+ void *buffer;
+};
+
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ int ret;
+
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len);
+ if (ret)
+ return ret;
+
+ /*
+ * The SVSM call uses the same buffer for the command and for the
+ * response, so after this call, the buffer will contain the response.
+ *
+ * Note: we have to use an internal buffer because the device in SVSM
+ * expects the svsm_vtpm header + data to be physically contiguous.
+ */
+ ret = snp_svsm_vtpm_send_command(priv->buffer);
+ if (ret)
+ return ret;
+
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz);
+}
+
+static struct tpm_class_ops tpm_chip_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .send = tpm_svsm_send,
+};
+
+static int __init tpm_svsm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tpm_svsm_priv *priv;
+ struct tpm_chip *chip;
+ int err;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER
+ * in tpm_svsm.h).
+ */
+ priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!priv->buffer)
+ return -ENOMEM;
+
+ chip = tpmm_chip_alloc(dev, &tpm_chip_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+
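+	/* No .recv callback: the SVSM transport is synchronous and .send fills buf. */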
+ chip->flags |= TPM_CHIP_FLAG_SYNC;
+ err = tpm2_probe(chip);
+ if (err)
+ return err;
+
+ err = tpm_chip_register(chip);
+ if (err)
+ return err;
+
+ dev_info(dev, "SNP SVSM vTPM %s device\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2");
+
+ return 0;
+}
+
+static void __exit tpm_svsm_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = platform_get_drvdata(pdev);
+
+ tpm_chip_unregister(chip);
+}
+
+/*
+ * tpm_svsm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver tpm_svsm_driver __refdata = {
+ .remove = __exit_p(tpm_svsm_remove),
+ .driver = {
+ .name = "tpm-svsm",
+ },
+};
+
+module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe);
+
+MODULE_DESCRIPTION("SNP SVSM vTPM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tpm-svsm");
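
A note on the TPM_CHIP_FLAG_SYNC contract used above: the driver registers no
.recv callback, so the single .send invocation both transmits the command and
leaves the response in the caller's buffer. A rough sketch of what the TPM
core consequently does (simplified; this assumes the sync path treats a
positive return value as the response length):

	/* core side, heavily simplified */
	rc = chip->ops->send(chip, buf, bufsiz, cmd_len);
	if (rc < 0)
		return rc;
	resp_len = rc;	/* buf already holds the TPM response */
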
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 2f7326d297ad..9aa230a63616 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -356,7 +356,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match);
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
- .remove_new = tpm_tis_plat_remove,
+ .remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdef214b9f6b..8954a8660ffc 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -114,11 +114,10 @@ again:
return 0;
/* process status changes without irq support */
do {
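+		/* Sleep first so the TPM has time to update the status before we poll. */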
+ usleep_range(priv->timeout_min, priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
- usleep_range(priv->timeout_min,
- priv->timeout_max);
} while (time_before(jiffies, stop));
return -ETIME;
}
@@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc >= 0)
/* Data transfer done successfully */
break;
- else if (rc != -EIO)
+ else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
return rc;
+
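+		/* Recoverable errors (-EAGAIN/-EIO): back off briefly, then retry. */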
+ usleep_range(priv->timeout_min, priv->timeout_max);
}
/* go and do it */
@@ -573,7 +580,8 @@ out_err:
return rc;
}
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, irq;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -970,8 +978,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
@@ -1144,6 +1152,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
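+	/* Some Infineon parts need retries while TPM_STS_VALID settles. */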
+ if (priv->manufacturer_id == TPM_VID_IFX)
+ set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
+
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 690ad8e9b731..6c3aa480396b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
@@ -89,6 +89,7 @@ enum tpm_tis_flags {
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
+ TPM_TIS_STATUS_VALID_RETRY = 4,
};
struct tpm_tis_data {
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index adf22992138e..fc6891a0b693 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -17,6 +17,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -30,11 +31,13 @@
#define TPM_CR50_MAX_BUFSIZE 64
#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
-#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
-#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */
+#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */
+#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for Ti50 OT */
#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+#define TPM_CR50_I2C_DEFAULT_LOC 0
#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
@@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
};
int rc;
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
priv->buf[0] = addr;
memcpy(priv->buf + 1, buffer, len);
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -285,25 +282,26 @@ out:
}
/**
- * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * tpm_cr50_check_locality() - Verify whether the required TPM locality is active.
* @chip: A TPM chip.
+ * @loc: Locality to be verified
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_check_locality(struct tpm_chip *chip)
+static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc)
{
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
u8 buf;
int rc;
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
return rc;
if ((buf & mask) == mask)
- return 0;
+ return loc;
return -EIO;
}
@@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip)
/**
* tpm_cr50_release_locality() - Release TPM locality.
* @chip: A TPM chip.
- * @force: Flag to force release if set.
+ * @loc: Locality to be released
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
*/
-static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
- u8 addr = TPM_I2C_ACCESS(0);
+ u8 addr = TPM_I2C_ACCESS(loc);
u8 buf;
+ int rc;
- if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
- return;
+ rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf));
+ if (rc < 0)
+ goto unlock_out;
- if (force || (buf & mask) == mask) {
+ if ((buf & mask) == mask) {
buf = TPM_ACCESS_ACTIVE_LOCALITY;
- tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
}
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
- * tpm_cr50_request_locality() - Request TPM locality 0.
+ * tpm_cr50_request_locality() - Request TPM locality.
* @chip: A TPM chip.
+ * @loc: Locality to be requested.
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_request_locality(struct tpm_chip *chip)
+static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 buf = TPM_ACCESS_REQUEST_USE;
unsigned long stop;
int rc;
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
- return rc;
+ goto unlock_out;
stop = jiffies + chip->timeout_a;
do {
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
} while (time_before(jiffies, stop));
- return -ETIMEDOUT;
+ rc = -ETIMEDOUT;
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
@@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
{
u8 buf[4];
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0)
return 0;
return buf[0];
@@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
{
u8 buf[4] = { TPM_STS_COMMAND_READY };
- tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf));
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
}
@@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
stop = jiffies + chip->timeout_b;
do {
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) {
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
continue;
}
@@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
size_t burstcnt, cur, len, expected;
- u8 addr = TPM_I2C_DATA_FIFO(0);
+ u8 addr = TPM_I2C_DATA_FIFO(chip->locality);
u32 status;
int rc;
@@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
goto out_err;
}
- tpm_cr50_release_locality(chip, false);
return cur;
out_err:
@@ -523,7 +539,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -531,13 +546,15 @@ out_err:
* tpm_cr50_i2c_tis_send() - TPM transmission callback.
* @chip: A TPM chip.
* @buf: Buffer to send.
- * @len: Buffer length.
+ * @bufsiz: Buffer size.
+ * @len: Command length.
*
* Return:
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
size_t burstcnt, limit, sent = 0;
u8 tpm_go[4] = { TPM_STS_GO };
@@ -545,10 +562,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
u32 status;
int rc;
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0)
- return rc;
-
/* Wait until TPM is ready for a command */
stop = jiffies + chip->timeout_b;
while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
@@ -577,7 +590,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
* that is inserted by tpm_cr50_i2c_write()
*/
limit = min_t(size_t, burstcnt - 1, len);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality),
+ &buf[sent], limit);
if (rc < 0) {
dev_err(&chip->dev, "Write failed\n");
goto out_err;
@@ -598,7 +612,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
/* Start the TPM command */
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go,
sizeof(tpm_go));
if (rc < 0) {
dev_err(&chip->dev, "Start command failed\n");
@@ -611,7 +625,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -650,6 +663,8 @@ static const struct tpm_class_ops cr50_i2c = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = &tpm_cr50_i2c_req_canceled,
+ .request_locality = &tpm_cr50_request_locality,
+ .relinquish_locality = &tpm_cr50_release_locality,
};
#ifdef CONFIG_ACPI
@@ -669,6 +684,27 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
#endif
/**
+ * tpm_cr50_vid_to_name() - Maps VID to name.
+ * @vendor: Vendor identifier to map to name
+ *
+ * Return:
+ * A valid string identifying the vendor, or "unknown" if not recognized.
+ */
+static const char *tpm_cr50_vid_to_name(u32 vendor)
+{
+ switch (vendor) {
+ case TPM_CR50_I2C_DID_VID:
+ return "cr50";
+ case TPM_TI50_DT_I2C_DID_VID:
+ return "ti50 DT";
+ case TPM_TI50_OT_I2C_DID_VID:
+ return "ti50 OT";
+ default:
+ return "unknown";
+ }
+}
+
+/**
* tpm_cr50_i2c_probe() - Driver probe function.
* @client: I2C client information.
*
@@ -684,6 +720,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
u32 vendor;
u8 buf[4];
int rc;
+ int loc;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -726,29 +763,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
TPM_CR50_TIMEOUT_NOIRQ_MS);
}
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0) {
+ loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC);
+ if (loc < 0) {
dev_err(dev, "Could not request locality\n");
- return rc;
+ return loc;
}
/* Read four bytes from DID_VID register */
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf));
if (rc < 0) {
dev_err(dev, "Could not read vendor id\n");
- tpm_cr50_release_locality(chip, true);
+ if (tpm_cr50_release_locality(chip, loc))
+ dev_err(dev, "Could not release locality\n");
+ return rc;
+ }
+
+ rc = tpm_cr50_release_locality(chip, loc);
+ if (rc) {
+ dev_err(dev, "Could not release locality\n");
return rc;
}
vendor = le32_to_cpup((__le32 *)buf);
- if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
+ if (vendor != TPM_CR50_I2C_DID_VID &&
+ vendor != TPM_TI50_DT_I2C_DID_VID &&
+ vendor != TPM_TI50_OT_I2C_DID_VID) {
dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
- tpm_cr50_release_locality(chip, true);
return -ENODEV;
}
dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
- vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
+ tpm_cr50_vid_to_name(vendor),
client->addr, client->irq, vendor >> 16);
return tpm_chip_register(chip);
}
@@ -772,7 +817,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client)
}
tpm_chip_unregister(chip);
- tpm_cr50_release_locality(chip, true);
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
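
With this refactor the I2C bus lock is no longer taken and dropped around each
register access: tpm_cr50_request_locality() acquires it and
tpm_cr50_release_locality() drops it, so the whole locality tenure is
serialized via the core's request_locality/relinquish_locality hooks. A
minimal sketch of the resulting calling convention (simplified from the probe
path above):

	loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC);
	if (loc < 0)			/* bus lock is not held on failure */
		return loc;
	/* ... TPM_I2C_ACCESS/STS/FIFO traffic at locality 'loc' ... */
	rc = tpm_cr50_release_locality(chip, loc);	/* drops the bus lock */
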
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index 0621ebec530b..4927714d277a 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
static struct platform_driver tis_synquacer_drv = {
.probe = tpm_tis_synquacer_probe,
- .remove_new = tpm_tis_synquacer_remove,
+ .remove = tpm_tis_synquacer_remove,
.driver = {
.name = "tpm_tis_synquacer",
.pm = &tpm_tis_synquacer_pm,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 8fe4a01eea12..0818bb517805 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -321,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
*
* @chip: tpm chip to use
* @buf: send buffer
+ * @bufsiz: size of the buffer
* @count: bytes to send
*
* Return:
* 0 in case of success, negative error value otherwise.
*/
-static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 80cca3b83b22..556bf2256716 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr)
return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
-static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index c62b208b42f1..088182e54deb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -26,6 +26,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
@@ -883,9 +884,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = kmap_atomic(buf->page);
+ src = kmap_local_page(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- kunmap_atomic(src);
+ kunmap_local(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
@@ -923,14 +924,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
@@ -1269,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
- seq_printf(s, "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port)));
seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
return 0;
@@ -1321,7 +1321,6 @@ static void send_sigio_to_port(struct port *port)
static int add_port(struct ports_device *portdev, u32 id)
{
- char debugfs_name[16];
struct port *port;
dev_t devt;
int err;
@@ -1424,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* Finally, create the debugfs file that we can use to
* inspect a port's state at any time
*/
- snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
- port->portdev->vdev->index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444,
pdrvdata.debugfs_dir,
port, &port_debugfs_fops);
return 0;
@@ -1579,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 cols;
+ __virtio16 rows;
} size;
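+		/* Spec orders cols first; virtio16_to_cpu() handles legacy vs. modern endianness. */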
if (!is_console_port(port))
@@ -1588,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 4f6c3cb8aa41..34a345dc5e72 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
- .remove_new = hwicap_drv_remove,
+ .remove = hwicap_drv_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = hwicap_of_match,
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 11b7c4749274..efb1ae834265 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1184,8 +1184,7 @@ static int xillybus_flush(struct file *filp, fl_owner_t id)
static void xillybus_autoflush(struct work_struct *work)
{
- struct delayed_work *workitem = container_of(
- work, struct delayed_work, work);
+ struct delayed_work *workitem = to_delayed_work(work);
struct xilly_channel *channel = container_of(
workitem, struct xilly_channel, rd_workitem);
int rc;
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 8802e2a6fd20..1a1e64133315 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -74,7 +74,7 @@ static void xilly_drv_remove(struct platform_device *op)
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
- .remove_new = xilly_drv_remove,
+ .remove = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index 54ece9207055..08e26137f3d9 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -1,5 +1,6 @@
CONFIG_KUNIT=y
CONFIG_OF=y
+CONFIG_OF_OVERLAY=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 299bc678ed1b..3a1611008e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -61,7 +61,6 @@ config LMK04832
config COMMON_CLK_APPLE_NCO
tristate "Clock driver for Apple SoC NCOs"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
This driver supports NCO (Numerically Controlled Oscillator) blocks
found on Apple SoCs such as t8103 (M1). The blocks are typically
@@ -88,6 +87,15 @@ config COMMON_CLK_RK808
These multi-function devices have two fixed-rate oscillators, clocked at 32KHz each.
Clkout1 is always on; Clkout2 can be switched off via a control register.
+config COMMON_CLK_RP1
+ tristate "Raspberry Pi RP1-based clock support"
+ depends on MISC_RP1 || COMPILE_TEST
+ default MISC_RP1
+ help
+ Enable common clock framework support for Raspberry Pi RP1.
+ This multi-function device has 3 main PLLs and several clock
+ generators to drive the internal sub-peripherals.
+
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
@@ -226,6 +234,17 @@ config COMMON_CLK_EP93XX
help
This driver supports the SoC clocks on the Cirrus Logic ep93xx.
+config COMMON_CLK_EYEQ
+ bool "Clock driver for the Mobileye EyeQ platform"
+ depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST
+ select AUXILIARY_BUS
+ default MACH_EYEQ5 || MACH_EYEQ6H
+ help
+	  This driver provides clocks found on Mobileye EyeQ5, EyeQ6L and
+	  EyeQ6H SoCs. Controllers live in shared register regions called
+	  OLB. The driver provides read-only PLLs, derived from the main
+	  crystal clock (which must be constant). It also exposes some
+	  divider clocks.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -259,7 +278,7 @@ config COMMON_CLK_LAN966X
tristate "Generic Clock Controller driver for LAN966X SoC"
depends on HAS_IOMEM
depends on OF
- depends on SOC_LAN966 || COMPILE_TEST
+ depends on SOC_LAN966 || ARCH_LAN969X || COMPILE_TEST
help
This driver provides support for Generic Clock Controller(GCK) on
LAN966X SoC. GCK generates and supplies clock to various peripherals
@@ -291,7 +310,7 @@ config CLK_TWL
help
Enable support for controlling the clock resources on TWL family
PMICs. These devices have some 32K clock outputs which can be
- controlled by software. For now, only the TWL6032 clocks are
+ controlled by software. For now, the TWL6032 and TWL6030 clocks are
supported.
config CLK_TWL6040
@@ -342,6 +361,15 @@ config COMMON_CLK_LOCHNAGAR
This driver supports the clocking features of the Cirrus Logic
Lochnagar audio development board.
+config COMMON_CLK_NPCM8XX
+ tristate "Clock driver for the NPCM8XX SoC Family"
+ depends on ARCH_NPCM || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+	  This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC
+	  family. All clocks are initialized by the bootloader, so this
+	  driver only reads the current settings back from the hardware.
+
config COMMON_CLK_LOONGSON2
bool "Clock driver for Loongson-2 SoC"
depends on LOONGARCH || COMPILE_TEST
@@ -474,6 +502,15 @@ config COMMON_CLK_SP7021
Not all features of the PLL are currently supported
by the driver.
+config COMMON_CLK_RPMI
+ tristate "Clock driver based on RISC-V RPMI"
+ depends on RISCV || COMPILE_TEST
+ depends on MAILBOX
+ default RISCV
+ help
+ Support for clocks based on the clock service group defined by
+ the RISC-V platform management interface (RPMI) specification.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -484,6 +521,7 @@ source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
+source "drivers/clk/mmp/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/microchip/Kconfig"
@@ -498,6 +536,7 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
@@ -517,7 +556,6 @@ config CLK_KUNIT_TEST
tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
- select OF_OVERLAY if OF
select DTC
help
Kunit tests for the common clock framework.
@@ -526,7 +564,6 @@ config CLK_FIXED_RATE_KUNIT_TEST
tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
- select OF_OVERLAY if OF
select DTC
help
KUnit tests for the basic fixed rate clk type.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index fb8878a5d7d9..b74a1767ca27 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -4,6 +4,21 @@ obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o
clk-test-y := clk_test.o \
+ kunit_clk_assigned_rates_u64_one.dtbo.o \
+ kunit_clk_assigned_rates_u64_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_multiple.dtbo.o \
+ kunit_clk_assigned_rates_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_null.dtbo.o \
+ kunit_clk_assigned_rates_null_consumer.dtbo.o \
+ kunit_clk_assigned_rates_one.dtbo.o \
+ kunit_clk_assigned_rates_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_without.dtbo.o \
+ kunit_clk_assigned_rates_without_consumer.dtbo.o \
+ kunit_clk_assigned_rates_zero.dtbo.o \
+ kunit_clk_assigned_rates_zero_consumer.dtbo.o \
+ kunit_clk_hw_get_dev_of_node.dtbo.o \
kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
@@ -42,6 +57,7 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
+obj-$(CONFIG_COMMON_CLK_EYEQ) += clk-eyeq.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
@@ -62,12 +78,15 @@ obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
+obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
+obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o
+obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o
obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
@@ -129,6 +148,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index c62024b7c737..b3dded204dc5 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -18,7 +18,6 @@ static const struct regmap_config owl_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x00cc,
- .fast_io = true,
};
static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
index 48f177f6ce9c..00b74f8bc437 100644
--- a/drivers/clk/actions/owl-composite.c
+++ b/drivers/clk/actions/owl-composite.c
@@ -122,13 +122,13 @@ static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
rate, parent_rate);
}
-static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_composite *comp = hw_to_owl_comp(hw);
struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
- return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+ return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req);
}
static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
@@ -193,7 +193,7 @@ const struct clk_ops owl_comp_fix_fact_ops = {
.is_enabled = owl_comp_is_enabled,
/* fix_fact_ops */
- .round_rate = owl_comp_fix_fact_round_rate,
+ .determine_rate = owl_comp_fix_fact_determine_rate,
.recalc_rate = owl_comp_fix_fact_recalc_rate,
.set_rate = owl_comp_fix_fact_set_rate,
};
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
index cddac00fe324..118f1393c678 100644
--- a/drivers/clk/actions/owl-divider.c
+++ b/drivers/clk/actions/owl-divider.c
@@ -23,13 +23,16 @@ long owl_divider_helper_round_rate(struct owl_clk_common *common,
div_hw->div_flags);
}
-static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_divider *div = hw_to_owl_divider(hw);
- return owl_divider_helper_round_rate(&div->common, &div->div_hw,
- rate, parent_rate);
+ req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ req->rate,
+ &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
@@ -89,6 +92,6 @@ static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops owl_divider_ops = {
.recalc_rate = owl_divider_recalc_rate,
- .round_rate = owl_divider_round_rate,
+ .determine_rate = owl_divider_determine_rate,
.set_rate = owl_divider_set_rate,
};
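
The round_rate conversions in this series are mechanical and share one shape:
the old .round_rate computation survives, its result lands in req->rate, and
the parent-rate out-parameter becomes &req->best_parent_rate. A minimal
sketch of the recurring pattern (the foo_* names are hypothetical):

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		/* same computation the old .round_rate callback performed */
		req->rate = foo_helper_round_rate(hw, req->rate,
						  &req->best_parent_rate);

		return 0;
	}

	static const struct clk_ops foo_ops = {
		.recalc_rate	= foo_recalc_rate,
		.determine_rate	= foo_determine_rate,	/* was .round_rate */
		.set_rate	= foo_set_rate,
	};
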
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index 64f316cf7cfc..12f41f6bacd6 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -130,14 +130,16 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
return *parent_rate * mul / div;
}
-static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_factor *factor = hw_to_owl_factor(hw);
struct owl_factor_hw *factor_hw = &factor->factor_hw;
- return owl_factor_helper_round_rate(&factor->common, factor_hw,
- rate, parent_rate);
+ req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw,
+ req->rate, &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
@@ -214,7 +216,7 @@ static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops owl_factor_ops = {
- .round_rate = owl_factor_round_rate,
+ .determine_rate = owl_factor_determine_rate,
.recalc_rate = owl_factor_recalc_rate,
.set_rate = owl_factor_set_rate,
};
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 155f313986b4..869690b79cc1 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -56,8 +56,8 @@ static const struct clk_pll_table *_get_pll_table(
return table;
}
-static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_pll *pll = hw_to_owl_pll(hw);
struct owl_pll_hw *pll_hw = &pll->pll_hw;
@@ -65,17 +65,24 @@ static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mul;
if (pll_hw->table) {
- clkt = _get_pll_table(pll_hw->table, rate);
- return clkt->rate;
+ clkt = _get_pll_table(pll_hw->table, req->rate);
+ req->rate = clkt->rate;
+
+ return 0;
}
/* fixed frequency */
- if (pll_hw->width == 0)
- return pll_hw->bfreq;
+ if (pll_hw->width == 0) {
+ req->rate = pll_hw->bfreq;
- mul = owl_pll_calculate_mul(pll_hw, rate);
+ return 0;
+ }
+
+ mul = owl_pll_calculate_mul(pll_hw, req->rate);
- return pll_hw->bfreq * mul;
+ req->rate = pll_hw->bfreq * mul;
+
+ return 0;
}
static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
@@ -188,7 +195,7 @@ const struct clk_ops owl_pll_ops = {
.enable = owl_pll_enable,
.disable = owl_pll_disable,
.is_enabled = owl_pll_is_enabled,
- .round_rate = owl_pll_round_rate,
+ .determine_rate = owl_pll_determine_rate,
.recalc_rate = owl_pll_recalc_rate,
.set_rate = owl_pll_set_rate,
};
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 65d422a588e1..9d178afc73bd 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
vco = vco_pre * f;
}
- delta = abs(target_rate - vco);
+ delta = abs(target_vco_rate - vco);
if (delta < best_delta) {
best_delta = delta;
best_r = r;
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 8e3684ba2c74..9128a06b860d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -24,4 +24,5 @@ obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA7D65) += sama7d65.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index a92da64c12e1..bf9b635ac9d6 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -270,8 +270,8 @@ static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
return 0;
}
-static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -283,7 +283,7 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
/*
* Rate divisor is actually made of two different divisors, multiplied
@@ -304,12 +304,12 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
best_parent_rate = clk_hw_round_rate(pclk,
- rate * tmp_qd * div);
+ req->rate * tmp_qd * div);
tmp_rate = best_parent_rate / (div * tmp_qd);
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
}
@@ -318,11 +318,13 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
__func__, best_rate, best_parent_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
-static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -333,20 +335,20 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
- if (!rate)
+ if (!req->rate)
return 0;
best_parent_rate = clk_round_rate(pclk->clk, 1);
- div = max(best_parent_rate / rate, 1UL);
+ div = max(best_parent_rate / req->rate, 1UL);
for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
- best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ best_parent_rate = clk_round_rate(pclk->clk, req->rate * div);
tmp_rate = best_parent_rate / div;
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
@@ -356,9 +358,11 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
}
pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
- __func__, best_rate, *parent_rate, tmp_qd - 1);
+ __func__, best_rate, req->best_parent_rate, tmp_qd - 1);
+
+ req->rate = best_rate;
- return best_rate;
+ return 0;
}
static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +440,7 @@ static const struct clk_ops audio_pll_pad_ops = {
.enable = clk_audio_pll_pad_enable,
.disable = clk_audio_pll_pad_disable,
.recalc_rate = clk_audio_pll_pad_recalc_rate,
- .round_rate = clk_audio_pll_pad_round_rate,
+ .determine_rate = clk_audio_pll_pad_determine_rate,
.set_rate = clk_audio_pll_pad_set_rate,
};
@@ -444,7 +448,7 @@ static const struct clk_ops audio_pll_pmc_ops = {
.enable = clk_audio_pll_pmc_enable,
.disable = clk_audio_pll_pmc_disable,
.recalc_rate = clk_audio_pll_pmc_recalc_rate,
- .round_rate = clk_audio_pll_pmc_round_rate,
+ .determine_rate = clk_audio_pll_pmc_determine_rate,
.set_rate = clk_audio_pll_pmc_set_rate,
};
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 1e6c12eeda10..a9aa93b5a870 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -40,21 +40,32 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -77,7 +88,7 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops h32mx_ops = {
.recalc_rate = clk_sama5d4_h32mx_recalc_rate,
- .round_rate = clk_sama5d4_h32mx_round_rate,
+ .determine_rate = clk_sama5d4_h32mx_determine_rate,
.set_rate = clk_sama5d4_h32mx_set_rate,
};
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 15c46489ba85..d5ea2069ec83 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -20,7 +20,7 @@
#define PMC_MCR_CSS_SHIFT (16)
-#define MASTER_MAX_ID 4
+#define MASTER_MAX_ID 9
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
{
struct clk_master *master = to_clk_master(hw);
+ if (master->div == MASTER_PRES_MAX)
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3);
+
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c173a44c800a..e700f40fd87f 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
long best_diff = LONG_MIN;
u32 shift;
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = parent_rate;
+
+ return 0;
+ }
	/* First step: check the available dividers. */
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
@@ -332,50 +335,57 @@ end:
return 0;
}
-static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int shift = 0;
unsigned long best_rate;
unsigned long best_diff;
- unsigned long cur_rate = *parent_rate;
+ unsigned long cur_rate = req->best_parent_rate;
unsigned long cur_diff;
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return *parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (periph->range.max) {
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
+ cur_rate = req->best_parent_rate >> shift;
if (cur_rate <= periph->range.max)
break;
}
}
- if (rate >= cur_rate)
- return cur_rate;
+ if (req->rate >= cur_rate) {
+ req->rate = cur_rate;
+
+ return 0;
+ }
- best_diff = cur_rate - rate;
+ best_diff = cur_rate - req->rate;
best_rate = cur_rate;
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
- if (cur_rate < rate)
- cur_diff = rate - cur_rate;
+ cur_rate = req->best_parent_rate >> shift;
+ if (cur_rate < req->rate)
+ cur_diff = req->rate - cur_rate;
else
- cur_diff = cur_rate - rate;
+ cur_diff = cur_rate - req->rate;
if (cur_diff < best_diff) {
best_diff = cur_diff;
best_rate = cur_rate;
}
- if (!best_diff || cur_rate < rate)
+ if (!best_diff || cur_rate < req->rate)
break;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
@@ -427,7 +437,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.disable = clk_sam9x5_peripheral_disable,
.is_enabled = clk_sam9x5_peripheral_is_enabled,
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
- .round_rate = clk_sam9x5_peripheral_round_rate,
+ .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
.save_context = clk_sam9x5_peripheral_save_context,
.restore_context = clk_sam9x5_peripheral_restore_context,
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 249d6a53cedf..5c5f7398effe 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -231,13 +231,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
return bestrate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
- return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
- NULL, NULL, NULL);
+ req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -302,7 +304,7 @@ static const struct clk_ops pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.save_context = clk_pll_save_context,
.restore_context = clk_pll_restore_context,
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ba3a1839a96d..3ac09fecc54e 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -33,21 +33,33 @@ static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_plldiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +78,7 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops plldiv_ops = {
.recalc_rate = clk_plldiv_recalc_rate,
- .round_rate = clk_plldiv_round_rate,
+ .determine_rate = clk_plldiv_determine_rate,
.set_rate = clk_plldiv_set_rate,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index fda041102224..3b965057ba0d 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,7 +23,7 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define PLL_MAX_ID 7
+#define PLL_MAX_ID 9
struct sam9x60_pll_core {
struct regmap *regmap;
@@ -93,8 +93,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -103,11 +103,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
(cmul == frac->mul && cfrac == frac->frac))
goto unlock;
- /* Recommended value for PMC_PLL_ACR */
- if (core->characteristics->upll)
- val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
- else
- val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ /* Load recommended value for PMC_PLL_ACR */
+ val = core->characteristics->acr;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
@@ -128,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
udelay(10);
}
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -164,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
@@ -173,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -230,12 +227,16 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
return tmprate;
}
-static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
+ req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate,
+ req->best_parent_rate,
+ false);
+
+ return 0;
}
static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -262,8 +263,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -275,18 +276,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
(frac->mul << core->layout->mul_shift) |
(frac->frac << core->layout->frac_shift));
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK |
AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -321,7 +322,7 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
@@ -332,13 +333,16 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
};
-/* This function should be called with spinlock acquired. */
+/*
+ * This function should be called with the spinlock acquired.
+ * Warning: this function must be called only if the same PLL ID was set in
+ * the PLL_UPDT register previously.
+ */
static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
bool enable)
{
@@ -350,9 +354,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
core->layout->div_mask | ena_msk,
(div << core->layout->div_shift) | ena_val);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -366,8 +370,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
unsigned int val, cdiv;
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -398,15 +402,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
core->layout->endiv_mask, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -487,12 +491,15 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
return best_rate;
}
-static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_div_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_div_pll_compute_div(core, parent_rate, rate);
+ req->rate = sam9x60_div_pll_compute_div(core, &req->best_parent_rate,
+ req->rate);
+
+ return 0;
}
static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -518,8 +525,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -574,8 +581,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
div->div = div->safe_div;
spin_lock_irqsave(core.lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core.id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
@@ -601,7 +608,7 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -612,7 +619,7 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -623,7 +630,7 @@ static const struct clk_ops sam9x60_fixed_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
};
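The regmap_update_bits() to regmap_write_bits() switches above are behavioural, not cosmetic: regmap_update_bits() performs a read-modify-write and skips the bus write when the masked bits already hold the requested value, while regmap_write_bits() forces the write unconditionally. Since AT91_PMC_PLL_UPDT apparently acts as an index/trigger register (the ID field selects which PLL the following CTRL0/CTRL1 accesses target, and UPDATE latches the new settings, as the new warning comment above notes), the write has to happen even when the register already reads back with the desired contents. A side-by-side sketch with the at91 arguments:

    /* May silently skip the write if the register already reads back as
     * AT91_PMC_PLL_UPDT_UPDATE | core->id: */
    regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
                       AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
                       AT91_PMC_PLL_UPDT_UPDATE | core->id);

    /* Same read-modify-write semantics, but the write always goes out: */
    regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
                      AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
                      AT91_PMC_PLL_UPDT_UPDATE | core->id);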
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0696a928aa9..e906928cfbf0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -319,8 +319,8 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -336,25 +336,27 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
if (!usb->divisors[i])
continue;
- tmp_parent_rate = rate * usb->divisors[i];
+ tmp_parent_rate = req->rate * usb->divisors[i];
tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
- if (tmprate < rate)
- tmpdiff = rate - tmprate;
+ if (tmprate < req->rate)
+ tmpdiff = req->rate - tmprate;
else
- tmpdiff = tmprate - rate;
+ tmpdiff = tmprate - req->rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
- *parent_rate = tmp_parent_rate;
+ req->best_parent_rate = tmp_parent_rate;
}
if (!bestdiff)
break;
}
- return bestrate;
+ req->rate = bestrate;
+
+ return 0;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -384,7 +386,7 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
- .round_rate = at91rm9200_clk_usb_round_rate,
+ .determine_rate = at91rm9200_clk_usb_determine_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
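The .round_rate to .determine_rate conversion seen here (and repeated throughout this series) is mechanical: the (rate, *parent_rate) pair becomes a struct clk_rate_request, the chosen rate is reported through req->rate, a proposed parent rate through req->best_parent_rate, and the long return value becomes 0 on success or a negative errno. A minimal before/after sketch, where the foo_* names are purely illustrative:

    static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long *parent_rate)
    {
            return foo_best_rate(rate, parent_rate);    /* rate or -errno */
    }

    static int foo_determine_rate(struct clk_hw *hw,
                                  struct clk_rate_request *req)
    {
            long rate = foo_best_rate(req->rate, &req->best_parent_rate);

            if (rate < 0)
                    return rate;

            req->rate = rate;
            return 0;
    }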
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 5aa9c1f1c886..acf780a81589 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -151,6 +151,7 @@ static struct syscore_ops pmc_syscore_ops = {
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
+ { .compatible = "microchip,sama7d65-pmc", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 4fb29ca111f7..5daa32c4cf25 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -80,6 +80,7 @@ struct clk_pll_characteristics {
u16 *icpll;
u8 *out;
u8 upll : 1;
+ u32 acr;
};
struct clk_programmable_layout {
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index db6db9e2073e..18baf4a256f4 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -36,6 +36,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00020010),
};
static const struct clk_range upll_outputs[] = {
@@ -48,6 +49,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN = [18 MHz, 32 MHz] */
};
static const struct clk_pll_layout pll_frac_layout = {
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
index cbb8b220f16b..89868a0aeaba 100644
--- a/drivers/clk/at91/sam9x7.c
+++ b/drivers/clk/at91/sam9x7.c
@@ -61,44 +61,44 @@ static const struct clk_master_layout sam9x7_master_layout = {
/* Fractional PLL core output range. */
static const struct clk_range plla_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
static const struct clk_range upll_core_outputs[] = {
- { .min = 600000000, .max = 1200000000 },
+ { .min = 600000000, .max = 960000000 },
};
static const struct clk_range lvdspll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range audiopll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range plladiv2_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
/* Fractional PLL output range. */
static const struct clk_range plla_outputs[] = {
- { .min = 732421, .max = 800000000 },
+ { .min = 400000000, .max = 800000000 },
};
static const struct clk_range upll_outputs[] = {
- { .min = 300000000, .max = 600000000 },
+ { .min = 300000000, .max = 480000000 },
};
static const struct clk_range lvdspll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 175000000, .max = 550000000 },
};
static const struct clk_range audiopll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 0, .max = 300000000 },
};
static const struct clk_range plladiv2_outputs[] = {
- { .min = 366210, .max = 400000000 },
+ { .min = 200000000, .max = 400000000 },
};
/* PLL characteristics. */
@@ -107,6 +107,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = plla_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -115,6 +116,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = upll_core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -122,6 +124,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics audiopll_characteristics = {
@@ -129,6 +132,7 @@ static const struct clk_pll_characteristics audiopll_characteristics = {
.num_output = ARRAY_SIZE(audiopll_outputs),
.output = audiopll_outputs,
.core_output = audiopll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics plladiv2_characteristics = {
@@ -136,6 +140,7 @@ static const struct clk_pll_characteristics plladiv2_characteristics = {
.num_output = ARRAY_SIZE(plladiv2_outputs),
.output = plladiv2_outputs,
.core_output = plladiv2_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
/* Layout for fractional PLL ID PLLA. */
@@ -403,6 +408,7 @@ static const struct {
{ .n = "pioD_clk", .id = 44, },
{ .n = "tcb1_clk", .id = 45, },
{ .n = "dbgu_clk", .id = 47, },
+ { .n = "pmecc_clk", .id = 48, },
/*
* mpddr_clk feeds DDR controller and is enabled by bootloader thus we
* need to keep it enabled in case there is no Linux consumer for it.
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
new file mode 100644
index 000000000000..7dee2b160ffb
--- /dev/null
+++ b/drivers/clk/at91/sama7d65.c
@@ -0,0 +1,1379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7D65 PMC code.
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <ryan.wanner@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+#define PMC_INDEX_MAX 25
+
+/*
+ * PLL clock identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_GPU: Graphics subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_USB: USB PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_GPU,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_LVDS,
+ PLL_ID_USB,
+ PLL_ID_MAX
+};
+
+/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+ PLL_COMPID_MAX
+};
+
+/*
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
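+
+/*
+ * With this layout, the sam9x60 PLL driver that this file reuses (see
+ * sam9x60_frac_pll_recalc_rate()) computes the fractional PLL output as
+ * roughly
+ *   fOUT = fIN * (MUL + 1) + fIN * FRAC / 2^22
+ * i.e. the 22-bit FRAC field adds sub-multiplier resolution in steps of
+ * fIN / 2^22.
+ */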
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * CPU PLL output range.
+ * Notice: The upper limit has been setup to 1000000002 due to hardware
+ * block which cannot output exactly 1GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/*
+ * Min: fCOREPLLCK = 600 MHz, PMC_PLL_CTRL0.DIVPMC = 255
+ * Max: fCOREPLLCK = 800 MHz, PMC_PLL_CTRL0.DIVPMC = 0
+ */
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 16406250, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 480000000, .max = 480000000 },
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+ .acr = UL(0x00070010),
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .acr = UL(0x12020010),
+ .upll = true,
+};
+
+/*
+ * SAMA7D65 PLL possible parents
+ * @SAMA7D65_PLL_PARENT_MAINCK: MAINCK is a PLL parent
+ * @SAMA7D65_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7D65_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7d65_pll_parent {
+ SAMA7D65_PLL_PARENT_MAINCK,
+ SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ SAMA7D65_PLL_PARENT_FRACCK
+};
+
+/*
+ * PLL clock description
+ * @n: clock name
+ * @l: clock layout
+ * @c: clock characteristics
+ * @hw: pointer to clk_hw
+ * @t: clock type
+ * @f: clock flags
+ * @p: clock parent
+ * @eid: export index in sama7d65->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
+ */
+static struct sama7d65_pll {
+ const char *n;
+ const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
+ unsigned long f;
+ enum sama7d65_pll_parent p;
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+} sama7d65_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * Safe div=15 should be safe even for switching between 1 GHz and
+ * 90 MHz (the frac PLL might go up to 1.2 GHz).
+ */
+ .safe_div = 15,
+ },
+ },
+
+ [PLL_ID_SYS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck, which may feed critical parts
+ * of the system, like timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the system, like timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
+ },
+
+ [PLL_ID_DDR] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+ },
+
+ [PLL_ID_GPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "gpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "gpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
+ },
+
+ [PLL_ID_BAUD] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_BAUDPLL,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
+ },
+
+ [PLL_ID_ETH] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_ETHPLL,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "lvdspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "lvdspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_LVDSPLL,
+ },
+ },
+
+ [PLL_ID_USB] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "usbpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "usbpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ },
+ },
+};
+
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp }
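+/* e.g. PLL_IDS_TO_ARR_ENTRY(SYS, DIV0) expands to { PLL_ID_SYS, PLL_COMPID_DIV0 } */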
+
+/*
+ * Master clock (MCK[0..9]) description
+ * @n: clock name
+ * @ep: extra parent names array (each entry is formed from PLL component
+ * identifiers, see enum pll_component_id)
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in the parents array that specifies the changeable
+ * parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @eid: export index in sama7d65->chws[] array
+ * @c: true if clock is critical and cannot be disabled
+ */
+static struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 eid;
+ u8 c;
+} sama7d65_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
+ { .n = "mck1",
+ .id = 1,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK1,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK3,
+ .c = 1, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck5",
+ .id = 5,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK5,
+ .c = 1, },
+
+ { .n = "mck6",
+ .id = 6,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1,
+ .c = 1, },
+
+ { .n = "mck7",
+ .id = 7,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck8",
+ .id = 8,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck9",
+ .id = 9,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+};
+
+/*
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7d65_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
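+/*
+ * Each entry appears to be the PCK CSS register value for the parent at the
+ * same index in the parent_hws[] array built in sama7d65_pmc_setup():
+ * md_slck (0), td_slck (1), mainck (2), syspll (5), ddrpll (7), gpupll (8),
+ * baudpll (9), audiopll (10) and ethpll (12).
+ */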
+static u32 sama7d65_prog_mux_table[] = { 0, 1, 2, 5, 7, 8, 9, 10, 12 };
+
+/*
+ * Peripheral clock parent hw identifier (used to index into sama7d65_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MCK5: pck parent hw identifier is MCK5
+ * @PCK_PARENT_HW_MCK6: pck parent hw identifier is MCK6
+ * @PCK_PARENT_HW_MCK7: pck parent hw identifier is MCK7
+ * @PCK_PARENT_HW_MCK8: pck parent hw identifier is MCK8
+ * @PCK_PARENT_HW_MCK9: pck parent hw identifier is MCK9
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7d65_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MCK5,
+ PCK_PARENT_HW_MCK6,
+ PCK_PARENT_HW_MCK7,
+ PCK_PARENT_HW_MCK8,
+ PCK_PARENT_HW_MCK9,
+ PCK_PARENT_HW_MAX
+};
+
+/*
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent hw id
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static struct {
+ const char *n;
+ enum sama7d65_pck_parent_hw_id p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7d65_periphck[] = {
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 10, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 17, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK7, .id = 18, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK5, .id = 20, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 21, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 22, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK7, .id = 24, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK6, .id = 26, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK8, .id = 27, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK9, .id = 29, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 30, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK7, .id = 33, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK7, .id = 34, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK7, .id = 35, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK7, .id = 36, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK7, .id = 37, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK8, .id = 38, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK8, .id = 39, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK8, .id = 40, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK8, .id = 41, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK9, .id = 42, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK9, .id = 43, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK9, .id = 44, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 46, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 47, },
+ { .n = "gmac0_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac1_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 50, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK5, .id = 53, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 54, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 55, .r = { .max = 200000000, }, },
+ { .n = "lcd_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK5, .id = 57, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK5, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK5, .id = 59, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK5, .id = 60, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK5, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK5, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK7, .id = 66, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK7, .id = 67, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK7, .id = 68, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK8, .id = 69, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK8, .id = 70, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK8, .id = 71, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK7, .id = 72, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK5, .id = 73, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK5, .id = 74, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 76, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK6, .id = 78, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK9, .id = 79, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK9, .id = 80, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK7, .id = 81, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK8, .id = 82, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK8, .id = 83, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK8, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK8, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK5, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK5, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK5, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK5, .id = 89, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK5, .id = 90, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK6, .id = 91, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK6, .id = 92, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK5, .id = 99, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK5, .id = 100, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK5, .id = 101, },
+ { .n = "dsi_clk", .p = PCK_PARENT_HW_MCK3, .id = 103, },
+ { .n = "lvdsc_clk", .p = PCK_PARENT_HW_MCK3, .id = 104, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents (each entry is formed from PLL component identifiers,
+ * see enum pll_component_id)
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: index in the parent array of the changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7d65_gck[] = {
+ { .n = "adc_gclk",
+ .id = 25,
+ .r = { .max = 100000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 8, 9, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 29,
+ .r = { .max = 200000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 34,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 35,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 36,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 37,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 38,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 39,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 40,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 41,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 42,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 43,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 44,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 46,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 47,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 49,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 50,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 54,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 55,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "lcdc_gclk",
+ .id = 56,
+ .r = { .max = 90000000 },
+ .pp_count = 0,
+ .pp_chg_id = INT_MIN,
+ },
+
+ { .n = "mcan0_gclk",
+ .id = 58,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan1_gclk",
+ .id = 59,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan2_gclk",
+ .id = 60,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan3_gclk",
+ .id = 61,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan4_gclk",
+ .id = 62,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "pdmc0_gclk",
+ .id = 64,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9 },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 65,
+ .r = { .max = 80000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 66,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 67,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 68,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 69,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 70,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 71,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 73,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 74,
+ .r = { .max = 266000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 75,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 76,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 77,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10 },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "spdifrx_gclk",
+ .id = 79,
+ .r = { .max = 150000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "spdiftx_gclk",
+ .id = 80,
+ .r = { .max = 25000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 83,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 86,
+ .r = { .max = 67000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "DSI_gclk",
+ .id = 103,
+ .r = {.max = 27000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .pp_mux_table = {5},
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "I3CC_gclk",
+ .id = 105,
+ .r = {.max = 125000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 32768, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x773,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
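+
+/*
+ * With .is_pres_direct set, the generic at91 programmable-clock code
+ * treats PRES as a direct divider, fOUT = fPARENT / (PRES + 1), rather
+ * than a power-of-two shift (fPARENT / 2^PRES).
+ */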
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7d65_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7d65_pmc_setup(struct device_node *np)
+{
+ const char *main_xtal_name = "main_xtal";
+ struct pmc_data *sama7d65_pmc;
+ const char *parent_names[11];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
+ bool bypass;
+ int i, j;
+
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
+
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
+ return;
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7d65_pmc = pmc_data_allocate(PMC_INDEX_MAX,
+ nck(sama7d65_systemck),
+ nck(sama7d65_periphck),
+ nck(sama7d65_gck), 8);
+ if (!sama7d65_pmc)
+ return;
+
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7d65_mckx) + ARRAY_SIZE(sama7d65_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
+ goto err_free;
+
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7d65_plls[i][j].n)
+ continue;
+
+ switch (sama7d65_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ switch (sama7d65_plls[i][j].p) {
+ case SAMA7D65_PLL_PARENT_MAINCK:
+ parent_hw = sama7d65_pmc->chws[PMC_MAIN];
+ break;
+ case SAMA7D65_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, parent_hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, sama7d65_plls[i][0].hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f,
+ sama7d65_plls[i][j].safe_div);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_plls[i][j].hw = hw;
+ if (sama7d65_plls[i][j].eid)
+ sama7d65_pmc->chws[sama7d65_plls[i][j].eid] = hw;
+ }
+ }
+
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7d65_plls[PLL_ID_CPU][1].hw,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MCK] = hw;
+ sama7d65_mckx[PCK_PARENT_HW_MCK0].hw = hw;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7d65_mckx); i++) {
+ u8 num_parents = 3 + sama7d65_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7d65_mckx[i].ep_mux_table,
+ sama7d65_mckx[i].ep_count);
+ for (j = 0; j < sama7d65_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7d65_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7d65_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7d65_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7d65_mckx[i].n,
+ num_parents, NULL, parent_hws,
+ mux_table, &pmc_mckX_lock,
+ sama7d65_mckx[i].id,
+ sama7d65_mckx[i].c,
+ sama7d65_mckx[i].ep_chg_id);
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_mckx[i].hw = hw;
+ if (sama7d65_mckx[i].eid)
+ sama7d65_pmc->chws[sama7d65_mckx[i].eid] = hw;
+ }
+
+ parent_names[0] = "syspll_divpmcck";
+ parent_names[1] = "usbpll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7d65_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7d65_plls[PLL_ID_GPU][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7d65_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7d65_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7d65_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
+
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
+ 9, i,
+ &programmable_layout,
+ sama7d65_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7d65_systemck[i].n,
+ sama7d65_systemck[i].p, NULL,
+ sama7d65_systemck[i].id, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->shws[sama7d65_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_periphck[i].n,
+ NULL,
+ sama7d65_mckx[sama7d65_periphck[i].p].hw,
+ sama7d65_periphck[i].id,
+ &sama7d65_periphck[i].r,
+ sama7d65_periphck[i].chgp ? 0 :
+ INT_MIN, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->phws[sama7d65_periphck[i].id] = hw;
+ }
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_pmc->chws[PMC_MCK1];
+ for (i = 0; i < ARRAY_SIZE(sama7d65_gck); i++) {
+ u8 num_parents = 4 + sama7d65_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sama7d65_gck[i].pp_mux_table,
+ sama7d65_gck[i].pp_count);
+ for (j = 0; j < sama7d65_gck[i].pp_count; j++) {
+ u8 pll_id = sama7d65_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7d65_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[4], tmp_parent_hws,
+ sama7d65_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_gck[i].n, NULL,
+ parent_hws, mux_table,
+ num_parents,
+ sama7d65_gck[i].id,
+ &sama7d65_gck[i].r,
+ sama7d65_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->ghws[sama7d65_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7d65_pmc);
+ kfree(alloc_mem);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ kfree(sama7d65_pmc);
+}
+
+/* Some clocks are used for a clocksource. */
+CLK_OF_DECLARE(sama7d65_pmc, "microchip,sama7d65-pmc", sama7d65_pmc_setup);
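Both parent-table loops in sama7d65_pmc_setup() above follow the same recipe: the first mux_table slots get the identity encodings for the fixed parents (md_slck, td_slck, mainck, plus mck1 for the generated clocks), and the per-clock PLL encodings are appended behind them. Assuming the PMC_INIT_TABLE()/PMC_FILL_TABLE() helpers from pmc.h keep their usual loop semantics, the MCKx case is equivalent to:

    u32 mux_table[3 + ep_count];
    unsigned int k;

    for (k = 0; k < 3; k++)               /* PMC_INIT_TABLE(mux_table, 3) */
            mux_table[k] = k;
    for (k = 0; k < ep_count; k++)        /* PMC_FILL_TABLE(&mux_table[3], ...) */
            mux_table[3 + k] = ep_mux_table[k];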
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 8385badc1c70..1340c2b00619 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -113,6 +113,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -121,6 +122,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/*
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 7741d8f3dbee..021d1b412af4 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/io.h>
+#include <dt-bindings/clock/at91.h>
+
#define SLOW_CLOCK_FREQ 32768
#define SLOWCK_SW_CYCLES 5
#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
@@ -470,7 +472,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
{
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw_onecell_data *clk_data;
- struct clk_hw *slow_rc, *slow_osc;
+ struct clk_hw *slow_rc, *slow_osc, *hw;
const char *xtal_name;
const struct clk_hw *parent_hws[2];
static struct clk_parent_data parent_data = {
@@ -506,19 +508,19 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
- slow_rc,
- 0, 32768);
- if (IS_ERR(clk_data->hws[0]))
+ hw = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck", slow_rc,
+ 0, 32768);
+ if (IS_ERR(hw))
goto clk_data_free;
+ clk_data->hws[SCKC_MD_SLCK] = hw;
parent_hws[0] = slow_rc;
parent_hws[1] = slow_osc;
- clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_hws, 2,
- &at91sam9x60_bits);
- if (IS_ERR(clk_data->hws[1]))
+ hw = at91_clk_register_sam9x5_slow(regbase, "td_slck", parent_hws,
+ 2, &at91sam9x60_bits);
+ if (IS_ERR(hw))
goto unregister_md_slck;
+ clk_data->hws[SCKC_TD_SLCK] = hw;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
@@ -527,9 +529,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
return;
unregister_td_slck:
- at91_clk_unregister_sam9x5_slow(clk_data->hws[1]);
+ at91_clk_unregister_sam9x5_slow(clk_data->hws[SCKC_TD_SLCK]);
unregister_md_slck:
- clk_hw_unregister(clk_data->hws[0]);
+ clk_hw_unregister(clk_data->hws[SCKC_MD_SLCK]);
clk_data_free:
kfree(clk_data);
unregister_slow_osc:
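The sckc change above replaces the hard-coded hws[0]/hws[1] slots with the dt-binding indices, so the provider's cell layout is tied to the binding header instead of registration order. The constants are expected to look like this (values assumed here, chosen to match the old slots so behaviour is unchanged):

    /* dt-bindings/clock/at91.h (assumed) */
    #define SCKC_MD_SLCK    0
    #define SCKC_TD_SLCK    1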
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 9667ce898428..6f3e1151b354 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -108,21 +108,21 @@ static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
return ((parent_rate / idiv) * fbdiv) / odiv;
}
-static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int i2s_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
- const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate);
int i;
if (!pll_cfg) {
- dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+ dev_err(clk->dev, "invalid parent rate=%ld\n", req->best_parent_rate);
return -EINVAL;
}
for (i = 0; pll_cfg[i].rate != 0; i++)
- if (pll_cfg[i].rate == rate)
- return rate;
+ if (pll_cfg[i].rate == req->rate)
+ return 0;
return -EINVAL;
}
@@ -156,7 +156,7 @@ static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops i2s_pll_ops = {
.recalc_rate = i2s_pll_recalc_rate,
- .round_rate = i2s_pll_round_rate,
+ .determine_rate = i2s_pll_determine_rate,
.set_rate = i2s_pll_set_rate,
};
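i2s_pll_determine_rate() also shows the rejection side of the conversion: where round_rate signalled failure through a negative long return, determine_rate returns the errno directly and leaves req->rate alone, and the clk core treats any negative return as "rate not achievable". In sketch form, with hypothetical example_* names:

    static int example_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
    {
            if (!example_rate_supported(req->rate))
                    return -EINVAL;    /* reject this request */

            /* accept: req->rate already holds the granted rate */
            return 0;
    }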
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 6c7a2b62b406..c7ca473ee76c 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -149,8 +149,8 @@ static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int axs10x_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
long best_rate;
@@ -163,11 +163,13 @@ static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops axs10x_pll_ops = {
.recalc_rate = axs10x_pll_recalc_rate,
- .round_rate = axs10x_pll_round_rate,
+ .determine_rate = axs10x_pll_determine_rate,
.set_rate = axs10x_pll_set_rate,
};
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 8d5fc7158f33..849d1f55765f 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -228,15 +228,18 @@ static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
CCU_DIV_CLKDIV_MAX(mask));
}
-static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_var_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long divider;
- divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+ divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
+ div->mask);
- return ccu_div_calc_freq(*parent_rate, divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
+
+ return 0;
}
/*
@@ -308,12 +311,14 @@ static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
return ccu_div_calc_freq(parent_rate, div->divider);
}
-static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
- return ccu_div_calc_freq(*parent_rate, div->divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
+
+ return 0;
}
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,14 +539,14 @@ static const struct clk_ops ccu_div_var_gate_to_set_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_fast,
.debug_init = ccu_div_var_debug_init
};
static const struct clk_ops ccu_div_var_nogate_ops = {
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_slow,
.debug_init = ccu_div_var_debug_init
};
@@ -551,7 +556,7 @@ static const struct clk_ops ccu_div_gate_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_gate_debug_init
};
@@ -565,7 +570,7 @@ static const struct clk_ops ccu_div_buf_ops = {
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_fixed_debug_init
};
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
index 13ef28001439..357269f41cdc 100644
--- a/drivers/clk/baikal-t1/ccu-pll.c
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -228,14 +228,16 @@ static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
}
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long nr = 1, nf = 1, od = 1;
- ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+ ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
- return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+ req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
+
+ return 0;
}
/*
@@ -481,7 +483,7 @@ static const struct clk_ops ccu_pll_gate_to_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_norst,
.debug_init = ccu_pll_debug_init
};
@@ -491,7 +493,7 @@ static const struct clk_ops ccu_pll_straight_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_reset,
.debug_init = ccu_pll_debug_init
};
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index 84555a00f950..17d75e8e2e8f 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -405,7 +405,7 @@ static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < data->divs_num; ++idx) {
if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index fce02ce77347..921b87024feb 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -196,7 +196,7 @@ static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index fb04734afc80..02215ea79403 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -570,18 +570,23 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
return rate >> A2W_PLL_FRAC_BITS;
}
-static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int bcm2835_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
- rate = clamp(rate, data->min_rate, data->max_rate);
+ req->rate = clamp(req->rate, data->min_rate, data->max_rate);
- bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+ bcm2835_pll_choose_ndiv_and_fdiv(req->rate, req->best_parent_rate,
+ &ndiv, &fdiv);
- return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+ req->rate = bcm2835_pll_rate_from_divisors(req->best_parent_rate,
+ ndiv, fdiv,
+ 1);
+
+ return 0;
}
static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
@@ -783,7 +788,7 @@ static const struct clk_ops bcm2835_pll_clk_ops = {
.unprepare = bcm2835_pll_off,
.recalc_rate = bcm2835_pll_get_rate,
.set_rate = bcm2835_pll_set_rate,
- .round_rate = bcm2835_pll_round_rate,
+ .determine_rate = bcm2835_pll_determine_rate,
.debug_init = bcm2835_pll_debug_init,
};
@@ -1550,7 +1555,7 @@ static const char *const bcm2835_clock_osc_parents[] = {
.parents = bcm2835_clock_osc_parents, \
__VA_ARGS__)
-/* main peripherial parent mux */
+/* main peripheral parent mux */
static const char *const bcm2835_clock_per_parents[] = {
"gnd",
"xosc",
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 83ef41d618be..b2fc05b60783 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -59,7 +59,7 @@ static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
/*
* At minimum we should loop for a bit to let hardware do the
* measurement. This isn't very accurate however, so for a better
- * precision lets try getting 20 different values for and use average.
+ * precision let's try getting 20 different values and use average.
*/
while (num < 20) {
regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index dcacf55c55ae..83ec13da9b2e 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -98,22 +98,27 @@ static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
return clk->rate;
}
-static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int iproc_asiu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div;
- if (rate == 0 || *parent_rate == 0)
+ if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
- if (rate == *parent_rate)
- return *parent_rate;
+ if (req->rate == req->best_parent_rate)
+ return 0;
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div < 2)
- return *parent_rate;
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ if (div < 2) {
+ req->rate = req->best_parent_rate;
- return *parent_rate / div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -168,7 +173,7 @@ static const struct clk_ops iproc_asiu_ops = {
.enable = iproc_asiu_clk_enable,
.disable = iproc_asiu_clk_disable,
.recalc_rate = iproc_asiu_clk_recalc_rate,
- .round_rate = iproc_asiu_clk_round_rate,
+ .determine_rate = iproc_asiu_clk_determine_rate,
.set_rate = iproc_asiu_clk_set_rate,
};
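One subtlety visible in the iproc-asiu conversion above: an early return 0 from .determine_rate only makes sense when req->rate already holds an achievable value, while genuinely impossible requests should still return a negative errno. The guard logic, restated as a hedged sketch:

	if (req->rate == 0 || req->best_parent_rate == 0)
		return -EINVAL;	/* reject impossible requests outright */

	if (req->rate == req->best_parent_rate)
		return 0;	/* req->rate is already the exact answer */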
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index ec5749e301ba..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/string_choices.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -52,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
@@ -502,7 +485,7 @@ static int clk_gate(struct ccu_data *ccu, const char *name,
return 0;
pr_err("%s: failed to %s gate for %s\n", __func__,
- enable ? "enable" : "disable", name);
+ str_enable_disable(enable), name);
return -EIO;
}
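str_enable_disable() from <linux/string_choices.h> maps a boolean to the strings "enable"/"disable", replacing open-coded ternaries in log messages. A simplified equivalent of the helper:

	static inline const char *str_enable_disable(bool v)
	{
		return v ? "enable" : "disable";
	}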
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index a18a8768feb4..1a9162f0ae31 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -34,6 +34,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
[RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+ [RPI_FIRMWARE_DISP_CLK_ID] = "disp",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -56,11 +57,19 @@ struct raspberrypi_clk_data {
struct raspberrypi_clk *rpi;
};
+static inline
+const struct raspberrypi_clk_data *clk_hw_to_data(const struct clk_hw *hw)
+{
+ return container_of(hw, struct raspberrypi_clk_data, hw);
+}
+
struct raspberrypi_clk_variant {
bool export;
char *clkdev;
unsigned long min_rate;
bool minimize;
+ bool maximize;
+ u32 flags;
};
static struct raspberrypi_clk_variant
@@ -68,6 +77,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_ARM_CLK_ID] = {
.export = true,
.clkdev = "cpu0",
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_CORE_CLK_ID] = {
.export = true,
@@ -83,6 +93,12 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* always use the minimum the drivers will let us.
*/
.minimize = true,
+
+ /*
+ * It should never be disabled as it drives the bus for
+ * everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_M2MC_CLK_ID] = {
.export = true,
@@ -108,21 +124,46 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* drivers will let us.
*/
.minimize = true,
+
+ /*
+ * As mentioned above, if this clock is disabled during boot
+ * the firmware will skip the HSM initialization, resulting
+ * in a bus lockup. Therefore, make sure it's enabled during
+ * boot; after that, it can be enabled/disabled by the
+ * driver.
+ */
+ .flags = CLK_IGNORE_UNUSED,
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
+ .maximize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
+ },
+ [RPI_FIRMWARE_ISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_DISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
};
@@ -153,7 +194,6 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
struct raspberrypi_firmware_prop msg = {
.id = cpu_to_le32(data->id),
.val = cpu_to_le32(*val),
- .disable_turbo = cpu_to_le32(1),
};
int ret;
@@ -168,16 +208,18 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_STATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s state: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
}
@@ -186,16 +228,18 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return val;
}
@@ -203,8 +247,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 _rate = rate;
int ret;
@@ -221,8 +264,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk_variant *variant = data->variant;
/*
@@ -244,7 +286,41 @@ static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
return 0;
}
+static int raspberrypi_fw_prepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = RPI_FIRMWARE_STATE_ENABLE_BIT;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to on: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static void raspberrypi_fw_unprepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to off: %d\n",
+ clk_hw_get_name(hw), ret);
+}
+
static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .prepare = raspberrypi_fw_prepare,
+ .unprepare = raspberrypi_fw_unprepare,
.is_prepared = raspberrypi_fw_is_prepared,
.recalc_rate = raspberrypi_fw_get_rate,
.determine_rate = raspberrypi_fw_dumb_determine_rate,
@@ -271,8 +347,10 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
+ init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
data->hw.init = &init;
@@ -309,6 +387,9 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
}
}
+ if (variant->maximize)
+ variant->min_rate = max_rate;
+
if (variant->min_rate) {
unsigned long rate;
@@ -465,4 +546,3 @@ module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
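With .prepare/.unprepare now wired to RPI_FIRMWARE_SET_CLOCK_STATE, a consumer's prepare call actually reaches the firmware instead of being a no-op. A minimal consumer sketch (the device and clock lookup are illustrative, not taken from the patch):

	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* ends in raspberrypi_fw_prepare() */
	if (ret)
		return ret;
	/* ... use the device ... */
	clk_disable_unprepare(clk);	/* ends in raspberrypi_fw_unprepare() */

The per-variant CLK_IS_CRITICAL / CLK_IGNORE_UNUSED flags keep clocks that must never (or must not yet) be gated out of reach of this path.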
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index aa89b4c9464e..79f3d37a0ee0 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -319,7 +319,7 @@ berlin2_avpll_channel_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
/*
* AV3 divider start at VCO_CTRL14, bit 7; each 4 bits wide.
- * AV2/AV3 form a fractional divider, where only specfic values for AV3
+ * AV2/AV3 form a fractional divider, where only specific values for AV3
* are allowed. AV3 != 0 divides by AV2/2, AV3=0 is bypass.
*/
if (ch->index < 6) {
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 39472a51530a..d3ced4a0f029 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -212,13 +212,15 @@ static unsigned long applnco_recalc_rate(struct clk_hw *hw,
((u64) div) * incbase + inc1);
}
-static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int applnco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
- unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+ unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+ unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET;
- return clamp(rate, lo, hi);
+ req->rate = clamp(req->rate, lo, hi);
+
+ return 0;
}
static int applnco_enable(struct clk_hw *hw)
@@ -246,7 +248,7 @@ static void applnco_disable(struct clk_hw *hw)
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
- .round_rate = applnco_round_rate,
+ .determine_rate = applnco_determine_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
@@ -297,6 +299,9 @@ static int applnco_probe(struct platform_device *pdev)
memset(&init, 0, sizeof(init));
init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s-%d", np->name, i);
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &applnco_ops;
init.parent_data = &pdata;
init.num_parents = 1;
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 3432c801f1bd..595cfa533fb9 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -92,8 +92,8 @@ static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
{ CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
{ CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
- /* i2s has two deviders: one for only external mclk and internal
- * devider for all clks. */
+ /* i2s has two dividers: one for the external mclk only and an
+ * internal divider for all clks. */
{ CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
{ CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
{ CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index faf88324f7b1..114afc13d640 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -92,7 +92,7 @@ static u8 soc_rev;
*
* There are some gates that do not have an associated reset; these are
* handled by using -1 as the index for the reset, and the consumer must
- * explictly assert/deassert reset lines as required.
+ * explicitly assert/deassert reset lines as required.
*
* Clocks marked with CLK_IS_CRITICAL:
*
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index bf4d8ddc93ae..fa5ccef73e60 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -6,13 +6,17 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
-#include <linux/platform_device.h>
+#include <linux/adi-axi-common.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/slab.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/module.h>
-#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define AXI_CLKGEN_V2_REG_RESET 0x40
#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
@@ -27,6 +31,9 @@
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+#define ADI_CLKGEN_REG_FPGA_VOLTAGE 0x0140
+#define ADI_CLKGEN_INFO_FPGA_VOLTAGE(val) ((val) & GENMASK(15, 0))
+
#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
@@ -89,7 +96,7 @@ static uint32_t axi_clkgen_lookup_filter(unsigned int m)
}
}
-static const uint32_t axi_clkgen_lock_table[] = {
+static const u32 axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
@@ -101,7 +108,7 @@ static const uint32_t axi_clkgen_lock_table[] = {
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
-static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+static u32 axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
@@ -117,14 +124,15 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
- .fpfd_max = 300000,
+ .fpfd_max = 450000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
- unsigned long fin, unsigned long fout,
- unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+ unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m,
+ unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
@@ -140,15 +148,15 @@ static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
- d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
+ d_min = max(DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min(fin / limits->fpfd_min, 80);
again:
fvco_min_fract = limits->fvco_min << fract_shift;
fvco_max_fract = limits->fvco_max << fract_shift;
- m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
- m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
+ m_min = max(DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
+ m_max = min(fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
@@ -171,7 +179,7 @@ again:
}
}
- /* Lets see if we find a better setting in fractional mode */
+ /* Let's see if we find a better setting in fractional mode */
if (fract_shift == 0) {
fract_shift = 3;
goto again;
@@ -191,9 +199,9 @@ struct axi_clkgen_div_params {
};
static void axi_clkgen_calc_clk_params(unsigned int divider,
- unsigned int frac_divider, struct axi_clkgen_div_params *params)
+ unsigned int frac_divider,
+ struct axi_clkgen_div_params *params)
{
-
memset(params, 0x0, sizeof(*params));
if (divider == 1) {
@@ -221,7 +229,7 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
if (params->edge == 0 || frac_divider == 1)
params->low--;
if (((params->edge == 0) ^ (frac_divider == 1)) ||
- (divider == 2 && frac_divider == 1))
+ (divider == 2 && frac_divider == 1))
params->frac_wf_f = 1;
params->frac_phase = params->edge * 4 + frac_divider / 2;
@@ -229,13 +237,13 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val)
+ unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
@@ -256,7 +264,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
}
static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
int ret;
@@ -280,7 +288,8 @@ static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
}
static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
+ unsigned int reg, unsigned int val,
+ unsigned int mask)
{
unsigned int reg_val = 0;
int ret;
@@ -301,8 +310,7 @@ static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen, bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -318,31 +326,31 @@ static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
}
static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2, unsigned int reg3,
- struct axi_clkgen_div_params *params)
+ unsigned int reg1, unsigned int reg2,
+ unsigned int reg3,
+ struct axi_clkgen_div_params *params)
{
axi_clkgen_mmcm_write(axi_clkgen, reg1,
- (params->high << 6) | params->low, 0xefff);
+ (params->high << 6) | params->low, 0xefff);
axi_clkgen_mmcm_write(axi_clkgen, reg2,
- (params->frac << 12) | (params->frac_en << 11) |
- (params->frac_wf_r << 10) | (params->edge << 7) |
- (params->nocount << 6), 0x7fff);
+ (params->frac << 12) | (params->frac_en << 11) |
+ (params->frac_wf_r << 10) | (params->edge << 7) |
+ (params->nocount << 6), 0x7fff);
if (reg3 != 0) {
axi_clkgen_mmcm_write(axi_clkgen, reg3,
- (params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
+ (params->frac_phase << 11) | (params->frac_wf_f << 10),
+ 0x3c00);
}
}
-static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
- unsigned long rate, unsigned long parent_rate)
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
- uint32_t power = 0;
- uint32_t filter;
- uint32_t lock;
+ u32 power = 0, filter, lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
@@ -362,22 +370,22 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
- MMCM_REG_CLKOUT5_2, &params);
+ MMCM_REG_CLKOUT5_2, &params);
axi_clkgen_calc_clk_params(d, 0, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
- (params.edge << 13) | (params.nocount << 12) |
- (params.high << 6) | params.low, 0x3fff);
+ (params.edge << 13) | (params.nocount << 12) |
+ (params.high << 6) | params.low, 0x3fff);
axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
- MMCM_REG_CLKOUT6_2, &params);
+ MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
- (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
- (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
@@ -406,7 +414,7 @@ static int axi_clkgen_determine_rate(struct clk_hw *hw,
}
static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2)
+ unsigned int reg1, unsigned int reg2)
{
unsigned int val1, val2;
unsigned int div;
@@ -433,7 +441,7 @@ static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
@@ -441,9 +449,9 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int val;
dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
- MMCM_REG_CLKOUT0_2);
+ MMCM_REG_CLKOUT0_2);
m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
- MMCM_REG_CLK_FB2);
+ MMCM_REG_CLK_FB2);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
if (val & MMCM_CLK_DIV_NOCOUNT)
@@ -495,6 +503,54 @@ static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
return parent;
}
+static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
+ struct device *dev)
+{
+ unsigned int tech, family, speed_grade, reg_value;
+
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_FPGA_INFO, &reg_value);
+ tech = ADI_AXI_INFO_FPGA_TECH(reg_value);
+ family = ADI_AXI_INFO_FPGA_FAMILY(reg_value);
+ speed_grade = ADI_AXI_INFO_FPGA_SPEED_GRADE(reg_value);
+
+ axi_clkgen->limits.fpfd_min = 10000;
+ axi_clkgen->limits.fvco_min = 600000;
+
+ switch (speed_grade) {
+ case ADI_AXI_FPGA_SPEED_1 ... ADI_AXI_FPGA_SPEED_1LV:
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ break;
+ case ADI_AXI_FPGA_SPEED_2 ... ADI_AXI_FPGA_SPEED_2LV:
+ axi_clkgen->limits.fvco_max = 1440000;
+ axi_clkgen->limits.fpfd_max = 500000;
+ if (family == ADI_AXI_FPGA_FAMILY_KINTEX || family == ADI_AXI_FPGA_FAMILY_ARTIX) {
+ axi_clkgen_read(axi_clkgen, ADI_CLKGEN_REG_FPGA_VOLTAGE,
+ &reg_value);
+ if (ADI_CLKGEN_INFO_FPGA_VOLTAGE(reg_value) < 950) {
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ }
+ }
+ break;
+ case ADI_AXI_FPGA_SPEED_3:
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fpfd_max = 550000;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
+ speed_grade);
+ }
+
+ /* Override the VCO limits for UltraScale+ */
+ if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fvco_min = 800000;
+ }
+
+ return 0;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.determine_rate = axi_clkgen_determine_rate,
@@ -509,9 +565,11 @@ static int axi_clkgen_probe(struct platform_device *pdev)
{
const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
+ unsigned int pcore_version;
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
+ struct clk *axi_clk;
unsigned int i;
int ret;
@@ -528,8 +586,24 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return PTR_ERR(axi_clkgen->base);
init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
- if (init.num_parents < 1 || init.num_parents > 2)
- return -EINVAL;
+
+ axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
+ if (!IS_ERR(axi_clk)) {
+ if (init.num_parents < 2 || init.num_parents > 3)
+ return -EINVAL;
+
+ init.num_parents -= 1;
+ } else {
+ /*
+ * Legacy... So that old DTs which do not have clock-names still
+ * work. In this case we don't explicitly enable the AXI bus
+ * clock.
+ */
+ if (PTR_ERR(axi_clk) != -ENOENT)
+ return PTR_ERR(axi_clk);
+ if (init.num_parents < 1 || init.num_parents > 2)
+ return -EINVAL;
+ }
for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
@@ -537,11 +611,20 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
- memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_VERSION, &pcore_version);
+
+ if (ADI_AXI_PCORE_VER_MAJOR(pcore_version) > 0x04) {
+ ret = axi_clkgen_setup_limits(axi_clkgen, &pdev->dev);
+ if (ret)
+ return ret;
+ } else {
+ memcpy(&axi_clkgen->limits, dflt_limits,
+ sizeof(axi_clkgen->limits));
+ }
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
- &clk_name);
+ &clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
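devm_clk_get_enabled() both acquires and enables the bus clock and ties the disable/put to device teardown; -ENOENT here simply means the DT does not name an "s_axi_aclk", which the probe treats as the legacy binding. The general pattern for such an optional named clock, as a sketch:

	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -ENOENT)
			return PTR_ERR(clk);	/* real error: fail/defer */
		clk = NULL;			/* legacy DT: clock not listed */
	}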
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 4a3462ee8f3e..3823383f3fa6 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -529,7 +529,6 @@ static const struct regmap_config axmclk_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
- .fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 002f7360b1c6..dac190bc6e19 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -608,8 +608,8 @@ static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int bm1880_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
@@ -621,13 +621,18 @@ static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ div->table,
+ div->width, div->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, div->table,
- div->width, div->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div->table, div->width, div->flags);
+
+ return 0;
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -665,7 +670,7 @@ static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
- .round_rate = bm1880_clk_div_round_rate,
+ .determine_rate = bm1880_clk_div_determine_rate,
.set_rate = bm1880_clk_div_set_rate,
};
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index dd3d42d9ad86..a495d313b02f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -183,8 +183,8 @@ static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce706_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
@@ -192,9 +192,9 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
- __func__, rate, *parent_rate);
+ __func__, req->rate, req->best_parent_rate);
- rational_best_approximation(rate, *parent_rate,
+ rational_best_approximation(req->rate, req->best_parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
@@ -204,9 +204,11 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
- res = (u64)*parent_rate * hwd->mul;
+ res = (u64)req->best_parent_rate * hwd->mul;
do_div(res, hwd->div);
- return res;
+ req->rate = res;
+
+ return 0;
}
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -251,7 +253,7 @@ static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
- .round_rate = cdce706_pll_round_rate,
+ .determine_rate = cdce706_pll_determine_rate,
.set_rate = cdce706_pll_set_rate,
};
@@ -678,7 +680,7 @@ MODULE_DEVICE_TABLE(of, cdce706_dt_match);
#endif
static const struct i2c_device_id cdce706_id[] = {
- { "cdce706", 0 },
+ { "cdce706" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cdce706_id);
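rational_best_approximation() (lib/math/rational.c) picks the best n/m fraction within the given limits; the achievable rate then follows as parent * n / m, computed in 64 bits to avoid overflow. A worked sketch with illustrative numbers:

	unsigned long mul, div;
	u64 res;

	/* Approximate 22.5792 MHz from a 27 MHz parent (illustrative). */
	rational_best_approximation(22579200, 27000000,
				    CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
				    &mul, &div);
	res = (u64)27000000 * mul;
	do_div(res, div);		/* res now holds the rounded rate */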
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index e48be7a6c0e2..0b2ad21e6e4d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -128,13 +128,15 @@ static void cdce925_pll_find_rate(unsigned long rate,
}
}
-static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u16 n, m;
- cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
- return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+ cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m);
+ req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m);
+
+ return 0;
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -266,7 +268,7 @@ static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
- .round_rate = cdce925_pll_round_rate,
+ .determine_rate = cdce925_pll_determine_rate,
.set_rate = cdce925_pll_set_rate,
};
@@ -420,20 +422,23 @@ static unsigned long cdce925_clk_best_parent_rate(
return rate * pdiv_best;
}
-static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_calc_divider(req->rate, l_parent_rate);
- if (l_parent_rate / divider != rate) {
- l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
- divider = cdce925_calc_divider(rate, l_parent_rate);
- *parent_rate = l_parent_rate;
+ if (l_parent_rate / divider != req->rate) {
+ l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate);
+ divider = cdce925_calc_divider(req->rate, l_parent_rate);
+ req->best_parent_rate = l_parent_rate;
}
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -451,7 +456,7 @@ static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_round_rate,
+ .determine_rate = cdce925_clk_determine_rate,
.set_rate = cdce925_clk_set_rate,
};
@@ -473,14 +478,17 @@ static u16 cdce925_y1_calc_divider(unsigned long rate,
return (u16)divider;
}
-static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_y1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate);
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -498,7 +506,7 @@ static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_y1_round_rate,
+ .determine_rate = cdce925_clk_y1_determine_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
@@ -601,7 +609,7 @@ static int cdce925_regulator_enable(struct device *dev, const char *name)
/* The CDCE925 uses a funky way to read/write registers. Bulk mode is
* just weird, so just use the single byte mode exclusively. */
-static struct regmap_bus regmap_cdce925_bus = {
+static const struct regmap_bus regmap_cdce925_bus = {
.write = cdce925_regmap_i2c_write,
.read = cdce925_regmap_i2c_read,
};
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index f8417ee2961a..402ab74d9bfb 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -99,7 +99,7 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
*/
tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
/* Timer2 in prescale mode.
- * Value writen is automatically re-loaded when
+ * Value written is automatically re-loaded when
* the counter underflows.
*/
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 35cb93ad298a..8800472ba63f 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -305,15 +305,19 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
-static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cs2000_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
- ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
+ ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate,
+ priv->lf_ratio);
- return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+ req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate,
+ priv->lf_ratio);
+
+ return 0;
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
@@ -430,7 +434,7 @@ static u8 cs2000_get_parent(struct clk_hw *hw)
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
- .round_rate = cs2000_round_rate,
+ .determine_rate = cs2000_determine_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 82ae1f26e634..5368d92d9b39 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -218,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
clk_bulk_put_all(devres->num_clks, devres->clks);
}
-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
- struct clk_bulk_data **clks)
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks)
{
struct clk_bulk_devres *devres;
int ret;
@@ -244,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
} else {
clk_bulk_put_all(devres->num_clks, devres->clks);
devres_free(devres);
+ return ret;
}
- return ret;
+ return devres->num_clks;
}
-EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
static int devm_clk_match(struct device *dev, void *res, void *data)
{
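The rename from devm_clk_bulk_get_all_enable() to devm_clk_bulk_get_all_enabled() tracks a behaviour change: on success the function now returns the number of clocks obtained rather than 0, so callers must treat only negative values as errors. Hedged usage sketch:

	struct clk_bulk_data *clks;
	int num;

	num = devm_clk_bulk_get_all_enabled(dev, &clks);
	if (num < 0)
		return num;	/* error; 0 just means no clocks listed */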
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a2c2b5203b0a..2601b6155afb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -72,6 +72,8 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << clk_div_mask(width);
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (clk_div_mask(width) + 1);
if (table)
return _get_table_maxdiv(table, width);
return clk_div_mask(width) + 1;
@@ -97,6 +99,8 @@ static unsigned int _get_div(const struct clk_div_table *table,
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return val ? val : clk_div_mask(width) + 1;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (val + 1);
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -122,6 +126,8 @@ static unsigned int _get_val(const struct clk_div_table *table,
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return (div == clk_div_mask(width) + 1) ? 0 : div;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return (div >> 1) - 1;
if (table)
return _get_table_val(table, div);
return div - 1;
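The three hunks above define one consistent encoding for the new CLK_DIVIDER_EVEN_INTEGERS flag: register value val selects divider 2 * (val + 1), so val = 0, 1, 2, ... yields /2, /4, /6, ... The round trip, spelled out:

	div = 2 * (val + 1);			/* _get_div() */
	val = (div >> 1) - 1;			/* _get_val(), div must be even */
	maxdiv = 2 * (clk_div_mask(width) + 1);	/* _get_maxdiv() */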
@@ -425,27 +431,6 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- /* if read only, just return current value */
- if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- u32 val;
-
- val = clk_div_readl(divider) >> divider->shift;
- val &= clk_div_mask(divider->width);
-
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
- }
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -521,7 +506,6 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
@@ -529,7 +513,6 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
@@ -538,7 +521,8 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_divider *div;
@@ -610,8 +594,8 @@ EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock)
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw *hw;
@@ -664,7 +648,8 @@ struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 22fbea61c3dc..15bbdeb60b8e 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -3,8 +3,10 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/en7523-clk.h>
#include <dt-bindings/reset/airoha,en7581-reset.h>
@@ -31,19 +33,14 @@
#define REG_RESET_CONTROL_PCIE1 BIT(27)
#define REG_RESET_CONTROL_PCIE2 BIT(26)
/* EN7581 */
-#define REG_PCIE0_MEM 0x00
-#define REG_PCIE0_MEM_MASK 0x04
-#define REG_PCIE1_MEM 0x08
-#define REG_PCIE1_MEM_MASK 0x0c
-#define REG_PCIE2_MEM 0x10
-#define REG_PCIE2_MEM_MASK 0x14
#define REG_NP_SCU_PCIC 0x88
#define REG_NP_SCU_SSTR 0x9c
#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
+#define REG_CRYPTO_CLKSRC2 0x20c
-#define REG_RST_CTRL2 0x00
-#define REG_RST_CTRL1 0x04
+#define REG_RST_CTRL2 0x830
+#define REG_RST_CTRL1 0x834
struct en_clk_desc {
int id;
@@ -78,13 +75,10 @@ struct en_rst_data {
};
struct en_clk_soc_data {
+ u32 num_clocks;
const struct clk_ops pcie_ops;
- struct {
- const u16 *bank_ofs;
- const u16 *idx_map;
- u16 idx_map_nr;
- } reset;
- int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
+ int (*hw_init)(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data);
};
static const u32 gsw_base[] = { 400000000, 500000000 };
@@ -92,6 +86,12 @@ static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
+/* EN7581 */
+static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
+static const u32 bus7581_base[] = { 600000000, 540000000 };
+static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
+static const u32 crypto_base[] = { 540000000, 480000000 };
+static const u32 emmc7581_base[] = { 200000000, 150000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
@@ -189,6 +189,111 @@ static const struct en_clk_desc en7523_base_clks[] = {
}
};
+static const struct en_clk_desc en7581_base_clks[] = {
+ {
+ .id = EN7523_CLK_GSW,
+ .name = "gsw",
+
+ .base_reg = REG_GSW_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = gsw_base,
+ .n_base_values = ARRAY_SIZE(gsw_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_EMI,
+ .name = "emi",
+
+ .base_reg = REG_EMI_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = emi7581_base,
+ .n_base_values = ARRAY_SIZE(emi7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_BUS,
+ .name = "bus",
+
+ .base_reg = REG_BUS_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = bus7581_base,
+ .n_base_values = ARRAY_SIZE(bus7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_SLIC,
+ .name = "slic",
+
+ .base_reg = REG_SPI_CLK_FREQ_SEL,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = slic_base,
+ .n_base_values = ARRAY_SIZE(slic_base),
+
+ .div_reg = REG_SPI_CLK_DIV_SEL,
+ .div_bits = 5,
+ .div_shift = 24,
+ .div_val0 = 20,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_SPI,
+ .name = "spi",
+
+ .base_reg = REG_SPI_CLK_DIV_SEL,
+
+ .base_value = 400000000,
+
+ .div_bits = 5,
+ .div_shift = 8,
+ .div_val0 = 40,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_NPU,
+ .name = "npu",
+
+ .base_reg = REG_NPU_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = npu7581_base,
+ .n_base_values = ARRAY_SIZE(npu7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_CRYPTO,
+ .name = "crypto",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = crypto_base,
+ .n_base_values = ARRAY_SIZE(crypto_base),
+ }, {
+ .id = EN7581_CLK_EMMC,
+ .name = "emmc",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 12,
+ .base_values = emmc7581_base,
+ .n_base_values = ARRAY_SIZE(emmc7581_base),
+ }
+};
+
static const u16 en7581_rst_ofs[] = {
REG_RST_CTRL2,
REG_RST_CTRL1,
@@ -252,15 +357,11 @@ static const u16 en7581_rst_map[] = {
[EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
};
-static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 val;
-
if (!desc->base_bits)
return desc->base_value;
- val = readl(base + desc->base_reg);
val >>= desc->base_shift;
val &= (1 << desc->base_bits) - 1;
@@ -270,16 +371,11 @@ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
return desc->base_values[val];
}
-static u32 en7523_get_div(void __iomem *base, int i)
+static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val)
{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 reg, val;
-
if (!desc->div_bits)
return 1;
- reg = desc->div_reg ? desc->div_reg : desc->base_reg;
- val = readl(base + reg);
val >>= desc->div_shift;
val &= (1 << desc->div_bits) - 1;
@@ -393,7 +489,6 @@ static int en7581_pci_enable(struct clk_hw *hw)
REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val | mask, np_base + REG_PCI_CONTROL);
- msleep(250);
return 0;
}
@@ -412,44 +507,81 @@ static void en7581_pci_disable(struct clk_hw *hw)
usleep_range(1000, 2000);
}
-static int en7581_clk_hw_init(struct platform_device *pdev,
- void __iomem *np_base)
+static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, void __iomem *np_base)
{
- void __iomem *pb_base;
- u32 val;
+ struct clk_hw *hw;
+ u32 rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+ u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ u32 val = readl(base + desc->base_reg);
- pb_base = devm_platform_ioremap_resource(pdev, 3);
- if (IS_ERR(pb_base))
- return PTR_ERR(pb_base);
+ rate = en7523_get_base_rate(desc, val);
+ val = readl(base + reg);
+ rate /= en7523_get_div(desc, val);
- val = readl(np_base + REG_NP_SCU_SSTR);
- val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
- writel(val, np_base + REG_NP_SCU_SSTR);
- val = readl(np_base + REG_NP_SCU_PCIC);
- writel(val | 3, np_base + REG_NP_SCU_PCIC);
+ hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ desc->name, PTR_ERR(hw));
+ continue;
+ }
- writel(0x20000000, pb_base + REG_PCIE0_MEM);
- writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
- writel(0x24000000, pb_base + REG_PCIE1_MEM);
- writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
- writel(0x28000000, pb_base + REG_PCIE2_MEM);
- writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
+ clk_data->hws[desc->id] = hw;
+ }
+
+ hw = en7523_register_pcie_clk(dev, np_base);
+ clk_data->hws[EN7523_CLK_PCIE] = hw;
+}
+
+static int en7523_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
+{
+ void __iomem *base, *np_base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(np_base))
+ return PTR_ERR(np_base);
+
+ en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
return 0;
}
-static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
- void __iomem *base, void __iomem *np_base)
+static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ struct regmap *map, void __iomem *base)
{
struct clk_hw *hw;
u32 rate;
int i;
- for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
- const struct en_clk_desc *desc = &en7523_base_clks[i];
+ for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7581_base_clks[i];
+ u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ int err;
+
+ err = regmap_read(map, desc->base_reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk rate %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate = en7523_get_base_rate(desc, val);
- rate = en7523_get_base_rate(base, i);
- rate /= en7523_get_div(base, i);
+ err = regmap_read(map, reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk div %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate /= en7523_get_div(desc, val);
hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
if (IS_ERR(hw)) {
@@ -461,10 +593,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
clk_data->hws[desc->id] = hw;
}
- hw = en7523_register_pcie_clk(dev, np_base);
+ hw = en7523_register_pcie_clk(dev, base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_reset_update(struct reset_controller_dev *rcdev,
@@ -516,38 +646,27 @@ static int en7523_reset_xlate(struct reset_controller_dev *rcdev,
return rst_data->idx_map[reset_spec->args[0]];
}
-static const struct reset_control_ops en7523_reset_ops = {
+static const struct reset_control_ops en7581_reset_ops = {
.assert = en7523_reset_assert,
.deassert = en7523_reset_deassert,
.status = en7523_reset_status,
};
-static int en7523_reset_register(struct platform_device *pdev,
- const struct en_clk_soc_data *soc_data)
+static int en7581_reset_register(struct device *dev, void __iomem *base)
{
- struct device *dev = &pdev->dev;
struct en_rst_data *rst_data;
- void __iomem *base;
-
- /* no reset lines available */
- if (!soc_data->reset.idx_map_nr)
- return 0;
-
- base = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(base))
- return PTR_ERR(base);
rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
if (!rst_data)
return -ENOMEM;
- rst_data->bank_ofs = soc_data->reset.bank_ofs;
- rst_data->idx_map = soc_data->reset.idx_map;
+ rst_data->bank_ofs = en7581_rst_ofs;
+ rst_data->idx_map = en7581_rst_map;
rst_data->base = base;
- rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr;
+ rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map);
rst_data->rcdev.of_xlate = en7523_reset_xlate;
- rst_data->rcdev.ops = &en7523_reset_ops;
+ rst_data->rcdev.ops = &en7581_reset_ops;
rst_data->rcdev.of_node = dev->of_node;
rst_data->rcdev.of_reset_n_cells = 1;
rst_data->rcdev.owner = THIS_MODULE;
@@ -556,71 +675,73 @@ static int en7523_reset_register(struct platform_device *pdev,
return devm_reset_controller_register(dev, &rst_data->rcdev);
}
-static int en7523_clk_probe(struct platform_device *pdev)
+static int en7581_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
{
- struct device_node *node = pdev->dev.of_node;
- const struct en_clk_soc_data *soc_data;
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base, *np_base;
- int r;
+ struct regmap *map;
+ void __iomem *base;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
+ if (IS_ERR(map))
+ return PTR_ERR(map);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- np_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(np_base))
- return PTR_ERR(np_base);
+ en7581_register_clocks(&pdev->dev, clk_data, map, base);
+
+ val = readl(base + REG_NP_SCU_SSTR);
+ val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
+ writel(val, base + REG_NP_SCU_SSTR);
+ val = readl(base + REG_NP_SCU_PCIC);
+ writel(val | 3, base + REG_NP_SCU_PCIC);
+
+ return en7581_reset_register(&pdev->dev, base);
+}
+
+static int en7523_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct en_clk_soc_data *soc_data;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
soc_data = device_get_match_data(&pdev->dev);
- if (soc_data->hw_init) {
- r = soc_data->hw_init(pdev, np_base);
- if (r)
- return r;
- }
clk_data = devm_kzalloc(&pdev->dev,
- struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ struct_size(clk_data, hws, soc_data->num_clocks),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ clk_data->num = soc_data->num_clocks;
+ r = soc_data->hw_init(pdev, clk_data);
if (r)
- return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n",
- pdev->name);
-
- r = en7523_reset_register(pdev, soc_data);
- if (r) {
- of_clk_del_provider(node);
- return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n",
- pdev->name);
- }
+ return r;
- return 0;
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct en_clk_soc_data en7523_data = {
+ .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
.unprepare = en7523_pci_unprepare,
},
+ .hw_init = en7523_clk_hw_init,
};
static const struct en_clk_soc_data en7581_data = {
+	/* We increment num_clocks by 1 to account for the additional PCIe clock */
+ .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7581_pci_is_enabled,
.enable = en7581_pci_enable,
.disable = en7581_pci_disable,
},
- .reset = {
- .bank_ofs = en7581_rst_ofs,
- .idx_map = en7581_rst_map,
- .idx_map_nr = ARRAY_SIZE(en7581_rst_map),
- },
.hw_init = en7581_clk_hw_init,
};
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index f888aed79b11..972aadd11493 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -389,23 +389,25 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
}
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ep93xx_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ep93xx_clk *clk = ep93xx_clk_from(hw);
unsigned long best = 0, now;
unsigned int i;
for (i = 0; i < clk->num_div; i++) {
- if ((rate * clk->div[i]) == *parent_rate)
- return rate;
+ if (req->rate * clk->div[i] == req->best_parent_rate)
+ return 0;
- now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
- if (!best || is_best(rate, now, best))
+ now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]);
+ if (!best || is_best(req->rate, now, best))
best = now;
}
- return best;
+ req->rate = best;
+
+ return 0;
}
static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -437,7 +439,7 @@ static const struct clk_ops ep93xx_div_ops = {
.disable = ep93xx_clk_disable,
.is_enabled = ep93xx_clk_is_enabled,
.recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
+ .determine_rate = ep93xx_div_determine_rate,
.set_rate = ep93xx_div_set_rate,
};
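
Every .round_rate to .determine_rate conversion in this series follows the same shape: the parent rate moves from *prate to req->best_parent_rate, the chosen rate is written back into req->rate, and 0 is returned on success. A minimal sketch of the pattern, assuming a hypothetical driver "foo" with invented FOO_MIN_HZ/FOO_MAX_HZ limits:

#include <linux/clk-provider.h>
#include <linux/minmax.h>

static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/* Pick the closest supported rate; here, clamp to an invented range. */
	req->rate = clamp(req->rate, FOO_MIN_HZ, FOO_MAX_HZ);

	/* 0 means success; the chosen rate travels back in req->rate. */
	return 0;
}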
@@ -486,9 +488,10 @@ static const struct ep93xx_gate ep93xx_uarts[] = {
static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
{
struct clk_parent_data parent_data = { };
- unsigned int i, idx, ret, clk_uart_div;
+ unsigned int i, idx, clk_uart_div;
struct ep93xx_clk *clk;
u32 val;
+ int ret;
regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
@@ -586,9 +589,9 @@ static unsigned long calc_pll_rate(u64 rate, u32 config_word)
static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
{
- const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
- const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
- const char pclk_divisors[] = { 1, 2, 4, 8 };
+ static const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ static const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ static const char pclk_divisors[] = { 1, 2, 4, 8 };
struct clk_parent_data xtali = { .index = 0 };
unsigned int clk_f_div, clk_h_div, clk_p_div;
unsigned long clk_pll1_rate, clk_pll2_rate;
diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c
new file mode 100644
index 000000000000..ea1c3d78e7cd
--- /dev/null
+++ b/drivers/clk/clk-eyeq.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PLL clock driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
+ *
+ * This controller handles:
+ * - Read-only PLLs, all derived from the same main crystal clock.
+ * - Divider clocks, which are children of PLLs.
+ * - Fixed-factor clocks, also children of PLLs.
+ *
+ * Parent clock is expected to be constant. This driver's registers live in a
+ * shared region called OLB. Some PLLs and fixed-factors are initialised early
+ * by of_clk_init(); if so, two clk providers are registered.
+ *
+ * We use eqc_ as prefix, as in "EyeQ Clock", but much shorter.
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+/*
+ * Set pr_fmt() for printing from eqc_early_init().
+ * It is called at of_clk_init() stage (read: really early).
+ */
+#define pr_fmt(fmt) "clk-eyeq: " fmt
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/clock/mobileye,eyeq5-clk.h>
+
+/* In fractional mode, enables the fractional noise canceling DAC; otherwise no function. */
+#define PCSR0_DAC_EN BIT(0)
+/* Fractional or integer mode */
+#define PCSR0_DSM_EN BIT(1)
+#define PCSR0_PLL_EN BIT(2)
+/* All clocks output held at 0 */
+#define PCSR0_FOUTPOSTDIV_EN BIT(3)
+#define PCSR0_POST_DIV1 GENMASK(6, 4)
+#define PCSR0_POST_DIV2 GENMASK(9, 7)
+#define PCSR0_REF_DIV GENMASK(15, 10)
+#define PCSR0_INTIN GENMASK(27, 16)
+#define PCSR0_BYPASS BIT(28)
+/* Bits 30..29 are reserved */
+#define PCSR0_PLL_LOCKED BIT(31)
+
+#define PCSR1_RESET BIT(0)
+#define PCSR1_SSGC_DIV GENMASK(4, 1)
+/* Spread amplitude (% = 0.1 * SPREAD[4:0]) */
+#define PCSR1_SPREAD GENMASK(9, 5)
+#define PCSR1_DIS_SSCG BIT(10)
+/* Down-spread or center-spread */
+#define PCSR1_DOWN_SPREAD BIT(11)
+#define PCSR1_FRAC_IN GENMASK(31, 12)
+
+struct eqc_pll {
+ unsigned int index;
+ const char *name;
+ unsigned int reg64;
+};
+
+/*
+ * Divider clock. Divider is 2*(v+1), with v the register value.
+ * Min divider is 2, max is 2*(2^width).
+ */
+struct eqc_div {
+ unsigned int index;
+ const char *name;
+ unsigned int parent;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+};
+
+struct eqc_fixed_factor {
+ unsigned int index;
+ const char *name;
+ unsigned int mult;
+ unsigned int div;
+ unsigned int parent;
+};
+
+struct eqc_match_data {
+ unsigned int pll_count;
+ const struct eqc_pll *plls;
+
+ unsigned int div_count;
+ const struct eqc_div *divs;
+
+ unsigned int fixed_factor_count;
+ const struct eqc_fixed_factor *fixed_factors;
+
+ const char *reset_auxdev_name;
+ const char *pinctrl_auxdev_name;
+
+ unsigned int early_clk_count;
+};
+
+struct eqc_early_match_data {
+ unsigned int early_pll_count;
+ const struct eqc_pll *early_plls;
+
+ unsigned int early_fixed_factor_count;
+ const struct eqc_fixed_factor *early_fixed_factors;
+
+ /*
+ * We want our of_xlate callback to return -EPROBE_DEFER rather than
+ * dev_err() and -EINVAL. For that, we must know the total clock count.
+ */
+ unsigned int late_clk_count;
+};
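+
How the deferral works, for reference: of_clk_hw_onecell_get() returns cells->hws[clkspec->args[0]] as-is, so pre-filling a slot with an ERR_PTR propagates straight to the consumer. A sketch of the effect (the helper is the stock framework one; only the values are illustrative):

/*
 * cells->hws[i] = ERR_PTR(-EPROBE_DEFER);  // early provider: "retry later"
 * ...
 * clk = of_clk_get(np, i);                 // consumer sees -EPROBE_DEFER
 * ...
 * Once the late provider probes and registers the real clk_hw pointers,
 * its provider takes lookup priority and consumers succeed.
 */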
+
+/*
+ * Both factors (mult and div) must fit in 32 bits. When an operation overflows,
+ * this function throws away low bits so that factors still fit in 32 bits.
+ *
+ * Precision loss depends on the magnitude of mult and div. The worst
+ * theoretical loss is (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10,
+ * i.e. 1 Hz in every 4.3 GHz.
+ */
+static void eqc_pll_downshift_factors(unsigned long *mult, unsigned long *div)
+{
+ unsigned long biggest;
+ unsigned int shift;
+
+ /* This function can be removed if mult/div switch to unsigned long. */
+ static_assert(sizeof_field(struct clk_fixed_factor, mult) == sizeof(unsigned int));
+ static_assert(sizeof_field(struct clk_fixed_factor, div) == sizeof(unsigned int));
+
+ /* No overflow, nothing to be done. */
+ if (*mult <= UINT_MAX && *div <= UINT_MAX)
+ return;
+
+ /*
+ * Compute the shift required to bring the biggest factor into unsigned
+ * int range. That is, shift its highest set bit to the unsigned int
+ * most significant bit.
+ */
+ biggest = max(*mult, *div);
+ shift = __fls(biggest) - (BITS_PER_BYTE * sizeof(unsigned int)) + 1;
+
+ *mult >>= shift;
+ *div >>= shift;
+}
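+
A worked example of the downshift, with invented register values:

/*
 * mult = 0x300000000 (bit 33 set), div = 0x100000000:
 * biggest = 0x300000000, __fls(biggest) = 33, so shift = 33 - 32 + 1 = 2.
 * After shifting, mult = 0xC0000000 and div = 0x40000000; both now fit
 * in 32 bits and, as both were divisible by 4, the 3:1 ratio survives
 * exactly.
 */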
+
+static int eqc_pll_parse_registers(u32 r0, u32 r1, unsigned long *mult,
+ unsigned long *div, unsigned long *acc)
+{
+ u32 spread;
+
+ if (r0 & PCSR0_BYPASS) {
+ *mult = 1;
+ *div = 1;
+ *acc = 0;
+ return 0;
+ }
+
+ if (!(r0 & PCSR0_PLL_LOCKED))
+ return -EINVAL;
+
+ *mult = FIELD_GET(PCSR0_INTIN, r0);
+ *div = FIELD_GET(PCSR0_REF_DIV, r0);
+ if (r0 & PCSR0_FOUTPOSTDIV_EN)
+ *div *= FIELD_GET(PCSR0_POST_DIV1, r0) * FIELD_GET(PCSR0_POST_DIV2, r0);
+
+ /* Fractional mode, in 2^20 (0x100000) parts. */
+ if (r0 & PCSR0_DSM_EN) {
+ *div *= (1ULL << 20);
+ *mult = *mult * (1ULL << 20) + FIELD_GET(PCSR1_FRAC_IN, r1);
+ }
+
+ if (!*mult || !*div)
+ return -EINVAL;
+
+ if (r1 & (PCSR1_RESET | PCSR1_DIS_SSCG)) {
+ *acc = 0;
+ return 0;
+ }
+
+ /*
+ * Spread spectrum.
+ *
+ * Spread is 1/1000 parts of frequency, accuracy is half of
+ * that. To get accuracy, convert to ppb (parts per billion).
+ *
+	 * acc = spread * 1e6 / 2,
+	 * with acc in parts per billion and
+	 * spread in parts per thousand.
+ */
+ spread = FIELD_GET(PCSR1_SPREAD, r1);
+ *acc = spread * 500000;
+
+ if (r1 & PCSR1_DOWN_SPREAD) {
+ /*
+ * Downspreading: the central frequency is half a
+ * spread lower.
+ */
+ *mult *= 2000 - spread;
+ *div *= 2000;
+
+ /*
+ * Previous operation might overflow 32 bits. If it
+ * does, throw away the least amount of low bits.
+ */
+ eqc_pll_downshift_factors(mult, div);
+ }
+
+ return 0;
+}
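+
To make the decoding concrete, a worked example with hypothetical register values and a hypothetical 25 MHz reference:

/*
 * Integer mode: INTIN = 40, REF_DIV = 1, FOUTPOSTDIV_EN = 0, DSM_EN = 0
 *   -> mult = 40, div = 1, rate = 25 MHz * 40 / 1 = 1 GHz.
 * Fractional mode: same fields plus DSM_EN = 1, FRAC_IN = 0x80000 (2^19)
 *   -> mult = 40 * 2^20 + 2^19, div = 1 * 2^20,
 *      rate = 25 MHz * 40.5 = 1.0125 GHz.
 */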
+
+static void eqc_probe_init_plls(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ unsigned long mult, div, acc;
+ const struct eqc_pll *pll;
+ struct clk_hw *hw;
+ unsigned int i;
+ u32 r0, r1;
+ u64 val;
+ int ret;
+
+ for (i = 0; i < data->pll_count; i++) {
+ pll = &data->plls[i];
+
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ dev_warn(dev, "failed parsing state of %s\n", pll->name);
+ cells->hws[pll->index] = ERR_PTR(ret);
+ continue;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev,
+ dev->of_node, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n", pll->name, hw);
+ }
+}
+
+static void eqc_probe_init_divs(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ struct clk_parent_data parent_data = { };
+ const struct eqc_div *div;
+ struct clk_hw *parent;
+ void __iomem *reg;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ for (i = 0; i < data->div_count; i++) {
+ div = &data->divs[i];
+ reg = base + div->reg;
+ parent = cells->hws[div->parent];
+
+ if (IS_ERR(parent)) {
+ /* Parent is in early clk provider. */
+ parent_data.index = div->parent;
+ parent_data.hw = NULL;
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ parent_data.index = 0;
+ parent_data.hw = parent;
+ }
+
+ hw = clk_hw_register_divider_table_parent_data(dev, div->name,
+ &parent_data, 0, reg, div->shift, div->width,
+ CLK_DIVIDER_EVEN_INTEGERS, NULL, NULL);
+ cells->hws[div->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ div->name, hw);
+ }
+}
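+
Reading CLK_DIVIDER_EVEN_INTEGERS together with the eqc_div comment above, the register value v encodes the divider 2 * (v + 1):

/*
 * v = 0 -> /2, v = 1 -> /4, ..., v = 2^width - 1 -> /(2 * 2^width).
 * The 4-bit div-ospi field below therefore spans /2 up to /32.
 */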
+
+static void eqc_probe_init_fixed_factors(struct device *dev,
+ const struct eqc_match_data *data,
+ struct clk_hw_onecell_data *cells)
+{
+ const struct eqc_fixed_factor *ff;
+ struct clk_hw *hw, *parent_hw;
+ unsigned int i;
+
+ for (i = 0; i < data->fixed_factor_count; i++) {
+ ff = &data->fixed_factors[i];
+ parent_hw = cells->hws[ff->parent];
+
+ if (IS_ERR(parent_hw)) {
+ /* Parent is in early clk provider. */
+ hw = clk_hw_register_fixed_factor_index(dev, ff->name,
+ ff->parent, 0, ff->mult, ff->div);
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ hw = clk_hw_register_fixed_factor_parent_hw(dev, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ }
+
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ ff->name, hw);
+ }
+}
+
+static void eqc_auxdev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int eqc_auxdev_create(struct device *dev, void __iomem *base,
+ const char *name, u32 id)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->dev.parent = dev;
+ adev->dev.platform_data = (void __force *)base;
+ adev->dev.release = eqc_auxdev_release;
+ adev->id = id;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ auxiliary_device_uninit(adev);
+
+ return ret;
+}
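+
A sketch of the consuming side of this handoff; the driver name and probe body are hypothetical, only the platform_data convention comes from eqc_auxdev_create() above:

#include <linux/auxiliary_bus.h>
#include <linux/device.h>

static int eq_reset_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	/* eqc_auxdev_create() stashed the shared OLB mapping here. */
	void __iomem *base = (void __force __iomem *)dev_get_platdata(&adev->dev);

	/* ... register a reset controller (or pinctrl) backed by base ... */
	return 0;
}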
+
+static int eqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct eqc_match_data *data;
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+		return 0; /* No clocks or auxdevs; we are done. */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ /* Init optional reset auxiliary device. */
+ if (data->reset_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->reset_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->reset_auxdev_name, ret);
+ }
+
+ /* Init optional pinctrl auxiliary device. */
+ if (data->pinctrl_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->pinctrl_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->pinctrl_auxdev_name, ret);
+ }
+
+ if (data->pll_count + data->div_count + data->fixed_factor_count == 0)
+ return 0; /* Zero clocks, we are done. */
+
+ clk_count = data->pll_count + data->div_count +
+ data->fixed_factor_count + data->early_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ cells->num = clk_count;
+
+ /* Early PLLs are marked as errors: the early provider will get queried. */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EINVAL);
+
+ eqc_probe_init_plls(dev, data, base, cells);
+
+ eqc_probe_init_divs(dev, data, base, cells);
+
+ eqc_probe_init_fixed_factors(dev, data, cells);
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+}
+
+/* Required early for GIC timer (pll-cpu) and UARTs (pll-per). */
+static const struct eqc_pll eqc_eyeq5_early_plls[] = {
+ { .index = EQ5C_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+ { .index = EQ5C_PLL_PER, .name = "pll-per", .reg64 = 0x05C },
+};
+
+static const struct eqc_pll eqc_eyeq5_plls[] = {
+ { .index = EQ5C_PLL_VMP, .name = "pll-vmp", .reg64 = 0x034 },
+ { .index = EQ5C_PLL_PMA, .name = "pll-pma", .reg64 = 0x03C },
+ { .index = EQ5C_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+ { .index = EQ5C_PLL_DDR0, .name = "pll-ddr0", .reg64 = 0x04C },
+ { .index = EQ5C_PLL_PCI, .name = "pll-pci", .reg64 = 0x054 },
+ { .index = EQ5C_PLL_PMAC, .name = "pll-pmac", .reg64 = 0x064 },
+ { .index = EQ5C_PLL_MPC, .name = "pll-mpc", .reg64 = 0x06C },
+ { .index = EQ5C_PLL_DDR1, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+enum {
+ /*
+ * EQ5C_PLL_CPU children.
+ * EQ5C_PER_OCC_PCI is the last clock exposed in dt-bindings.
+ */
+ EQ5C_CPU_OCC = EQ5C_PER_OCC_PCI + 1,
+ EQ5C_CPU_SI_CSS0,
+ EQ5C_CPU_CPC,
+ EQ5C_CPU_CM,
+ EQ5C_CPU_MEM,
+ EQ5C_CPU_OCC_ISRAM,
+ EQ5C_CPU_ISRAM,
+ EQ5C_CPU_OCC_DBU,
+ EQ5C_CPU_SI_DBU_TP,
+
+ /*
+ * EQ5C_PLL_VDI children.
+ */
+ EQ5C_VDI_OCC_VDI,
+ EQ5C_VDI_VDI,
+ EQ5C_VDI_OCC_CAN_SER,
+ EQ5C_VDI_CAN_SER,
+ EQ5C_VDI_I2C_SER,
+
+ /*
+ * EQ5C_PLL_PER children.
+ */
+ EQ5C_PER_PERIPH,
+ EQ5C_PER_CAN,
+ EQ5C_PER_TIMER,
+ EQ5C_PER_CCF,
+ EQ5C_PER_OCC_MJPEG,
+ EQ5C_PER_HSM,
+ EQ5C_PER_MJPEG,
+ EQ5C_PER_FCMU_A,
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_early_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_OCC, "occ-cpu", 1, 1, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_CSS0, "si-css0", 1, 1, EQ5C_CPU_OCC },
+ { EQ5C_CPU_CORE0, "core0", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE1, "core1", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE2, "core2", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE3, "core3", 1, 1, EQ5C_CPU_SI_CSS0 },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_OCC, "occ-periph", 1, 16, EQ5C_PLL_PER },
+ { EQ5C_PER_UART, "uart", 1, 1, EQ5C_PER_OCC },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_CPC, "cpc", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CM, "cm", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_MEM, "mem", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_OCC_ISRAM, "occ-isram", 1, 2, EQ5C_PLL_CPU },
+ { EQ5C_CPU_ISRAM, "isram", 1, 1, EQ5C_CPU_OCC_ISRAM },
+ { EQ5C_CPU_OCC_DBU, "occ-dbu", 1, 10, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_DBU_TP, "si-dbu-tp", 1, 1, EQ5C_CPU_OCC_DBU },
+
+ /* EQ5C_PLL_VDI children */
+ { EQ5C_VDI_OCC_VDI, "occ-vdi", 1, 2, EQ5C_PLL_VDI },
+ { EQ5C_VDI_VDI, "vdi", 1, 1, EQ5C_VDI_OCC_VDI },
+ { EQ5C_VDI_OCC_CAN_SER, "occ-can-ser", 1, 16, EQ5C_PLL_VDI },
+ { EQ5C_VDI_CAN_SER, "can-ser", 1, 1, EQ5C_VDI_OCC_CAN_SER },
+ { EQ5C_VDI_I2C_SER, "i2c-ser", 1, 20, EQ5C_PLL_VDI },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_PERIPH, "periph", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_CAN, "can", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_SPI, "spi", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_I2C, "i2c", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_TIMER, "timer", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_GPIO, "gpio", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_EMMC, "emmc-sys", 1, 10, EQ5C_PLL_PER },
+ { EQ5C_PER_CCF, "ccf-ctrl", 1, 4, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_MJPEG, "occ-mjpeg", 1, 2, EQ5C_PLL_PER },
+ { EQ5C_PER_HSM, "hsm", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_MJPEG, "mjpeg", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_FCMU_A, "fcmu-a", 1, 20, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_PCI, "occ-pci-sys", 1, 8, EQ5C_PLL_PER },
+};
+
+static const struct eqc_div eqc_eyeq5_divs[] = {
+ {
+ .index = EQ5C_DIV_OSPI,
+ .name = "div-ospi",
+ .parent = EQ5C_PLL_PER,
+ .reg = 0x11C,
+ .shift = 0,
+ .width = 4,
+ },
+};
+
+static const struct eqc_early_match_data eqc_eyeq5_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq5_early_plls),
+ .early_plls = eqc_eyeq5_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq5_early_fixed_factors,
+
+ .late_clk_count = ARRAY_SIZE(eqc_eyeq5_plls) + ARRAY_SIZE(eqc_eyeq5_divs) +
+ ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+};
+
+static const struct eqc_match_data eqc_eyeq5_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq5_plls),
+ .plls = eqc_eyeq5_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq5_divs),
+ .divs = eqc_eyeq5_divs,
+
+ .fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+ .fixed_factors = eqc_eyeq5_fixed_factors,
+
+ .reset_auxdev_name = "reset",
+ .pinctrl_auxdev_name = "pinctrl",
+
+ .early_clk_count = ARRAY_SIZE(eqc_eyeq5_early_plls) +
+ ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+};
+
+static const struct eqc_pll eqc_eyeq6l_plls[] = {
+ { .index = EQ6LC_PLL_DDR, .name = "pll-ddr", .reg64 = 0x02C },
+ { .index = EQ6LC_PLL_CPU, .name = "pll-cpu", .reg64 = 0x034 }, /* also acc */
+ { .index = EQ6LC_PLL_PER, .name = "pll-per", .reg64 = 0x03C },
+ { .index = EQ6LC_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+};
+
+static const struct eqc_match_data eqc_eyeq6l_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6l_plls),
+ .plls = eqc_eyeq6l_plls,
+
+ .reset_auxdev_name = "reset",
+};
+
+static const struct eqc_match_data eqc_eyeq6h_west_match_data = {
+ .reset_auxdev_name = "reset_west",
+};
+
+static const struct eqc_pll eqc_eyeq6h_east_plls[] = {
+ { .index = 0, .name = "pll-east", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_east_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_east_plls),
+ .plls = eqc_eyeq6h_east_plls,
+
+ .reset_auxdev_name = "reset_east",
+};
+
+static const struct eqc_pll eqc_eyeq6h_south_plls[] = {
+ { .index = EQ6HC_SOUTH_PLL_VDI, .name = "pll-vdi", .reg64 = 0x000 },
+ { .index = EQ6HC_SOUTH_PLL_PCIE, .name = "pll-pcie", .reg64 = 0x008 },
+ { .index = EQ6HC_SOUTH_PLL_PER, .name = "pll-per", .reg64 = 0x010 },
+ { .index = EQ6HC_SOUTH_PLL_ISP, .name = "pll-isp", .reg64 = 0x018 },
+};
+
+static const struct eqc_div eqc_eyeq6h_south_divs[] = {
+ {
+ .index = EQ6HC_SOUTH_DIV_EMMC,
+ .name = "div-emmc",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x070,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_REF,
+ .name = "div-ospi-ref",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_SYS,
+ .name = "div-ospi-sys",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 8,
+ .width = 1,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_TSU,
+ .name = "div-tsu",
+ .parent = EQ6HC_SOUTH_PLL_PCIE,
+ .reg = 0x098,
+ .shift = 4,
+ .width = 8,
+ },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_south_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_south_plls),
+ .plls = eqc_eyeq6h_south_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq6h_south_divs),
+ .divs = eqc_eyeq6h_south_divs,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr0_plls[] = {
+ { .index = 0, .name = "pll-ddr0", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr0_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr0_plls),
+ .plls = eqc_eyeq6h_ddr0_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr1_plls[] = {
+ { .index = 0, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr1_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr1_plls),
+ .plls = eqc_eyeq6h_ddr1_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_acc_plls[] = {
+ { .index = EQ6HC_ACC_PLL_XNN, .name = "pll-xnn", .reg64 = 0x040 },
+ { .index = EQ6HC_ACC_PLL_VMP, .name = "pll-vmp", .reg64 = 0x050 },
+ { .index = EQ6HC_ACC_PLL_PMA, .name = "pll-pma", .reg64 = 0x05C },
+ { .index = EQ6HC_ACC_PLL_MPC, .name = "pll-mpc", .reg64 = 0x068 },
+ { .index = EQ6HC_ACC_PLL_NOC, .name = "pll-noc", .reg64 = 0x070 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_acc_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_acc_plls),
+ .plls = eqc_eyeq6h_acc_plls,
+
+ .reset_auxdev_name = "reset_acc",
+};
+
+static const struct of_device_id eqc_match_table[] = {
+ { .compatible = "mobileye,eyeq5-olb", .data = &eqc_eyeq5_match_data },
+ { .compatible = "mobileye,eyeq6l-olb", .data = &eqc_eyeq6l_match_data },
+ { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqc_eyeq6h_west_match_data },
+ { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqc_eyeq6h_east_match_data },
+ { .compatible = "mobileye,eyeq6h-south-olb", .data = &eqc_eyeq6h_south_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr0-olb", .data = &eqc_eyeq6h_ddr0_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr1-olb", .data = &eqc_eyeq6h_ddr1_match_data },
+ { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqc_eyeq6h_acc_match_data },
+ {}
+};
+
+static struct platform_driver eqc_driver = {
+ .probe = eqc_probe,
+ .driver = {
+ .name = "clk-eyeq",
+ .of_match_table = eqc_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(eqc_driver);
+
+/* Required early for GIC timer. */
+static const struct eqc_pll eqc_eyeq6h_central_early_plls[] = {
+ { .index = EQ6HC_CENTRAL_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_central_early_fixed_factors[] = {
+ { EQ6HC_CENTRAL_CPU_OCC, "occ-cpu", 1, 1, EQ6HC_CENTRAL_PLL_CPU },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_central_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_central_early_plls),
+ .early_plls = eqc_eyeq6h_central_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_central_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_central_early_fixed_factors,
+};
+
+/* Required early for UART. */
+static const struct eqc_pll eqc_eyeq6h_west_early_plls[] = {
+ { .index = EQ6HC_WEST_PLL_PER, .name = "pll-west", .reg64 = 0x074 },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_west_early_fixed_factors[] = {
+ { EQ6HC_WEST_PER_OCC, "west-per-occ", 1, 10, EQ6HC_WEST_PLL_PER },
+ { EQ6HC_WEST_PER_UART, "west-per-uart", 1, 1, EQ6HC_WEST_PER_OCC },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_west_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_west_early_plls),
+ .early_plls = eqc_eyeq6h_west_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_west_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_west_early_fixed_factors,
+};
+
+static void __init eqc_early_init(struct device_node *np,
+ const struct eqc_early_match_data *early_data)
+{
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ void __iomem *base;
+ int ret;
+
+ clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
+ early_data->late_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cells->num = clk_count;
+
+ /*
+ * Mark all clocks as deferred; some are registered here, the rest at
+ * platform device probe.
+ *
+ * Once the platform device is probed, its provider will take priority
+ * when looking up clocks.
+ */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Offsets (reg64) of early PLLs are relative to OLB block. */
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ unsigned long mult, div, acc;
+ struct clk_hw *hw;
+ u32 r0, r1;
+ u64 val;
+
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ pr_err("failed parsing state of %s\n", pll->name);
+ goto err;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(NULL,
+ np, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", pll->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ for (i = 0; i < early_data->early_fixed_factor_count; i++) {
+ const struct eqc_fixed_factor *ff = &early_data->early_fixed_factors[i];
+ struct clk_hw *parent_hw = cells->hws[ff->parent];
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor_parent_hw(NULL, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", ff->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+ if (ret) {
+ pr_err("failed registering clk provider: %d\n", ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ /*
+ * We are doomed. The system will not be able to boot.
+ *
+	 * Let's still try to be good citizens by freeing resources and
+	 * printing a last error message that might help debugging.
+ */
+
+ pr_err("failed clk init: %d\n", ret);
+
+ if (cells) {
+ of_clk_del_provider(np);
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ struct clk_hw *hw = cells->hws[pll->index];
+
+ if (!IS_ERR_OR_NULL(hw))
+ clk_hw_unregister_fixed_factor(hw);
+ }
+
+ kfree(cells);
+ }
+}
+
+static void __init eqc_eyeq5_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq5_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq5, "mobileye,eyeq5-olb", eqc_eyeq5_early_init);
+
+static void __init eqc_eyeq6h_central_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_central_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_central, "mobileye,eyeq6h-central-olb",
+ eqc_eyeq6h_central_early_init);
+
+static void __init eqc_eyeq6h_west_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_west_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_west, "mobileye,eyeq6h-west-olb",
+ eqc_eyeq6h_west_early_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8fba63fc70c5..de658c9e4c53 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -30,19 +30,21 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
- best_parent = (rate / fix->mult) * fix->div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / fix->mult) * fix->div;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return (*prate / fix->div) * fix->mult;
+ req->rate = (req->best_parent_rate / fix->div) * fix->mult;
+
+ return 0;
}
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -50,7 +52,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
{
/*
* We must report success but we can do so unconditionally because
- * clk_factor_round_rate returns values that ensure this call is a
+ * clk_factor_determine_rate returns values that ensure this call is a
* nop.
*/
@@ -69,7 +71,7 @@ static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
}
const struct clk_ops clk_fixed_factor_ops = {
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
.recalc_accuracy = clk_factor_recalc_accuracy,
@@ -241,6 +243,17 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_with_accuracy_fwname);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ const struct clk_parent_data pdata = { .index = index };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, &pdata,
+ flags, mult, div, 0, 0, false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_index);
+
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index da057172cc90..cd36a6e27f25 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -151,25 +151,32 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_fd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
- if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
- return *parent_rate;
+ if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (fd->approximation)
- fd->approximation(hw, rate, parent_rate, &m, &n);
+ fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n);
else
- clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, req->rate,
+ &req->best_parent_rate,
+ &m, &n);
- ret = (u64)*parent_rate * m;
+ ret = (u64)req->best_parent_rate * m;
do_div(ret, n);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
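
For intuition: the fractional divider produces rate = parent * m / n, and the general approximation helper reduces the requested ratio to the best m/n that fits the register widths (via rational_best_approximation()). A worked example with invented numbers:

/*
 * parent = 100 MHz, requested rate = 48 MHz:
 * 48000000 / 100000000 reduces to m/n = 12/25 (register widths permitting),
 * giving 100 MHz * 12 / 25 = 48 MHz exactly.
 */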
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +257,7 @@ static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
- .round_rate = clk_fd_round_rate,
+ .determine_rate = clk_fd_determine_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 68e585a02fd9..4746f8219132 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
/**
- * DOC: basic gatable clock which can gate and ungate its output
+ * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index 856b008e07c6..e94589c38568 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -126,13 +126,16 @@ static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
return 33000000;
}
-static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int gemini_pci_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* We support 33 and 66 MHz */
- if (rate < 48000000)
- return 33000000;
- return 66000000;
+ if (req->rate < 48000000)
+ req->rate = 33000000;
+ else
+ req->rate = 66000000;
+
+ return 0;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -179,7 +182,7 @@ static int gemini_pci_is_enabled(struct clk_hw *hw)
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
- .round_rate = gemini_pci_round_rate,
+ .determine_rate = gemini_pci_determine_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 5b114043771d..9099c57e2715 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -17,13 +17,15 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
* with gpio output
* Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gpio
+ * prepare - clk_(un)prepare are functional and control a gpio that can sleep
+ * enable - clk_enable and clk_disable are functional & control
+ * non-sleeping gpio
* rate - inherits rate from parent. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
@@ -199,7 +201,6 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
struct gpio_desc *gpiod;
struct clk_hw *hw;
bool is_mux;
- int ret;
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
@@ -211,17 +212,9 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
gpio_name = is_mux ? "select" : "enable";
gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret == -EPROBE_DEFER)
- pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
- node, __func__);
- else
- pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
- node, __func__,
- gpio_name);
- return ret;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod),
+ "Can't get '%s' named GPIO property\n", gpio_name);
if (is_mux)
hw = clk_hw_register_gpio_mux(dev, gpiod);
@@ -247,3 +240,187 @@ static struct platform_driver gpio_clk_driver = {
},
};
builtin_platform_driver(gpio_clk_driver);
+
+/**
+ * DOC: gated fixed clock, controlled with a gpio output and a regulator
+ * Traits of this clock:
+ * prepare - clk_prepare and clk_unprepare are functional & control the
+ * regulator and, optionally, a gpio that can sleep
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - rate is fixed and set on clock registration
+ * parent - fixed clock is a root clock and has no parent
+ */
+
+/**
+ * struct clk_gated_fixed - Gateable fixed rate clock
+ * @clk_gpio: instance of clk_gpio for gate-gpio
+ * @supply: supply regulator
+ * @rate: fixed rate
+ */
+struct clk_gated_fixed {
+ struct clk_gpio clk_gpio;
+ struct regulator *supply;
+ unsigned long rate;
+};
+
+#define to_clk_gated_fixed(_clk_gpio) container_of(_clk_gpio, struct clk_gated_fixed, clk_gpio)
+
+static unsigned long clk_gated_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return to_clk_gated_fixed(to_clk_gpio(hw))->rate;
+}
+
+static int clk_gated_fixed_prepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return 0;
+
+ return regulator_enable(clk->supply);
+}
+
+static void clk_gated_fixed_unprepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return;
+
+ regulator_disable(clk->supply);
+}
+
+static int clk_gated_fixed_is_prepared(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return true;
+
+ return regulator_is_enabled(clk->supply);
+}
+
+/*
+ * Fixed gated clock with non-sleeping gpio.
+ *
+ * Prepare operation turns on the supply regulator
+ * and the enable operation switches the enable-gpio.
+ */
+static const struct clk_ops clk_gated_fixed_ops = {
+ .prepare = clk_gated_fixed_prepare,
+ .unprepare = clk_gated_fixed_unprepare,
+ .is_prepared = clk_gated_fixed_is_prepared,
+ .enable = clk_gpio_gate_enable,
+ .disable = clk_gpio_gate_disable,
+ .is_enabled = clk_gpio_gate_is_enabled,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_sleeping_gated_fixed_prepare(struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_gated_fixed_prepare(hw);
+ if (ret)
+ return ret;
+
+ ret = clk_sleeping_gpio_gate_prepare(hw);
+ if (ret)
+ clk_gated_fixed_unprepare(hw);
+
+ return ret;
+}
+
+static void clk_sleeping_gated_fixed_unprepare(struct clk_hw *hw)
+{
+ clk_gated_fixed_unprepare(hw);
+ clk_sleeping_gpio_gate_unprepare(hw);
+}
+
+/*
+ * Fixed gated clock with a sleeping gpio.
+ *
+ * Enabling the supply regulator and switching the enable-gpio happens
+ * both in the prepare step.
+ * is_prepared only needs to check the gpio state, as toggling the
+ * gpio is the last step when preparing.
+ */
+static const struct clk_ops clk_sleeping_gated_fixed_ops = {
+ .prepare = clk_sleeping_gated_fixed_prepare,
+ .unprepare = clk_sleeping_gated_fixed_unprepare,
+ .is_prepared = clk_sleeping_gpio_gate_is_prepared,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_gated_fixed_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_gated_fixed *clk;
+ const struct clk_ops *ops;
+ const char *clk_name;
+ u32 rate;
+ int ret;
+
+ clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency\n");
+ clk->rate = rate;
+
+ ret = device_property_read_string(dev, "clock-output-names", &clk_name);
+ if (ret)
+ clk_name = fwnode_get_name(dev->fwnode);
+
+ clk->supply = devm_regulator_get_optional(dev, "vdd");
+ if (IS_ERR(clk->supply)) {
+ if (PTR_ERR(clk->supply) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(clk->supply),
+ "Failed to get regulator\n");
+ clk->supply = NULL;
+ }
+
+ clk->clk_gpio.gpiod = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(clk->clk_gpio.gpiod))
+ return dev_err_probe(dev, PTR_ERR(clk->clk_gpio.gpiod),
+ "Failed to get gpio\n");
+
+ if (gpiod_cansleep(clk->clk_gpio.gpiod))
+ ops = &clk_sleeping_gated_fixed_ops;
+ else
+ ops = &clk_gated_fixed_ops;
+
+ clk->clk_gpio.hw.init = CLK_HW_INIT_NO_PARENT(clk_name, ops, 0);
+
+ /* register the clock */
+ ret = devm_clk_hw_register(dev, &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id gated_fixed_clk_match_table[] = {
+ { .compatible = "gated-fixed-clock" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver gated_fixed_clk_driver = {
+ .probe = clk_gated_fixed_probe,
+ .driver = {
+ .name = "gated-fixed-clk",
+ .of_match_table = gated_fixed_clk_match_table,
+ },
+};
+builtin_platform_driver(gated_fixed_clk_driver);
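+
From a consumer's point of view the gated fixed clock behaves like any other clock: prepare handles the regulator and/or sleeping gpio, enable handles a non-sleeping gpio. A minimal sketch of hypothetical consumer code, not part of this patch:

#include <linux/clk.h>

static int consumer_probe_sketch(struct device *dev)
{
	/* One devres call covering get + prepare + enable. */
	struct clk *clk = devm_clk_get_enabled(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}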
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 6e68a41a70a1..cc583934ecf2 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -130,15 +130,17 @@ static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
*pdivf = divf;
}
-static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 divq, divf;
- unsigned long ref_freq = *parent_rate;
+ unsigned long ref_freq = req->best_parent_rate;
- clk_pll_calc(rate, ref_freq, &divq, &divf);
+ clk_pll_calc(req->rate, ref_freq, &divq, &divf);
- return (ref_freq * (divf + 1)) / (1 << divq);
+ req->rate = (ref_freq * (divf + 1)) / (1 << divq);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -185,7 +187,7 @@ static const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -227,16 +229,18 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_periclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
div++;
div &= ~0x1;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -255,7 +259,7 @@ static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
- .round_rate = clk_periclk_round_rate,
+ .determine_rate = clk_periclk_determine_rate,
.set_rate = clk_periclk_set_rate,
};
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 5d2a90addf1a..7d56a47c2aa7 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -197,8 +197,8 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hsdk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long best_rate;
@@ -211,13 +211,15 @@ static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
@@ -265,7 +267,7 @@ static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
return -EINVAL;
/*
- * Program divider to div-by-1 if we succesfuly set core clock below
+ * Program divider to div-by-1 if we successfully set core clock below
* 500MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
@@ -296,7 +298,7 @@ static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
- .round_rate = hsdk_pll_round_rate,
+ .determine_rate = hsdk_pll_determine_rate,
.set_rate = hsdk_pll_set_rate,
};
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 870fd7df50c1..16e0405fe28b 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -24,13 +24,20 @@
#define DIV_MAX 255
-static const char *clk_names[N_CLOCKS] = {
+static const char * const lan966x_clk_names[] = {
"qspi0", "qspi1", "qspi2", "sdmmc0",
"pi", "mcan0", "mcan1", "flexcom0",
"flexcom1", "flexcom2", "flexcom3",
"flexcom4", "timer1", "usb_refclk",
};
+static const char * const lan969x_clk_names[] = {
+ "qspi0", "qspi2", "sdmmc0", "sdmmc1",
+ "mcan0", "mcan1", "flexcom0",
+ "flexcom1", "flexcom2", "flexcom3",
+ "timer1", "usb_refclk",
+};
+
struct lan966x_gck {
struct clk_hw hw;
void __iomem *reg;
@@ -53,7 +60,7 @@ struct clk_gate_soc_desc {
int bit_idx;
};
-static const struct clk_gate_soc_desc clk_gate_desc[] = {
+static const struct clk_gate_soc_desc lan966x_clk_gate_desc[] = {
{ "uhphs", 11 },
{ "udphs", 10 },
{ "mcramc", 9 },
@@ -61,6 +68,37 @@ static const struct clk_gate_soc_desc clk_gate_desc[] = {
{ }
};
+static const struct clk_gate_soc_desc lan969x_clk_gate_desc[] = {
+ { "usb_drd", 10 },
+ { "mcramc", 9 },
+ { "hmatrix", 8 },
+ { }
+};
+
+struct lan966x_match_data {
+ char *name;
+ const char * const *clk_name;
+ const struct clk_gate_soc_desc *clk_gate_desc;
+ u8 num_generic_clks;
+ u8 num_total_clks;
+};
+
+static struct lan966x_match_data lan966x_desc = {
+ .name = "lan966x",
+ .clk_name = lan966x_clk_names,
+ .clk_gate_desc = lan966x_clk_gate_desc,
+ .num_total_clks = 18,
+ .num_generic_clks = 14,
+};
+
+static struct lan966x_match_data lan969x_desc = {
+ .name = "lan969x",
+ .clk_name = lan969x_clk_names,
+ .clk_gate_desc = lan969x_clk_gate_desc,
+ .num_total_clks = 15,
+ .num_generic_clks = 12,
+};
+
static DEFINE_SPINLOCK(clk_gate_lock);
static void __iomem *base;
@@ -186,24 +224,26 @@ static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
};
static int lan966x_gate_clk_register(struct device *dev,
+ const struct lan966x_match_data *data,
struct clk_hw_onecell_data *hw_data,
void __iomem *gate_base)
{
- int i;
+ for (int i = data->num_generic_clks; i < data->num_total_clks; ++i) {
+ int idx = i - data->num_generic_clks;
+ const struct clk_gate_soc_desc *desc;
- for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
- int idx = i - GCK_GATE_UHPHS;
+ desc = &data->clk_gate_desc[idx];
hw_data->hws[i] =
- devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, gate_base,
- clk_gate_desc[idx].bit_idx,
+ devm_clk_hw_register_gate(dev, desc->name,
+ data->name, 0, gate_base,
+ desc->bit_idx,
0, &clk_gate_lock);
if (IS_ERR(hw_data->hws[i]))
return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
"failed to register %s clock\n",
- clk_gate_desc[idx].name);
+ desc->name);
}
return 0;
@@ -211,13 +251,18 @@ static int lan966x_gate_clk_register(struct device *dev,
static int lan966x_clk_probe(struct platform_device *pdev)
{
+ const struct lan966x_match_data *data;
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
void __iomem *gate_base;
struct resource *res;
int i, ret;
- hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, data->num_total_clks),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
@@ -228,10 +273,10 @@ static int lan966x_clk_probe(struct platform_device *pdev)
init.ops = &lan966x_gck_ops;
- hw_data->num = GCK_GATE_UHPHS;
+ hw_data->num = data->num_generic_clks;
- for (i = 0; i < GCK_GATE_UHPHS; i++) {
- init.name = clk_names[i];
+ for (i = 0; i < data->num_generic_clks; i++) {
+ init.name = data->clk_name[i];
hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
if (IS_ERR(hw_data->hws[i])) {
dev_err(dev, "failed to register %s clock\n",
@@ -246,9 +291,9 @@ static int lan966x_clk_probe(struct platform_device *pdev)
if (IS_ERR(gate_base))
return PTR_ERR(gate_base);
- hw_data->num = N_CLOCKS;
+ hw_data->num = data->num_total_clks;
- ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
+ ret = lan966x_gate_clk_register(dev, data, hw_data, gate_base);
if (ret)
return ret;
}
@@ -257,7 +302,8 @@ static int lan966x_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id lan966x_clk_dt_ids[] = {
- { .compatible = "microchip,lan966x-gck", },
+ { .compatible = "microchip,lan966x-gck", .data = &lan966x_desc },
+ { .compatible = "microchip,lan9691-gck", .data = &lan969x_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c997e7491996..b2107b31efa2 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -375,7 +375,7 @@ static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
- const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
+ static const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
@@ -491,28 +491,33 @@ static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
-static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
- ret = lmk04832_check_vco_ranges(lmk, rate);
+ ret = lmk04832_check_vco_ranges(lmk, req->rate);
if (ret < 0)
return ret;
- vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
+ vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate,
+ &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
- if (rate != vco_rate)
+ if (req->rate != vco_rate)
return -EINVAL;
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -579,7 +584,7 @@ static const struct clk_ops lmk04832_vco_ops = {
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
- .round_rate = lmk04832_vco_round_rate,
+ .determine_rate = lmk04832_vco_determine_rate,
.set_rate = lmk04832_vco_set_rate,
};
@@ -637,7 +642,7 @@ static int lmk04832_register_vco(struct lmk04832 *lmk)
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
- const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
+ static const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
@@ -888,25 +893,27 @@ static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
-static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
- sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
- sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
+ sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
- if (rate != sclk_rate)
+ if (req->rate != sclk_rate)
return -EINVAL;
- return sclk_rate;
+ req->rate = sclk_rate;
+
+ return 0;
}
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -945,7 +952,7 @@ static const struct clk_ops lmk04832_sclk_ops = {
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
- .round_rate = lmk04832_sclk_round_rate,
+ .determine_rate = lmk04832_sclk_determine_rate,
.set_rate = lmk04832_sclk_set_rate,
};
@@ -1069,26 +1076,28 @@ static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
- dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
- dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
+ dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div);
if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
- if (rate != dclk_rate)
+ if (req->rate != dclk_rate)
return -EINVAL;
- return dclk_rate;
+ req->rate = dclk_rate;
+
+ return 0;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1158,7 +1167,7 @@ static const struct clk_ops lmk04832_dclk_ops = {
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
- .round_rate = lmk04832_dclk_round_rate,
+ .determine_rate = lmk04832_dclk_determine_rate,
.set_rate = lmk04832_dclk_set_rate,
};
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
index a3467aa6790f..f9f060d08a5f 100644
--- a/drivers/clk/clk-loongson1.c
+++ b/drivers/clk/clk-loongson1.c
@@ -93,14 +93,16 @@ static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
d->flags, d->width);
}
-static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ls1x_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
- return divider_round_rate(hw, rate, prate, d->table,
- d->width, d->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ d->table, d->width, d->flags);
+
+ return 0;
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -146,7 +148,7 @@ static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
- .round_rate = ls1x_divider_round_rate,
+ .determine_rate = ls1x_divider_determine_rate,
.set_rate = ls1x_divider_set_rate,
};
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 820bb1e9e3b7..9c4c6c99db3e 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,10 +13,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m", },
-};
-
enum loongson2_clk_type {
CLK_TYPE_PLL,
CLK_TYPE_SCALE,
@@ -29,8 +25,10 @@ enum loongson2_clk_type {
struct loongson2_clk_provider {
void __iomem *base;
struct device *dev;
- struct clk_hw_onecell_data clk_data;
spinlock_t clk_lock; /* protect access to DIV registers */
+
+ /* Must be last --ends in a flexible-array member. */
+ struct clk_hw_onecell_data clk_data;
};
struct loongson2_clk_data {
@@ -40,6 +38,7 @@ struct loongson2_clk_data {
u8 div_width;
u8 mult_shift;
u8 mult_width;
+ u8 bit_idx;
};
struct loongson2_clk_board_info {
@@ -48,6 +47,7 @@ struct loongson2_clk_board_info {
const char *name;
const char *parent_name;
unsigned long fixed_rate;
+ unsigned long flags;
u8 reg_offset;
u8 div_shift;
u8 div_width;
@@ -93,6 +93,19 @@ struct loongson2_clk_board_info {
.div_width = _dwidth, \
}
+#define CLK_SCALE_MODE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth, _midx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ .bit_idx = _midx + 1, \
+ }
+
#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
{ \
.id = _id, \
@@ -103,6 +116,18 @@ struct loongson2_clk_board_info {
.bit_idx = _bidx, \
}
+#define CLK_GATE_FLAGS(_id, _name, _pname, _offset, _bidx, \
+ _flags) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ .flags = _flags \
+ }
+
#define CLK_FIXED(_id, _name, _pname, _rate) \
{ \
.id = _id, \
@@ -112,6 +137,51 @@ struct loongson2_clk_board_info {
.fixed_rate = _rate, \
}
+static const struct loongson2_clk_board_info ls2k0300_clks[] = {
+ /* Reference Clock */
+ CLK_PLL(LS2K0300_NODE_PLL, "pll_node", 0x00, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_DDR_PLL, "pll_ddr", 0x08, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_PIX_PLL, "pll_pix", 0x10, 15, 9, 8, 7),
+ CLK_FIXED(LS2K0300_CLK_STABLE, "clk_stable", NULL, 100000000),
+ CLK_FIXED(LS2K0300_CLK_THSENS, "clk_thsens", NULL, 10000000),
+ /* Node PLL */
+ CLK_DIV(LS2K0300_CLK_NODE_DIV, "clk_node_div", "pll_node", 0x00, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMAC_DIV, "clk_gmac_div", "pll_node", 0x04, 0, 7),
+ CLK_DIV(LS2K0300_CLK_I2S_DIV, "clk_i2s_div", "pll_node", 0x04, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NODE_PLL_GATE, "clk_node_pll_gate", "clk_node_div", 0x00, 0),
+ CLK_GATE(LS2K0300_CLK_GMAC_GATE, "clk_gmac_gate", "clk_gmac_div", 0x00, 1),
+ CLK_GATE(LS2K0300_CLK_I2S_GATE, "clk_i2s_gate", "clk_i2s_div", 0x00, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_NODE_GATE, "clk_node_gate", "clk_node_scale", 0x24, 0,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_NODE_SCALE, "clk_node_scale", "clk_node_pll_gate", 0x20, 0, 3,
+ 3),
+ /* DDR PLL */
+ CLK_DIV(LS2K0300_CLK_DDR_DIV, "clk_ddr_div", "pll_ddr", 0x08, 24, 7),
+ CLK_DIV(LS2K0300_CLK_NET_DIV, "clk_net_div", "pll_ddr", 0x0c, 0, 7),
+ CLK_DIV(LS2K0300_CLK_DEV_DIV, "clk_dev_div", "pll_ddr", 0x0c, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NET_GATE, "clk_net_gate", "clk_net_div", 0x08, 1),
+ CLK_GATE(LS2K0300_CLK_DEV_GATE, "clk_dev_gate", "clk_dev_div", 0x08, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_DDR_GATE, "clk_ddr_gate", "clk_ddr_div", 0x08, 0,
+ CLK_IS_CRITICAL),
+ /* PIX PLL */
+ CLK_DIV(LS2K0300_CLK_PIX_DIV, "clk_pix_div", "pll_pix", 0x10, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMACBP_DIV, "clk_gmacbp_div", "pll_pix", 0x14, 0, 7),
+ CLK_GATE(LS2K0300_CLK_PIX_PLL_GATE, "clk_pix_pll_gate", "clk_pix_div", 0x10, 0),
+ CLK_GATE(LS2K0300_CLK_PIX_GATE, "clk_pix_gate", "clk_pix_scale", 0x24, 6),
+ CLK_GATE(LS2K0300_CLK_GMACBP_GATE, "clk_gmacbp_gate", "clk_gmacbp_div", 0x10, 1),
+ CLK_SCALE_MODE(LS2K0300_CLK_PIX_SCALE, "clk_pix_scale", "clk_pix_pll_gate", 0x20, 4, 3, 7),
+ /* clk_dev_gate */
+ CLK_DIV(LS2K0300_CLK_SDIO_SCALE, "clk_sdio_scale", "clk_dev_gate", 0x20, 24, 4),
+ CLK_GATE(LS2K0300_CLK_USB_GATE, "clk_usb_gate", "clk_usb_scale", 0x24, 2),
+ CLK_GATE(LS2K0300_CLK_SDIO_GATE, "clk_sdio_gate", "clk_sdio_scale", 0x24, 4),
+ CLK_GATE(LS2K0300_CLK_APB_GATE, "clk_apb_gate", "clk_apb_scale", 0x24, 3),
+ CLK_GATE_FLAGS(LS2K0300_CLK_BOOT_GATE, "clk_boot_gate", "clk_boot_scale", 0x24, 1,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_USB_SCALE, "clk_usb_scale", "clk_dev_gate", 0x20, 12, 3, 15),
+ CLK_SCALE_MODE(LS2K0300_CLK_APB_SCALE, "clk_apb_scale", "clk_dev_gate", 0x20, 16, 3, 19),
+ CLK_SCALE_MODE(LS2K0300_CLK_BOOT_SCALE, "clk_boot_scale", "clk_dev_gate", 0x20, 8, 3, 11),
+};
+
static const struct loongson2_clk_board_info ls2k0500_clks[] = {
CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
@@ -228,20 +298,26 @@ static const struct clk_ops loongson2_pll_recalc_ops = {
static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u64 val, mult;
+ u64 val, scale;
+ u32 mode = 0;
struct loongson2_clk_data *clk = to_loongson2_clk(hw);
val = readq(clk->reg);
- mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+ scale = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+
+ if (clk->bit_idx)
+ mode = val & BIT(clk->bit_idx - 1);
- return div_u64((u64)parent_rate * mult, 8);
+ return mode == 0 ? div_u64((u64)parent_rate * scale, 8) :
+ div_u64((u64)parent_rate, scale);
}
static const struct clk_ops loongson2_freqscale_recalc_ops = {
.recalc_rate = loongson2_freqscale_recalc_rate,
};
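The mode bit selects between two formulas: with the bit clear the output
is parent_rate * scale / 8, with it set the output is parent_rate /
scale. For example, a register field value of 3 gives scale = 4, so a
100 MHz parent yields 100 * 4 / 8 = 50 MHz in the first mode and
100 / 4 = 25 MHz in the second.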
-static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+static struct clk_hw *loongson2_clk_register(const char *parent,
+ struct loongson2_clk_provider *clp,
const struct loongson2_clk_board_info *cld,
const struct clk_ops *ops)
{
@@ -258,17 +334,14 @@ static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
init.ops = ops;
init.flags = 0;
init.num_parents = 1;
-
- if (!cld->parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &cld->parent_name;
+ init.parent_names = &parent;
clk->reg = clp->base + cld->reg_offset;
clk->div_shift = cld->div_shift;
clk->div_width = cld->div_width;
clk->mult_shift = cld->mult_shift;
clk->mult_width = cld->mult_width;
+ clk->bit_idx = cld->bit_idx;
clk->hw.init = &init;
hw = &clk->hw;
@@ -286,13 +359,19 @@ static int loongson2_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct loongson2_clk_provider *clp;
const struct loongson2_clk_board_info *p, *data;
+ const char *refclk_name, *parent_name;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
+	refclk_name = of_clk_get_parent_name(dev->of_node, 0);
+	if (!refclk_name)
+		return dev_err_probe(dev, -EINVAL,
+				     "failed to get refclk name\n");
+
for (p = data; p->name; p++)
- clks_num++;
+ clks_num = max(clks_num, p->id + 1);
clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
GFP_KERNEL);
@@ -304,37 +383,44 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return PTR_ERR(clp->base);
spin_lock_init(&clp->clk_lock);
- clp->clk_data.num = clks_num + 1;
+ clp->clk_data.num = clks_num;
clp->dev = dev;
+ /* Avoid returning NULL for unused id */
+ memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
+
for (i = 0; i < clks_num; i++) {
p = &data[i];
+ parent_name = p->parent_name ? p->parent_name : refclk_name;
+
switch (p->type) {
case CLK_TYPE_PLL:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_pll_recalc_ops);
break;
case CLK_TYPE_SCALE:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_freqscale_recalc_ops);
break;
case CLK_TYPE_DIVIDER:
hw = devm_clk_hw_register_divider(dev, p->name,
- p->parent_name, 0,
+ parent_name, 0,
clp->base + p->reg_offset,
p->div_shift, p->div_width,
- CLK_DIVIDER_ONE_BASED,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
&clp->clk_lock);
break;
case CLK_TYPE_GATE:
- hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ hw = devm_clk_hw_register_gate(dev, p->name, parent_name,
+ p->flags,
clp->base + p->reg_offset,
p->bit_idx, 0,
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate(dev, p->name, parent_name,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
@@ -352,6 +438,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k0300-clk", .data = &ls2k0300_clks },
{ .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
{ .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
{ .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index be9020b6c789..0515e3e41162 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -159,29 +159,32 @@ static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max9485_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
- if (curr->out == rate)
- return rate;
+ if (curr->out == req->rate)
+ return 0;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
- if (curr->out > rate) {
+ if (curr->out > req->rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
- if (!prev)
- return curr->out;
+ if (!prev) {
+ req->rate = curr->out;
+
+ return 0;
+ }
/*
* Otherwise, determine whether the previous entry or
@@ -189,14 +192,18 @@ static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
*/
mid = prev->out + ((curr->out - prev->out) / 2);
- return (mid > rate) ? prev->out : curr->out;
+ req->rate = mid > req->rate ? prev->out : curr->out;
+
+ return 0;
}
prev = curr;
}
/* If the last entry was still too high, clamp the value */
- return prev->out;
+ req->rate = prev->out;
+
+ return 0;
}
struct max9485_clk {
@@ -221,7 +228,7 @@ static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
- .round_rate = max9485_clkout_round_rate,
+ .determine_rate = max9485_clkout_determine_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 18c20aff45f7..b4f9b7143eaa 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -386,8 +386,8 @@ static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m10v_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
@@ -398,13 +398,19 @@ static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -450,7 +456,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
- .round_rate = m10v_clk_divider_round_rate,
+ .determine_rate = m10v_clk_divider_determine_rate,
.set_rate = m10v_clk_divider_set_rate,
};
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index e507aa958da9..6f2955d408b6 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -112,14 +112,16 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return bestmult;
}
-static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_multiplier_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
- unsigned long factor = __bestmult(hw, rate, parent_rate,
+ unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate,
mult->width, mult->flags);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -150,7 +152,7 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
- .round_rate = clk_multiplier_round_rate,
+ .determine_rate = clk_multiplier_determine_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 06245681dac7..fc0aeb4247f2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/reboot.h>
/*
@@ -116,9 +117,9 @@ static void __init nomadik_src_init(void)
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ str_disabled_enabled(val & SRC_XTALCR_SXTALDIS));
pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
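The helpers come from <linux/string_choices.h> and replace the
open-coded ternaries; str_enabled_disabled() is equivalent to:

	static inline const char *str_enabled_disabled(bool v)
	{
		return v ? "enabled" : "disabled";
	}

with str_disabled_enabled() returning the opposite pair.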
diff --git a/drivers/clk/clk-npcm8xx.c b/drivers/clk/clk-npcm8xx.c
new file mode 100644
index 000000000000..2138c011411d
--- /dev/null
+++ b/drivers/clk/clk-npcm8xx.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM8xx Clock Generator
+ * All clocks are initialized by the bootloader, so this driver only
+ * reads the current settings back from the hardware.
+ *
+ * Copyright (C) 2020 Nuvoton Technologies
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ */
+
+#define pr_fmt(fmt) "npcm8xx_clk: " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/nuvoton,npcm845-clk.h>
+#include <soc/nuvoton/clock-npcm8xx.h>
+
+/* NPCM8xx clock registers */
+#define NPCM8XX_CLKSEL 0x04
+#define NPCM8XX_CLKDIV1 0x08
+#define NPCM8XX_CLKDIV2 0x2C
+#define NPCM8XX_CLKDIV3 0x58
+#define NPCM8XX_CLKDIV4 0x7C
+#define NPCM8XX_PLLCON0 0x0C
+#define NPCM8XX_PLLCON1 0x10
+#define NPCM8XX_PLLCON2 0x54
+#define NPCM8XX_PLLCONG 0x60
+#define NPCM8XX_THRTL_CNT 0xC0
+
+#define PLLCON_LOKI BIT(31)
+#define PLLCON_LOKS BIT(30)
+#define PLLCON_FBDV GENMASK(27, 16)
+#define PLLCON_OTDV2 GENMASK(15, 13)
+#define PLLCON_PWDEN BIT(12)
+#define PLLCON_OTDV1 GENMASK(10, 8)
+#define PLLCON_INDV GENMASK(5, 0)
+
+static void __iomem *clk_base;
+
+struct npcm8xx_clk_pll {
+ void __iomem *pllcon;
+ unsigned int id;
+ const char *name;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+#define to_npcm8xx_clk_pll(_hw) container_of(_hw, struct npcm8xx_clk_pll, hw)
+
+struct npcm8xx_clk_pll_data {
+ const char *name;
+ struct clk_parent_data parent;
+ unsigned int reg;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_div_data {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ const char *name;
+ const struct clk_hw *parent_hw;
+ unsigned long clk_divider_flags;
+ unsigned long flags;
+ int onecell_idx;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_mux_data {
+ u8 shift;
+ u32 mask;
+ const u32 *table;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+static struct clk_hw hw_pll1_div2, hw_pll2_div2, hw_gfx_div2, hw_pre_clk;
+static struct npcm8xx_clk_pll_data npcm8xx_pll_clks[] = {
+ { "pll0", { .index = 0 }, NPCM8XX_PLLCON0, 0 },
+ { "pll1", { .index = 0 }, NPCM8XX_PLLCON1, 0 },
+ { "pll2", { .index = 0 }, NPCM8XX_PLLCON2, 0 },
+ { "pll_gfx", { .index = 0 }, NPCM8XX_PLLCONG, 0 },
+};
+
+static const u32 cpuck_mux_table[] = { 0, 1, 2, 7 };
+static const struct clk_parent_data cpuck_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 pixcksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data pixcksel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[3].hw },
+ { .index = 0 }
+};
+
+static const u32 default_mux_table[] = { 0, 1, 2, 3 };
+static const struct clk_parent_data default_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 sucksel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data sucksel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 mccksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data mccksel_mux_parents[] = {
+ { .hw = &hw_pll1_div2 },
+ { .index = 0 }
+};
+
+static const u32 clkoutsel_mux_table[] = { 0, 1, 2, 3, 4 };
+static const struct clk_parent_data clkoutsel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_gfx_div2 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 gfxmsel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data gfxmsel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 dvcssel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data dvcssel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 default3_mux_table[] = { 0, 1, 2 };
+static const struct clk_parent_data default3_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 }
+};
+
+static struct npcm8xx_clk_mux_data npcm8xx_muxes[] = {
+ { 0, 3, cpuck_mux_table, "cpu_mux", cpuck_mux_parents,
+ ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL },
+ { 4, 2, pixcksel_mux_table, "gfx_pixel_mux", pixcksel_mux_parents,
+ ARRAY_SIZE(pixcksel_mux_parents), 0 },
+ { 6, 2, default_mux_table, "sd_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 8, 2, default_mux_table, "uart_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 10, 2, sucksel_mux_table, "serial_usb_mux", sucksel_mux_parents,
+ ARRAY_SIZE(sucksel_mux_parents), 0 },
+ { 12, 2, mccksel_mux_table, "mc_mux", mccksel_mux_parents,
+ ARRAY_SIZE(mccksel_mux_parents), 0 },
+ { 14, 2, default_mux_table, "adc_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 16, 2, default_mux_table, "gfx_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 18, 3, clkoutsel_mux_table, "clkout_mux", clkoutsel_mux_parents,
+ ARRAY_SIZE(clkoutsel_mux_parents), 0 },
+ { 21, 2, gfxmsel_mux_table, "gfxm_mux", gfxmsel_mux_parents,
+ ARRAY_SIZE(gfxmsel_mux_parents), 0 },
+ { 23, 2, dvcssel_mux_table, "dvc_mux", dvcssel_mux_parents,
+ ARRAY_SIZE(dvcssel_mux_parents), 0 },
+ { 25, 2, default3_mux_table, "rg_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+ { 27, 2, default3_mux_table, "rcp_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+};
+
+/* configurable pre dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_pre_divs[] = {
+ { NPCM8XX_CLKDIV1, 21, 5, "pre_adc", &npcm8xx_muxes[6].hw, CLK_DIVIDER_READ_ONLY, 0, -1 },
+ { NPCM8XX_CLKDIV1, 26, 2, "ahb", &hw_pre_clk, CLK_DIVIDER_READ_ONLY, CLK_IS_CRITICAL, NPCM8XX_CLK_AHB },
+};
+
+/* configurable dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_divs[] = {
+ { NPCM8XX_CLKDIV1, 28, 3, "adc", &npcm8xx_pre_divs[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_ADC },
+ { NPCM8XX_CLKDIV1, 16, 5, "uart", &npcm8xx_muxes[3].hw, 0, 0, NPCM8XX_CLK_UART },
+ { NPCM8XX_CLKDIV1, 11, 5, "mmc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_MMC },
+ { NPCM8XX_CLKDIV1, 6, 5, "spi3", &npcm8xx_pre_divs[1].hw, 0, 0, NPCM8XX_CLK_SPI3 },
+ { NPCM8XX_CLKDIV1, 2, 4, "pci", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_PCI },
+
+ { NPCM8XX_CLKDIV2, 30, 2, "apb4", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB4 },
+ { NPCM8XX_CLKDIV2, 28, 2, "apb3", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB3 },
+ { NPCM8XX_CLKDIV2, 26, 2, "apb2", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB2 },
+ { NPCM8XX_CLKDIV2, 24, 2, "apb1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB1 },
+ { NPCM8XX_CLKDIV2, 22, 2, "apb5", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB5 },
+ { NPCM8XX_CLKDIV2, 16, 5, "clkout", &npcm8xx_muxes[8].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_CLKOUT },
+ { NPCM8XX_CLKDIV2, 13, 3, "gfx", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_GFX },
+ { NPCM8XX_CLKDIV2, 8, 5, "usb_bridge", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU },
+ { NPCM8XX_CLKDIV2, 4, 4, "usb_host", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU48 },
+ { NPCM8XX_CLKDIV2, 0, 4, "sdhc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SDHC },
+
+ { NPCM8XX_CLKDIV3, 16, 8, "spi1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI1 },
+ { NPCM8XX_CLKDIV3, 11, 5, "uart2", &npcm8xx_muxes[3].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_UART2 },
+ { NPCM8XX_CLKDIV3, 6, 5, "spi0", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI0 },
+ { NPCM8XX_CLKDIV3, 1, 5, "spix", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPIX },
+
+ { NPCM8XX_CLKDIV4, 28, 4, "rg", &npcm8xx_muxes[11].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RG },
+ { NPCM8XX_CLKDIV4, 12, 4, "rcp", &npcm8xx_muxes[12].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RCP },
+
+ { NPCM8XX_THRTL_CNT, 0, 2, "th", &npcm8xx_muxes[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_TH },
+};
+
+static unsigned long npcm8xx_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct npcm8xx_clk_pll *pll = to_npcm8xx_clk_pll(hw);
+ unsigned long fbdv, indv, otdv1, otdv2;
+ unsigned int val;
+ u64 ret;
+
+ if (parent_rate == 0) {
+ pr_debug("%s: parent rate is zero\n", __func__);
+ return 0;
+ }
+
+ val = readl_relaxed(pll->pllcon);
+
+ indv = FIELD_GET(PLLCON_INDV, val);
+ fbdv = FIELD_GET(PLLCON_FBDV, val);
+ otdv1 = FIELD_GET(PLLCON_OTDV1, val);
+ otdv2 = FIELD_GET(PLLCON_OTDV2, val);
+
+ ret = (u64)parent_rate * fbdv;
+ do_div(ret, indv * otdv1 * otdv2);
+
+ return ret;
+}
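The recalculated rate follows rate = parent_rate * FBDV / (INDV * OTDV1
* OTDV2). As a hypothetical example, a 25 MHz reference with FBDV = 80,
INDV = 1, OTDV1 = 2 and OTDV2 = 1 gives 25 MHz * 80 / 2 = 1000 MHz.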
+
+static const struct clk_ops npcm8xx_clk_pll_ops = {
+ .recalc_rate = npcm8xx_clk_pll_recalc_rate,
+};
+
+static struct clk_hw *
+npcm8xx_clk_register_pll(struct device *dev, void __iomem *pllcon,
+ const char *name, const struct clk_parent_data *parent,
+ unsigned long flags)
+{
+ struct npcm8xx_clk_pll *pll;
+ struct clk_init_data init = {};
+ int ret;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &npcm8xx_clk_pll_ops;
+ init.parent_data = parent;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll->pllcon = pllcon;
+ pll->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll->hw;
+}
+
+static DEFINE_SPINLOCK(npcm8xx_clk_lock);
+
+static int npcm8xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct npcm_clock_adev *rdev = to_npcm_clock_adev(adev);
+ struct clk_hw_onecell_data *npcm8xx_clk_data;
+ struct device *dev = &adev->dev;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ npcm8xx_clk_data = devm_kzalloc(dev, struct_size(npcm8xx_clk_data, hws,
+ NPCM8XX_NUM_CLOCKS),
+ GFP_KERNEL);
+ if (!npcm8xx_clk_data)
+ return -ENOMEM;
+
+ clk_base = rdev->base;
+
+ npcm8xx_clk_data->num = NPCM8XX_NUM_CLOCKS;
+
+ for (i = 0; i < NPCM8XX_NUM_CLOCKS; i++)
+ npcm8xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Register plls */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pll_clks); i++) {
+ struct npcm8xx_clk_pll_data *pll_clk = &npcm8xx_pll_clks[i];
+
+ hw = npcm8xx_clk_register_pll(dev, clk_base + pll_clk->reg,
+ pll_clk->name, &pll_clk->parent,
+ pll_clk->flags);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll\n");
+ pll_clk->hw = *hw;
+ }
+
+ /* Register fixed dividers */
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll1_div2", "pll1", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register fixed div\n");
+ hw_pll1_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll2_div2", "pll2", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll2 div2\n");
+ hw_pll2_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll_gfx_div2", "pll_gfx", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register gfx div2\n");
+ hw_gfx_div2 = *hw;
+
+ /* Register muxes */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_muxes); i++) {
+ struct npcm8xx_clk_mux_data *mux_data = &npcm8xx_muxes[i];
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ mux_data->name,
+ mux_data->parent_data,
+ mux_data->num_parents,
+ mux_data->flags,
+ clk_base + NPCM8XX_CLKSEL,
+ mux_data->shift,
+ mux_data->mask,
+ 0,
+ mux_data->table,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register mux\n");
+ mux_data->hw = *hw;
+ }
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pre_clk", "cpu_mux", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre clk div2\n");
+ hw_pre_clk = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "axi", "th", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register axi div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_AXI] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "atb", "axi", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register atb div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_ATB] = hw;
+
+ /* Register pre dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pre_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_pre_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre div\n");
+ div_data->hw = *hw;
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ /* Register dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register div\n");
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ npcm8xx_clk_data);
+}
+
+static const struct auxiliary_device_id npcm8xx_clock_ids[] = {
+ {
+ .name = "reset_npcm.clk-npcm8xx",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, npcm8xx_clock_ids);
+
+static struct auxiliary_driver npcm8xx_clock_driver = {
+ .probe = npcm8xx_clk_probe,
+ .id_table = npcm8xx_clock_ids,
+};
+module_auxiliary_driver(npcm8xx_clock_driver);
+
+MODULE_DESCRIPTION("Clock driver for Nuvoton NPCM8XX BMC SoC");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
+
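Auxiliary devices match on "<parent module>.<device name>", so the
"reset_npcm.clk-npcm8xx" id binds this driver to a "clk-npcm8xx" device
created by the reset_npcm driver, which also hands over the register
base through struct npcm_clock_adev instead of a devicetree "reg"
property.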
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index bd4f21c22004..4709f0338e37 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -14,6 +14,7 @@
struct clk_pwm {
struct clk_hw hw;
struct pwm_device *pwm;
+ struct pwm_state state;
u32 fixed_rate;
};
@@ -22,11 +23,28 @@ static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
return container_of(hw, struct clk_pwm, hw);
}
+static int clk_pwm_enable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return pwm_apply_atomic(clk_pwm->pwm, &clk_pwm->state);
+}
+
+static void clk_pwm_disable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+ struct pwm_state state = clk_pwm->state;
+
+ state.enabled = false;
+
+ pwm_apply_atomic(clk_pwm->pwm, &state);
+}
+
static int clk_pwm_prepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
- return pwm_enable(clk_pwm->pwm);
+ return pwm_apply_might_sleep(clk_pwm->pwm, &clk_pwm->state);
}
static void clk_pwm_unprepare(struct clk_hw *hw)
@@ -48,8 +66,11 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
struct pwm_state state;
+ int ret;
- pwm_get_state(clk_pwm->pwm, &state);
+ ret = pwm_get_state_hw(clk_pwm->pwm, &state);
+ if (ret)
+ return ret;
duty->num = state.duty_cycle;
duty->den = state.period;
@@ -57,6 +78,13 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
return 0;
}
+static const struct clk_ops clk_pwm_ops_atomic = {
+ .enable = clk_pwm_enable,
+ .disable = clk_pwm_disable,
+ .recalc_rate = clk_pwm_recalc_rate,
+ .get_duty_cycle = clk_pwm_get_duty_cycle,
+};
+
static const struct clk_ops clk_pwm_ops = {
.prepare = clk_pwm_prepare,
.unprepare = clk_pwm_unprepare,
@@ -103,20 +131,19 @@ static int clk_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
- if (ret < 0)
- return ret;
+ pwm_init_state(pwm, &clk_pwm->state);
+ pwm_set_relative_duty_cycle(&clk_pwm->state, 1, 2);
+ clk_pwm->state.enabled = true;
clk_name = node->name;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
- init.ops = &clk_pwm_ops;
+ if (pwm_might_sleep(pwm))
+ init.ops = &clk_pwm_ops;
+ else
+ init.ops = &clk_pwm_ops_atomic;
+
init.flags = 0;
init.num_parents = 0;
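The ops split mirrors the clk framework's context rules: clk_enable()
and clk_disable() may be called from atomic context, so the atomic
variant drives the PWM with pwm_apply_atomic(), while chips that can
sleep keep the work in .prepare/.unprepare where pwm_apply_might_sleep()
is allowed. A consumer-side sketch of the constraint, with a
hypothetical lock and clk:

	unsigned long flags;

	/* clk_enable() may legitimately run under a spinlock, so the
	 * pwm_apply_atomic() path reached from clk_pwm_enable() must
	 * not sleep. */
	spin_lock_irqsave(&lock, flags);
	clk_enable(clk);
	spin_unlock_irqrestore(&lock, flags);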
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 4dcde305944c..a560edeb4b55 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -1065,11 +1066,8 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node) {
- struct device_node *parent_np;
-
- parent_np = of_get_parent(np);
+ struct device_node *parent_np __free(device_node) = of_get_parent(np);
_clockgen_init(parent_np, true);
- of_node_put(parent_np);
}
}
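The __free(device_node) annotation, provided by <linux/cleanup.h>,
attaches of_node_put() as a scope-exit action, which is why the explicit
put disappears: the reference is dropped automatically on every return
path. The general shape of the pattern:

	{
		struct device_node *np __free(device_node) = of_get_parent(node);

		if (!np)
			return;
		/* use np; of_node_put(np) runs when np goes out of scope */
	}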
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
new file mode 100644
index 000000000000..fd144755b879
--- /dev/null
+++ b/drivers/clk/clk-rp1.c
@@ -0,0 +1,2462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd.
+ *
+ * Clock driver for RP1 PCIe multifunction chip.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <dt-bindings/clock/raspberrypi,rp1-clocks.h>
+
+#define PLL_SYS_OFFSET 0x08000
+#define PLL_SYS_CS (PLL_SYS_OFFSET + 0x00)
+#define PLL_SYS_PWR (PLL_SYS_OFFSET + 0x04)
+#define PLL_SYS_FBDIV_INT (PLL_SYS_OFFSET + 0x08)
+#define PLL_SYS_FBDIV_FRAC (PLL_SYS_OFFSET + 0x0c)
+#define PLL_SYS_PRIM (PLL_SYS_OFFSET + 0x10)
+#define PLL_SYS_SEC (PLL_SYS_OFFSET + 0x14)
+
+#define PLL_AUDIO_OFFSET 0x0c000
+#define PLL_AUDIO_CS (PLL_AUDIO_OFFSET + 0x00)
+#define PLL_AUDIO_PWR (PLL_AUDIO_OFFSET + 0x04)
+#define PLL_AUDIO_FBDIV_INT (PLL_AUDIO_OFFSET + 0x08)
+#define PLL_AUDIO_FBDIV_FRAC (PLL_AUDIO_OFFSET + 0x0c)
+#define PLL_AUDIO_PRIM (PLL_AUDIO_OFFSET + 0x10)
+#define PLL_AUDIO_SEC (PLL_AUDIO_OFFSET + 0x14)
+#define PLL_AUDIO_TERN (PLL_AUDIO_OFFSET + 0x18)
+
+#define PLL_VIDEO_OFFSET 0x10000
+#define PLL_VIDEO_CS (PLL_VIDEO_OFFSET + 0x00)
+#define PLL_VIDEO_PWR (PLL_VIDEO_OFFSET + 0x04)
+#define PLL_VIDEO_FBDIV_INT (PLL_VIDEO_OFFSET + 0x08)
+#define PLL_VIDEO_FBDIV_FRAC (PLL_VIDEO_OFFSET + 0x0c)
+#define PLL_VIDEO_PRIM (PLL_VIDEO_OFFSET + 0x10)
+#define PLL_VIDEO_SEC (PLL_VIDEO_OFFSET + 0x14)
+
+#define GPCLK_OE_CTRL 0x00000
+
+#define CLK_SYS_OFFSET 0x00014
+#define CLK_SYS_CTRL (CLK_SYS_OFFSET + 0x00)
+#define CLK_SYS_DIV_INT (CLK_SYS_OFFSET + 0x04)
+#define CLK_SYS_SEL (CLK_SYS_OFFSET + 0x0c)
+
+#define CLK_SLOW_OFFSET 0x00024
+#define CLK_SLOW_SYS_CTRL (CLK_SLOW_OFFSET + 0x00)
+#define CLK_SLOW_SYS_DIV_INT (CLK_SLOW_OFFSET + 0x04)
+#define CLK_SLOW_SYS_SEL (CLK_SLOW_OFFSET + 0x0c)
+
+#define CLK_DMA_OFFSET 0x00044
+#define CLK_DMA_CTRL (CLK_DMA_OFFSET + 0x00)
+#define CLK_DMA_DIV_INT (CLK_DMA_OFFSET + 0x04)
+#define CLK_DMA_SEL (CLK_DMA_OFFSET + 0x0c)
+
+#define CLK_UART_OFFSET 0x00054
+#define CLK_UART_CTRL (CLK_UART_OFFSET + 0x00)
+#define CLK_UART_DIV_INT (CLK_UART_OFFSET + 0x04)
+#define CLK_UART_SEL (CLK_UART_OFFSET + 0x0c)
+
+#define CLK_ETH_OFFSET 0x00064
+#define CLK_ETH_CTRL (CLK_ETH_OFFSET + 0x00)
+#define CLK_ETH_DIV_INT (CLK_ETH_OFFSET + 0x04)
+#define CLK_ETH_SEL (CLK_ETH_OFFSET + 0x0c)
+
+#define CLK_PWM0_OFFSET 0x00074
+#define CLK_PWM0_CTRL (CLK_PWM0_OFFSET + 0x00)
+#define CLK_PWM0_DIV_INT (CLK_PWM0_OFFSET + 0x04)
+#define CLK_PWM0_DIV_FRAC (CLK_PWM0_OFFSET + 0x08)
+#define CLK_PWM0_SEL (CLK_PWM0_OFFSET + 0x0c)
+
+#define CLK_PWM1_OFFSET 0x00084
+#define CLK_PWM1_CTRL (CLK_PWM1_OFFSET + 0x00)
+#define CLK_PWM1_DIV_INT (CLK_PWM1_OFFSET + 0x04)
+#define CLK_PWM1_DIV_FRAC (CLK_PWM1_OFFSET + 0x08)
+#define CLK_PWM1_SEL (CLK_PWM1_OFFSET + 0x0c)
+
+#define CLK_AUDIO_IN_OFFSET 0x00094
+#define CLK_AUDIO_IN_CTRL (CLK_AUDIO_IN_OFFSET + 0x00)
+#define CLK_AUDIO_IN_DIV_INT (CLK_AUDIO_IN_OFFSET + 0x04)
+#define CLK_AUDIO_IN_SEL (CLK_AUDIO_IN_OFFSET + 0x0c)
+
+#define CLK_AUDIO_OUT_OFFSET 0x000a4
+#define CLK_AUDIO_OUT_CTRL (CLK_AUDIO_OUT_OFFSET + 0x00)
+#define CLK_AUDIO_OUT_DIV_INT (CLK_AUDIO_OUT_OFFSET + 0x04)
+#define CLK_AUDIO_OUT_SEL (CLK_AUDIO_OUT_OFFSET + 0x0c)
+
+#define CLK_I2S_OFFSET 0x000b4
+#define CLK_I2S_CTRL (CLK_I2S_OFFSET + 0x00)
+#define CLK_I2S_DIV_INT (CLK_I2S_OFFSET + 0x04)
+#define CLK_I2S_SEL (CLK_I2S_OFFSET + 0x0c)
+
+#define CLK_MIPI0_CFG_OFFSET 0x000c4
+#define CLK_MIPI0_CFG_CTRL (CLK_MIPI0_CFG_OFFSET + 0x00)
+#define CLK_MIPI0_CFG_DIV_INT (CLK_MIPI0_CFG_OFFSET + 0x04)
+#define CLK_MIPI0_CFG_SEL (CLK_MIPI0_CFG_OFFSET + 0x0c)
+
+#define CLK_MIPI1_CFG_OFFSET 0x000d4
+#define CLK_MIPI1_CFG_CTRL (CLK_MIPI1_CFG_OFFSET + 0x00)
+#define CLK_MIPI1_CFG_DIV_INT (CLK_MIPI1_CFG_OFFSET + 0x04)
+#define CLK_MIPI1_CFG_SEL (CLK_MIPI1_CFG_OFFSET + 0x0c)
+
+#define CLK_PCIE_AUX_OFFSET 0x000e4
+#define CLK_PCIE_AUX_CTRL (CLK_PCIE_AUX_OFFSET + 0x00)
+#define CLK_PCIE_AUX_DIV_INT (CLK_PCIE_AUX_OFFSET + 0x04)
+#define CLK_PCIE_AUX_SEL (CLK_PCIE_AUX_OFFSET + 0x0c)
+
+#define CLK_USBH0_MICROFRAME_OFFSET 0x000f4
+#define CLK_USBH0_MICROFRAME_CTRL (CLK_USBH0_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH0_MICROFRAME_DIV_INT (CLK_USBH0_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH0_MICROFRAME_SEL (CLK_USBH0_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH1_MICROFRAME_OFFSET 0x00104
+#define CLK_USBH1_MICROFRAME_CTRL (CLK_USBH1_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH1_MICROFRAME_DIV_INT (CLK_USBH1_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH1_MICROFRAME_SEL (CLK_USBH1_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH0_SUSPEND_OFFSET 0x00114
+#define CLK_USBH0_SUSPEND_CTRL (CLK_USBH0_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH0_SUSPEND_DIV_INT (CLK_USBH0_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH0_SUSPEND_SEL (CLK_USBH0_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_USBH1_SUSPEND_OFFSET 0x00124
+#define CLK_USBH1_SUSPEND_CTRL (CLK_USBH1_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH1_SUSPEND_DIV_INT (CLK_USBH1_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH1_SUSPEND_SEL (CLK_USBH1_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_ETH_TSU_OFFSET 0x00134
+#define CLK_ETH_TSU_CTRL (CLK_ETH_TSU_OFFSET + 0x00)
+#define CLK_ETH_TSU_DIV_INT (CLK_ETH_TSU_OFFSET + 0x04)
+#define CLK_ETH_TSU_SEL (CLK_ETH_TSU_OFFSET + 0x0c)
+
+#define CLK_ADC_OFFSET 0x00144
+#define CLK_ADC_CTRL (CLK_ADC_OFFSET + 0x00)
+#define CLK_ADC_DIV_INT (CLK_ADC_OFFSET + 0x04)
+#define CLK_ADC_SEL (CLK_ADC_OFFSET + 0x0c)
+
+#define CLK_SDIO_TIMER_OFFSET 0x00154
+#define CLK_SDIO_TIMER_CTRL (CLK_SDIO_TIMER_OFFSET + 0x00)
+#define CLK_SDIO_TIMER_DIV_INT (CLK_SDIO_TIMER_OFFSET + 0x04)
+#define CLK_SDIO_TIMER_SEL (CLK_SDIO_TIMER_OFFSET + 0x0c)
+
+#define CLK_SDIO_ALT_SRC_OFFSET 0x00164
+#define CLK_SDIO_ALT_SRC_CTRL (CLK_SDIO_ALT_SRC_OFFSET + 0x00)
+#define CLK_SDIO_ALT_SRC_DIV_INT (CLK_SDIO_ALT_SRC_OFFSET + 0x04)
+#define CLK_SDIO_ALT_SRC_SEL (CLK_SDIO_ALT_SRC_OFFSET + 0x0c)
+
+#define CLK_GP0_OFFSET 0x00174
+#define CLK_GP0_CTRL (CLK_GP0_OFFSET + 0x00)
+#define CLK_GP0_DIV_INT (CLK_GP0_OFFSET + 0x04)
+#define CLK_GP0_DIV_FRAC (CLK_GP0_OFFSET + 0x08)
+#define CLK_GP0_SEL (CLK_GP0_OFFSET + 0x0c)
+
+#define CLK_GP1_OFFSET 0x00184
+#define CLK_GP1_CTRL (CLK_GP1_OFFSET + 0x00)
+#define CLK_GP1_DIV_INT (CLK_GP1_OFFSET + 0x04)
+#define CLK_GP1_DIV_FRAC (CLK_GP1_OFFSET + 0x08)
+#define CLK_GP1_SEL (CLK_GP1_OFFSET + 0x0c)
+
+#define CLK_GP2_OFFSET 0x00194
+#define CLK_GP2_CTRL (CLK_GP2_OFFSET + 0x00)
+#define CLK_GP2_DIV_INT (CLK_GP2_OFFSET + 0x04)
+#define CLK_GP2_DIV_FRAC (CLK_GP2_OFFSET + 0x08)
+#define CLK_GP2_SEL (CLK_GP2_OFFSET + 0x0c)
+
+#define CLK_GP3_OFFSET 0x001a4
+#define CLK_GP3_CTRL (CLK_GP3_OFFSET + 0x00)
+#define CLK_GP3_DIV_INT (CLK_GP3_OFFSET + 0x04)
+#define CLK_GP3_DIV_FRAC (CLK_GP3_OFFSET + 0x08)
+#define CLK_GP3_SEL (CLK_GP3_OFFSET + 0x0c)
+
+#define CLK_GP4_OFFSET 0x001b4
+#define CLK_GP4_CTRL (CLK_GP4_OFFSET + 0x00)
+#define CLK_GP4_DIV_INT (CLK_GP4_OFFSET + 0x04)
+#define CLK_GP4_DIV_FRAC (CLK_GP4_OFFSET + 0x08)
+#define CLK_GP4_SEL (CLK_GP4_OFFSET + 0x0c)
+
+#define CLK_GP5_OFFSET 0x001c4
+#define CLK_GP5_CTRL (CLK_GP5_OFFSET + 0x00)
+#define CLK_GP5_DIV_INT (CLK_GP5_OFFSET + 0x04)
+#define CLK_GP5_DIV_FRAC (CLK_GP5_OFFSET + 0x08)
+#define CLK_GP5_SEL (CLK_GP5_OFFSET + 0x0c)
+
+#define CLK_SYS_RESUS_CTRL 0x0020c
+
+#define CLK_SLOW_SYS_RESUS_CTRL 0x00214
+
+#define FC0_OFFSET 0x0021c
+#define FC0_REF_KHZ (FC0_OFFSET + 0x00)
+#define FC0_MIN_KHZ (FC0_OFFSET + 0x04)
+#define FC0_MAX_KHZ (FC0_OFFSET + 0x08)
+#define FC0_DELAY (FC0_OFFSET + 0x0c)
+#define FC0_INTERVAL (FC0_OFFSET + 0x10)
+#define FC0_SRC (FC0_OFFSET + 0x14)
+#define FC0_STATUS (FC0_OFFSET + 0x18)
+#define FC0_RESULT (FC0_OFFSET + 0x1c)
+#define FC_SIZE 0x20
+#define FC_COUNT 8
+#define FC_NUM(idx, off) ((idx) * 32 + (off))
+
+#define AUX_SEL 1
+
+#define VIDEO_CLOCKS_OFFSET 0x4000
+#define VIDEO_CLK_VEC_CTRL (VIDEO_CLOCKS_OFFSET + 0x0000)
+#define VIDEO_CLK_VEC_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0004)
+#define VIDEO_CLK_VEC_SEL (VIDEO_CLOCKS_OFFSET + 0x000c)
+#define VIDEO_CLK_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0010)
+#define VIDEO_CLK_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0014)
+#define VIDEO_CLK_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x001c)
+#define VIDEO_CLK_MIPI0_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0020)
+#define VIDEO_CLK_MIPI0_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0024)
+#define VIDEO_CLK_MIPI0_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0028)
+#define VIDEO_CLK_MIPI0_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x002c)
+#define VIDEO_CLK_MIPI1_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0030)
+#define VIDEO_CLK_MIPI1_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0034)
+#define VIDEO_CLK_MIPI1_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0038)
+#define VIDEO_CLK_MIPI1_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x003c)
+
+#define DIV_INT_8BIT_MAX GENMASK(7, 0) /* max divide for most clocks */
+#define DIV_INT_16BIT_MAX GENMASK(15, 0) /* max divide for GPx, PWM */
+#define DIV_INT_24BIT_MAX GENMASK(23, 0) /* max divide for CLK_SYS */
+
+#define FC0_STATUS_DONE BIT(4)
+#define FC0_STATUS_RUNNING BIT(8)
+#define FC0_RESULT_FRAC_SHIFT 5
+
+#define PLL_PRIM_DIV1_MASK GENMASK(18, 16)
+#define PLL_PRIM_DIV2_MASK GENMASK(14, 12)
+
+#define PLL_SEC_DIV_MASK GENMASK(12, 8)
+
+#define PLL_CS_LOCK BIT(31)
+#define PLL_CS_REFDIV_MASK BIT(1)
+
+#define PLL_PWR_PD BIT(0)
+#define PLL_PWR_DACPD BIT(1)
+#define PLL_PWR_DSMPD BIT(2)
+#define PLL_PWR_POSTDIVPD BIT(3)
+#define PLL_PWR_4PHASEPD BIT(4)
+#define PLL_PWR_VCOPD BIT(5)
+#define PLL_PWR_MASK GENMASK(5, 0)
+
+#define PLL_SEC_RST BIT(16)
+#define PLL_SEC_IMPL BIT(31)
+
+/* PLL phase output for both PRI and SEC */
+#define PLL_PH_EN BIT(4)
+#define PLL_PH_PHASE_SHIFT 0
+
+#define RP1_PLL_PHASE_0 0
+#define RP1_PLL_PHASE_90 1
+#define RP1_PLL_PHASE_180 2
+#define RP1_PLL_PHASE_270 3
+
+/* Clock fields for all clocks */
+#define CLK_CTRL_ENABLE BIT(11)
+#define CLK_CTRL_AUXSRC_MASK GENMASK(9, 5)
+#define CLK_CTRL_SRC_SHIFT 0
+#define CLK_DIV_FRAC_BITS 16
+
+#define LOCK_TIMEOUT_US 100000
+#define LOCK_POLL_DELAY_US 5
+
+#define MAX_CLK_PARENTS 16
+
+#define PLL_DIV_INVALID 19
+/*
+ * Secondary PLL channel output divider table.
+ * Valid divider values run from 8 to 18; register encodings
+ * outside that range map to PLL_DIV_INVALID (19) and must not
+ * be used.
+ */
+static const struct clk_div_table pll_sec_div_table[] = {
+ { 0x00, PLL_DIV_INVALID },
+ { 0x01, PLL_DIV_INVALID },
+ { 0x02, PLL_DIV_INVALID },
+ { 0x03, PLL_DIV_INVALID },
+ { 0x04, PLL_DIV_INVALID },
+ { 0x05, PLL_DIV_INVALID },
+ { 0x06, PLL_DIV_INVALID },
+ { 0x07, PLL_DIV_INVALID },
+ { 0x08, 8 },
+ { 0x09, 9 },
+ { 0x0a, 10 },
+ { 0x0b, 11 },
+ { 0x0c, 12 },
+ { 0x0d, 13 },
+ { 0x0e, 14 },
+ { 0x0f, 15 },
+ { 0x10, 16 },
+ { 0x11, 17 },
+ { 0x12, 18 },
+ { 0x13, PLL_DIV_INVALID },
+ { 0x14, PLL_DIV_INVALID },
+ { 0x15, PLL_DIV_INVALID },
+ { 0x16, PLL_DIV_INVALID },
+ { 0x17, PLL_DIV_INVALID },
+ { 0x18, PLL_DIV_INVALID },
+ { 0x19, PLL_DIV_INVALID },
+ { 0x1a, PLL_DIV_INVALID },
+ { 0x1b, PLL_DIV_INVALID },
+ { 0x1c, PLL_DIV_INVALID },
+ { 0x1d, PLL_DIV_INVALID },
+ { 0x1e, PLL_DIV_INVALID },
+ { 0x1f, PLL_DIV_INVALID },
+ { 0 }
+};
+
+struct rp1_clockman {
+ struct device *dev;
+ void __iomem *regs;
+ struct regmap *regmap;
+ spinlock_t regs_lock; /* spinlock for all clocks */
+
+ /* Must be last */
+ struct clk_hw_onecell_data onecell;
+};
+
+struct rp1_pll_core_data {
+ u32 cs_reg;
+ u32 pwr_reg;
+ u32 fbdiv_int_reg;
+ u32 fbdiv_frac_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_data {
+ u32 ctrl_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_ph_data {
+ unsigned int phase;
+ unsigned int fixed_divider;
+ u32 ph_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_divider_data {
+ u32 sec_reg;
+ u32 fc0_src;
+};
+
+struct rp1_clock_data {
+ int num_std_parents;
+ int num_aux_parents;
+ u32 oe_mask;
+ u32 clk_src_mask;
+ u32 ctrl_reg;
+ u32 div_int_reg;
+ u32 div_frac_reg;
+ u32 sel_reg;
+ u32 div_int_max;
+ unsigned long max_freq;
+ u32 fc0_src;
+};
+
+struct rp1_clk_desc {
+ struct clk_hw *(*clk_register)(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc);
+ const void *data;
+ struct clk_hw hw;
+ struct rp1_clockman *clockman;
+ unsigned long cached_rate;
+ struct clk_divider div;
+};
+
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
+static inline
+void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
+{
+ regmap_write(clockman->regmap, reg, val);
+}
+
+static inline u32 clockman_read(struct rp1_clockman *clockman, u32 reg)
+{
+ u32 val;
+
+ regmap_read(clockman->regmap, reg, &val);
+
+ return val;
+}
+
+static int rp1_pll_core_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 pwr = clockman_read(clockman, data->pwr_reg);
+
+ return (pwr & PLL_PWR_PD) || (pwr & PLL_PWR_POSTDIVPD);
+}
+
+static int rp1_pll_core_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_frac, val;
+ int ret;
+
+ spin_lock(&clockman->regs_lock);
+
+ if (!(clockman_read(clockman, data->cs_reg) & PLL_CS_LOCK)) {
+ /* Reset to a known state. */
+ clockman_write(clockman, data->pwr_reg, PLL_PWR_MASK);
+ clockman_write(clockman, data->fbdiv_int_reg, 20);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ clockman_write(clockman, data->cs_reg, PLL_CS_REFDIV_MASK);
+ }
+
+ /* Come out of reset. */
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Wait for the PLL to lock. */
+ ret = regmap_read_poll_timeout(clockman->regmap, data->cs_reg, val,
+ val & PLL_CS_LOCK,
+ LOCK_POLL_DELAY_US, LOCK_TIMEOUT_US);
+ if (ret)
+ dev_err(clockman->dev, "%s: can't lock PLL\n",
+ clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void rp1_pll_core_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static inline unsigned long get_pll_core_divider(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div_int, u32 *div_frac)
+{
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+ u64 div_fp64; /* 32.32 fixed point fraction. */
+
+ /* Factor of reference clock to VCO frequency. */
+ div_fp64 = (u64)(rate) << 32;
+ div_fp64 = DIV_ROUND_CLOSEST_ULL(div_fp64, parent_rate);
+
+ /* Round the fractional component at 24 bits. */
+ div_fp64 += 1 << (32 - 24 - 1);
+
+ fbdiv_int = div_fp64 >> 32;
+ fbdiv_frac = (div_fp64 >> (32 - 24)) & 0xffffff;
+
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ *div_int = fbdiv_int;
+ *div_frac = fbdiv_frac;
+
+ return calc_rate;
+}
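The ratio is formed in 32.32 fixed point and then rounded to an integer
part plus a 24-bit fraction. For example, a 1000 MHz VCO request from a
50 MHz reference gives div_fp64 = 20.0 exactly, hence fbdiv_int = 20 and
fbdiv_frac = 0, and the reconstruction of calc_rate returns exactly
1000 MHz.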
+
+static int rp1_pll_core_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+
+ /* Disable dividers to start with. */
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->fbdiv_int_reg, 0);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ clockman_write(clockman, data->fbdiv_int_reg, fbdiv_int);
+ clockman_write(clockman, data->fbdiv_frac_reg, fbdiv_frac);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Check that reference frequency is no greater than VCO / 16. */
+ if (WARN_ON_ONCE(parent_rate > (rate / 16)))
+ return -ERANGE;
+
+ spin_lock(&clockman->regs_lock);
+ /* Don't need to divide ref unless parent_rate > (output freq / 16) */
+ clockman_write(clockman, data->cs_reg,
+ clockman_read(clockman, data->cs_reg) |
+ PLL_CS_REFDIV_MASK);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+
+ fbdiv_int = clockman_read(clockman, data->fbdiv_int_reg);
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ return calc_rate;
+}
+
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ u32 fbdiv_int, fbdiv_frac;
+
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
+}
+
+static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
+ u32 *divider1, u32 *divider2)
+{
+ unsigned int div1, div2;
+ unsigned int best_div1 = 7, best_div2 = 7;
+ unsigned long best_rate_diff =
+ abs_diff(DIV_ROUND_CLOSEST(parent_rate, best_div1 * best_div2), rate);
+ unsigned long rate_diff, calc_rate;
+
+ for (div1 = 1; div1 <= 7; div1++) {
+ for (div2 = 1; div2 <= div1; div2++) {
+ calc_rate = DIV_ROUND_CLOSEST(parent_rate, div1 * div2);
+ rate_diff = abs_diff(calc_rate, rate);
+
+ if (calc_rate == rate) {
+ best_div1 = div1;
+ best_div2 = div2;
+ goto done;
+ } else if (rate_diff < best_rate_diff) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate_diff = rate_diff;
+ }
+ }
+ }
+
+done:
+ *divider1 = best_div1;
+ *divider2 = best_div2;
+}
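The nested search walks div1 from 1 to 7 and div2 from 1 to div1 (the
second post-divider never exceeds the first), keeping the pair whose
quotient lands closest to the request. For a hypothetical 1000 MHz
parent and a 200 MHz request, the exact match div1 = 5, div2 = 1 ends
the search early via the goto.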
+
+static int rp1_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+
+ u32 prim, prim_div1, prim_div2;
+
+ get_pll_prim_dividers(rate, parent_rate, &prim_div1, &prim_div2);
+
+ spin_lock(&clockman->regs_lock);
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim &= ~PLL_PRIM_DIV1_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV1_MASK, prim_div1);
+ prim &= ~PLL_PRIM_DIV2_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV2_MASK, prim_div2);
+ clockman_write(clockman, data->ctrl_reg, prim);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+ u32 prim, prim_div1, prim_div2;
+
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim_div1 = FIELD_GET(PLL_PRIM_DIV1_MASK, prim);
+ prim_div2 = FIELD_GET(PLL_PRIM_DIV2_MASK, prim);
+
+ if (!prim_div1 || !prim_div2) {
+ dev_err(clockman->dev, "%s: (%s) zero divider value\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
+}
+
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ u32 div1, div2;
+
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
+
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
+}
+
+static int rp1_pll_ph_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return !!(clockman_read(clockman, data->ph_reg) & PLL_PH_EN);
+}
+
+static int rp1_pll_ph_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+ u32 ph_reg;
+
+ spin_lock(&clockman->regs_lock);
+ ph_reg = clockman_read(clockman, data->ph_reg);
+ ph_reg |= data->phase << PLL_PH_PHASE_SHIFT;
+ ph_reg |= PLL_PH_EN;
+ clockman_write(clockman, data->ph_reg, ph_reg);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_ph_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ph_reg,
+ clockman_read(clockman, data->ph_reg) & ~PLL_PH_EN);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return parent_rate / data->fixed_divider;
+}
+
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
+}
+
+static int rp1_pll_divider_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ return !(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_RST);
+}
+
+static int rp1_pll_divider_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ /* Check the implementation bit is set! */
+ WARN_ON(!(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_IMPL));
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_divider_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static int rp1_pll_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+ u32 div, sec;
+
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+ div = clamp(div, 8u, 19u);
+
+ spin_lock(&clockman->regs_lock);
+ sec = clockman_read(clockman, data->ctrl_reg);
+ sec &= ~PLL_SEC_DIV_MASK;
+ sec |= FIELD_PREP(PLL_SEC_DIV_MASK, div);
+
+ /* Must keep the divider in reset to change the value. */
+ sec |= PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+
+	/* Must wait 10 PLL VCO cycles before releasing reset. */
+ ndelay(div64_ul(10ULL * div * NSEC_PER_SEC, parent_rate));
+
+ sec &= ~PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+					  struct clk_rate_request *req)
+{
+	return clk_divider_ops.determine_rate(hw, req);
+}
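Note that clk_divider_ops.determine_rate() writes the chosen rate into
req->rate itself and returns only a status code, so the wrapper merely
forwards that status.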
+
+static int rp1_clock_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ return !!(clockman_read(clockman, data->ctrl_reg) & CLK_CTRL_ENABLE);
+}
+
+static unsigned long rp1_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u64 calc_rate;
+ u64 div;
+ u32 frac;
+
+ div = clockman_read(clockman, data->div_int_reg);
+ frac = (data->div_frac_reg != 0) ?
+ clockman_read(clockman, data->div_frac_reg) : 0;
+
+ /* If the integer portion of the divider is 0, treat it as 2^16 */
+ if (!div)
+ div = 1 << 16;
+
+ div = (div << CLK_DIV_FRAC_BITS) | (frac >> (32 - CLK_DIV_FRAC_BITS));
+
+ calc_rate = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ calc_rate = div64_u64(calc_rate, div);
+
+ return calc_rate;
+}
+
+static int rp1_clock_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn on the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) | data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_clock_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn off the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) & ~data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static u32 rp1_clock_choose_div(unsigned long rate, unsigned long parent_rate,
+ const struct rp1_clock_data *data)
+{
+ u64 div;
+
+ /*
+ * Due to earlier rounding, the calculated parent_rate may differ from
+ * the expected value. Don't fail on a small discrepancy near a unity
+ * (1:1) divide.
+ */
+ if (!rate || rate > parent_rate + (parent_rate >> CLK_DIV_FRAC_BITS))
+ return 0;
+
+ /*
+ * Always express div in fixed-point format for fractional division;
+ * if no fractional divider is present, the fraction part will be zero.
+ */
+ if (data->div_frac_reg) {
+ div = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ div = DIV_ROUND_CLOSEST_ULL(div, rate);
+ } else {
+ div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
+ div <<= CLK_DIV_FRAC_BITS;
+ }
+
+ div = clamp(div,
+ 1ull << CLK_DIV_FRAC_BITS,
+ (u64)data->div_int_max << CLK_DIV_FRAC_BITS);
+
+ return div;
+}
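+
+/*
+ * Illustrative example: requesting 48 MHz from a 200 MHz parent on a
+ * clock with a fractional divider gives
+ * div = DIV_ROUND_CLOSEST_ULL(200 MHz << 16, 48 MHz) == 0x42AAB,
+ * i.e. ~4.1667 in Q16, comfortably inside the [1.0, div_int_max] clamp.
+ */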
+
+static u8 rp1_clock_get_parent(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 sel, ctrl;
+ u8 parent;
+
+ /* Sel is one-hot, so find the first bit set */
+ sel = clockman_read(clockman, data->sel_reg);
+ parent = ffs(sel) - 1;
+
+ /* sel == 0 implies the parent clock is not enabled yet. */
+ if (!sel) {
+ /* Read the clock src from the CTRL register instead */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = (ctrl & data->clk_src_mask) >> CLK_CTRL_SRC_SHIFT;
+ }
+
+ if (parent >= data->num_std_parents)
+ parent = AUX_SEL;
+
+ if (parent == AUX_SEL) {
+ /*
+ * Clock parent is an auxiliary source, so get the parent from
+ * the AUXSRC register field.
+ */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = FIELD_GET(CLK_CTRL_AUXSRC_MASK, ctrl);
+ parent += data->num_std_parents;
+ }
+
+ return parent;
+}
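+
+/*
+ * Readback summary with hypothetical register values: sel == BIT(1)
+ * with num_std_parents == 3 reports standard parent 1; sel == 0 (clock
+ * not yet enabled) falls back to the CTRL SRC field; and anything at or
+ * beyond num_std_parents is resolved through the AUXSRC field.
+ */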
+
+static int rp1_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 ctrl, sel;
+
+ spin_lock(&clockman->regs_lock);
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+
+ if (index >= data->num_std_parents) {
+ /* This is an aux source request */
+ if (index >= data->num_std_parents + data->num_aux_parents) {
+ spin_unlock(&clockman->regs_lock);
+ return -EINVAL;
+ }
+
+ /* Select parent from aux list */
+ ctrl &= ~CLK_CTRL_AUXSRC_MASK;
+ ctrl |= FIELD_PREP(CLK_CTRL_AUXSRC_MASK, index - data->num_std_parents);
+ /* Set src to aux list */
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (AUX_SEL << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ } else {
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (index << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ }
+
+ clockman_write(clockman, data->ctrl_reg, ctrl);
+ spin_unlock(&clockman->regs_lock);
+
+ sel = rp1_clock_get_parent(hw);
+ if (sel != index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rp1_clock_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 parent)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 div = rp1_clock_choose_div(rate, parent_rate, data);
+
+ spin_lock(&clockman->regs_lock);
+
+ clockman_write(clockman, data->div_int_reg, div >> CLK_DIV_FRAC_BITS);
+ if (data->div_frac_reg)
+ clockman_write(clockman, data->div_frac_reg, div << (32 - CLK_DIV_FRAC_BITS));
+
+ spin_unlock(&clockman->regs_lock);
+
+ if (parent != 0xff)
+ return rp1_clock_set_parent(hw, parent);
+
+ return 0;
+}
+
+static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
+}
+
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
+ if (best_rate < core_max) {
+ div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate;
+ div_int = div >> 24;
+ div_frac = div % (1 << 24);
+ core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
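+
+/*
+ * Example (assuming a 50 MHz crystal, so core_min == 800 MHz): for a
+ * 12.288 MHz I2S target the search settles on div_prim == 2 and
+ * div_clk == 33, i.e. a VCO rate of 12.288 MHz * 66 == 811.008 MHz,
+ * which is then rounded to the nearest xosc * (fbdiv / 2^24) step.
+ */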
+
+static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ unsigned long *prate,
+ unsigned long *calc_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
+ struct clk_hw *parent;
+ u32 div;
+ u64 tmp;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
+ *prate = clk_hw_get_rate(parent);
+ div = rp1_clock_choose_div(rate, *prate, data);
+
+ if (!div) {
+ *calc_rate = 0;
+ return;
+ }
+
+ /* Recalculate to account for rounding errors */
+ tmp = (u64)*prate << CLK_DIV_FRAC_BITS;
+ tmp = div_u64(tmp, div);
+
+ /*
+ * Prevent overclocks - if all parent choices result in
+ * a downstream clock in excess of the maximum, then the
+ * call to set the clock will fail.
+ */
+ if (tmp > data->max_freq)
+ *calc_rate = 0;
+ else
+ *calc_rate = tmp;
+}
+
+static int rp1_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent, *best_parent = NULL;
+ unsigned long best_rate = 0;
+ unsigned long best_prate = 0;
+ unsigned long best_rate_diff = ULONG_MAX;
+ unsigned long prate, calc_rate;
+ size_t i;
+
+ /*
+ * If the NO_REPARENT flag is set, try to use the existing parent.
+ */
+ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT)) {
+ i = rp1_clock_get_parent(hw);
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (parent) {
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+ if (calc_rate > 0) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = prate;
+ req->rate = calc_rate;
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * Select parent clock that results in the closest rate (lower or
+ * higher)
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+
+ if (abs_diff(calc_rate, req->rate) < best_rate_diff) {
+ best_parent = parent;
+ best_prate = prate;
+ best_rate = calc_rate;
+ best_rate_diff = abs_diff(calc_rate, req->rate);
+
+ if (best_rate_diff == 0)
+ break;
+ }
+ }
+
+ if (best_rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_prate;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+ * "varsrc" exists purely to let clock dividers know the frequency
+ * of an externally-managed clock source (such as the MIPI DSI
+ * byte-clock), which may change at run-time as a side effect of
+ * some other driver.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return 0;
+}
+
+static const struct clk_ops rp1_pll_core_ops = {
+ .is_prepared = rp1_pll_core_is_on,
+ .prepare = rp1_pll_core_on,
+ .unprepare = rp1_pll_core_off,
+ .set_rate = rp1_pll_core_set_rate,
+ .recalc_rate = rp1_pll_core_recalc_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ops = {
+ .set_rate = rp1_pll_set_rate,
+ .recalc_rate = rp1_pll_recalc_rate,
+ .determine_rate = rp1_pll_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ph_ops = {
+ .is_prepared = rp1_pll_ph_is_on,
+ .prepare = rp1_pll_ph_on,
+ .unprepare = rp1_pll_ph_off,
+ .recalc_rate = rp1_pll_ph_recalc_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_divider_ops = {
+ .is_prepared = rp1_pll_divider_is_on,
+ .prepare = rp1_pll_divider_on,
+ .unprepare = rp1_pll_divider_off,
+ .set_rate = rp1_pll_divider_set_rate,
+ .recalc_rate = rp1_pll_divider_recalc_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
+};
+
+static const struct clk_ops rp1_clk_ops = {
+ .is_prepared = rp1_clock_is_on,
+ .prepare = rp1_clock_on,
+ .unprepare = rp1_clock_off,
+ .recalc_rate = rp1_clock_recalc_rate,
+ .get_parent = rp1_clock_get_parent,
+ .set_parent = rp1_clock_set_parent,
+ .set_rate_and_parent = rp1_clock_set_rate_and_parent,
+ .set_rate = rp1_clock_set_rate,
+ .determine_rate = rp1_clock_determine_rate,
+};
+
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
+static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ int ret;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+static struct clk_hw *rp1_register_pll_divider(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_pll_data *divider_data = desc->data;
+ int ret;
+
+ desc->div.reg = clockman->regs + divider_data->ctrl_reg;
+ desc->div.shift = __ffs(PLL_SEC_DIV_MASK);
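+ /* The field width equals the first clear bit of the down-shifted mask. */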
+ desc->div.width = __ffs(~(PLL_SEC_DIV_MASK >> desc->div.shift));
+ desc->div.flags = CLK_DIVIDER_ROUND_CLOSEST;
+ desc->div.lock = &clockman->regs_lock;
+ desc->div.hw.init = desc->hw.init;
+ desc->div.table = pll_sec_div_table;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->div.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->div.hw;
+}
+
+static struct clk_hw *rp1_register_clock(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_clock_data *clock_data = desc->data;
+ int ret;
+
+ if (WARN_ON_ONCE(MAX_CLK_PARENTS <
+ clock_data->num_std_parents + clock_data->num_aux_parents))
+ return ERR_PTR(-EINVAL);
+
+ /* There must be a gap for the AUX selector */
+ if (WARN_ON_ONCE(clock_data->num_std_parents > AUX_SEL &&
+ desc->hw.init->parent_data[AUX_SEL].index != -1))
+ return ERR_PTR(-EINVAL);
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+/* Assignment helper macros for different clock types. */
+#define _REGISTER(f, ...) { .clk_register = f, __VA_ARGS__ }
+
+#define CLK_DATA(type, ...) .data = &(struct type) { __VA_ARGS__ }
+
+#define REGISTER_PLL(...) _REGISTER(&rp1_register_pll, \
+ __VA_ARGS__)
+
+#define REGISTER_PLL_DIV(...) _REGISTER(&rp1_register_pll_divider, \
+ __VA_ARGS__)
+
+#define REGISTER_CLK(...) _REGISTER(&rp1_register_clock, \
+ __VA_ARGS__)
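+
+/*
+ * Each descriptor below pairs generic clk_init_data with type-specific
+ * register data and a registration callback, letting rp1_clk_probe()
+ * register every clock through a single walk of clk_desc_array[].
+ */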
+
+static struct rp1_clk_desc pll_sys_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_SYS_CS,
+ .pwr_reg = PLL_SYS_PWR,
+ .fbdiv_int_reg = PLL_SYS_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_SYS_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_audio_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_AUDIO_CS,
+ .pwr_reg = PLL_AUDIO_PWR,
+ .fbdiv_int_reg = PLL_AUDIO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_AUDIO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_video_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_VIDEO_CS,
+ .pwr_reg = PLL_VIDEO_PWR,
+ .fbdiv_int_reg = PLL_VIDEO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_VIDEO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_PRIM,
+ .fc0_src = FC_NUM(0, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_SEC,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth_tsu",
+ clk_eth_tsu_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = CLK_ETH_TSU_CTRL,
+ .div_int_reg = CLK_ETH_TSU_DIV_INT,
+ .sel_reg = CLK_ETH_TSU_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 7),
+ )
+);
+
+static const struct clk_parent_data clk_eth_parents[] = {
+ { .hw = &pll_sys_sec_desc.div.hw },
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth",
+ clk_eth_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_ETH_CTRL,
+ .div_int_reg = CLK_ETH_DIV_INT,
+ .sel_reg = CLK_ETH_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 125 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 6),
+ )
+);
+
+static const struct clk_parent_data clk_sys_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sys",
+ clk_sys_parents,
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 3,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SYS_CTRL,
+ .div_int_reg = CLK_SYS_DIV_INT,
+ .sel_reg = CLK_SYS_SEL,
+ .div_int_max = DIV_INT_24BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 4),
+ .clk_src_mask = 0x3,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_SYS_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(1, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc *const clk_desc_array[] = {
+ [RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
+ [RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
+ [RP1_PLL_VIDEO_CORE] = &pll_video_core_desc,
+ [RP1_PLL_SYS] = &pll_sys_desc,
+ [RP1_CLK_ETH_TSU] = &clk_eth_tsu_desc,
+ [RP1_CLK_ETH] = &clk_eth_desc,
+ [RP1_CLK_SYS] = &clk_sys_desc,
+ [RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
+ [RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
+};
+
+static const struct regmap_range rp1_reg_ranges[] = {
+ regmap_reg_range(PLL_SYS_CS, PLL_SYS_SEC),
+ regmap_reg_range(PLL_AUDIO_CS, PLL_AUDIO_TERN),
+ regmap_reg_range(PLL_VIDEO_CS, PLL_VIDEO_SEC),
+ regmap_reg_range(GPCLK_OE_CTRL, GPCLK_OE_CTRL),
+ regmap_reg_range(CLK_SYS_CTRL, CLK_SYS_DIV_INT),
+ regmap_reg_range(CLK_SYS_SEL, CLK_SYS_SEL),
+ regmap_reg_range(CLK_SLOW_SYS_CTRL, CLK_SLOW_SYS_DIV_INT),
+ regmap_reg_range(CLK_SLOW_SYS_SEL, CLK_SLOW_SYS_SEL),
+ regmap_reg_range(CLK_DMA_CTRL, CLK_DMA_DIV_INT),
+ regmap_reg_range(CLK_DMA_SEL, CLK_DMA_SEL),
+ regmap_reg_range(CLK_UART_CTRL, CLK_UART_DIV_INT),
+ regmap_reg_range(CLK_UART_SEL, CLK_UART_SEL),
+ regmap_reg_range(CLK_ETH_CTRL, CLK_ETH_DIV_INT),
+ regmap_reg_range(CLK_ETH_SEL, CLK_ETH_SEL),
+ regmap_reg_range(CLK_PWM0_CTRL, CLK_PWM0_SEL),
+ regmap_reg_range(CLK_PWM1_CTRL, CLK_PWM1_SEL),
+ regmap_reg_range(CLK_AUDIO_IN_CTRL, CLK_AUDIO_IN_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_IN_SEL, CLK_AUDIO_IN_SEL),
+ regmap_reg_range(CLK_AUDIO_OUT_CTRL, CLK_AUDIO_OUT_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_OUT_SEL, CLK_AUDIO_OUT_SEL),
+ regmap_reg_range(CLK_I2S_CTRL, CLK_I2S_DIV_INT),
+ regmap_reg_range(CLK_I2S_SEL, CLK_I2S_SEL),
+ regmap_reg_range(CLK_MIPI0_CFG_CTRL, CLK_MIPI0_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI0_CFG_SEL, CLK_MIPI0_CFG_SEL),
+ regmap_reg_range(CLK_MIPI1_CFG_CTRL, CLK_MIPI1_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI1_CFG_SEL, CLK_MIPI1_CFG_SEL),
+ regmap_reg_range(CLK_PCIE_AUX_CTRL, CLK_PCIE_AUX_DIV_INT),
+ regmap_reg_range(CLK_PCIE_AUX_SEL, CLK_PCIE_AUX_SEL),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_CTRL, CLK_USBH0_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_SEL, CLK_USBH0_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_CTRL, CLK_USBH1_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_SEL, CLK_USBH1_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH0_SUSPEND_CTRL, CLK_USBH0_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH0_SUSPEND_SEL, CLK_USBH0_SUSPEND_SEL),
+ regmap_reg_range(CLK_USBH1_SUSPEND_CTRL, CLK_USBH1_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH1_SUSPEND_SEL, CLK_USBH1_SUSPEND_SEL),
+ regmap_reg_range(CLK_ETH_TSU_CTRL, CLK_ETH_TSU_DIV_INT),
+ regmap_reg_range(CLK_ETH_TSU_SEL, CLK_ETH_TSU_SEL),
+ regmap_reg_range(CLK_ADC_CTRL, CLK_ADC_DIV_INT),
+ regmap_reg_range(CLK_ADC_SEL, CLK_ADC_SEL),
+ regmap_reg_range(CLK_SDIO_TIMER_CTRL, CLK_SDIO_TIMER_DIV_INT),
+ regmap_reg_range(CLK_SDIO_TIMER_SEL, CLK_SDIO_TIMER_SEL),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_CTRL, CLK_SDIO_ALT_SRC_DIV_INT),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_SEL, CLK_SDIO_ALT_SRC_SEL),
+ regmap_reg_range(CLK_GP0_CTRL, CLK_GP0_SEL),
+ regmap_reg_range(CLK_GP1_CTRL, CLK_GP1_SEL),
+ regmap_reg_range(CLK_GP2_CTRL, CLK_GP2_SEL),
+ regmap_reg_range(CLK_GP3_CTRL, CLK_GP3_SEL),
+ regmap_reg_range(CLK_GP4_CTRL, CLK_GP4_SEL),
+ regmap_reg_range(CLK_GP5_CTRL, CLK_GP5_SEL),
+ regmap_reg_range(CLK_SYS_RESUS_CTRL, CLK_SYS_RESUS_CTRL),
+ regmap_reg_range(CLK_SLOW_SYS_RESUS_CTRL, CLK_SLOW_SYS_RESUS_CTRL),
+ regmap_reg_range(FC0_REF_KHZ, FC0_RESULT),
+ regmap_reg_range(VIDEO_CLK_VEC_CTRL, VIDEO_CLK_VEC_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_VEC_SEL, VIDEO_CLK_DPI_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_DPI_SEL, VIDEO_CLK_MIPI1_DPI_SEL),
+};
+
+static const struct regmap_access_table rp1_reg_table = {
+ .yes_ranges = rp1_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_reg_ranges),
+};
+
+static const struct regmap_config rp1_clk_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_VIDEO_SEC,
+ .name = "rp1-clk",
+ .rd_table = &rp1_reg_table,
+ .disable_locking = true,
+};
+
+static int rp1_clk_probe(struct platform_device *pdev)
+{
+ const size_t asize = ARRAY_SIZE(clk_desc_array);
+ struct rp1_clk_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct rp1_clockman *clockman;
+ struct clk_hw **hws;
+ unsigned int i;
+
+ clockman = devm_kzalloc(dev, struct_size(clockman, onecell.hws, asize),
+ GFP_KERNEL);
+ if (!clockman)
+ return -ENOMEM;
+
+ spin_lock_init(&clockman->regs_lock);
+ clockman->dev = dev;
+
+ clockman->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clockman->regs))
+ return PTR_ERR(clockman->regs);
+
+ clockman->regmap = devm_regmap_init_mmio(dev, clockman->regs,
+ &rp1_clk_regmap_cfg);
+ if (IS_ERR(clockman->regmap)) {
+ dev_err_probe(dev, PTR_ERR(clockman->regmap),
+ "could not init clock regmap\n");
+ return PTR_ERR(clockman->regmap);
+ }
+
+ clockman->onecell.num = asize;
+ hws = clockman->onecell.hws;
+
+ for (i = 0; i < asize; i++) {
+ desc = clk_desc_array[i];
+ if (desc && desc->clk_register && desc->data)
+ hws[i] = desc->clk_register(clockman, desc);
+ }
+
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
+ platform_set_drvdata(pdev, clockman);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &clockman->onecell);
+}
+
+static const struct of_device_id rp1_clk_of_match[] = {
+ { .compatible = "raspberrypi,rp1-clocks" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rp1_clk_of_match);
+
+static struct platform_driver rp1_clk_driver = {
+ .driver = {
+ .name = "rp1-clk",
+ .of_match_table = rp1_clk_of_match,
+ },
+ .probe = rp1_clk_probe,
+};
+
+module_platform_driver(rp1_clk_driver);
+
+MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
+MODULE_AUTHOR("Andrea della Porta <andrea.porta@suse.com>");
+MODULE_DESCRIPTION("RP1 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c
new file mode 100644
index 000000000000..921296aafa68
--- /dev/null
+++ b/drivers/clk/clk-rpmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V MPXY Based Clock Driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16
+#define RPMI_CLK_NAME_LEN 16
+
+#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)
+
+enum rpmi_clk_config {
+ RPMI_CLK_DISABLE = 0,
+ RPMI_CLK_ENABLE = 1,
+ RPMI_CLK_CONFIG_MAX_IDX
+};
+
+#define RPMI_CLK_TYPE_MASK GENMASK(1, 0)
+enum rpmi_clk_type {
+ RPMI_CLK_DISCRETE = 0,
+ RPMI_CLK_LINEAR = 1,
+ RPMI_CLK_TYPE_MAX_IDX
+};
+
+struct rpmi_clk_context {
+ struct device *dev;
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ u32 max_msg_data_size;
+};
+
+/*
+ * rpmi_clk_rates mirrors the rate format defined by the RPMI
+ * specification, so no conversion to or from another representation
+ * (e.g., struct linear_range) is needed.
+ */
+union rpmi_clk_rates {
+ u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
+ struct {
+ u64 min;
+ u64 max;
+ u64 step;
+ } linear;
+};
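+
+/*
+ * For example (illustrative values), a discrete clock advertising
+ * {100, 200, 400} MHz occupies discrete[0..2], while a linear clock
+ * spanning 100-400 MHz in 25 MHz increments is described by
+ * .linear = { .min = 100 MHz, .max = 400 MHz, .step = 25 MHz }.
+ */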
+
+struct rpmi_clk {
+ struct rpmi_clk_context *context;
+ u32 id;
+ u32 num_rates;
+ u32 transition_latency;
+ enum rpmi_clk_type type;
+ union rpmi_clk_rates *rates;
+ char name[RPMI_CLK_NAME_LEN];
+ struct clk_hw hw;
+};
+
+struct rpmi_clk_rate_discrete {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_clk_rate_linear {
+ __le32 min_lo;
+ __le32 min_hi;
+ __le32 max_lo;
+ __le32 max_hi;
+ __le32 step_lo;
+ __le32 step_hi;
+};
+
+struct rpmi_get_num_clocks_rx {
+ __le32 status;
+ __le32 num_clocks;
+};
+
+struct rpmi_get_attrs_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_attrs_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 num_rates;
+ __le32 transition_latency;
+ char name[RPMI_CLK_NAME_LEN];
+};
+
+struct rpmi_get_supp_rates_tx {
+ __le32 clkid;
+ __le32 clk_rate_idx;
+};
+
+struct rpmi_get_supp_rates_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 remaining;
+ __le32 returned;
+ __le32 rates[];
+};
+
+struct rpmi_get_rate_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_rate_rx {
+ __le32 status;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_tx {
+ __le32 clkid;
+ __le32 flags;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_rx {
+ __le32 status;
+};
+
+struct rpmi_set_config_tx {
+ __le32 clkid;
+ __le32 config;
+};
+
+struct rpmi_set_config_rx {
+ __le32 status;
+};
+
+static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
+{
+ return (((u64)(__hi) << 32) | (u32)(__lo));
+}
+
+static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
+{
+ struct rpmi_get_num_clocks_rx rx, *resp;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
+ NULL, 0, &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return le32_to_cpu(resp->num_clocks);
+}
+
+static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_attrs_tx tx;
+ struct rpmi_get_attrs_rx rx, *resp;
+ u8 format;
+ int ret;
+
+ tx.clkid = cpu_to_le32(clkid);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ rpmi_clk->id = clkid;
+ rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
+ rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
+ strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);
+
+ format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
+ if (format >= RPMI_CLK_TYPE_MAX_IDX)
+ return -EINVAL;
+
+ rpmi_clk->type = format;
+
+ return 0;
+}
+
+static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_clk_rate_discrete *rate_discrete;
+ struct rpmi_clk_rate_linear *rate_linear;
+ struct rpmi_get_supp_rates_tx tx;
+ struct rpmi_get_supp_rates_rx *resp;
+ struct rpmi_mbox_message msg;
+ size_t clk_rate_idx;
+ int ret, rateidx, j;
+
+ tx.clkid = cpu_to_le32(clkid);
+ tx.clk_rate_idx = 0;
+
+ /*
+ * Make sure the rx buffer we allocate is large enough to accommodate
+ * all the rates sent in one RPMI message.
+ */
+ struct rpmi_get_supp_rates_rx *rx __free(kfree) =
+ kzalloc(context->max_msg_data_size, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx), rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;
+
+ for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
+ rpmi_clk->rates->discrete[rateidx] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
+ le32_to_cpu(rate_discrete[rateidx].lo));
+ }
+
+ /*
+ * Keep sending the request message until all
+ * the rates are received.
+ */
+ clk_rate_idx = 0;
+ while (le32_to_cpu(resp->remaining)) {
+ clk_rate_idx += le32_to_cpu(resp->returned);
+ tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);
+
+ rpmi_mbox_init_send_with_response(&msg,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx),
+ rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ for (j = 0; j < le32_to_cpu(resp->returned); j++) {
+ if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
+ break;
+ rpmi_clk->rates->discrete[rateidx++] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
+ le32_to_cpu(rate_discrete[j].lo));
+ }
+ }
+ } else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
+ rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;
+
+ rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
+ le32_to_cpu(rate_linear->min_lo));
+ rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
+ le32_to_cpu(rate_linear->max_lo));
+ rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
+ le32_to_cpu(rate_linear->step_lo));
+ }
+
+ return 0;
+}
+
+static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_rate_tx tx;
+ struct rpmi_get_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ /* A rate of 0 tells the core the rate could not be read. */
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
+}
+
+static int rpmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ u64 fmin, fmax, ftmp;
+
+ /*
+ * Keep the requested rate if the clock uses the discrete format,
+ * and let the platform that actually controls the clock handle it.
+ */
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE)
+ return 0;
+
+ fmin = rpmi_clk->rates->linear.min;
+ fmax = rpmi_clk->rates->linear.max;
+
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
+ ftmp += rpmi_clk->rates->linear.step - 1;
+ do_div(ftmp, rpmi_clk->rates->linear.step);
+
+ req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
+
+ return 0;
+}
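+
+/*
+ * Illustrative example of the linear rounding above: with
+ * min == 100 MHz and step == 25 MHz, a request for 130 MHz is rounded
+ * up by (30 MHz + step - 1) / step == 2 steps, yielding 150 MHz.
+ */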
+
+static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_rate_tx tx;
+ struct rpmi_set_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+ tx.lo = cpu_to_le32(lower_32_bits(rate));
+ tx.hi = cpu_to_le32(upper_32_bits(rate));
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static int rpmi_clk_enable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx, *resp;
+ int ret;
+
+ tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static void rpmi_clk_disable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx;
+
+ tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ rpmi_mbox_send_message(context->chan, &msg);
+}
+
+static const struct clk_ops rpmi_clk_ops = {
+ .recalc_rate = rpmi_clk_recalc_rate,
+ .determine_rate = rpmi_clk_determine_rate,
+ .set_rate = rpmi_clk_set_rate,
+ .prepare = rpmi_clk_enable,
+ .unprepare = rpmi_clk_disable,
+};
+
+static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
+{
+ struct device *dev = context->dev;
+ unsigned long min_rate, max_rate;
+ union rpmi_clk_rates *rates;
+ struct rpmi_clk *rpmi_clk;
+ struct clk_init_data init = {};
+ struct clk_hw *clk_hw;
+ int ret;
+
+ rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
+ if (!rpmi_clk)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk->context = context;
+ rpmi_clk->rates = rates;
+
+ ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get clk-%u attributes\n",
+ clkid);
+
+ ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Get supported rates failed for clk-%u\n",
+ clkid);
+
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+ init.ops = &rpmi_clk_ops;
+ init.name = rpmi_clk->name;
+ clk_hw = &rpmi_clk->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, clk_hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Unable to register clk-%u\n",
+ clkid);
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ min_rate = rpmi_clk->rates->discrete[0];
+ max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
+ } else {
+ min_rate = rpmi_clk->rates->linear.min;
+ max_rate = rpmi_clk->rates->linear.max;
+ }
+
+ clk_hw_set_rate_range(clk_hw, min_rate, max_rate);
+
+ return clk_hw;
+}
+
+static void rpmi_clk_mbox_chan_release(void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_free_channel(chan);
+}
+
+static int rpmi_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int num_clocks, i;
+ struct clk_hw_onecell_data *clk_data;
+ struct rpmi_clk_context *context;
+ struct rpmi_mbox_message msg;
+ struct clk_hw *hw_ptr;
+ struct device *dev = &pdev->dev;
+
+ context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+ context->dev = dev;
+ platform_set_drvdata(pdev, context);
+
+ context->client.dev = context->dev;
+ context->client.rx_callback = NULL;
+ context->client.tx_block = false;
+ context->client.knows_txdone = true;
+ context->client.tx_tout = 0;
+
+ context->chan = mbox_request_channel(&context->client, 0);
+ if (IS_ERR(context->chan))
+ return PTR_ERR(context->chan);
+
+ ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get spec version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group ID\n");
+ if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group match failed, expected 0x%x, found 0x%x\n",
+ RPMI_SRVGRP_CLOCK, msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group version failed, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get max message data size\n");
+
+ context->max_msg_data_size = msg.attr.value;
+ num_clocks = rpmi_clk_get_num_clocks(context);
+ if (!num_clocks)
+ return dev_err_probe(dev, -ENODEV, "No clocks found\n");
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
+ clk_data->num = num_clocks;
+
+ for (i = 0; i < clk_data->num; i++) {
+ hw_ptr = rpmi_clk_enumerate(context, i);
+ if (IS_ERR(hw_ptr)) {
+ return dev_err_probe(dev, PTR_ERR(hw_ptr),
+ "Failed to register clk-%d\n", i);
+ }
+ clk_data->hws[i] = hw_ptr;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id rpmi_clk_of_match[] = {
+ { .compatible = "riscv,rpmi-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);
+
+static struct platform_driver rpmi_clk_driver = {
+ .driver = {
+ .name = "riscv-rpmi-clock",
+ .of_match_table = rpmi_clk_of_match,
+ },
+ .probe = rpmi_clk_probe,
+};
+module_platform_driver(rpmi_clk_driver);
+
+MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
+MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 014db6386624..ff7ce12a5da6 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
@@ -137,7 +138,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM;
+
switch (hwid) {
+ case S2MPG10:
+ s2mps11_reg = S2MPG10_PMIC_RTCBUF;
+ break;
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
@@ -186,7 +192,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
@@ -220,6 +225,7 @@ static void s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mpg10-clk", S2MPG10},
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
@@ -234,12 +240,15 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
* through platform_device_id.
*
* However if device's DT node contains proper clock compatible and driver is
- * built as a module, then the *module* matching will be done trough DT aliases.
+ * built as a module, then the *module* matching will be done through DT aliases.
* This requires of_device_id table. In the same time this will not change the
* actual *device* matching so do not add .of_match_table.
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
+ .compatible = "samsung,s2mpg10-clk",
+ .data = (void *)S2MPG10,
+ }, {
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 15510c2ff21c..6b286ea6f121 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -54,8 +54,8 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -67,20 +67,27 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* running at then.
*/
if (clk->info->rate_discrete)
- return rate;
+ return 0;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
- if (rate <= fmin)
- return fmin;
- else if (rate >= fmax)
- return fmax;
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
- ftmp = rate - fmin;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
- return ftmp * clk->info->range.step_size + fmin;
+ req->rate = ftmp * clk->info->range.step_size + fmin;
+
+ return 0;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -119,15 +126,6 @@ static u8 scmi_clk_get_parent(struct clk_hw *hw)
return p_idx;
}
-static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
-{
- /*
- * Suppose all the requested rates are supported, and let firmware
- * to handle the left work.
- */
- return 0;
-}
-
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -300,7 +298,6 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
- ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
ops->set_rate = scmi_clk_set_rate;
@@ -349,6 +346,8 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
+ int ret;
+ u32 val;
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
@@ -370,8 +369,13 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
- if (ci->extended_config)
- feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ if (ci->extended_config) {
+ ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ }
if (WARN_ON(feats_key >= db_size))
return NULL;
@@ -404,6 +408,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
+ struct scmi_clk *sclks;
if (!handle)
return -ENODEV;
@@ -430,18 +435,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
transport_is_atomic = handle->is_transport_atomic(handle,
&atomic_threshold_us);
+ sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL);
+ if (!sclks)
+ return -ENOMEM;
+
+ for (idx = 0; idx < count; idx++)
+ hws[idx] = &sclks[idx].hw;
+
for (idx = 0; idx < count; idx++) {
- struct scmi_clk *sclk;
+ struct scmi_clk *sclk = &sclks[idx];
const struct clk_ops *scmi_ops;
- sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
- if (!sclk)
- return -ENOMEM;
-
sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
- devm_kfree(dev, sclk);
+ hws[idx] = NULL;
continue;
}
@@ -451,7 +459,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
/*
* Note that the scmi_clk_ops_db is on the stack, not global,
- * because it cannot be shared between mulitple probe-sequences
+ * because it cannot be shared between multiple probe-sequences
* to avoid sharing the devm_ allocated clk_ops between multiple
* SCMI clk driver instances.
*/
@@ -479,13 +487,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk->parent_data);
- devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops->enable ? " (atomic ops)" : "");
- hws[idx] = &sclk->hw;
}
}
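
The new scmi_clk_determine_rate() keeps the old round_rate() arithmetic: clamp the request into [min_rate, max_rate], then round the distance above fmin up to the next step_size multiple. A standalone sketch of that rounding, with illustrative values rather than real SCMI firmware data:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_to_step(uint64_t rate, uint64_t fmin, uint64_t fmax,
			      uint64_t step)
{
	uint64_t ftmp;

	if (rate <= fmin)
		return fmin;
	if (rate >= fmax)
		return fmax;

	ftmp = rate - fmin + step - 1;	/* bias so the divide rounds up */
	ftmp /= step;			/* stands in for do_div() */

	return ftmp * step + fmin;
}

int main(void)
{
	/* fmin 100 MHz, step 25 MHz: 110 MHz rounds up to 125 MHz */
	printf("%llu\n", (unsigned long long)
	       round_to_step(110000000ULL, 100000000ULL, 200000000ULL, 25000000ULL));
	return 0;
}
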
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 19d530d52e64..0b592de7bdb2 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -32,8 +32,8 @@ static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
return clk->scpi_ops->clk_get_val(clk->id);
}
-static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* We can't figure out what rate it will be, so just return the
@@ -41,7 +41,7 @@ static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
- return rate;
+ return 0;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +54,7 @@ static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
- .round_rate = scpi_clk_round_rate,
+ .determine_rate = scpi_clk_determine_rate,
.set_rate = scpi_clk_set_rate,
};
@@ -92,12 +92,14 @@ static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
return opp->freq;
}
-static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_dvfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct scpi_clk *clk = to_scpi_clk(hw);
- return __scpi_dvfs_round_rate(clk, rate);
+ req->rate = __scpi_dvfs_round_rate(clk, req->rate);
+
+ return 0;
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
@@ -124,7 +126,7 @@ static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
- .round_rate = scpi_dvfs_round_rate,
+ .determine_rate = scpi_dvfs_determine_rate,
.set_rate = scpi_dvfs_set_rate,
};
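
Most conversions in this series share one mechanical shape: a .round_rate hook that returned the chosen rate becomes a .determine_rate hook that stores it in req->rate and returns 0, or returns a negative errno to veto the request. A compile-checkable model with a stripped-down request struct — the real struct clk_rate_request carries more fields, and the pass-through rounding here is purely illustrative:

#include <stdio.h>

/* toy stand-in for struct clk_rate_request */
struct rate_request {
	unsigned long rate;		/* in: wanted, out: granted */
	unsigned long best_parent_rate;
};

/* old-style hook: returns the rounded rate (here: pass-through) */
static long toy_round_rate(unsigned long rate, unsigned long *parent_rate)
{
	(void)parent_rate;
	return rate;
}

/* new-style hook: same logic, result stored in the request */
static int toy_determine_rate(struct rate_request *req)
{
	long rounded = toy_round_rate(req->rate, &req->best_parent_rate);

	if (rounded < 0)
		return rounded;	/* propagate errors, never store them as a rate */

	req->rate = rounded;
	return 0;
}

int main(void)
{
	struct rate_request req = { .rate = 48000000, .best_parent_rate = 24000000 };

	printf("%d %lu\n", toy_determine_rate(&req), req.rate);
	return 0;
}
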
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 6ee148e5469d..f61590d70575 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -227,20 +227,28 @@ static unsigned long si514_recalc_rate(struct clk_hw *hw,
return si514_calc_rate(&settings);
}
-static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si514_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si514_muldiv settings;
int err;
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- err = si514_calc_muldiv(&settings, rate);
- if (err)
- return err;
+ err = si514_calc_muldiv(&settings, req->rate);
+ if (err)
+ return err;
- return si514_calc_rate(&settings);
+
+ req->rate = si514_calc_rate(&settings);
+
+ return 0;
}
/*
@@ -289,7 +297,7 @@ static const struct clk_ops si514_clk_ops = {
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
- .round_rate = si514_round_rate,
+ .determine_rate = si514_determine_rate,
.set_rate = si514_set_rate,
};
@@ -371,7 +379,7 @@ static int si514_probe(struct i2c_client *client)
}
static const struct i2c_device_id si514_id[] = {
- { "si514", 0 },
+ { "si514" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si514_id);
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4f7b74f889f1..4ed4e1a5f4f2 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -164,15 +164,17 @@ static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int si521xx_diff_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
- best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+ req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+
+ return 0;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static void si521xx_diff_unprepare(struct clk_hw *hw)
}
static const struct clk_ops si521xx_diff_clk_ops = {
- .round_rate = si521xx_diff_round_rate,
+ .determine_rate = si521xx_diff_determine_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 5004888c7eca..2499b771cd83 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -663,8 +663,8 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
return f;
}
-static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si5341_synth_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
@@ -672,15 +672,21 @@ static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
- if (rate < f)
- return f;
+ if (req->rate < f) {
+ req->rate = f;
+
+ return 0;
+ }
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
- if (rate > f)
- return f;
+ if (req->rate > f) {
+ req->rate = f;
- return rate;
+ return 0;
+ }
+
+ return 0;
}
static int si5341_synth_program(struct clk_si5341_synth *synth,
@@ -741,7 +747,7 @@ static const struct clk_ops si5341_synth_clk_ops = {
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
- .round_rate = si5341_synth_clk_round_rate,
+ .determine_rate = si5341_synth_clk_determine_rate,
.set_rate = si5341_synth_clk_set_rate,
};
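
si5341_synth_clk_determine_rate() clamps the request into the synthesizer's reachable band, freq_vco / SI5341_SYNTH_N_MAX up to freq_vco / SI5341_SYNTH_N_MIN. A worked clamp with made-up numbers (13.5 GHz VCO, N spanning 10..250 — illustrative only, not the part's real limits):

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_synth(uint64_t rate, uint64_t vco,
			    uint64_t n_min, uint64_t n_max)
{
	uint64_t lo = vco / n_max;	/* largest divider -> lowest rate */
	uint64_t hi = vco / n_min;	/* smallest divider -> highest rate */

	if (rate < lo)
		return lo;
	if (rate > hi)
		return hi;
	return rate;
}

int main(void)
{
	uint64_t vco = 13500000000ULL;

	/* 10 MHz is below vco/250 = 54 MHz, so it clamps up to 54 MHz */
	printf("%llu\n", (unsigned long long)clamp_synth(10000000, vco, 10, 250));
	return 0;
}
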
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index a4c92c5ef3ff..e755db545e2e 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -655,7 +655,7 @@ static int si5351_msynth_determine_rate(struct clk_hw *hw,
unsigned long a, b, c;
int divby4;
- /* multisync6-7 can only handle freqencies < 150MHz */
+ /* multisync6-7 can only handle frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
rate = SI5351_MULTISYNTH67_MAX_FREQ;
@@ -1048,11 +1048,11 @@ static int si5351_clkout_determine_rate(struct clk_hw *hw,
unsigned long rate = req->rate;
unsigned char rdiv;
- /* clkout6/7 can only handle output freqencies < 150MHz */
+ /* clkout6/7 can only handle output frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
rate = SI5351_CLKOUT67_MAX_FREQ;
- /* clkout freqency is 8kHz - 160MHz */
+ /* clkout frequency is 8kHz - 160MHz */
if (rate > SI5351_CLKOUT_MAX_FREQ)
rate = SI5351_CLKOUT_MAX_FREQ;
if (rate < SI5351_CLKOUT_MIN_FREQ)
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index c88650558f32..09c06ecec1a5 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -39,7 +39,7 @@
/* Max freq depends on speed grade */
#define SI544_MIN_FREQ 200000U
-/* Si544 Internal oscilator runs at 55.05 MHz */
+/* Si544 Internal oscillator runs at 55.05 MHz */
#define FXO 55050000U
/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
@@ -307,16 +307,16 @@ static unsigned long si544_recalc_rate(struct clk_hw *hw,
return si544_calc_rate(&settings);
}
-static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si544_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si544 *data = to_clk_si544(hw);
- if (!is_valid_frequency(data, rate))
+ if (!is_valid_frequency(data, req->rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
- return rate;
+ return 0;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
@@ -408,7 +408,7 @@ static const struct clk_ops si544_clk_ops = {
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
- .round_rate = si544_round_rate,
+ .determine_rate = si544_determine_rate,
.set_rate = si544_set_rate,
};
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index a549ea13be20..b0b1830dd430 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -63,7 +63,7 @@ struct clk_si570_info {
* struct clk_si570:
* @hw: Clock hw struct
* @regmap: Device's regmap
- * @div_offset: Rgister offset for dividers
+ * @div_offset: Register offset for dividers
* @info: Device info
* @fxtal: Factory xtal frequency
* @n1: Clock divider N1
@@ -181,7 +181,7 @@ static int si570_update_rfreq(struct clk_si570 *data)
}
/**
- * si570_calc_divs() - Caluclate clock dividers
+ * si570_calc_divs() - Calculate clock dividers
* @frequency: Target frequency
* @data: Driver data structure
* @out_rfreq: RFREG fractional multiplier (output)
@@ -246,34 +246,40 @@ static unsigned long si570_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si570_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ if (div64_u64(abs(req->rate - data->frequency) * 10000LL,
data->frequency) < 35) {
- rfreq = div64_u64((data->rfreq * rate) +
- div64_u64(data->frequency, 2), data->frequency);
+ rfreq = div64_u64((data->rfreq * req->rate) +
+ div64_u64(data->frequency, 2),
+ data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
- err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
+ req->rate = 0;
+
return 0;
}
}
- return rate;
+ return 0;
}
/**
@@ -368,7 +374,7 @@ static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
- .round_rate = si570_round_rate,
+ .determine_rate = si570_determine_rate,
.set_rate = si570_set_rate,
};
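
The si570 path picks between a cheap RFREQ-only update and a full divider recomputation based on |new - current| * 10000 / current < 35, i.e. a change under 0.35% (3500 ppm). A worked check with illustrative rates:

#include <stdint.h>
#include <stdio.h>

static int small_change(uint64_t new_rate, uint64_t cur_rate)
{
	uint64_t delta = new_rate > cur_rate ? new_rate - cur_rate
					     : cur_rate - new_rate;

	return (delta * 10000ULL) / cur_rate < 35;	/* < 0.35% */
}

int main(void)
{
	/* 100.0 -> 100.2 MHz is 0.2%: RFREQ-only update */
	printf("%d\n", small_change(100200000, 100000000));	/* 1 */
	/* 100.0 -> 101.0 MHz is 1.0%: full divider recomputation */
	printf("%d\n", small_change(101000000, 100000000));	/* 0 */
	return 0;
}
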
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 7cb7d501d7a6..36528a71a2e6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -14,7 +15,7 @@
#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
-/* speical div_width values for PLLTV/PLLA */
+/* special div_width values for PLLTV/PLLA */
#define DIV_TV 33
#define DIV_A 34
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
@@ -412,25 +406,27 @@ static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
return fbdiv;
}
-static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
- if (rate == *prate) {
- ret = *prate; /* bypass */
+ if (req->rate == req->best_parent_rate) {
+ ret = req->best_parent_rate; /* bypass */
} else if (clk->div_width == DIV_A) {
- ret = plla_round_rate(clk, rate);
+ ret = plla_round_rate(clk, req->rate);
} else if (clk->div_width == DIV_TV) {
- ret = plltv_div(clk, rate);
+ ret = plltv_div(clk, req->rate);
if (ret < 0)
- ret = *prate;
+ ret = req->best_parent_rate;
} else {
- ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ ret = sp_pll_calc_div(clk, req->rate) * clk->brate;
}
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
@@ -535,7 +531,7 @@ static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
- .round_rate = sp_pll_round_rate,
+ .determine_rate = sp_pll_determine_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
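
FIELD_PREP_WM16() implements the same "hiword mask" idiom the removed HWM_FIELD_PREP() macro open-coded: the upper 16 bits of the written word carry a write-enable mask for the lower 16, so one register write updates only the named field. A userspace model of the encoding, assuming a contiguous mask:

#include <stdint.h>
#include <stdio.h>

static uint32_t field_prep_wm16(uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);	/* lowest set bit */

	return (mask << 16) | ((val << shift) & mask);
}

int main(void)
{
	/* update only bits 7:0 with 0x2a -> 0x00ff002a */
	printf("0x%08x\n", field_prep_wm16(0xff, 0x2a));
	return 0;
}
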
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
index 0fad0c1a0186..b2facc9c95d4 100644
--- a/drivers/clk/clk-sparx5.c
+++ b/drivers/clk/clk-sparx5.c
@@ -213,19 +213,21 @@ static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
return conf.freq;
}
-static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int s5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct s5_pll_conf conf;
- return s5_calc_params(rate, *parent_rate, &conf);
+ req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf);
+
+ return 0;
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
- .round_rate = s5_pll_round_rate,
+ .determine_rate = s5_pll_determine_rate,
.recalc_rate = s5_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 07c13ebe327d..b5d4d48432a0 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -5,6 +5,7 @@
* Inspired by clk-asm9260.c .
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,7 +19,7 @@
#include <linux/mfd/syscon.h>
/*
- * Include list of clocks wich are not derived from system clock (SYSCLOCK)
+ * Include list of clocks which are not derived from system clock (SYSCLOCK)
* The index of these clocks is the secondary index of DT bindings
*
*/
@@ -34,11 +35,20 @@
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
+#define STM32F4_RCC_SSCGR 0x80
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
+#define STM32F4_RCC_PLLCFGR_N_MASK GENMASK(14, 6)
+
+#define STM32F4_RCC_SSCGR_SSCGEN BIT(31)
+#define STM32F4_RCC_SSCGR_SPREADSEL BIT(30)
+#define STM32F4_RCC_SSCGR_RESERVED_MASK GENMASK(29, 28)
+#define STM32F4_RCC_SSCGR_INCSTEP_MASK GENMASK(27, 13)
+#define STM32F4_RCC_SSCGR_MODPER_MASK GENMASK(12, 0)
+
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
@@ -364,6 +374,16 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
+enum stm32f4_pll_ssc_mod_type {
+ STM32F4_PLL_SSC_CENTER_SPREAD,
+ STM32F4_PLL_SSC_DOWN_SPREAD,
+};
+
+static const char * const stm32f4_ssc_mod_methods[] __initconst = {
+ [STM32F4_PLL_SSC_DOWN_SPREAD] = "down-spread",
+ [STM32F4_PLL_SSC_CENTER_SPREAD] = "center-spread",
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -423,8 +443,8 @@ static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_apb_mul_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
@@ -433,12 +453,14 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate / mult;
+ unsigned long best_parent = req->rate / mult;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate * mult;
+ req->rate = req->best_parent_rate * mult;
+
+ return 0;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -454,7 +476,7 @@ static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_apb_mul_factor_ops = {
- .round_rate = clk_apb_mul_round_rate,
+ .determine_rate = clk_apb_mul_determine_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
@@ -509,6 +531,12 @@ static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
+struct stm32f4_pll_ssc {
+ unsigned int mod_freq;
+ unsigned int mod_depth;
+ enum stm32f4_pll_ssc_mod_type mod_type;
+};
+
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
@@ -516,6 +544,8 @@ struct stm32f4_pll {
u8 bit_rdy_idx;
u8 status;
u8 n_start;
+ bool ssc_enable;
+ struct stm32f4_pll_ssc ssc_conf;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
@@ -538,6 +568,7 @@ struct stm32f4_vco_data {
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
+ bool sscg;
};
static const struct stm32f4_vco_data vco_data[] = {
@@ -632,28 +663,58 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ unsigned long val;
unsigned long n;
- n = (readl(base + pll->offset) >> 6) & 0x1ff;
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
return parent_rate * n;
}
-static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
- n = rate / *prate;
+ n = req->rate / req->best_parent_rate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
- return *prate * n;
+ req->rate = req->best_parent_rate * n;
+
+ return 0;
+}
+
+static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int ndiv)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct stm32f4_pll_ssc *ssc = &pll->ssc_conf;
+ u32 modeper, incstep;
+ u32 sscgr;
+
+ sscgr = readl(base + STM32F4_RCC_SSCGR);
+ /* reserved field must be kept at reset value */
+ sscgr &= STM32F4_RCC_SSCGR_RESERVED_MASK;
+
+ modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * ssc->mod_freq);
+ incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * ssc->mod_depth * ndiv,
+ 5 * 10000 * modeper);
+ sscgr |= STM32F4_RCC_SSCGR_SSCGEN |
+ FIELD_PREP(STM32F4_RCC_SSCGR_INCSTEP_MASK, incstep) |
+ FIELD_PREP(STM32F4_RCC_SSCGR_MODPER_MASK, modeper);
+
+ if (ssc->mod_type)
+ sscgr |= STM32F4_RCC_SSCGR_SPREADSEL;
+
+ writel(sscgr, base + STM32F4_RCC_SSCGR);
}
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -673,9 +734,13 @@ static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
n = rate / parent_rate;
- val = readl(base + pll->offset) & ~(0x1ff << 6);
+ val = readl(base + pll->offset) & ~STM32F4_RCC_PLLCFGR_N_MASK;
+ val |= FIELD_PREP(STM32F4_RCC_PLLCFGR_N_MASK, n);
- writel(val | ((n & 0x1ff) << 6), base + pll->offset);
+ writel(val, base + pll->offset);
+
+ if (pll->ssc_enable)
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
if (pll_state)
stm32f4_pll_enable(hw);
@@ -688,7 +753,7 @@ static const struct clk_ops stm32f4_pll_gate_ops = {
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
- .round_rate = stm32f4_pll_round_rate,
+ .determine_rate = stm32f4_pll_determine_rate,
.set_rate = stm32f4_pll_set_rate,
};
@@ -782,6 +847,84 @@ static struct clk_hw *clk_register_pll_div(const char *name,
return hw;
}
+static int __init stm32f4_pll_init_ssc(struct clk_hw *hw,
+ const struct stm32f4_pll_ssc *conf)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+ int pll_state;
+ unsigned long n, val;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("%s: failed to get clock parent\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ pll->ssc_enable = true;
+ memcpy(&pll->ssc_conf, conf, sizeof(pll->ssc_conf));
+
+ pll_state = stm32f4_pll_is_enabled(hw);
+
+ if (pll_state)
+ stm32f4_pll_disable(hw);
+
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
+
+ pr_debug("%s: pll: %s, parent: %s, parent-rate: %lu, n: %lu\n",
+ __func__, clk_hw_get_name(hw), clk_hw_get_name(parent),
+ parent_rate, n);
+
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
+
+ if (pll_state)
+ stm32f4_pll_enable(hw);
+
+ return 0;
+}
+
+static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
+ struct stm32f4_pll_ssc *conf)
+{
+ int ret;
+
+ if (!conf)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "st,ssc-modfreq-hz", &conf->mod_freq);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "st,ssc-moddepth-permyriad",
+ &conf->mod_depth);
+ if (ret) {
+ pr_err("%pOF: missing st,ssc-moddepth-permyriad\n", np);
+ return ret;
+ }
+
+ ret = fwnode_property_match_property_string(of_fwnode_handle(np),
+ "st,ssc-modmethod",
+ stm32f4_ssc_mod_methods,
+ ARRAY_SIZE(stm32f4_ssc_mod_methods));
+ if (ret < 0) {
+ pr_err("%pOF: failed to get st,ssc-modmethod\n", np);
+ return ret;
+ }
+
+ conf->mod_type = ret;
+
+ pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n",
+ np, conf->mod_freq, conf->mod_depth,
+ stm32f4_ssc_mod_methods[ret], conf->mod_type);
+
+ return 0;
+}
+
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
@@ -1689,7 +1832,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
- struct clk_hw *pll_src_hw;
+ struct clk_hw *pll_src_hw, *pll_vco_hw;
+ struct stm32f4_pll_ssc ssc_conf;
base = of_iomap(np, 0);
if (!base) {
@@ -1748,8 +1892,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
- stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
- &stm32f4_clk_lock);
+ pll_vco_hw = stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
+ &stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
@@ -1894,6 +2038,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ if (!stm32f4_pll_ssc_parse_dt(np, &ssc_conf))
+ stm32f4_pll_init_ssc(pll_vco_hw, &ssc_conf);
+
return;
fail:
kfree(clks);
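
stm32f4_pll_set_ssc() derives the SSCGR fields from the DT-supplied modulation settings: MODPER = round(parent_rate / (4 * mod_freq)) and INCSTEP = round((2^15 - 1) * mod_depth * N / (5 * 10000 * MODPER)), with mod_depth in permyriad. Worked with illustrative inputs (1 MHz PLL input, 10 kHz modulation, 2% depth, N = 336 — assumed numbers, not from a real board):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	uint64_t parent_rate = 1000000;	/* 1 MHz after the M pre-divider */
	uint64_t mod_freq = 10000;	/* st,ssc-modfreq-hz */
	uint64_t mod_depth = 200;	/* st,ssc-moddepth-permyriad = 2% */
	uint64_t ndiv = 336;

	uint64_t modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * mod_freq);
	uint64_t incstep = DIV_ROUND_CLOSEST(((1ULL << 15) - 1) * mod_depth * ndiv,
					     5 * 10000 * modeper);

	/* prints MODPER=25 INCSTEP=1762 for these inputs */
	printf("MODPER=%llu INCSTEP=%llu\n",
	       (unsigned long long)modeper, (unsigned long long)incstep);
	return 0;
}
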
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index 38f44b5b9b1b..9511248c6bc9 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -146,12 +146,14 @@ static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
return best_idx;
}
-static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tps68470_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int idx = tps68470_clk_cfg_lookup(rate);
+ unsigned int idx = tps68470_clk_cfg_lookup(req->rate);
+
+ req->rate = clk_freqs[idx].freq;
- return clk_freqs[idx].freq;
+ return 0;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,7 +188,7 @@ static const struct clk_ops tps68470_clk_ops = {
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
- .round_rate = tps68470_clk_round_rate,
+ .determine_rate = tps68470_clk_determine_rate,
.set_rate = tps68470_clk_set_rate,
};
diff --git a/drivers/clk/clk-twl.c b/drivers/clk/clk-twl.c
index eab9d3c8ed8a..20bc3bf8fd62 100644
--- a/drivers/clk/clk-twl.c
+++ b/drivers/clk/clk-twl.c
@@ -11,13 +11,29 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define VREG_STATE 2
+#define VREG_STATE 2
+#define VREG_GRP 0
#define TWL6030_CFG_STATE_OFF 0x00
#define TWL6030_CFG_STATE_ON 0x01
#define TWL6030_CFG_STATE_MASK 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+#define P1_GRP BIT(0) /* processor power group */
+#define P2_GRP BIT(1)
+#define P3_GRP BIT(2)
+#define ALL_GRP (P1_GRP | P2_GRP | P3_GRP)
+
+enum twl_type {
+ TWL_TYPE_6030,
+ TWL_TYPE_6032,
+};
struct twl_clock_info {
struct device *dev;
+ enum twl_type type;
u8 base;
struct clk_hw hw;
};
@@ -56,14 +72,21 @@ static unsigned long twl_clks_recalc_rate(struct clk_hw *hw,
static int twl6032_clks_prepare(struct clk_hw *hw)
{
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_ON);
- if (ret < 0)
- dev_err(cinfo->dev, "clk prepare failed\n");
+ if (cinfo->type == TWL_TYPE_6030) {
+ int grp;
+
+ grp = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
- return ret;
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+ }
+
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_ON);
}
static void twl6032_clks_unprepare(struct clk_hw *hw)
@@ -71,32 +94,21 @@ static void twl6032_clks_unprepare(struct clk_hw *hw)
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_OFF);
+ if (cinfo->type == TWL_TYPE_6030)
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+ else
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_OFF);
+
if (ret < 0)
dev_err(cinfo->dev, "clk unprepare failed\n");
}
-static int twl6032_clks_is_prepared(struct clk_hw *hw)
-{
- struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int val;
-
- val = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- if (val < 0) {
- dev_err(cinfo->dev, "clk read failed\n");
- return val;
- }
-
- val &= TWL6030_CFG_STATE_MASK;
-
- return val == TWL6030_CFG_STATE_ON;
-}
-
static const struct clk_ops twl6032_clks_ops = {
.prepare = twl6032_clks_prepare,
.unprepare = twl6032_clks_unprepare,
- .is_prepared = twl6032_clks_is_prepared,
.recalc_rate = twl_clks_recalc_rate,
};
@@ -155,6 +167,7 @@ static int twl_clks_probe(struct platform_device *pdev)
for (i = 0; i < count; i++) {
cinfo[i].base = hw_data[i].base;
cinfo[i].dev = &pdev->dev;
+ cinfo[i].type = platform_get_device_id(pdev)->driver_data;
cinfo[i].hw.init = &hw_data[i].init;
ret = devm_clk_hw_register(&pdev->dev, &cinfo[i].hw);
if (ret) {
@@ -176,7 +189,11 @@ static int twl_clks_probe(struct platform_device *pdev)
static const struct platform_device_id twl_clks_id[] = {
{
+ .name = "twl6030-clk",
+ .driver_data = TWL_TYPE_6030,
+ }, {
.name = "twl6032-clk",
+ .driver_data = TWL_TYPE_6032,
}, {
/* sentinel */
}
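
On the twl6030 path the driver now composes group bits and state into one VREG_STATE byte: groups in bits 7:5, the on/off state in bits 1:0. A sketch of the two values the new prepare/unprepare paths write (P1-group-only enable, all-groups disable), using the constants from the patch:

#include <stdio.h>

#define TWL6030_CFG_STATE_OFF		0x00
#define TWL6030_CFG_STATE_ON		0x01
#define TWL6030_CFG_STATE_GRP_SHIFT	5
#define P1_GRP				(1 << 0)	/* processor power group */
#define ALL_GRP				0x07

int main(void)
{
	/* enable, keeping only the P1 group: prints 0x21 */
	printf("on:  0x%02x\n",
	       P1_GRP << TWL6030_CFG_STATE_GRP_SHIFT | TWL6030_CFG_STATE_ON);
	/* disable for every group: prints 0xe0 */
	printf("off: 0x%02x\n",
	       ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT | TWL6030_CFG_STATE_OFF);
	return 0;
}
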
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 76d7ea1964c3..1849863dbd67 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -78,9 +78,6 @@
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
-#define VC3_PLL2_VCO_MIN 400000000UL
-#define VC3_PLL2_VCO_MAX 1200000000UL
-
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
@@ -147,9 +144,13 @@ struct vc3_pfd_data {
u8 mdiv2_bitmsk;
};
+struct vc3_vco {
+ unsigned long min;
+ unsigned long max;
+};
+
struct vc3_pll_data {
- unsigned long vco_min;
- unsigned long vco_max;
+ struct vc3_vco vco;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
@@ -166,12 +167,17 @@ struct vc3_div_data {
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
- const void *data;
+ void *data;
u32 div_int;
u32 div_frc;
};
+struct vc3_hw_cfg {
+ struct vc3_vco pll2_vco;
+ u32 se2_clk_sel_msk;
+};
+
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
@@ -283,22 +289,25 @@ static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
@@ -307,7 +316,9 @@ static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -348,7 +359,7 @@ static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
- .round_rate = vc3_pfd_round_rate,
+ .determine_rate = vc3_pfd_determine_rate,
.set_rate = vc3_pfd_set_rate,
};
@@ -379,36 +390,38 @@ static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco_min)
- rate = pll->vco_min;
- if (rate > pll->vco_max)
- rate = pll->vco_max;
+ if (req->rate < pll->vco.min)
+ req->rate = pll->vco.min;
+ if (req->rate > pll->vco.max)
+ req->rate = pll->vco.max;
- vc3->div_int = rate / *parent_rate;
+ vc3->div_int = req->rate / req->best_parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
- rate = *parent_rate * 0x7ff;
+ req->rate = req->best_parent_rate * 0x7ff;
/* Determine best fractional part, which is 16 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(16) - 1;
- vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
- rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
+ vc3->div_frc = min_t(u64,
+ div64_ul(div_frc, req->best_parent_rate),
+ U16_MAX);
+ req->rate = (req->best_parent_rate *
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
- rate = *parent_rate * vc3->div_int;
+ req->rate = req->best_parent_rate * vc3->div_int;
}
- return rate;
+ return 0;
}
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -435,7 +448,7 @@ static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
- .round_rate = vc3_pll_round_rate,
+ .determine_rate = vc3_pll_determine_rate,
.set_rate = vc3_pll_set_rate,
};
@@ -492,8 +505,8 @@ static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
div_data->flags, div_data->width);
}
-static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
@@ -505,11 +518,16 @@ static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
- return DIV_ROUND_UP(*parent_rate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, parent_rate, div_data->table,
- div_data->width, div_data->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div_data->table,
+ div_data->width, div_data->flags);
+
+ return 0;
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -528,7 +546,7 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
- .round_rate = vc3_div_round_rate,
+ .determine_rate = vc3_div_determine_rate,
.set_rate = vc3_div_set_rate,
};
@@ -680,8 +698,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
- .vco_min = VC3_PLL1_VCO_MIN,
- .vco_max = VC3_PLL1_VCO_MAX
+ .vco = {
+ .min = VC3_PLL1_VCO_MIN,
+ .max = VC3_PLL1_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll1",
@@ -698,8 +718,6 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
- .vco_min = VC3_PLL2_VCO_MIN,
- .vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data) {
.name = "pll2",
@@ -716,8 +734,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
- .vco_min = VC3_PLL3_VCO_MIN,
- .vco_max = VC3_PLL3_VCO_MAX
+ .vco = {
+ .min = VC3_PLL3_VCO_MIN,
+ .max = VC3_PLL3_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll3",
@@ -901,7 +921,6 @@ static struct vc3_hw_data clk_mux[] = {
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data) {
.name = "se2_mux",
@@ -982,6 +1001,7 @@ static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
+ const struct vc3_hw_cfg *data;
struct regmap *regmap;
const char *name;
int ret, i;
@@ -1029,9 +1049,16 @@ static int vc3_probe(struct i2c_client *client)
clk_pfd[i].hw.init->name);
}
+ data = i2c_get_match_data(client);
+
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
+ if (i == VC3_PLL2) {
+ struct vc3_pll_data *pll_data = clk_pll[i].data;
+
+ pll_data->vco = data->pll2_vco;
+ }
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1059,6 +1086,11 @@ static int vc3_probe(struct i2c_client *client)
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
+ if (i == VC3_SE2_MUX) {
+ struct vc3_clk_data *clk_data = clk_mux[i].data;
+
+ clk_data->bitmsk = data->se2_clk_sel_msk;
+ }
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1108,8 +1140,19 @@ static int vc3_probe(struct i2c_client *client)
return ret;
}
+static const struct vc3_hw_cfg vc3_5p = {
+ .pll2_vco = { .min = 400000000UL, .max = 1200000000UL },
+ .se2_clk_sel_msk = BIT(6),
+};
+
+static const struct vc3_hw_cfg vc3_5l = {
+ .pll2_vco = { .min = 30000000UL, .max = 130000000UL },
+ .se2_clk_sel_msk = BIT(0),
+};
+
static const struct of_device_id dev_ids[] = {
- { .compatible = "renesas,5p35023" },
+ { .compatible = "renesas,5p35023", .data = &vc3_5p },
+ { .compatible = "renesas,5l35023", .data = &vc3_5l },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 6d31cd54d7cf..57228e88e81d 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -136,7 +136,7 @@
#define VC5_MAX_FOD_NUM 4
/* flags to describe chip features */
-/* chip has built-in oscilator */
+/* chip has built-in oscillator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
/* chip has PFD frequency doubler */
#define VC5_HAS_PFD_FREQ_DBL BIT(1)
@@ -304,11 +304,11 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_dbl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
- return rate;
+ if ((req->best_parent_rate == req->rate) || ((req->best_parent_rate * 2) == req->rate))
+ return 0;
else
return -EINVAL;
}
@@ -332,7 +332,7 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
- .round_rate = vc5_dbl_round_rate,
+ .determine_rate = vc5_dbl_determine_rate,
.set_rate = vc5_dbl_set_rate,
};
@@ -363,24 +363,29 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (idiv > 127)
return -EINVAL;
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -420,7 +425,7 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
- .round_rate = vc5_pfd_round_rate,
+ .determine_rate = vc5_pfd_determine_rate,
.set_rate = vc5_pfd_set_rate,
};
@@ -444,30 +449,32 @@ static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
-static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
+ req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
- div_int = rate / *parent_rate;
+ div_int = req->rate / req->best_parent_rate;
if (div_int > 0xfff)
- rate = *parent_rate * 0xfff;
+ req->rate = req->best_parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(24) - 1;
- do_div(div_frc, *parent_rate);
+ do_div(div_frc, req->best_parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
+ req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24);
+
+ return 0;
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -488,7 +495,7 @@ static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
- .round_rate = vc5_pll_round_rate,
+ .determine_rate = vc5_pll_determine_rate,
.set_rate = vc5_pll_set_rate,
};
@@ -520,17 +527,17 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
-static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
- u32 f_in = *parent_rate / 2;
+ u32 f_in = req->best_parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
- div_int = f_in / rate;
+ div_int = f_in / req->rate;
/*
* WARNING: The clock chip does not output signal if the integer part
* of the divider is 0xfff and fractional part is non-zero.
@@ -538,18 +545,20 @@ static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
*/
if (div_int > 0xffe) {
div_int = 0xffe;
- rate = f_in / div_int;
+ req->rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
- div_frc = f_in % rate;
+ div_frc = f_in % req->rate;
div_frc <<= 24;
- do_div(div_frc, rate);
+ do_div(div_frc, req->rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+ req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+
+ return 0;
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -589,7 +598,7 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
- .round_rate = vc5_fod_round_rate,
+ .determine_rate = vc5_fod_determine_rate,
.set_rate = vc5_fod_set_rate,
};
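
vc5_pll_determine_rate() splits the ratio rate/parent into a 12-bit integer part and a 24-bit fraction, then reports the rate the hardware will actually produce with those divider words. Standalone arithmetic with an assumed 25 MHz reference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prate = 25000000;	/* reference input */
	uint64_t rate = 2925500000ULL;	/* requested VCO rate */

	uint32_t div_int = rate / prate;	/* 117 */
	uint64_t div_frc = (rate % prate) * ((1ULL << 24) - 1) / prate;

	/* rate the PLL will really hit: integer part plus scaled fraction */
	uint64_t out = prate * div_int + ((prate * div_frc) >> 24);

	printf("int=%u frc=%llu out=%llu\n", div_int,
	       (unsigned long long)div_frc, (unsigned long long)out);
	return 0;
}
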
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index f323263e32c3..adcc603e3259 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -900,17 +900,18 @@ static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent
return fod_rate;
}
-static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_fod_divider(rate, *parent_rate,
+ vc7_calc_fod_divider(req->rate, req->best_parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
- fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
@@ -918,7 +919,9 @@ static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned l
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
- return fod_rate;
+ req->rate = fod_rate;
+
+ return 0;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -952,7 +955,7 @@ static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
- .round_rate = vc7_fod_round_rate,
+ .determine_rate = vc7_fod_determine_rate,
.set_rate = vc7_fod_set_rate,
};
@@ -978,21 +981,24 @@ static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent
return iod_rate;
}
-static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_iod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
- iod_rate = div64_u64(*parent_rate, iod->iod_int);
+ vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(req->best_parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
- return iod_rate;
+ req->rate = iod_rate;
+
+ return 0;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -1023,7 +1029,7 @@ static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
- .round_rate = vc7_iod_round_rate,
+ .determine_rate = vc7_iod_determine_rate,
.set_rate = vc7_iod_set_rate,
};
@@ -1257,7 +1263,7 @@ static const struct vc7_chip_info vc7_rc21008a_info = {
.num_outputs = 8,
};
-static struct regmap_range_cfg vc7_range_cfg[] = {
+static const struct regmap_range_cfg vc7_range_cfg[] = {
{
.range_min = 0,
.range_max = VC7_MAX_REG,
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 2a74a713ad59..eae5b3fbfb82 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -128,30 +128,31 @@ static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vt8500_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
- if (rate == 0)
+ if (req->rate == 0)
return 0;
- divisor = *prate / rate;
+ divisor = req->best_parent_rate / req->rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (req->rate * divisor < req->best_parent_rate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
- if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
+ if ((cdev->div_mask == 0x3F) && (divisor > 31))
divisor = 64 * ((divisor / 64) + 1);
- }
- return *prate / divisor;
+ req->rate = req->best_parent_rate / divisor;
+
+ return 0;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -202,7 +203,7 @@ static const struct clk_ops vt8500_gated_clk_ops = {
};
static const struct clk_ops vt8500_divisor_clk_ops = {
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -211,7 +212,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -594,8 +595,8 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vtwm_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
@@ -604,33 +605,43 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
switch (pll->type) {
case PLL_TYPE_VT8500:
- ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1);
if (!ret)
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1);
break;
case PLL_TYPE_WM8650:
- ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate,
+ &filter, &mul, &div1, &div2);
if (!ret)
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
if (ret)
- return ret;
+ req->rate = ret;
+ else
+ req->rate = round_rate;
- return round_rate;
+ return 0;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
@@ -665,7 +676,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops vtwm_pll_ops = {
- .round_rate = vtwm_pll_round_rate,
+ .determine_rate = vtwm_pll_determine_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
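
One detail worth noting in vtwm_pll_determine_rate(): when the find_pll_bits helper fails, the patch stores the negative errno in req->rate and still returns 0, preserving the old round_rate() behaviour where the returned long doubled as rate-or-errno. A stricter, hypothetical variant for the VT8500 case only, reusing the helpers already in this file, would fail the request outright:

/* Hypothetical stricter variant, not part of this patch */
static int vtwm_pll_determine_rate_strict(struct clk_hw *hw,
					  struct clk_rate_request *req)
{
	u32 mul, div1;
	int ret;

	ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
				   &mul, &div1);
	if (ret)
		return ret;	/* propagate the error as the return value */

	req->rate = VT8500_BITS_TO_FREQ(req->best_parent_rate, mul, div1);

	return 0;
}
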
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 34e9d4d541e2..263e927138c2 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -133,18 +133,20 @@ static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
+static int wm831x_fll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
- if (abs(wm831x_fll_auto_rates[i] - rate) <
- abs(wm831x_fll_auto_rates[best] - rate))
+ if (abs(wm831x_fll_auto_rates[i] - req->rate) <
+ abs(wm831x_fll_auto_rates[best] - req->rate))
best = i;
- return wm831x_fll_auto_rates[best];
+ req->rate = wm831x_fll_auto_rates[best];
+
+ return 0;
}
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -214,7 +216,7 @@ static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
- .round_rate = wm831x_fll_round_rate,
+ .determine_rate = wm831x_fll_determine_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 0c3d0cee98c8..92e39f3237c2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
@@ -270,23 +271,28 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
+ if (!req->rate || req->rate >= req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
/* freq = parent_rate * scaler / denom */
- ret = rate * fd->denom;
- scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+ ret = req->rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
- ret = (u64)*parent_rate * scale;
+ ret = (u64)req->best_parent_rate * scale;
do_div(ret, fd->denom);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -332,7 +338,7 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
- .round_rate = xgene_clk_pmd_round_rate,
+ .determine_rate = xgene_clk_pmd_determine_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
@@ -520,8 +526,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
- data & pclk->param.reg_clk_mask ? "enabled" :
- "disabled");
+ str_enabled_disabled(data & pclk->param.reg_clk_mask));
} else {
return 1;
}
@@ -593,23 +598,25 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return parent_rate / divider_save;
}
-static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int xgene_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
- if (rate > parent_rate)
- rate = parent_rate;
- divider = parent_rate / rate; /* Rounded down */
+ if (req->rate > parent_rate)
+ req->rate = parent_rate;
+ divider = parent_rate / req->rate; /* Rounded down */
} else {
divider = 1;
}
- return parent_rate / divider;
+ req->rate = parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops xgene_clk_ops = {
@@ -618,7 +625,7 @@ static const struct clk_ops xgene_clk_ops = {
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
- .round_rate = xgene_clk_round_rate,
+ .determine_rate = xgene_clk_determine_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
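
For reference, the arithmetic in xgene_clk_pmd_determine_rate() with illustrative numbers (parent = 100 MHz, denom = 256, requested rate = 30 MHz):

	scale = DIV_ROUND_UP_ULL(30000000 * 256, 100000000) = 77	/* 76.8, rounded up */
	rate  = (100000000 * 77) / 256 = 30078125 Hz

Because the scaler is rounded up, the rate written back to req->rate can land slightly above the request.
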
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d02451f951cf..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,6 +36,9 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -87,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -365,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -383,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
@@ -608,12 +602,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
- return clk_core_rate_is_protected(hw->core);
-}
-EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
-
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@@ -2289,7 +2277,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
- long ret;
+ int ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
@@ -4007,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -4083,6 +4073,7 @@ out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -4403,6 +4394,13 @@ fail_ops:
fail_name:
kref_put(&core->ref, __clk_release);
fail_out:
+ if (dev) {
+ dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+ init->name, hw);
+ } else {
+ pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+ np, ERR_PTR(ret), init->name, hw);
+ }
return ERR_PTR(ret);
}
@@ -4597,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
@@ -5264,6 +5263,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+	/* Check whether the node in clkspec is disabled or in a fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
@@ -5391,8 +5394,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (of_property_present(clkspec.np, "clock-indices") && !found)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
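
The clk_core_lookup() change above replaces a recursive walk of both the proper and orphan clk trees with a scan of a single hashtable bucket. A self-contained sketch of the same kernel hashtable pattern, with illustrative names rather than clk.c internals:

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>
#include <linux/types.h>

struct named_obj {
	const char *name;
	struct hlist_node node;
};

#define OBJ_HASH_BITS	9
static DEFINE_HASHTABLE(obj_table, OBJ_HASH_BITS);

static void obj_add(struct named_obj *obj)
{
	hash_add(obj_table, &obj->node,
		 full_name_hash(NULL, obj->name, strlen(obj->name)));
}

static struct named_obj *obj_lookup(const char *name)
{
	struct named_obj *obj;
	u32 hash = full_name_hash(NULL, name, strlen(name));

	/* Walk only the bucket that @name hashes to */
	hash_for_each_possible(obj_table, obj, node, hash)
		if (!strcmp(obj->name, name))
			return obj;

	return NULL;
}

Entries must be removed with hash_del() before they are freed, which is why the patch also hooks clk_unregister() and the __clk_core_init() error path.
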
diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c
index 52fd25594c96..68a28e70bb61 100644
--- a/drivers/clk/clk_kunit_helpers.c
+++ b/drivers/clk/clk_kunit_helpers.c
@@ -203,5 +203,35 @@ int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struc
}
EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);
+KUNIT_DEFINE_ACTION_WRAPPER(of_clk_del_provider_wrapper,
+ of_clk_del_provider, struct device_node *);
+
+/**
+ * of_clk_add_hw_provider_kunit() - Test managed of_clk_add_hw_provider()
+ * @test: The test context
+ * @np: Device node pointer associated with clock provider
+ * @get: Callback for decoding clk_hw
+ * @data: Context pointer for @get callback.
+ *
+ * Just like of_clk_add_hw_provider(), except the clk_hw provider is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
+ void *data)
+{
+ int ret;
+
+ ret = of_clk_add_hw_provider(np, get, data);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, of_clk_del_provider_wrapper, np);
+}
+EXPORT_SYMBOL_GPL(of_clk_add_hw_provider_kunit);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
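
The new helper pairs of_clk_add_hw_provider() with a KUnit deferred action so the provider is torn down via of_clk_del_provider() automatically. A hedged usage sketch; example_register_provider is hypothetical, and np and hw are assumed to be prepared earlier in the test:

static void example_register_provider(struct kunit *test,
				      struct device_node *np,
				      struct clk_hw *hw)
{
	/* Removed automatically when the test case ends */
	KUNIT_ASSERT_EQ(test, 0,
			of_clk_add_hw_provider_kunit(test, np,
						     of_clk_hw_simple_get, hw));
}
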
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index aa3ddcfc00eb..a268d7b5d4cb 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/clk-conf.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -15,6 +16,7 @@
#include <kunit/platform_device.h>
#include <kunit/test.h>
+#include "kunit_clk_assigned_rates.h"
#include "clk_parent_data_test.h"
static const struct clk_ops empty_clk_ops = { };
@@ -290,7 +292,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
}
/*
- * Test that clk_round_rate and clk_set_rate are consitent and will
+ * Test that clk_round_rate and clk_set_rate are consistent and will
* return the same frequency.
*/
static void clk_test_round_set_get_rate(struct kunit *test)
@@ -2792,49 +2794,49 @@ static struct kunit_suite clk_register_clk_parent_data_of_suite = {
};
/**
- * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
- * @dev: device of clk under test
- * @hw: clk_hw for clk under test
+ * struct platform_driver_dev_ctx - Context to stash platform device
+ * @dev: device under test
* @pdrv: driver to attach to find @dev
*/
-struct clk_register_clk_parent_data_device_ctx {
+struct platform_driver_dev_ctx {
struct device *dev;
- struct clk_hw hw;
struct platform_driver pdrv;
};
-static inline struct clk_register_clk_parent_data_device_ctx *
-clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
+static inline struct platform_driver_dev_ctx *
+pdev_to_platform_driver_dev_ctx(struct platform_device *pdev)
{
return container_of(to_platform_driver(pdev->dev.driver),
- struct clk_register_clk_parent_data_device_ctx, pdrv);
+ struct platform_driver_dev_ctx, pdrv);
}
-static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
+static int kunit_platform_driver_dev_probe(struct platform_device *pdev)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct platform_driver_dev_ctx *ctx;
- ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
+ ctx = pdev_to_platform_driver_dev_ctx(pdev);
ctx->dev = &pdev->dev;
return 0;
}
-static void clk_register_clk_parent_data_device_driver(struct kunit *test)
+static struct device *
+kunit_of_platform_driver_dev(struct kunit *test, const struct of_device_id *match_table)
{
- struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
- static const struct of_device_id match_table[] = {
- { .compatible = "test,clk-parent-data" },
- { }
- };
+ struct platform_driver_dev_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
+ ctx->pdrv.probe = kunit_platform_driver_dev_probe;
ctx->pdrv.driver.of_match_table = match_table;
ctx->pdrv.driver.name = __func__;
ctx->pdrv.driver.owner = THIS_MODULE;
KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ return ctx->dev;
}
static const struct clk_register_clk_parent_data_test_case
@@ -2907,30 +2909,34 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
*/
static void clk_register_clk_parent_data_device_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_hw *parent_hw;
struct clk_init_data init = { };
struct clk *expected_parent, *actual_parent;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
- expected_parent = clk_get_kunit(test, ctx->dev, "50");
+ expected_parent = clk_get_kunit(test, dev, "50");
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
test_param = test->param_value;
init.parent_data = &test_param->pdata;
init.num_parents = 1;
init.name = "parent_data_device_test_clk";
init.ops = &clk_dummy_single_parent_ops;
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- parent_hw = clk_hw_get_parent(&ctx->hw);
+ parent_hw = clk_hw_get_parent(hw);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
@@ -3014,18 +3020,19 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
*/
static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_dummy_context *parent;
struct clk_hw *parent_hw;
struct clk_parent_data pdata = { };
struct clk_init_data init = { };
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
@@ -3034,7 +3041,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
&clk_dummy_rate_ops, 0);
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, parent_hw));
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
test_param = test->param_value;
memcpy(&pdata, &test_param->pdata, sizeof(pdata));
@@ -3043,10 +3053,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
init.num_parents = 1;
init.ops = &clk_dummy_single_parent_ops;
init.name = "parent_data_device_hw_test_clk";
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(hw));
}
static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
@@ -3075,7 +3085,466 @@ static struct kunit_suite clk_register_clk_parent_data_device_suite = {
.test_cases = clk_register_clk_parent_data_device_test_cases,
};
+struct clk_assigned_rates_context {
+ struct clk_dummy_context clk0;
+ struct clk_dummy_context clk1;
+};
+
+/*
+ * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
+ * @desc: Test description
+ * @overlay_begin: Pointer to start of DT overlay to apply for test
+ * @overlay_end: Pointer to end of DT overlay to apply for test
+ * @rate0: Initial rate of first clk
+ * @rate1: Initial rate of second clk
+ * @consumer_test: true if a consumer is being tested
+ */
+struct clk_assigned_rates_test_param {
+ const char *desc;
+ u8 *overlay_begin;
+ u8 *overlay_end;
+ unsigned long rate0;
+ unsigned long rate1;
+ bool consumer_test;
+};
+
+#define TEST_PARAM_OVERLAY(overlay_name) \
+ .overlay_begin = of_overlay_begin(overlay_name), \
+ .overlay_end = of_overlay_end(overlay_name)
+
+static void
+clk_assigned_rates_register_clk(struct kunit *test,
+ struct clk_dummy_context *ctx,
+ struct device_node *np, const char *name,
+ unsigned long rate)
+{
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_dummy_rate_ops;
+ ctx->hw.init = &init;
+ ctx->rate = rate;
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
+ KUNIT_ASSERT_EQ(test, ctx->rate, rate);
+}
+
+/*
+ * Does most of the work of the test:
+ *
+ * 1. Apply the overlay to test
+ * 2. Register the clk or clks to test
+ * 3. Register the clk provider
+ * 4. Apply clk defaults to the consumer device if this is a consumer test
+ *
+ * The tests will set different test_param values to test different scenarios
+ * and validate that in their test functions.
+ */
+static int clk_assigned_rates_test_init(struct kunit *test)
+{
+ struct device_node *np, *consumer;
+ struct clk_hw_onecell_data *data;
+ struct clk_assigned_rates_context *ctx;
+ u32 clk_cells;
+ const struct clk_assigned_rates_test_param *test_param;
+
+ test_param = test->param_value;
+
+ KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
+ test_param->overlay_begin,
+ test_param->overlay_end));
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
+ test->priv = ctx;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
+ of_node_put_kunit(test, np);
+
+ KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
+ /* Only support #clock-cells = <0> or <1> */
+ KUNIT_ASSERT_LT(test, clk_cells, 2);
+
+ clk_assigned_rates_register_clk(test, &ctx->clk0, np,
+ "test_assigned_rate0", test_param->rate0);
+ if (clk_cells == 0) {
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
+ &ctx->clk0.hw));
+ } else if (clk_cells == 1) {
+ clk_assigned_rates_register_clk(test, &ctx->clk1, np,
+ "test_assigned_rate1", test_param->rate1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
+ data->num = 2;
+ data->hws[0] = &ctx->clk0.hw;
+ data->hws[1] = &ctx->clk1.hw;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
+ }
+
+ /* Consumers are optional */
+ if (test_param->consumer_test) {
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
+ of_node_put_kunit(test, consumer);
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
+ }
+
+ return 0;
+}
+
+static void clk_assigned_rates_assigns_one(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+}
+
+static void clk_assigned_rates_assigns_multiple(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
+}
+
+static void clk_assigned_rates_skips(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+ const struct clk_assigned_rates_test_param *test_param = test->param_value;
+
+ KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
+}
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
+
+/* Test cases that assign one rate */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_test_params, desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
+
+/* Test cases that assign multiple rates */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+		 * Test that a multiple cell assigned-clock-rates-u64 property
+		 * assigns the rates when the property is in the provider.
+ */
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates-u64 property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_test_params,
+ desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
+
+/* Test cases that skip changing the rate due to malformed DT */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the provider.
+ */
+ .desc = "provider missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the consumer.
+ */
+ .desc = "consumer missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the consumer.
+ */
+ .desc = "consumer assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the consumer.
+ */
+ .desc = "provider assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_test_params,
+ desc)
+
+static struct kunit_case clk_assigned_rates_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_gen_params),
+ {}
+};
+
+/*
+ * Test suite for assigned-clock-rates{-u64} DT property.
+ */
+static struct kunit_suite clk_assigned_rates_suite = {
+ .name = "clk_assigned_rates",
+ .test_cases = clk_assigned_rates_test_cases,
+ .init = clk_assigned_rates_test_init,
+};
+
+static const struct clk_init_data clk_hw_get_dev_of_node_init_data = {
+ .name = "clk_hw_get_dev_of_node",
+ .ops = &empty_clk_ops,
+};
+
+/*
+ * Test that a clk registered with a struct device returns the device from
+ * clk_hw_get_dev() and the node from clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_get_dev_returns_dev(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-hw-get-dev-of-node" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, dev_of_node(dev), clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with a struct device that's not associated with
+ * an OF node returns the device from clk_hw_get_dev() and NULL from
+ * clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_no_node_get_dev_returns_dev(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_hw *hw;
+
+ pdev = kunit_platform_device_alloc(test, "clk_hw_register_dev_no_node", -1);
+ KUNIT_ASSERT_NOT_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ dev = &pdev->dev;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without a struct device returns NULL from
+ * both clk_hw_get_dev() and clk_hw_get_of_node()
+ */
+static void clk_hw_register_NULL_get_dev_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with an of_node returns the node from
+ * clk_hw_get_of_node() and NULL from clk_hw_get_dev()
+ */
+static void of_clk_hw_register_node_get_of_node_returns_node(struct kunit *test)
+{
+ struct device_node *np;
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ np = of_find_compatible_node(NULL, NULL, "test,clk-hw-get-dev-of-node");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, np, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without an of_node returns NULL from both
+ * clk_hw_get_of_node() and clk_hw_get_dev()
+ */
+static void of_clk_hw_register_NULL_get_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+static struct kunit_case clk_hw_get_dev_of_node_test_cases[] = {
+ KUNIT_CASE(clk_hw_register_dev_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_dev_no_node_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_NULL_get_dev_of_node_returns_NULL),
+ KUNIT_CASE(of_clk_hw_register_node_get_of_node_returns_node),
+ KUNIT_CASE(of_clk_hw_register_NULL_get_of_node_returns_NULL),
+ {}
+};
+
+/*
+ * Test suite to verify clk_hw_get_dev() and clk_hw_get_of_node() when clk
+ * registered with clk_hw_register() and of_clk_hw_register()
+ */
+static struct kunit_suite clk_hw_get_dev_of_node_test_suite = {
+ .name = "clk_hw_get_dev_of_node_test_suite",
+ .test_cases = clk_hw_get_dev_of_node_test_cases,
+};
+
kunit_test_suites(
+ &clk_assigned_rates_suite,
+ &clk_hw_get_dev_of_node_test_suite,
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 2f83fb97c6fb..e0bede6350e1 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -153,7 +153,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __ref
+static __printf(3, 0) struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -215,7 +215,7 @@ fail:
return &cla->cl;
}
-static struct clk_lookup *
+static __printf(3, 0) struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -303,9 +303,8 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
-static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
- const char *con_id,
- const char *dev_id, ...)
+static __printf(3, 4) struct clk_lookup *
+__clk_register_clkdev(struct clk_hw *hw, const char *con_id, const char *dev_id, ...)
{
struct clk_lookup *cl;
va_list ap;
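
The clkdev.c hunks only add format-string annotations: __printf(i, j) marks argument i as the printf-style format with variadic arguments starting at j, and j = 0 denotes a va_list variant where the compiler can still check callers' format strings against the %-directives. An illustrative pair, with placeholder names:

#include <linux/printk.h>
#include <linux/stdarg.h>

/* va_list form: __printf(1, 0) still enables -Wformat checks on @fmt */
static __printf(1, 0) void vlog_fmt(const char *fmt, va_list ap)
{
	vprintk(fmt, ap);
}

static __printf(1, 2) void log_fmt(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vlog_fmt(fmt, ap);
	va_end(ap);
}
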
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index 5d0ae1ee72ec..f9d5c9a392e4 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -4,10 +4,8 @@ ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
obj-y += pll.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
obj-y += psc.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
endif
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
deleted file mode 100644
index 0a0d06fb25fd..000000000000
--- a/drivers/clk/davinci/pll-da830.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk/davinci.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info da830_pll_info = {
- .name = "pll0",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 4,
- .pllm_max = 32,
- .pllout_min_rate = 300000000,
- .pllout_max_rate = 600000000,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-/*
- * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
- * meaning that we could change the divider as long as we keep the correct
- * ratio between all of the clocks, but we don't support that because there is
- * currently not a need for it.
- */
-
-SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
-SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
-SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
-
-int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
- clk_register_clkdev(clk, "pll0_sysclk3", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
- clk_register_clkdev(clk, "pll0_sysclk5", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
- clk_register_clkdev(clk, "pll0_sysclk6", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
-
- clk = davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
- clk_register_clkdev(clk, NULL, "i2c_davinci.1");
- clk_register_clkdev(clk, "timer0", NULL);
- clk_register_clkdev(clk, NULL, "davinci-wdt");
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 5bbbb3a66477..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/platform_data/clk-davinci-pll.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -764,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -804,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -819,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -834,53 +835,23 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
/* needed in early boot for clocksource/clockevent */
-#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
-#endif
static const struct of_device_id davinci_pll_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
-#endif
{ }
};
static const struct platform_device_id davinci_pll_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
-#endif
{ }
};
@@ -890,8 +861,8 @@ typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct davinci_pll_platform_data *pdata;
davinci_pll_init pll_init = NULL;
+ struct regmap *cfgchip;
void __iomem *base;
pll_init = device_get_match_data(dev);
@@ -903,17 +874,13 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base, pdata->cfgchip);
+ return pll_init(dev, base, cfgchip);
}
static struct platform_driver davinci_pll_driver = {
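
The pll.c conversion above also fixes the of_node_put() placement: of_get_available_child_by_name() returns the child only when it exists and its status is "okay", so each branch needs exactly one NULL check and one put. A condensed sketch of the pattern; register_child_provider is hypothetical:

static int register_child_provider(struct device_node *node, struct clk *clk)
{
	struct device_node *child;

	/* NULL when the child is absent or its status is not "okay" */
	child = of_get_available_child_by_name(node, "pllout");
	if (!child)
		return -ENOENT;

	of_clk_add_provider(child, of_clk_src_simple_get, clk);
	of_node_put(child);

	return 0;
}
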
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 20bfcec2d3b5..ad286ba4ce0c 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -80,7 +80,7 @@ static const struct davinci_pll_sysclk_info n = { \
* @name: The name of the clock
* @parent_names: Array of names of the parent clocks
* @num_parents: Length of @parent_names
- * @table: Array of values to write to OCSEL[OCSRC] cooresponding to
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
* @parent_names
* @ocsrc_mask: Bitmask for OCSEL[OCSRC]
*/
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
deleted file mode 100644
index 6481337382a6..000000000000
--- a/drivers/clk/davinci/psc-da830.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-
-static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
- LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
- LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
- LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
- LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED),
- { }
-};
-
-static int da830_psc0_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base);
-}
-
-static struct clk_bulk_data da830_psc0_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk3" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk6" },
-};
-
-const struct davinci_psc_init_data da830_psc0_init_data = {
- .parent_clks = da830_psc0_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks),
- .psc_init = &da830_psc0_init,
-};
-
-LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
- NULL, "musb-da8xx",
- NULL, "cppi41-dmaengine");
-LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
-LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
-LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
-LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0",
- "fck", "ehrpwm.1");
-LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
- "fck", "ecap.1",
- "fck", "ecap.2");
-LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0",
- NULL, "eqep.1");
-
-static const struct davinci_lpsc_clk_info da830_psc1_info[] = {
- LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
- LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
- LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
- LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
- LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0),
- LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0),
- LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0),
- LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0),
- LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
- LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0),
- LPSC(13, 0, uart2, pll0_sysclk2, uart2_clkdev, 0),
- LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
- LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0),
- LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0),
- LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0),
- { }
-};
-
-static int da830_psc1_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base);
-}
-
-static struct clk_bulk_data da830_psc1_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk5" },
-};
-
-const struct davinci_psc_init_data da830_psc1_init_data = {
- .parent_clks = da830_psc1_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks),
- .psc_init = &da830_psc1_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 355d1be0b5d8..f3ee9397bb0c 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -277,6 +277,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
best_dev_name(dev), name);
+ if (!lpsc->pm_domain.name) {
+ clk_hw_unregister(&lpsc->hw);
+ kfree(lpsc);
+ return ERR_PTR(-ENOMEM);
+ }
lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
@@ -494,22 +499,14 @@ int of_davinci_psc_clk_init(struct device *dev,
}
static const struct of_device_id davinci_psc_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
{ .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
-#endif
{ }
};
static const struct platform_device_id davinci_psc_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
- { .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index bd23f6fd56df..742672843776 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -94,14 +94,9 @@ struct davinci_psc_init_data {
int (*psc_init)(struct device *dev, void __iomem *base);
};
-#ifdef CONFIG_ARCH_DAVINCI_DA830
-extern const struct davinci_psc_init_data da830_psc0_init_data;
-extern const struct davinci_psc_init_data da830_psc1_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
extern const struct davinci_psc_init_data da850_psc0_init_data;
extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
-#endif
+
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 3a653d54bee0..7c8b00ee6019 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -34,7 +34,7 @@
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
- },
+ }
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
@@ -67,14 +67,14 @@ static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
return stub_clk->rate;
}
-static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -97,15 +97,15 @@ static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
- .round_rate = hi3660_stub_clk_round_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"),
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index a8319795ed1c..bf99cfafafa0 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -161,11 +161,11 @@ static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hi6220_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
- unsigned long new_rate = rate / 1000; /* kHz */
+ unsigned long new_rate = req->rate / 1000; /* kHz */
switch (stub_clk->id) {
case HI6220_STUB_ACPU0:
@@ -181,12 +181,14 @@ static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
break;
}
- return new_rate;
+ req->rate = new_rate;
+
+ return 0;
}
static const struct clk_ops hi6220_stub_clk_ops = {
.recalc_rate = hi6220_stub_clk_recalc_rate,
- .round_rate = hi6220_stub_clk_round_rate,
+ .determine_rate = hi6220_stub_clk_determine_rate,
.set_rate = hi6220_stub_clk_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 5348bafe694f..6bae18a84cb6 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -55,13 +55,15 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi6220_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table,
+ dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -93,7 +95,7 @@ static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi6220_clkdiv_ops = {
.recalc_rate = hi6220_clkdiv_recalc_rate,
- .round_rate = hi6220_clkdiv_round_rate,
+ .determine_rate = hi6220_clkdiv_determine_rate,
.set_rate = hi6220_clkdiv_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 90d858522967..21d4297f3225 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -17,9 +17,9 @@
#include "clk.h"
/* clock separated gate register offset */
-#define CLKGATE_SEPERATED_ENABLE 0x0
-#define CLKGATE_SEPERATED_DISABLE 0x4
-#define CLKGATE_SEPERATED_STATUS 0x8
+#define CLKGATE_SEPARATED_ENABLE 0x0
+#define CLKGATE_SEPARATED_DISABLE 0x4
+#define CLKGATE_SEPARATED_STATUS 0x8
struct clkgate_separated {
struct clk_hw hw;
@@ -40,7 +40,7 @@ static int clkgate_separated_enable(struct clk_hw *hw)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
writel_relaxed(reg, sclk->enable);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
return 0;
@@ -56,8 +56,8 @@ static void clkgate_separated_disable(struct clk_hw *hw)
if (sclk->lock)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
- writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPARATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
}
@@ -68,7 +68,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
u32 reg;
sclk = container_of(hw, struct clkgate_separated, hw);
- reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
reg &= BIT(sclk->bit_idx);
return reg ? 1 : 0;
@@ -100,7 +100,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->enable = reg + CLKGATE_SEPARATED_ENABLE;
sclk->bit_idx = bit_idx;
sclk->flags = clk_gate_flags;
sclk->hw.init = &init;
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index b00cbd045af5..db96f8bea630 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -67,21 +67,21 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register input clock: %pe\n", hw);
goto fail_input;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register sys clock: %pe\n", hw);
goto fail_sys;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register cpu clock: %pe\n", hw);
goto fail_cpu;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
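The clk-boston hunks replace the PTR_ERR()/%ld pair with the %pe printk specifier, which decodes an ERR_PTR()-encoded pointer into its symbolic errno name. The idiom in isolation:

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/printk.h>

    static void sketch_report(struct clk_hw *hw)
    {
            /* Prints e.g. "-ENOMEM" rather than a bare "-12". */
            if (IS_ERR(hw))
                    pr_err("failed to register clock: %pe\n", hw);
    }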
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index f163df952ccc..eb27c6fee359 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -46,12 +46,12 @@ static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}
-static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_busy_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_busy_divider *busy = to_clk_busy_divider(hw);
- return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+ return busy->div_ops->determine_rate(&busy->div.hw, req);
}
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -69,7 +69,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
- .round_rate = clk_busy_divider_round_rate,
+ .determine_rate = clk_busy_divider_determine_rate,
.set_rate = clk_busy_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index f187582ba491..1467d0a1b934 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -73,21 +73,6 @@ static int imx8m_clk_composite_compute_dividers(unsigned long rate,
return ret;
}
-static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
-{
- int prediv_value;
- int div_value;
-
- imx8m_clk_composite_compute_dividers(rate, *prate,
- &prediv_value, &div_value);
- rate = DIV_ROUND_UP(*prate, prediv_value);
-
- return DIV_ROUND_UP(rate, div_value);
-
-}
-
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
@@ -153,7 +138,6 @@ static int imx8m_divider_determine_rate(struct clk_hw *hw,
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
- .round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
.determine_rate = imx8m_divider_determine_rate,
};
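Here the round_rate callback is deleted outright rather than converted: the ops already carry imx8m_divider_determine_rate, and the framework prefers .determine_rate whenever both callbacks are present, so the old one was dead code. For reference, its two-stage rounding in a standalone worked example (values hypothetical):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Pre-divider first, then post-divider, each rounded up. */
            unsigned long parent = 1000000000UL;    /* 1 GHz parent */
            unsigned long prediv = 3, postdiv = 4;
            unsigned long rate = DIV_ROUND_UP(DIV_ROUND_UP(parent, prediv),
                                              postdiv);

            printf("%lu\n", rate);  /* 83333334 */
            return 0;
    }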
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 6c6c5a30f328..513d74a39d3b 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -98,12 +98,6 @@ imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long
-imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- return clk_divider_ops.round_rate(hw, rate, prate);
-}
-
static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
@@ -141,7 +135,6 @@ static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long
static const struct clk_ops imx93_clk_composite_divider_ops = {
.recalc_rate = imx93_clk_composite_divider_recalc_rate,
- .round_rate = imx93_clk_composite_divider_round_rate,
.determine_rate = imx93_clk_composite_divider_determine_rate,
.set_rate = imx93_clk_composite_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index cb6ca4cf0535..43637cb61693 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -30,12 +30,14 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
return clk_get_rate(cpu->div);
}
-static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_cpu *cpu = to_clk_cpu(hw);
- return clk_round_rate(cpu->pll, rate);
+ req->rate = clk_round_rate(cpu->pll, req->rate);
+
+ return 0;
}
static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +68,7 @@ static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 100ca828b052..aa6addbeb5a8 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -18,7 +18,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup divider clock is a subclass of basic clk_divider
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_div {
struct clk_divider divider;
@@ -41,12 +41,12 @@ static unsigned long clk_fixup_div_recalc_rate(struct clk_hw *hw,
return fixup_div->ops->recalc_rate(&fixup_div->divider.hw, parent_rate);
}
-static long clk_fixup_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fixup_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- return fixup_div->ops->round_rate(&fixup_div->divider.hw, rate, prate);
+ return fixup_div->ops->determine_rate(&fixup_div->divider.hw, req);
}
static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -81,7 +81,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_fixup_div_ops = {
.recalc_rate = clk_fixup_div_recalc_rate,
- .round_rate = clk_fixup_div_round_rate,
+ .determine_rate = clk_fixup_div_determine_rate,
.set_rate = clk_fixup_div_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b48701864ef0..418ac9fe2c26 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -17,7 +17,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup multiplexer clock is a subclass of basic clk_mux
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_mux {
struct clk_mux mux;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index c703056fae85..eb668faaa38f 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -119,19 +119,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 parent_rate = *prate;
+ u64 parent_rate = req->best_parent_rate;
u32 divff, divfi;
u64 temp64;
parent_rate *= 8;
- rate *= 2;
- temp64 = rate;
+ req->rate *= 2;
+ temp64 = req->rate;
do_div(temp64, parent_rate);
divfi = temp64;
- temp64 = rate - divfi * parent_rate;
+ temp64 = req->rate - divfi * parent_rate;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
@@ -140,9 +140,11 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= divff;
do_div(temp64, PLL_FRAC_DENOM);
- rate = parent_rate * divfi + temp64;
+ req->rate = parent_rate * divfi + temp64;
+
+ req->rate = req->rate / 2;
- return rate / 2;
+ return 0;
}
/*
@@ -198,7 +200,7 @@ static const struct clk_ops clk_frac_pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
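The frac PLL request is scaled before being split: the VCO runs at 8 * parent and the output is VCO / 2, hence the parent_rate *= 8 and req->rate *= 2 above, after which the ratio is separated into an integer part (divfi) and a fractional part (divff) against PLL_FRAC_DENOM. A standalone rehearsal of the arithmetic, assuming PLL_FRAC_DENOM is 2^24 as defined in clk-frac-pll.c:

    #include <inttypes.h>
    #include <stdio.h>

    #define PLL_FRAC_DENOM (1ULL << 24)     /* assumption: 0x1000000 */

    int main(void)
    {
            uint64_t parent = 25000000ULL * 8;      /* 25 MHz ref, x8 */
            uint64_t rate = 1000000000ULL * 2;      /* 1 GHz out, x2 */
            uint64_t divfi = rate / parent;
            uint64_t divff = (rate - divfi * parent) * PLL_FRAC_DENOM
                             / parent;
            uint64_t out = (parent * divfi
                            + parent * divff / PLL_FRAC_DENOM) / 2;

            /* This clean ratio lands exactly: divfi=10, divff=0,
             * out=1000000000. */
            printf("divfi=%" PRIu64 " divff=%" PRIu64 " out=%" PRIu64 "\n",
                   divfi, divff, out);
            return 0;
    }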
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 591e0364ee5c..090d60867250 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -134,8 +134,8 @@ imx_get_pll_settings(struct clk_fracn_gppll *pll, unsigned long rate)
return NULL;
}
-static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fracn_gppll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
@@ -143,11 +143,16 @@ static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -254,9 +259,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
FIELD_PREP(PLL_MFI_MASK, rate->mfi);
writel_relaxed(pll_div, pll->base + PLL_DIV);
+ readl(pll->base + PLL_DIV);
if (pll->flags & CLK_FRACN_GPPLL_FRACN) {
writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+ readl(pll->base + PLL_NUMERATOR);
}
/* Wait for 5us according to fracn mode pll doc */
@@ -265,6 +272,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
/* Enable Powerup */
tmp |= POWERUP_MASK;
writel_relaxed(tmp, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
/* Wait Lock */
ret = clk_fracn_gppll_wait_lock(pll);
@@ -302,14 +310,15 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
val |= POWERUP_MASK;
writel_relaxed(val, pll->base + PLL_CTRL);
-
- val |= CLKMUX_EN;
- writel_relaxed(val, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
ret = clk_fracn_gppll_wait_lock(pll);
if (ret)
return ret;
+ val |= CLKMUX_EN;
+ writel_relaxed(val, pll->base + PLL_CTRL);
+
val &= ~CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
@@ -341,7 +350,7 @@ static const struct clk_ops clk_fracn_gppll_ops = {
.unprepare = clk_fracn_gppll_unprepare,
.is_prepared = clk_fracn_gppll_is_prepared,
.recalc_rate = clk_fracn_gppll_recalc_rate,
- .round_rate = clk_fracn_gppll_round_rate,
+ .determine_rate = clk_fracn_gppll_determine_rate,
.set_rate = clk_fracn_gppll_set_rate,
};
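The readl() calls added after each writel_relaxed() are read-backs: they force the posted write out to the PLL block so that the following fixed delay and power-up step really begin after the hardware has latched the new value. The idiom, isolated:

    #include <linux/io.h>

    static void sketch_write_flush(void __iomem *reg, u32 val)
    {
            writel_relaxed(val, reg);
            readl(reg);     /* order and flush the posted write */
    }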
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index 77342893bb71..7017e9d4e188 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -18,7 +18,7 @@
* gate clock
*
* The imx exclusive gate clock is a subclass of basic clk_gate
- * with an addtional mask to indicate which other gate bits in the same
+ * with an additional mask to indicate which other gate bits in the same
* register is mutually exclusive to this gate clock.
*/
struct clk_gate_exclusive {
diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
index b82044911603..9c5f489b3975 100644
--- a/drivers/clk/imx/clk-imx5.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -454,7 +454,7 @@ static void __init mx51_clocks_init(struct device_node *np)
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
- * enabled without the IPU clock being enabled aswell.
+ * enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index 6c351050b82a..790f7e44b11e 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -22,7 +22,7 @@
* struct clk_imx_acm_pm_domains - structure for multi power domain
* @pd_dev: power domain device
* @pd_dev_link: power domain device link
- * @num_domains: power domain nummber
+ * @num_domains: power domain number
*/
struct clk_imx_acm_pm_domains {
struct device **pd_dev;
@@ -294,9 +294,9 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(dev_pm->pd_dev_link[i])) {
+ if (!dev_pm->pd_dev_link[i]) {
dev_pm_domain_detach(dev_pm->pd_dev[i], false);
- ret = PTR_ERR(dev_pm->pd_dev_link[i]);
+ ret = -EINVAL;
goto detach_pm;
}
}
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b2cb157703c5..775f62dddb11 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
- CLK_GATE("ocrama", OCRAMA_IPG),
+ CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
- CLK_GATE("dsp", DSP_ROOT),
- CLK_GATE("dspdbg", DSPDBG_ROOT),
+ CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
+ CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
CLK_GATE("edma", EDMA_ROOT),
CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),
@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
#else /* !CONFIG_RESET_CONTROLLER */
-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
{
return 0;
}
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 516dbd170c8a..fe6dac70f1a1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -399,17 +400,158 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
"dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
- "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
- "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+ "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
+ "sys_pll3_out", "dummy", "dummy", "osc_24m",
+ "dummy", "osc_32k"};
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
+struct imx8mp_clock_constraints {
+ unsigned int clkid;
+ u32 maxrate;
+};
+
+/*
+ * Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023
+ * Table 13. Maximum frequency of modules.
+ * Probable typos fixed are marked with a comment.
+ */
+static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
+ { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
+ { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AHB, 133333333 },
+ { IMX8MP_CLK_IPG_ROOT, 66666667 },
+ { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
+ { IMX8MP_CLK_DRAM_ALT, 666666667 },
+ { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
+ { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
+ { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
+ { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
+{
+ const struct imx8mp_clock_constraints *constr;
+
+ for (constr = constraints; constr->clkid; constr++)
+ clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
+}
+
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
+ const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@@ -714,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
+
+ err = of_property_read_string(np, "fsl,operating-mode", &opmode);
+ if (!err) {
+ if (!strcmp(opmode, "nominal"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
+ else if (!strcmp(opmode, "overdrive"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
+ }
+
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d0ccaa040225..1dae3410ee99 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -267,7 +267,6 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
if (ret)
goto unreg;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index c6a9bc8ecc1f..c5f358a75f30 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -15,6 +15,11 @@
#include "clk.h"
+#define IMX93_CLK_END 208
+
+#define PLAT_IMX93 BIT(0)
+#define PLAT_IMX91 BIT(1)
+
enum clk_sel {
LOW_SPEED_IO_SEL,
NON_IO_SEL,
@@ -33,6 +38,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
static u32 share_count_pdm;
+static u32 share_count_spdif;
static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
@@ -53,6 +59,7 @@ static const struct imx93_clk_root {
u32 off;
enum clk_sel sel;
unsigned long flags;
+ unsigned long plat;
} root_array[] = {
/* a55/m33/bus critical clk for system run */
{ IMX93_CLK_A55_PERIPH, "a55_periph_root", 0x0000, FAST_SEL, CLK_IS_CRITICAL },
@@ -63,9 +70,9 @@ static const struct imx93_clk_root {
{ IMX93_CLK_BUS_AON, "bus_aon_root", 0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_LPTMR2, "lptmr2_root", 0x0780, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_TPM2, "tpm2_root", 0x0880, TPM_SEL, },
@@ -120,15 +127,15 @@ static const struct imx93_clk_root {
{ IMX93_CLK_HSIO_ACSCAN_80M, "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_HSIO_ACSCAN_480M, "hsio_acscan_480m_root", 0x2000, MISC_SEL, },
{ IMX93_CLK_NIC_AXI, "nic_axi_root", 0x2080, FAST_SEL, CLK_IS_CRITICAL, },
- { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, },
+ { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_AXI, "media_axi_root", 0x2280, FAST_SEL, },
{ IMX93_CLK_MEDIA_APB, "media_apb_root", 0x2300, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, },
+ { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_DISP_PIX, "media_disp_pix_root", 0x2400, VIDEO_SEL, },
{ IMX93_CLK_CAM_PIX, "cam_pix_root", 0x2480, VIDEO_SEL, },
- { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, },
- { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, },
+ { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_ADC, "adc_root", 0x2700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PDM, "pdm_root", 0x2780, AUDIO_SEL, },
{ IMX93_CLK_TSTMR1, "tstmr1_root", 0x2800, LOW_SPEED_IO_SEL, },
@@ -137,13 +144,16 @@ static const struct imx93_clk_root {
{ IMX93_CLK_MQS2, "mqs2_root", 0x2980, AUDIO_SEL, },
{ IMX93_CLK_AUDIO_XCVR, "audio_xcvr_root", 0x2a00, NON_IO_SEL, },
{ IMX93_CLK_SPDIF, "spdif_root", 0x2a80, AUDIO_SEL, },
- { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, },
- { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, },
- { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX91_CLK_ENET1_QOS_TSN, "enet1_qos_tsn_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET_TIMER, "enet_timer_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET2_REGULAR, "enet2_regular_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_USB_PHY_BURUNIN, "usb_phy_root", 0x2e80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PAL_CAME_SCAN, "pal_came_scan_root", 0x2f00, MISC_SEL, }
};
@@ -155,6 +165,7 @@ static const struct imx93_clk_ccgr {
u32 off;
unsigned long flags;
u32 *shared_count;
+ unsigned long plat;
} ccgr_array[] = {
{ IMX93_CLK_A55_GATE, "a55_alt", "a55_alt_root", 0x8000, },
/* M33 critical clk for system run */
@@ -167,10 +178,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
- { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
@@ -178,8 +189,8 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
{ IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
{ IMX93_CLK_GPIO4_GATE, "gpio4", "bus_wakeup_root", 0x8940, },
- { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, },
- { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, },
+ { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, 0, NULL, PLAT_IMX93},
+ { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, 0, NULL, PLAT_IMX93},
{ IMX93_CLK_LPIT1_GATE, "lpit1", "bus_aon_root", 0x8a00, },
{ IMX93_CLK_LPIT2_GATE, "lpit2", "bus_wakeup_root", 0x8a40, },
{ IMX93_CLK_LPTMR1_GATE, "lptmr1", "lptmr1_root", 0x8a80, },
@@ -228,10 +239,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
- { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
- { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
+ { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, 0, NULL, PLAT_IMX93 },
+ { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
- { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
+ { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
{ IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
@@ -242,10 +253,13 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_MQS1_GATE, "mqs1", "sai1_root", 0x9b00, },
{ IMX93_CLK_MQS2_GATE, "mqs2", "sai3_root", 0x9b40, },
{ IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
- { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_SPDIF_IPG, "spdif_ipg_clk", "bus_wakeup_root", 0x9c00, 0, &share_count_spdif},
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, 0, &share_count_spdif},
{ IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
- { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, },
- { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, },
+ { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, },
+ { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, },
+ { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
/* Critical because clk accessed during CPU idle */
{ IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL},
{ IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
@@ -265,6 +279,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
const struct imx93_clk_ccgr *ccgr;
void __iomem *base, *anatop_base;
int i, ret;
+ const unsigned long plat = (unsigned long)device_get_match_data(&pdev->dev);
clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
IMX93_CLK_END), GFP_KERNEL);
@@ -314,17 +329,20 @@ static int imx93_clocks_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(root_array); i++) {
root = &root_array[i];
- clks[root->clk] = imx93_clk_composite_flags(root->name,
- parent_names[root->sel],
- 4, base + root->off, 3,
- root->flags);
+ if (!root->plat || root->plat & plat)
+ clks[root->clk] = imx93_clk_composite_flags(root->name,
+ parent_names[root->sel],
+ 4, base + root->off, 3,
+ root->flags);
}
for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
ccgr = &ccgr_array[i];
- clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
- ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
- ccgr->shared_count);
+ if (!ccgr->plat || ccgr->plat & plat)
+ clks[ccgr->clk] = imx93_clk_gate(NULL,
+ ccgr->name, ccgr->parent_name,
+ ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+ ccgr->shared_count);
}
clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels,
@@ -354,7 +372,8 @@ unregister_hws:
}
static const struct of_device_id imx93_clk_of_match[] = {
- { .compatible = "fsl,imx93-ccm" },
+ { .compatible = "fsl,imx93-ccm", .data = (void *)PLAT_IMX93 },
+ { .compatible = "fsl,imx91-ccm", .data = (void *)PLAT_IMX91 },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx93_clk_of_match);
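The i.MX91 support reuses the i.MX93 tables by tagging rows with a plat bitmask: a zero mask registers the clock on every SoC, while tagged rows register only when their bit appears in the match data of the compatible that probed. The predicate, extracted:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define PLAT_IMX93 BIT(0)
    #define PLAT_IMX91 BIT(1)

    /* True when a table row applies to the probed SoC. */
    static bool sketch_row_applies(unsigned long row_plat,
                                   unsigned long match_plat)
    {
            return !row_plat || (row_plat & match_plat);
    }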
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 19a62da74be4..56bed4471995 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
+#include <dt-bindings/clock/nxp,imx94-clock.h>
#include <dt-bindings/clock/nxp,imx95-clock.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -35,6 +36,7 @@ struct imx95_blk_ctl {
void __iomem *base;
/* clock gate register */
u32 clk_reg_restore;
+ const struct imx95_blk_ctl_dev_data *pdata;
};
struct imx95_blk_ctl_clk_dev_data {
@@ -156,7 +158,7 @@ static const struct imx95_blk_ctl_dev_data camblk_dev_data = {
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
+static const struct imx95_blk_ctl_clk_dev_data imx95_lvds_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = {
.name = "ldb_phy_div",
.parent_names = (const char *[]){ "ldbpll", },
@@ -213,17 +215,21 @@ static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
},
};
-static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
- .num_clks = ARRAY_SIZE(lvds_clk_dev_data),
- .clk_dev_data = lvds_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_lvds_clk_dev_data),
+ .clk_dev_data = imx95_lvds_clk_dev_data,
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+static const char * const imx95_disp_engine_parents[] = {
+ "videopll1", "dsi_pll", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx95_dispmix_csr_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_ENG0_SEL] = {
.name = "disp_engine0_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 0,
.bit_width = 2,
@@ -232,8 +238,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
},
[IMX95_CLK_DISPMIX_ENG1_SEL] = {
.name = "disp_engine1_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 2,
.bit_width = 2,
@@ -242,9 +248,9 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
}
};
-static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
- .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data),
- .clk_dev_data = dispmix_csr_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx95_dispmix_csr_clk_dev_data,
.clk_reg_offset = 0,
};
@@ -277,10 +283,73 @@ static const struct imx95_blk_ctl_dev_data netcmix_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data hsio_blk_ctl_clk_dev_data[] = {
+ [0] = {
+ .name = "hsio_blk_ctl_clk",
+ .parent_names = (const char *[]){ "hsio_pll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 6,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = {
+ .num_clks = 1,
+ .clk_dev_data = hsio_blk_ctl_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_lvds_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_LVDS_CLK_GATE] = {
+ .name = "lvds_clk_gate",
+ .parent_names = (const char *[]){ "ldbpll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_lvds_clk_dev_data),
+ .clk_dev_data = imx94_lvds_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
+static const char * const imx94_disp_engine_parents[] = {
+ "disppix", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_dispmix_csr_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_CLK_SEL] = {
+ .name = "disp_clk_sel",
+ .parent_names = imx94_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx94_disp_engine_parents),
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx94_dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct imx95_blk_ctl_dev_data *bc_data;
struct imx95_blk_ctl *bc;
struct clk_hw_onecell_data *clk_hw_data;
struct clk_hw **hws;
@@ -310,23 +379,25 @@ static int imx95_bc_probe(struct platform_device *pdev)
return ret;
}
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ bc->pdata = of_device_get_match_data(dev);
+ if (!bc->pdata)
return devm_of_platform_populate(dev);
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc->pdata->num_clks),
GFP_KERNEL);
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled)
- pm_runtime_enable(&pdev->dev);
+ if (bc->pdata->rpm_enabled) {
+ devm_pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume_and_get(&pdev->dev);
+ }
- clk_hw_data->num = bc_data->num_clks;
+ clk_hw_data->num = bc->pdata->num_clks;
hws = clk_hw_data->hws;
- for (i = 0; i < bc_data->num_clks; i++) {
- const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ for (i = 0; i < bc->pdata->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc->pdata->clk_dev_data[i];
void __iomem *reg = base + data->reg;
if (data->type == CLK_MUX) {
@@ -360,21 +431,20 @@ static int imx95_bc_probe(struct platform_device *pdev)
goto cleanup;
}
- if (pm_runtime_enabled(bc->dev))
+ if (pm_runtime_enabled(bc->dev)) {
+ pm_runtime_put_sync(&pdev->dev);
clk_disable_unprepare(bc->clk_apb);
+ }
return 0;
cleanup:
- for (i = 0; i < bc_data->num_clks; i++) {
+ for (i = 0; i < bc->pdata->num_clks; i++) {
if (IS_ERR_OR_NULL(hws[i]))
continue;
clk_hw_unregister(hws[i]);
}
- if (bc_data->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -383,15 +453,24 @@ static int imx95_bc_runtime_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
clk_disable_unprepare(bc->clk_apb);
+
return 0;
}
static int imx95_bc_runtime_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(bc->clk_apb);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
+
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
+
+ return 0;
}
#endif
@@ -399,22 +478,12 @@ static int imx95_bc_runtime_resume(struct device *dev)
static int imx95_bc_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
- int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- if (bc_data->rpm_enabled) {
- ret = pm_runtime_get_sync(bc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bc->dev);
- return ret;
- }
- }
-
- bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
+ clk_disable_unprepare(bc->clk_apb);
return 0;
}
@@ -422,16 +491,16 @@ static int imx95_bc_suspend(struct device *dev)
static int imx95_bc_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- if (bc_data->rpm_enabled)
- pm_runtime_put(bc->dev);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
return 0;
}
@@ -443,10 +512,13 @@ static const struct dev_pm_ops imx95_bc_pm_ops = {
};
static const struct of_device_id imx95_bc_of_match[] = {
+ { .compatible = "nxp,imx94-display-csr", .data = &imx94_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx94-lvds-csr", .data = &imx94_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data },
{ .compatible = "nxp,imx95-display-master-csr", },
- { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
- { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-display-csr", .data = &imx95_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-lvds-csr", .data = &imx95_lvds_csr_dev_data },
+ { .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
{ .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
{ /* Sentinel */ },
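The imx95-blk-ctl rework caches the match data in the driver state (bc->pdata) and makes the runtime and system PM paths symmetric: suspend captures the gate register before the APB clock goes down, resume re-enables the clock and replays the saved value. The shape, reduced to a sketch with illustrative field names:

    #include <linux/io.h>

    struct sketch_bc {
            void __iomem *base;
            u32 off;        /* clk_reg_offset from the match data */
            u32 saved;
    };

    /* Save on the way down... */
    static void sketch_bc_suspend(struct sketch_bc *bc)
    {
            bc->saved = readl(bc->base + bc->off);
    }

    /* ...replay once the APB clock is running again. */
    static void sketch_bc_resume(struct sketch_bc *bc)
    {
            writel(bc->saved, bc->base + bc->off);
    }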
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index dd5abd09f3e2..6376557a3c3d 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -6,10 +6,12 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#include "clk-scu.h"
@@ -41,6 +43,29 @@ struct clk_lpcg_scu {
#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
+/* e10858 -LPCG clock gating register synchronization errata */
+static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val)
+{
+ writel(val, reg);
+
+ if (rate >= 24 * HZ_PER_MHZ || rate == 0) {
+ /*
+ * The time taken to access the LPCG registers from the AP core
+ * through the interconnect is longer than the minimum delay
+ * of 4 clock cycles required by the errata.
+ * Adding a readl will provide sufficient delay to prevent
+ * back-to-back writes.
+ */
+ readl(reg);
+ } else {
+ /*
+ * For clocks running below 24MHz, wait a minimum of
+ * 4 clock cycles.
+ */
+ ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate)));
+ }
+}
+
static int clk_lpcg_scu_enable(struct clk_hw *hw)
{
struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
@@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw)
val |= CLK_GATE_SCU_LPCG_HW_SEL;
reg |= val << clk->bit_idx;
- writel(reg, clk->reg);
+
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
@@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw)
reg = readl_relaxed(clk->reg);
reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
- writel(reg, clk->reg);
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
}
@@ -135,6 +161,9 @@ static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
+
clk->state = readl_relaxed(clk->reg);
dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
@@ -145,13 +174,11 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
- /*
- * FIXME: Sometimes writes don't work unless the CPU issues
- * them twice
- */
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
writel(clk->state, clk->reg);
- writel(clk->state, clk->reg);
+ lpcg_e10858_writel(0, clk->reg, clk->state);
dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
return 0;
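lpcg_e10858_writel() sizes the errata delay from the gated clock's rate: at 24 MHz and above (or when the rate is unknown, i.e. zero) the read-back alone already exceeds the required four cycles, below that it busy-waits four periods. A standalone check of the slow-path arithmetic for a 32.768 kHz clock:

    #include <stdio.h>

    #define HZ_PER_MHZ 1000000UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Four clock periods, in nanoseconds. */
            unsigned long rate = 32768;
            unsigned long ns = 4 * DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate);

            printf("%lu ns\n", ns);     /* 122072 ns, roughly 122 us */
            return 0;
    }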
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 5cf0149dfa15..31220fa7882b 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -62,24 +62,26 @@ static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ u64 tmp = req->best_parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = tmp;
if (frac < 12)
frac = 12;
else if (frac > 35)
frac = 35;
- tmp = *prate;
+ tmp = req->best_parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -117,7 +119,7 @@ static const struct clk_ops clk_pfd_ops = {
.enable = clk_pfd_enable,
.disable = clk_pfd_disable,
.recalc_rate = clk_pfd_recalc_rate,
- .round_rate = clk_pfd_round_rate,
+ .determine_rate = clk_pfd_determine_rate,
.set_rate = clk_pfd_set_rate,
.is_enabled = clk_pfd_is_enabled,
};
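A PFD output is parent * 18 / frac with frac clamped to [12, 35], and the conversion keeps the round-to-nearest computation of frac. A standalone rehearsal with a typical 528 MHz system-PLL parent:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t parent = 528000000, want = 396000000;
            /* frac = round(parent * 18 / want), then clamp. */
            uint64_t frac = (parent * 18 + want / 2) / want;

            if (frac < 12)
                    frac = 12;
            else if (frac > 35)
                    frac = 35;

            /* frac=24, out=396000000: the request is met exactly. */
            printf("frac=%" PRIu64 " out=%" PRIu64 "\n",
                   frac, parent * 18 / frac);
            return 0;
    }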
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index d63564dbb12c..36d0e80b55b8 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -56,7 +56,9 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
PLL_1416X_RATE(700000000U, 350, 3, 2),
PLL_1416X_RATE(640000000U, 320, 3, 2),
PLL_1416X_RATE(600000000U, 300, 3, 2),
+ PLL_1416X_RATE(416000000U, 208, 3, 2),
PLL_1416X_RATE(320000000U, 160, 3, 2),
+ PLL_1416X_RATE(208000000U, 208, 3, 3),
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
@@ -214,8 +216,8 @@ found:
t->mdiv, t->kdiv);
}
-static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1416x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
@@ -223,22 +225,29 @@ static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
-static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1443x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
struct imx_pll14xx_rate_table t;
- imx_pll14xx_calc_settings(pll, rate, *prate, &t);
+ imx_pll14xx_calc_settings(pll, req->rate, req->best_parent_rate, &t);
+
+ req->rate = t.rate;
- return t.rate;
+ return 0;
}
static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
@@ -468,7 +477,7 @@ static const struct clk_ops clk_pll1416x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1416x_round_rate,
+ .determine_rate = clk_pll1416x_determine_rate,
.set_rate = clk_pll1416x_set_rate,
};
@@ -481,7 +490,7 @@ static const struct clk_ops clk_pll1443x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1443x_round_rate,
+ .determine_rate = clk_pll1443x_determine_rate,
.set_rate = clk_pll1443x_set_rate,
};
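Both the pll1416x and fracn-gppll conversions preserve the same descending-table walk: take the first entry at or below the request, otherwise fall back to the table's minimum. The lookup, extracted into a sketch:

    /* tbl[] is sorted in descending order, n > 0. */
    static unsigned long sketch_pick_rate(const unsigned long *tbl, int n,
                                          unsigned long want)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (want >= tbl[i])
                            return tbl[i];

            /* Request below the table: return the minimum. */
            return tbl[n - 1];
    }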
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index ff17f0664faa..bb497ad5e0ae 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -178,18 +178,25 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 dp_op, dp_mfd, dp_mfn;
int ret;
- ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
- if (ret)
- return ret;
+ ret = __clk_pllv2_set_rate(req->rate, req->best_parent_rate, &dp_op,
+ &dp_mfd, &dp_mfn);
+ if (ret) {
+ req->rate = ret;
- return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
- dp_op, dp_mfd, dp_mfn);
+ return 0;
+ }
+
+ req->rate = __clk_pllv2_recalc_rate(req->best_parent_rate,
+ MXC_PLL_DP_CTL_DPDCK0_2_EN, dp_op,
+ dp_mfd, dp_mfn);
+
+ return 0;
}
static int clk_pllv2_prepare(struct clk_hw *hw)
@@ -235,7 +242,7 @@ static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
- .round_rate = clk_pllv2_round_rate,
+ .determine_rate = clk_pllv2_determine_rate,
.set_rate = clk_pllv2_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 11fb238ee8f0..b99508367bcb 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -117,13 +117,14 @@ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
return (div == 1) ? parent_rate * 22 : parent_rate * 20;
}
-static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
- return (rate >= parent_rate * 22) ? parent_rate * 22 :
- parent_rate * 20;
+ req->rate = (req->rate >= parent_rate * 22) ? parent_rate * 22 : parent_rate * 20;
+
+ return 0;
}
static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -152,7 +153,7 @@ static const struct clk_ops clk_pllv3_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_recalc_rate,
- .round_rate = clk_pllv3_round_rate,
+ .determine_rate = clk_pllv3_determine_rate,
.set_rate = clk_pllv3_set_rate,
};
@@ -165,21 +166,23 @@ static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
return parent_rate * div / 2;
}
-static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_sys_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 div;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
- div = rate * 2 / parent_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
+ div = req->rate * 2 / parent_rate;
- return parent_rate * div / 2;
+ req->rate = parent_rate * div / 2;
+
+ return 0;
}
static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +210,7 @@ static const struct clk_ops clk_pllv3_sys_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_sys_recalc_rate,
- .round_rate = clk_pllv3_sys_round_rate,
+ .determine_rate = clk_pllv3_sys_determine_rate,
.set_rate = clk_pllv3_sys_set_rate,
};
@@ -226,10 +229,10 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
return parent_rate * div + (unsigned long)temp64;
}
-static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_av_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 div;
@@ -237,16 +240,16 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
if (parent_rate <= max_mfd)
mfd = parent_rate;
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
+ div = req->rate / parent_rate;
+ temp64 = (u64) (req->rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
@@ -255,7 +258,9 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= mfn;
do_div(temp64, mfd);
- return parent_rate * div + (unsigned long)temp64;
+ req->rate = parent_rate * div + (unsigned long)temp64;
+
+ return 0;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -296,7 +301,7 @@ static const struct clk_ops clk_pllv3_av_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_av_recalc_rate,
- .round_rate = clk_pllv3_av_round_rate,
+ .determine_rate = clk_pllv3_av_determine_rate,
.set_rate = clk_pllv3_av_set_rate,
};
@@ -355,12 +360,15 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
}
-static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_vf610_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate);
+ struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(req->best_parent_rate,
+ req->rate);
+
+ req->rate = clk_pllv3_vf610_mf_to_rate(req->best_parent_rate, mf);
- return clk_pllv3_vf610_mf_to_rate(*prate, mf);
+ return 0;
}
static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -389,7 +397,7 @@ static const struct clk_ops clk_pllv3_vf610_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_vf610_recalc_rate,
- .round_rate = clk_pllv3_vf610_round_rate,
+ .determine_rate = clk_pllv3_vf610_determine_rate,
.set_rate = clk_pllv3_vf610_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 9b136c951762..01d05b5d5438 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -95,11 +95,11 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
return (parent_rate * mult) + (u32)temp64;
}
-static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv4_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long round_rate, i;
u32 mfn, mfd = DEFAULT_MFD;
bool found = false;
@@ -107,7 +107,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mult;
if (pll->use_mult_range) {
- temp64 = (u64)rate;
+ temp64 = (u64)req->rate;
do_div(temp64, parent_rate);
mult = temp64;
if (mult >= pllv4_mult_range[1] &&
@@ -118,7 +118,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
} else {
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate) {
+ if (req->rate >= round_rate) {
found = true;
break;
}
@@ -127,14 +127,16 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
if (!found) {
pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
- clk_hw_get_name(hw), rate, parent_rate);
+ clk_hw_get_name(hw), req->rate, parent_rate);
+ req->rate = 0;
+
return 0;
}
if (parent_rate <= MAX_MFD)
mfd = parent_rate;
- temp64 = (u64)(rate - round_rate);
+ temp64 = (u64)(req->rate - round_rate);
temp64 *= mfd;
do_div(temp64, parent_rate);
mfn = temp64;
@@ -145,14 +147,19 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
* pair of mfn/mfd, we simply return the round_rate without using
* the frac part.
*/
- if (mfn >= mfd)
- return round_rate;
+ if (mfn >= mfd) {
+ req->rate = round_rate;
+
+ return 0;
+ }
temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
- return round_rate + (u32)temp64;
+ req->rate = round_rate + (u32)temp64;
+
+ return 0;
}
static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
@@ -229,7 +236,7 @@ static void clk_pllv4_unprepare(struct clk_hw *hw)
static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
- .round_rate = clk_pllv4_round_rate,
+ .determine_rate = clk_pllv4_determine_rate,
.set_rate = clk_pllv4_set_rate,
.prepare = clk_pllv4_prepare,
.unprepare = clk_pllv4_unprepare,
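In the fractional paths above (pllv3 AV and pllv4), the final rate is the integer part plus parent_rate * mfn / mfd. With illustrative numbers: parent_rate = 24 MHz and a 626 MHz request give an integer part of 624 MHz (div = 26) and a 2 MHz remainder; assuming parent_rate <= MAX_MFD so that mfd = 24000000, mfn = 2000000 * 24000000 / 24000000 = 2000000, and the result is 624 MHz + 24 MHz * 2000000 / 24000000 = 626 MHz, exact in this case.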
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b1dd0c08e091..34c9dc1fb20e 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -269,24 +269,6 @@ static int clk_scu_determine_rate(struct clk_hw *hw,
return 0;
}
-/*
- * clk_scu_round_rate - Round clock rate for a SCU clock
- * @hw: clock to round rate for
- * @rate: rate to round
- * @parent_rate: parent rate provided by common clock framework, not used
- *
- * Returns the current clock rate, or zero in failure.
- */
-static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- /*
- * Assume we support all the requested rate and let the SCU firmware
- * to handle the left work
- */
- return rate;
-}
-
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -454,7 +436,7 @@ static const struct clk_ops clk_scu_ops = {
static const struct clk_ops clk_scu_cpu_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
@@ -462,7 +444,7 @@ static const struct clk_ops clk_scu_cpu_ops = {
static const struct clk_ops clk_scu_pi_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_set_rate,
};
@@ -567,7 +549,6 @@ static int imx_clk_scu_probe(struct platform_device *pdev)
if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
(clk->rsrc == IMX_SC_R_A72))) {
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
}
@@ -596,7 +577,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
else
clk->rate = clk_hw_get_rate(&clk->hw);
- clk->is_enabled = clk_hw_is_enabled(&clk->hw);
+ clk->is_enabled = clk_hw_is_prepared(&clk->hw);
if (clk->parent)
dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
@@ -729,7 +710,7 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
if (ret)
goto put_device;
- /* For API backwards compatiblilty, simply return NULL for success */
+ /* For API backwards compatibility, simply return NULL for success */
return NULL;
put_device:
@@ -766,15 +747,15 @@ static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
return err ? 0 : rate;
}
-static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_gpr_div_scu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < *prate)
- rate = *prate / 2;
+ if (req->rate < req->best_parent_rate)
+ req->rate = req->best_parent_rate / 2;
else
- rate = *prate;
+ req->rate = req->best_parent_rate;
- return rate;
+ return 0;
}
static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -793,7 +774,7 @@ static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_gpr_div_scu_ops = {
.recalc_rate = clk_gpr_div_scu_recalc_rate,
- .round_rate = clk_gpr_div_scu_round_rate,
+ .determine_rate = clk_gpr_div_scu_determine_rate,
.set_rate = clk_gpr_div_scu_set_rate,
};
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 0c9c8344ad11..91e7ac0cc334 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -174,14 +174,16 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static long
-ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *prate)
+static int ingenic_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
- return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
+ req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
@@ -317,7 +319,7 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops ingenic_pll_ops = {
.recalc_rate = ingenic_pll_recalc_rate,
- .round_rate = ingenic_pll_round_rate,
+ .determine_rate = ingenic_pll_determine_rate,
.set_rate = ingenic_pll_set_rate,
.enable = ingenic_pll_enable,
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 99da9bd86e63..0d417d69dab7 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -239,7 +239,7 @@ ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
*
* Register the clocks described by the CGU with the common clock framework.
*
- * Return: 0 on success or -errno if unsuccesful.
+ * Return: 0 on success or -errno if unsuccessful.
*/
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu);
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b1dadc0a5e75..07e2f3c5c454 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -128,19 +128,19 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int jz4780_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 15600000)
- return 12000000;
-
- if (req_rate < 21600000)
- return 19200000;
+ if (req->rate < 15600000)
+ req->rate = 12000000;
+ else if (req->rate < 21600000)
+ req->rate = 19200000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- if (req_rate < 36000000)
- return 24000000;
-
- return 48000000;
+ return 0;
}
static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -212,7 +212,7 @@ static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
- .round_rate = jz4780_otg_phy_round_rate,
+ .determine_rate = jz4780_otg_phy_determine_rate,
.set_rate = jz4780_otg_phy_set_rate,
.enable = jz4780_otg_phy_enable,
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index feb03eed4fe8..d80886caf393 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -84,16 +84,17 @@ static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int x1000_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 18000000)
- return 12000000;
-
- if (req_rate < 36000000)
- return 24000000;
+ if (req->rate < 18000000)
+ req->rate = 12000000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- return 48000000;
+ return 0;
}
static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -161,7 +162,7 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops x1000_otg_phy_ops = {
.recalc_rate = x1000_otg_phy_recalc_rate,
- .round_rate = x1000_otg_phy_round_rate,
+ .determine_rate = x1000_otg_phy_determine_rate,
.set_rate = x1000_otg_phy_set_rate,
.enable = x1000_usb_phy_enable,
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index c5894fc9395e..a4b42811de55 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -480,13 +480,10 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
num_clks++;
}
- provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
- GFP_KERNEL);
+ provider->clocks = devm_kmemdup_array(dev, clks, num_clks, sizeof(sci_clk), GFP_KERNEL);
if (!provider->clocks)
return -ENOMEM;
- memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
-
provider->num_clocks = num_clks;
devm_kfree(dev, clks);
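devm_kmemdup_array() folds the previous allocate-then-copy pair into a single call. Roughly, and ignoring the overflow checking the real helper performs, it behaves like this sketch (not the kernel's implementation):

static void *devm_kmemdup_array_sketch(struct device *dev, const void *src,
				       size_t n, size_t size, gfp_t gfp)
{
	void *p = devm_kmalloc_array(dev, n, size, gfp);

	if (p)
		memcpy(p, src, n * size);	/* same lifetime as dev */

	return p;
}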
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index 935d9a2d8c2b..c509929da854 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -105,6 +105,12 @@ static struct clk_hw
return &priv->hw;
}
+static const struct regmap_config ti_syscon_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
{
const struct ti_syscon_gate_clk_data *data, *p;
@@ -113,12 +119,17 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
int num_clks, num_parents, i;
const char *parent_name;
struct regmap *regmap;
+ void __iomem *base;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
- regmap = device_node_to_regmap(dev->of_node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to get regmap\n");
diff --git a/drivers/clk/kunit_clk_assigned_rates.h b/drivers/clk/kunit_clk_assigned_rates.h
new file mode 100644
index 000000000000..df2d84dcaa93
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_CLK_ASSIGNED_RATES_H
+#define _KUNIT_CLK_ASSIGNED_RATES_H
+
+#define ASSIGNED_RATES_0_RATE 1600000
+#define ASSIGNED_RATES_1_RATE 9700000
+
+#endif
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
new file mode 100644
index 000000000000..e600736e70f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
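These overlays exercise the framework's handling of the assigned-clocks/assigned-clock-rates properties, which are applied when the referenced provider or consumer device is probed. A hedged sketch of how a KUnit test might assert that the rate was applied (the real assertions live in the clk KUnit suite, not in this patch):

#include <kunit/test.h>
#include <linux/clk.h>

#include "kunit_clk_assigned_rates.h"

/* Hypothetical helper: expects clk 0's assigned rate to have stuck. */
static void assert_assigned_rate(struct kunit *test, struct clk *clk)
{
	KUNIT_EXPECT_EQ(test, clk_get_rate(clk),
			(unsigned long)ASSIGNED_RATES_0_RATE);
}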
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
new file mode 100644
index 000000000000..260aba458daf
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null.dtso b/drivers/clk/kunit_clk_assigned_rates_null.dtso
new file mode 100644
index 000000000000..0b27b38a9130
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
new file mode 100644
index 000000000000..99fb332ae83d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one.dtso b/drivers/clk/kunit_clk_assigned_rates_one.dtso
new file mode 100644
index 000000000000..dd95ec9b1cf9
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
new file mode 100644
index 000000000000..a41dca806318
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
new file mode 100644
index 000000000000..389b4e2eb7f7
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
new file mode 100644
index 000000000000..3e117fd59b7d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
new file mode 100644
index 000000000000..87041264e8f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
new file mode 100644
index 000000000000..3259c003aec0
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without.dtso b/drivers/clk/kunit_clk_assigned_rates_without.dtso
new file mode 100644
index 000000000000..22d333495cf2
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without.dtso
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
new file mode 100644
index 000000000000..75ac09140f83
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero.dtso b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
new file mode 100644
index 000000000000..08e042c2eafe
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
new file mode 100644
index 000000000000..1d964672e855
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
new file mode 100644
index 000000000000..760717da3235
--- /dev/null
+++ b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-clock-controller {
+ compatible = "test,clk-hw-get-dev-of-node";
+ #clock-cells = <0>;
+ };
+};
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 70a005e7e1b1..0e8dd82aa84e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -124,6 +124,43 @@ config COMMON_CLK_MT2712_VENCSYS
help
This driver supports MediaTek MT2712 vencsys clocks.
+config COMMON_CLK_MT6735
+ tristate "Main clock drivers for MediaTek MT6735"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ help
+ This enables drivers for clocks and resets provided
+ by apmixedsys, topckgen, infracfg and pericfg on the
+ MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_IMGSYS
+ tristate "Clock driver for MediaTek MT6735 imgsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by imgsys
+ on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_MFGCFG
+ tristate "Clock driver for MediaTek MT6735 mfgcfg"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by mfgcfg on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VDECSYS
+ tristate "Clock driver for MediaTek MT6735 vdecsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by vdecsys on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VENCSYS
+ tristate "Clock driver for MediaTek MT6735 vencsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by vencsys
+ on the MediaTek MT6735 SoC.
+
config COMMON_CLK_MT6765
bool "Clock driver for MediaTek MT6765"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -887,13 +924,6 @@ config COMMON_CLK_MT8195_APUSYS
help
This driver supports MediaTek MT8195 AI Processor Unit System clocks.
-config COMMON_CLK_MT8195_AUDSYS
- tristate "Clock driver for MediaTek MT8195 audsys"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 audsys clocks.
-
config COMMON_CLK_MT8195_IMP_IIC_WRAP
tristate "Clock driver for MediaTek MT8195 imp_iic_wrap"
depends on COMMON_CLK_MT8195
@@ -908,14 +938,6 @@ config COMMON_CLK_MT8195_MFGCFG
help
This driver supports MediaTek MT8195 mfgcfg clocks.
-config COMMON_CLK_MT8195_MSDC
- tristate "Clock driver for MediaTek MT8195 msdc"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 MMC and SD Controller's
- msdc and msdc_top clocks.
-
config COMMON_CLK_MT8195_SCP_ADSP
tristate "Clock driver for MediaTek MT8195 scp_adsp"
depends on COMMON_CLK_MT8195
@@ -980,6 +1002,77 @@ config COMMON_CLK_MT8195_VENCSYS
help
This driver supports MediaTek MT8195 vencsys clocks.
+config COMMON_CLK_MT8196
+ tristate "Clock driver for MediaTek MT8196"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8196 basic clocks.
+
+config COMMON_CLK_MT8196_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8196 imp_iic_wrap"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 i2c clocks.
+
+config COMMON_CLK_MT8196_MCUSYS
+ tristate "Clock driver for MediaTek MT8196 mcusys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mcusys clocks.
+
+config COMMON_CLK_MT8196_MDPSYS
+ tristate "Clock driver for MediaTek MT8196 mdpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mdpsys clocks.
+
+config COMMON_CLK_MT8196_MFGCFG
+ tristate "Clock driver for MediaTek MT8196 mfgcfg"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mfgcfg clocks.
+
+config COMMON_CLK_MT8196_MMSYS
+ tristate "Clock driver for MediaTek MT8196 mmsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mmsys clocks.
+
+config COMMON_CLK_MT8196_PEXTPSYS
+ tristate "Clock driver for MediaTek MT8196 pextpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 pextpsys clocks.
+
+config COMMON_CLK_MT8196_UFSSYS
+ tristate "Clock driver for MediaTek MT8196 ufssys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 ufssys clocks.
+
+config COMMON_CLK_MT8196_VDECSYS
+ tristate "Clock driver for MediaTek MT8196 vdecsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vdecsys clocks.
+
+config COMMON_CLK_MT8196_VENCSYS
+ tristate "Clock driver for MediaTek MT8196 vencsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index eeccfa039896..d8736a060dbd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -2,6 +2,11 @@
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
obj-$(CONFIG_COMMON_CLK_MEDIATEK_FHCTL) += clk-fhctl.o clk-pllfh.o
+obj-$(CONFIG_COMMON_CLK_MT6735) += clk-mt6735-apmixedsys.o clk-mt6735-infracfg.o clk-mt6735-pericfg.o clk-mt6735-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT6735_IMGSYS) += clk-mt6735-imgsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_MFGCFG) += clk-mt6735-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VDECSYS) += clk-mt6735-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VENCSYS) += clk-mt6735-vencsys.o
obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o
obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o
obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o
@@ -145,6 +150,19 @@ obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8196) += clk-mt8196-apmixedsys.o clk-mt8196-topckgen.o \
+ clk-mt8196-topckgen2.o clk-mt8196-vlpckgen.o \
+ clk-mt8196-peri_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_IMP_IIC_WRAP) += clk-mt8196-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MCUSYS) += clk-mt8196-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MDPSYS) += clk-mt8196-mdpsys.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MFGCFG) += clk-mt8196-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MMSYS) += clk-mt8196-disp0.o clk-mt8196-disp1.o clk-mt8196-vdisp_ao.o \
+ clk-mt8196-ovl0.o clk-mt8196-ovl1.o
+obj-$(CONFIG_COMMON_CLK_MT8196_PEXTPSYS) += clk-mt8196-pextp.o
+obj-$(CONFIG_COMMON_CLK_MT8196_UFSSYS) += clk-mt8196-ufs_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VDECSYS) += clk-mt8196-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VENCSYS) += clk-mt8196-venc.o
obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 67d9e741c5e7..f6b1429ff757 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/dev_printk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -12,15 +13,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "clk-mtk.h"
#include "clk-gate.h"
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
- int set_ofs;
- int clr_ofs;
- int sta_ofs;
- u8 bit;
+ struct regmap *regmap_hwv;
+ const struct mtk_gate *gate;
};
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
@@ -33,9 +33,9 @@ static u32 mtk_get_clockgating(struct clk_hw *hw)
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
- regmap_read(cg->regmap, cg->sta_ofs, &val);
+ regmap_read(cg->regmap, cg->gate->regs->sta_ofs, &val);
- return val & BIT(cg->bit);
+ return val & BIT(cg->gate->shift);
}
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
@@ -52,28 +52,30 @@ static void mtk_cg_set_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->set_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->clr_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_set_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_clear_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static int mtk_cg_enable(struct clk_hw *hw)
@@ -100,6 +102,32 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_hwv_set_en(struct clk_hw *hw, bool enable)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 val;
+
+ regmap_write(cg->regmap_hwv,
+ enable ? cg->gate->hwv_regs->set_ofs :
+ cg->gate->hwv_regs->clr_ofs,
+ BIT(cg->gate->shift));
+
+ return regmap_read_poll_timeout_atomic(cg->regmap_hwv,
+ cg->gate->hwv_regs->sta_ofs, val,
+ val & BIT(cg->gate->shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+}
+
+static int mtk_cg_hwv_enable(struct clk_hw *hw)
+{
+ return mtk_cg_hwv_set_en(hw, true);
+}
+
+static void mtk_cg_hwv_disable(struct clk_hw *hw)
+{
+ mtk_cg_hwv_set_en(hw, false);
+}
+
static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
{
mtk_cg_clr_bit_no_setclr(hw);
@@ -124,6 +152,15 @@ static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
mtk_cg_clr_bit_no_setclr(hw);
}
+static bool mtk_cg_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_clk_gate_hwv_ops_setclr ||
+ ops == &mtk_clk_gate_hwv_ops_setclr_inv)
+ return true;
+
+ return false;
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -138,6 +175,20 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr);
+
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr_inv);
+
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
@@ -152,12 +203,10 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
- const char *parent_name,
- struct regmap *regmap, int set_ofs,
- int clr_ofs, int sta_ofs, u8 bit,
- const struct clk_ops *ops,
- unsigned long flags)
+static struct clk_hw *mtk_clk_register_gate(struct device *dev,
+ const struct mtk_gate *gate,
+ struct regmap *regmap,
+ struct regmap *regmap_hwv)
{
struct mtk_clk_gate *cg;
int ret;
@@ -167,18 +216,19 @@ static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name
if (!cg)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.flags = flags | CLK_SET_RATE_PARENT;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.ops = ops;
+ init.name = gate->name;
+ init.flags = gate->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = gate->parent_name ? &gate->parent_name : NULL;
+ init.num_parents = gate->parent_name ? 1 : 0;
+ init.ops = gate->ops;
+ if (mtk_cg_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
cg->regmap = regmap;
- cg->set_ofs = set_ofs;
- cg->clr_ofs = clr_ofs;
- cg->sta_ofs = sta_ofs;
- cg->bit = bit;
-
+ cg->regmap_hwv = regmap_hwv;
+ cg->gate = gate;
cg->hw.init = &init;
ret = clk_hw_register(dev, &cg->hw);
@@ -209,6 +259,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
int i;
struct clk_hw *hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
if (!clk_data)
return -ENOMEM;
@@ -219,6 +270,12 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
@@ -228,13 +285,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
- regmap,
- gate->regs->set_ofs,
- gate->regs->clr_ofs,
- gate->regs->sta_ofs,
- gate->shift, gate->ops,
- gate->flags);
+ hw = mtk_clk_register_gate(dev, gate, regmap, regmap_hwv);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 1a46b4c56fc5..4f05b9855dae 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -19,6 +19,8 @@ extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv;
struct mtk_gate_regs {
u32 sta_ofs;
@@ -31,6 +33,7 @@ struct mtk_gate {
const char *name;
const char *parent_name;
const struct mtk_gate_regs *regs;
+ const struct mtk_gate_regs *hwv_regs;
int shift;
const struct clk_ops *ops;
unsigned long flags;
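With the new hwv_regs bank and the hardware-voter ops, enable votes by writing the gate bit to the voter's set register, disable writes its clear register, and both then poll the voter's status register until the hardware acknowledges, giving up after MTK_WAIT_HWV_DONE_US. A hedged sketch of what a voted gate entry could look like in a platform table (offsets and names invented for illustration):

static const struct mtk_gate_regs foo_cg_regs = {
	.set_ofs = 0x04,
	.clr_ofs = 0x08,
	.sta_ofs = 0x00,
};

static const struct mtk_gate_regs foo_hwv_regs = {
	.set_ofs = 0x10,
	.clr_ofs = 0x14,
	.sta_ofs = 0x18,
};

static const struct mtk_gate foo_gates[] = {
	{
		.name = "foo_gate",
		.parent_name = "foo_sel",
		.regs = &foo_cg_regs,
		.hwv_regs = &foo_hwv_regs,	/* polled by the HWV ops */
		.shift = 3,
		.ops = &mtk_clk_gate_hwv_ops_setclr,
	},
};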
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 425c69cfb105..e103121cf58e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
+ GATE_DUMMY(CLK_AUD_I2S, "audio_i2s_dummy"),
+ GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
/* AUDIO1 */
GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
/* AUDIO3 */
+ GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
+ GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 5da3eabffd3e..f11c7a4fa37b 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 875594bc9dcb..c158e54c4652 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index bc68fa718878..474d87d62e83 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 94db86f8d0a4..5299d92f3aba 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
diff --git a/drivers/clk/mediatek/clk-mt6735-apmixedsys.c b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
new file mode 100644
index 000000000000..e0949911e8f7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-apmixedsys.h>
+
+#define AP_PLL_CON_5 0x014
+#define ARMPLL_CON0 0x200
+#define ARMPLL_CON1 0x204
+#define ARMPLL_PWR_CON0 0x20c
+#define MAINPLL_CON0 0x210
+#define MAINPLL_CON1 0x214
+#define MAINPLL_PWR_CON0 0x21c
+#define UNIVPLL_CON0 0x220
+#define UNIVPLL_CON1 0x224
+#define UNIVPLL_PWR_CON0 0x22c
+#define MMPLL_CON0 0x230
+#define MMPLL_CON1 0x234
+#define MMPLL_PWR_CON0 0x23c
+#define MSDCPLL_CON0 0x240
+#define MSDCPLL_CON1 0x244
+#define MSDCPLL_PWR_CON0 0x24c
+#define VENCPLL_CON0 0x250
+#define VENCPLL_CON1 0x254
+#define VENCPLL_PWR_CON0 0x25c
+#define TVDPLL_CON0 0x260
+#define TVDPLL_CON1 0x264
+#define TVDPLL_PWR_CON0 0x26c
+#define APLL1_CON0 0x270
+#define APLL1_CON1 0x274
+#define APLL1_CON2 0x278
+#define APLL1_PWR_CON0 0x280
+#define APLL2_CON0 0x284
+#define APLL2_CON1 0x288
+#define APLL2_CON2 0x28c
+#define APLL2_PWR_CON0 0x294
+
+#define CON0_RST_BAR BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcwbits, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = "clk26m", \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_chg_reg = _pcw_reg, \
+ .pcwbits = _pcwbits, \
+ .flags = _flags, \
+ }
+
+static const struct mtk_pll_data apmixedsys_plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", ARMPLL_CON0, ARMPLL_PWR_CON0, 0x00000001, 0, ARMPLL_CON1, 24, 0, 0, 0, ARMPLL_CON1, 21, PLL_AO),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, MAINPLL_PWR_CON0, 0xf0000101, CON0_RST_BAR, MAINPLL_CON1, 24, 0, 0, 0, MAINPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, UNIVPLL_PWR_CON0, 0xfc000001, CON0_RST_BAR, UNIVPLL_CON1, 24, 0, 0, 0, UNIVPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", MMPLL_CON0, MMPLL_PWR_CON0, 0x00000001, 0, MMPLL_CON1, 24, 0, 0, 0, MMPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, MSDCPLL_PWR_CON0, 0x00000001, 0, MSDCPLL_CON1, 24, 0, 0, 0, MSDCPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", VENCPLL_CON0, VENCPLL_PWR_CON0, 0x00000001, CON0_RST_BAR, VENCPLL_CON1, 24, 0, 0, 0, VENCPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", TVDPLL_CON0, TVDPLL_PWR_CON0, 0x00000001, 0, TVDPLL_CON1, 24, 0, 0, 0, TVDPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", APLL1_CON0, APLL1_PWR_CON0, 0x00000001, 0, APLL1_CON0, 4, APLL1_CON2, AP_PLL_CON_5, 0, APLL1_CON1, 31, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", APLL2_CON0, APLL2_PWR_CON0, 0x00000001, 0, APLL2_CON0, 4, APLL2_CON2, AP_PLL_CON_5, 1, APLL2_CON1, 31, 0)
+};
+
+static int clk_mt6735_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(&pdev->dev, ARRAY_SIZE(apmixedsys_plls));
+ if (!clk_data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_data);
+
+ ret = mtk_clk_register_plls(pdev->dev.of_node, apmixedsys_plls,
+ ARRAY_SIZE(apmixedsys_plls), clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register PLLs: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ dev_err(&pdev->dev,
+ "Failed to register clock provider: %d\n", ret);
+
+ return ret;
+}
+
+static void clk_mt6735_apmixed_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ mtk_clk_unregister_plls(apmixedsys_plls, ARRAY_SIZE(apmixedsys_plls), clk_data);
+}
+
+static const struct of_device_id of_match_mt6735_apmixedsys[] = {
+ { .compatible = "mediatek,mt6735-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_apmixedsys);
+
+static struct platform_driver clk_mt6735_apmixedsys = {
+ .probe = clk_mt6735_apmixed_probe,
+ .remove = clk_mt6735_apmixed_remove,
+ .driver = {
+ .name = "clk-mt6735-apmixedsys",
+ .of_match_table = of_match_mt6735_apmixedsys,
+ },
+};
+module_platform_driver(clk_mt6735_apmixedsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 apmixedsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-imgsys.c b/drivers/clk/mediatek/clk-mt6735-imgsys.c
new file mode 100644
index 000000000000..c564f8f72432
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-imgsys.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-imgsys.h>
+
+#define IMG_CG_CON 0x00
+#define IMG_CG_SET 0x04
+#define IMG_CG_CLR 0x08
+
+static struct mtk_gate_regs imgsys_cg_regs = {
+ .set_ofs = IMG_CG_SET,
+ .clr_ofs = IMG_CG_CLR,
+ .sta_ofs = IMG_CG_CON,
+};
+
+static const struct mtk_gate imgsys_gates[] = {
+ GATE_MTK(CLK_IMG_SMI_LARB2, "smi_larb2", "mm_sel", &imgsys_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SMI, "cam_smi", "mm_sel", &imgsys_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_CAM, "cam_cam", "mm_sel", &imgsys_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_TG, "sen_tg", "mm_sel", &imgsys_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_CAM, "sen_cam", "mm_sel", &imgsys_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SV, "cam_sv", "mm_sel", &imgsys_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SUFOD, "sufod", "mm_sel", &imgsys_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_FD, "fd", "mm_sel", &imgsys_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+};
+
+static const struct mtk_clk_desc imgsys_clks = {
+ .clks = imgsys_gates,
+ .num_clks = ARRAY_SIZE(imgsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_imgsys[] = {
+ { .compatible = "mediatek,mt6735-imgsys", .data = &imgsys_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_imgsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-imgsys",
+ .of_match_table = of_match_mt6735_imgsys,
+ },
+};
+module_platform_driver(clk_mt6735_imgsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 imgsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-infracfg.c b/drivers/clk/mediatek/clk-mt6735-infracfg.c
new file mode 100644
index 000000000000..c1171f903cfa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-infracfg.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-infracfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-infracfg.h>
+
+#define INFRA_RST0 0x30
+#define INFRA_GLOBALCON_PDN0 0x40
+#define INFRA_PDN1 0x44
+#define INFRA_PDN_STA 0x48
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = INFRA_GLOBALCON_PDN0,
+ .clr_ofs = INFRA_PDN1,
+ .sta_ofs = INFRA_PDN_STA,
+};
+
+static const struct mtk_gate infracfg_gates[] = {
+ GATE_MTK(CLK_INFRA_DBG, "dbg", "axi_sel", &infra_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCE, "gce", "axi_sel", &infra_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_TRBG, "trbg", "axi_sel", &infra_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CPUM, "cpum", "axi_sel", &infra_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_DEVAPC, "devapc", "axi_sel", &infra_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_AUDIO, "audio", "aud_intbus_sel", &infra_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCPU, "gcpu", "axi_sel", &infra_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_L2C_SRAM, "l2csram", "axi_sel", &infra_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_M4U, "m4u", "axi_sel", &infra_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CLDMA, "cldma", "axi_sel", &infra_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CONNMCU_BUS, "connmcu_bus", "axi_sel", &infra_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_KP, "kp", "axi_sel", &infra_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK_FLAGS(CLK_INFRA_APXGPT, "apxgpt", "axi_sel", &infra_cg_regs, 18, &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL),
+ GATE_MTK(CLK_INFRA_SEJ, "sej", "axi_sel", &infra_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF0_AP, "ccif0ap", "axi_sel", &infra_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF1_AP, "ccif1ap", "axi_sel", &infra_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_SPI, "pmicspi", "pmicspi_sel", &infra_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_WRAP, "pmicwrap", "axi_sel", &infra_cg_regs, 23, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 infracfg_rst_bank_ofs[] = { INFRA_RST0 };
+
+static u16 infracfg_rst_idx_map[] = {
+ [MT6735_INFRA_RST0_EMI_REG] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_INFRA_RST0_DRAMC0_AO] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_INFRA_RST0_AP_CIRQ_EINT] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_INFRA_RST0_APXGPT] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_INFRA_RST0_SCPSYS] = 0 * RST_NR_PER_BANK + 5,
+ [MT6735_INFRA_RST0_KP] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_INFRA_RST0_PMIC_WRAP] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_INFRA_RST0_CLDMA_AO_TOP] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_INFRA_RST0_USBSIF_TOP] = 0 * RST_NR_PER_BANK + 9,
+ [MT6735_INFRA_RST0_EMI] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_INFRA_RST0_CCIF] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_INFRA_RST0_DRAMC0] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_INFRA_RST0_EMI_AO_REG] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_INFRA_RST0_CCIF_AO] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_INFRA_RST0_TRNG] = 0 * RST_NR_PER_BANK + 21,
+ [MT6735_INFRA_RST0_SYS_CIRQ] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_INFRA_RST0_GCE] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_INFRA_RST0_M4U] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_INFRA_RST0_CCIF1] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_INFRA_RST0_CLDMA_TOP_PD] = 0 * RST_NR_PER_BANK + 26
+};
+
+static const struct mtk_clk_rst_desc infracfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infracfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infracfg_rst_bank_ofs),
+ .rst_idx_map = infracfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infracfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc infracfg_clks = {
+ .clks = infracfg_gates,
+ .num_clks = ARRAY_SIZE(infracfg_gates),
+
+ .rst_desc = &infracfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_infracfg[] = {
+ { .compatible = "mediatek,mt6735-infracfg", .data = &infracfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_infracfg);
+
+static struct platform_driver clk_mt6735_infracfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-infracfg",
+ .of_match_table = of_match_mt6735_infracfg,
+ },
+};
+module_platform_driver(clk_mt6735_infracfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 infracfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-mfgcfg.c b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
new file mode 100644
index 000000000000..1f5aedddf209
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-mfgcfg.h>
+
+#define MFG_CG_CON 0x00
+#define MFG_CG_SET 0x04
+#define MFG_CG_CLR 0x08
+#define MFG_RESET 0x0c
+
+static struct mtk_gate_regs mfgcfg_cg_regs = {
+ .set_ofs = MFG_CG_SET,
+ .clr_ofs = MFG_CG_CLR,
+ .sta_ofs = MFG_CG_CON,
+};
+
+static const struct mtk_gate mfgcfg_gates[] = {
+ GATE_MTK(CLK_MFG_BG3D, "bg3d", "mfg_sel", &mfgcfg_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+};
+
+static u16 mfgcfg_rst_ofs[] = { MFG_RESET };
+
+static const struct mtk_clk_rst_desc mfgcfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = mfgcfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(mfgcfg_rst_ofs)
+};
+
+static const struct mtk_clk_desc mfgcfg_clks = {
+ .clks = mfgcfg_gates,
+ .num_clks = ARRAY_SIZE(mfgcfg_gates),
+
+ .rst_desc = &mfgcfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_mfgcfg[] = {
+ { .compatible = "mediatek,mt6735-mfgcfg", .data = &mfgcfg_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_mfgcfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-mfgcfg",
+ .of_match_table = of_match_mt6735_mfgcfg,
+ },
+};
+module_platform_driver(clk_mt6735_mfgcfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 mfgcfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-pericfg.c b/drivers/clk/mediatek/clk-mt6735-pericfg.c
new file mode 100644
index 000000000000..cbdf6d25c1b2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-pericfg.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-pericfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-pericfg.h>
+
+#define PERI_GLOBALCON_RST0 0x00
+#define PERI_GLOBALCON_RST1 0x04
+#define PERI_GLOBALCON_PDN0_SET 0x08
+#define PERI_GLOBALCON_PDN0_CLR 0x10
+#define PERI_GLOBALCON_PDN0_STA 0x18
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = PERI_GLOBALCON_PDN0_SET,
+ .clr_ofs = PERI_GLOBALCON_PDN0_CLR,
+ .sta_ofs = PERI_GLOBALCON_PDN0_STA,
+};
+
+static const struct mtk_gate pericfg_gates[] = {
+ GATE_MTK(CLK_PERI_DISP_PWM, "disp_pwm", "disppwm_sel", &peri_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_THERM, "therm", "axi_sel", &peri_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM1, "pwm1", "axi_sel", &peri_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM2, "pwm2", "axi_sel", &peri_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM3, "pwm3", "axi_sel", &peri_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM4, "pwm4", "axi_sel", &peri_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM5, "pwm5", "axi_sel", &peri_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM6, "pwm6", "axi_sel", &peri_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM7, "pwm7", "axi_sel", &peri_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM, "pwm", "axi_sel", &peri_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_USB0, "usb0", "usb20_sel", &peri_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRDA, "irda", "irda_sel", &peri_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_APDMA, "apdma", "axi_sel", &peri_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_0, "msdc30_0", "msdc30_0_sel", &peri_cg_regs, 13, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_1, "msdc30_1", "msdc30_1_sel", &peri_cg_regs, 14, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_2, "msdc30_2", "msdc30_2_sel", &peri_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_3, "msdc30_3", "msdc30_3_sel", &peri_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART0, "uart0", "uart_sel", &peri_cg_regs, 17, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART1, "uart1", "uart_sel", &peri_cg_regs, 18, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART2, "uart2", "uart_sel", &peri_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART3, "uart3", "uart_sel", &peri_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART4, "uart4", "uart_sel", &peri_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_BTIF, "btif", "axi_sel", &peri_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C0, "i2c0", "axi_sel", &peri_cg_regs, 23, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C1, "i2c1", "axi_sel", &peri_cg_regs, 24, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C2, "i2c2", "axi_sel", &peri_cg_regs, 25, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C3, "i2c3", "axi_sel", &peri_cg_regs, 26, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_AUXADC, "auxadc", "axi_sel", &peri_cg_regs, 27, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_SPI0, "spi0", "spi_sel", &peri_cg_regs, 28, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRTX, "irtx", "irtx_sel", &peri_cg_regs, 29, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 pericfg_rst_bank_ofs[] = { PERI_GLOBALCON_RST0, PERI_GLOBALCON_RST1 };
+
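+/* Map reset indices from the DT bindings to (bank * RST_NR_PER_BANK) + bit. */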
+static u16 pericfg_rst_idx_map[] = {
+ [MT6735_PERI_RST0_UART0] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_PERI_RST0_UART1] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_PERI_RST0_UART2] = 0 * RST_NR_PER_BANK + 2,
+ [MT6735_PERI_RST0_UART3] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_PERI_RST0_UART4] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_PERI_RST0_BTIF] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_PERI_RST0_DISP_PWM_PERI] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_PERI_RST0_PWM] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_PERI_RST0_AUXADC] = 0 * RST_NR_PER_BANK + 10,
+ [MT6735_PERI_RST0_DMA] = 0 * RST_NR_PER_BANK + 11,
+ [MT6735_PERI_RST0_IRDA] = 0 * RST_NR_PER_BANK + 12,
+ [MT6735_PERI_RST0_IRTX] = 0 * RST_NR_PER_BANK + 13,
+ [MT6735_PERI_RST0_THERM] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_PERI_RST0_MSDC2] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_PERI_RST0_MSDC3] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_PERI_RST0_MSDC0] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_PERI_RST0_MSDC1] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_PERI_RST0_I2C0] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_PERI_RST0_I2C1] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_PERI_RST0_I2C2] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_PERI_RST0_I2C3] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_PERI_RST0_USB] = 0 * RST_NR_PER_BANK + 28,
+
+ [MT6735_PERI_RST1_SPI0] = 1 * RST_NR_PER_BANK + 1,
+};
+
+static const struct mtk_clk_rst_desc pericfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_bank_ofs),
+ .rst_idx_map = pericfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pericfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc pericfg_clks = {
+ .clks = pericfg_gates,
+ .num_clks = ARRAY_SIZE(pericfg_gates),
+
+ .rst_desc = &pericfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_pericfg[] = {
+ { .compatible = "mediatek,mt6735-pericfg", .data = &pericfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_pericfg);
+
+static struct platform_driver clk_mt6735_pericfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-pericfg",
+ .of_match_table = of_match_mt6735_pericfg,
+ },
+};
+module_platform_driver(clk_mt6735_pericfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 pericfg clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-topckgen.c b/drivers/clk/mediatek/clk-mt6735-topckgen.c
new file mode 100644
index 000000000000..2589ebfe2271
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-topckgen.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-topckgen.h>
+
+#define CLK_CFG_0 0x40
+#define CLK_CFG_0_SET 0x44
+#define CLK_CFG_0_CLR 0x48
+#define CLK_CFG_1 0x50
+#define CLK_CFG_1_SET 0x54
+#define CLK_CFG_1_CLR 0x58
+#define CLK_CFG_2 0x60
+#define CLK_CFG_2_SET 0x64
+#define CLK_CFG_2_CLR 0x68
+#define CLK_CFG_3 0x70
+#define CLK_CFG_3_SET 0x74
+#define CLK_CFG_3_CLR 0x78
+#define CLK_CFG_4 0x80
+#define CLK_CFG_4_SET 0x84
+#define CLK_CFG_4_CLR 0x88
+#define CLK_CFG_5 0x90
+#define CLK_CFG_5_SET 0x94
+#define CLK_CFG_5_CLR 0x98
+#define CLK_CFG_6 0xa0
+#define CLK_CFG_6_SET 0xa4
+#define CLK_CFG_6_CLR 0xa8
+#define CLK_CFG_7 0xb0
+#define CLK_CFG_7_SET 0xb4
+#define CLK_CFG_7_CLR 0xb8
+
+static DEFINE_SPINLOCK(mt6735_topckgen_lock);
+
+/* Some clocks with unknown details are modeled as fixed clocks */
+static const struct mtk_fixed_clk topckgen_fixed_clks[] = {
+ /*
+	 * This clock is available as a parent option for multiple
+	 * muxes. At first it looks like an alternative name for
+	 * clk26m, but it appears alongside clk26m in several muxes,
+	 * suggesting it is a separate clock.
+ */
+ FIXED_CLK(CLK_TOP_AD_SYS_26M_CK, "ad_sys_26m_ck", "clk26m", 26 * MHZ),
+ /*
+ * This clock is the parent of DMPLL divisors. It might be MEMPLL
+ * or its parent, as DMPLL appears to be an alternative name for
+ * MEMPLL.
+ */
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", NULL, 0),
+ /*
+ * DMPLL clock (dmpll_ck), controlled by DDRPHY.
+ */
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll", "clkph_mck_o", 0),
+ /*
+ * MIPI DPI clock. Parent option for dpi0_sel. Unknown parent.
+ */
+ FIXED_CLK(CLK_TOP_DPI_CK, "dpi_ck", NULL, 0),
+ /*
+	 * This clock is a child of WHPLL, which is controlled by
+	 * the modem.
+ */
+ FIXED_CLK(CLK_TOP_WHPLL_AUDIO_CK, "whpll_audio_ck", NULL, 0)
+};
+
+static const struct mtk_fixed_factor topckgen_factors[] = {
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_VENCPLL_D3, "vencpll_d3", "vencpll", 1, 3),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_AD_SYS_26M_D2, "ad_sys_26m_d2", "clk26m", 1, 2)
+};
+
+static const char * const axi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll",
+ "dmpll_d2"
+};
+
+static const char * const mem_sel_parents[] = {
+ "clk26m",
+ "dmpll"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_sel_parents[] = {
+ "clk26m",
+ "vencpll",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll"
+};
+
+static const char * const pwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "syspll_d2",
+ "syspll2_d2",
+ "msdcpll_d2"
+};
+
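+/* Selector values 2-8 appear to be unused and are padded with clk26m. */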
+static const char * const mfg_sel_parents[] = {
+ "clk26m",
+ "mmpll",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2"
+};
+
+static const char * const camtg_sel_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "msdcpll_d4"
+};
+
+static const char * const uart_sel_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "msdcpll_d8",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_sel_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const msdc50_0_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_0_sel_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d3",
+ "univpll_d26",
+ "syspll2_d4",
+ "univpll_d2"
+};
+
+static const char * const msdc30_1_2_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "syspll2_d4"
+};
+
+static const char * const msdc30_3_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "msdcpll_d16",
+ "syspll2_d4"
+};
+
+static const char * const audio_sel_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_sel_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "dmpll_d4"
+};
+
+static const char * const pmicspi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const scp_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const atb_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "dmpll"
+};
+
+static const char * const dpi0_sel_parents[] = {
+ "clk26m",
+ "tvdpll",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "dpi_ck"
+};
+
+static const char * const scam_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "vencpll_d3"
+};
+
+static const char * const mfg13m_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_d2"
+};
+
+static const char * const aud_1_2_sel_parents[] = {
+ "clk26m",
+ "apll1"
+};
+
+static const char * const irda_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4"
+};
+
+static const char * const irtx_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_ck"
+};
+
+static const char * const disppwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll4_d2_d8",
+ "ad_sys_26m_ck"
+};
+
+static const struct mtk_mux topckgen_muxes[] = {
+ MUX_CLR_SET_UPD(CLK_TOP_AXI_SEL, "axi_sel", axi_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 0, 3, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 8, 1, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_DDRPHY_SEL, "ddrphycfg_sel", ddrphycfg_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 1, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 16, 4, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB20_SEL, "usb20_sel", usb20_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 0, 4, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 16, 3, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 24, 4, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 8, 2, 15, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 16, 3, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 24, 2, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM_SEL, "scam_sel", scam_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG13M_SEL, "mfg13m_sel", mfg13m_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD1_SEL, "aud_1_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD2_SEL, "aud_2_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 1, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRDA_SEL, "irda_sel", irda_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 1, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRTX_SEL, "irtx_sel", irtx_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISPPWM_SEL, "disppwm_sel", disppwm_sel_parents, CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 0, 2, 7, 0, 0),
+};
+
+static const struct mtk_clk_desc topckgen_desc = {
+ .fixed_clks = topckgen_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(topckgen_fixed_clks),
+ .factor_clks = topckgen_factors,
+ .num_factor_clks = ARRAY_SIZE(topckgen_factors),
+ .mux_clks = topckgen_muxes,
+ .num_mux_clks = ARRAY_SIZE(topckgen_muxes),
+ .clk_lock = &mt6735_topckgen_lock,
+};
+
+static const struct of_device_id of_match_mt6735_topckgen[] = {
+	{ .compatible = "mediatek,mt6735-topckgen", .data = &topckgen_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_topckgen);
+
+static struct platform_driver clk_mt6735_topckgen = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-topckgen",
+ .of_match_table = of_match_mt6735_topckgen,
+ },
+};
+module_platform_driver(clk_mt6735_topckgen);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 topckgen clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vdecsys.c b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
new file mode 100644
index 000000000000..8817085fc1db
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vdecsys.h>
+#include <dt-bindings/reset/mediatek,mt6735-vdecsys.h>
+
+#define VDEC_CKEN_SET 0x00
+#define VDEC_CKEN_CLR 0x04
+#define SMI_LARB1_CKEN_SET 0x08
+#define SMI_LARB1_CKEN_CLR 0x0c
+#define VDEC_RESETB_CON 0x10
+#define SMI_LARB1_RESETB_CON 0x14
+
+#define RST_NR_PER_BANK 32
+
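+/*
+ * No dedicated status registers: gate state is read back through the
+ * CKEN SET offsets, with a set bit meaning the clock is ungated.
+ */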
+static const struct mtk_gate_regs vdec_cg_regs = {
+ .set_ofs = VDEC_CKEN_SET,
+ .clr_ofs = VDEC_CKEN_CLR,
+ .sta_ofs = VDEC_CKEN_SET,
+};
+
+static const struct mtk_gate_regs smi_larb1_cg_regs = {
+ .set_ofs = SMI_LARB1_CKEN_SET,
+ .clr_ofs = SMI_LARB1_CKEN_CLR,
+ .sta_ofs = SMI_LARB1_CKEN_SET,
+};
+
+static const struct mtk_gate vdecsys_gates[] = {
+ GATE_MTK(CLK_VDEC_VDEC, "vdec", "vdec_sel", &vdec_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VDEC_SMI_LARB1, "smi_larb1", "vdec_sel", &smi_larb1_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static u16 vdecsys_rst_bank_ofs[] = { VDEC_RESETB_CON, SMI_LARB1_RESETB_CON };
+
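+/* Map reset indices from the DT bindings to (bank * RST_NR_PER_BANK) + bit. */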
+static u16 vdecsys_rst_idx_map[] = {
+ [MT6735_VDEC_RST0_VDEC] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_VDEC_RST1_SMI_LARB1] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static const struct mtk_clk_rst_desc vdecsys_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = vdecsys_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(vdecsys_rst_bank_ofs),
+ .rst_idx_map = vdecsys_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(vdecsys_rst_idx_map)
+};
+
+static const struct mtk_clk_desc vdecsys_clks = {
+ .clks = vdecsys_gates,
+ .num_clks = ARRAY_SIZE(vdecsys_gates),
+ .rst_desc = &vdecsys_resets
+};
+
+static const struct of_device_id of_match_mt6735_vdecsys[] = {
+ { .compatible = "mediatek,mt6735-vdecsys", .data = &vdecsys_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_vdecsys);
+
+static struct platform_driver clk_mt6735_vdecsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vdecsys",
+ .of_match_table = of_match_mt6735_vdecsys,
+ },
+};
+module_platform_driver(clk_mt6735_vdecsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 vdecsys clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vencsys.c b/drivers/clk/mediatek/clk-mt6735-vencsys.c
new file mode 100644
index 000000000000..8dec7f98492a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vencsys.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vencsys.h>
+
+#define VENC_CG_CON 0x00
+#define VENC_CG_SET 0x04
+#define VENC_CG_CLR 0x08
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = VENC_CG_SET,
+ .clr_ofs = VENC_CG_CLR,
+ .sta_ofs = VENC_CG_CON,
+};
+
+static const struct mtk_gate vencsys_gates[] = {
+ GATE_MTK(CLK_VENC_SMI_LARB3, "smi_larb3", "mm_sel", &venc_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_VENC, "venc", "mm_sel", &venc_cg_regs, 4, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGENC, "jpgenc", "mm_sel", &venc_cg_regs, 8, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGDEC, "jpgdec", "mm_sel", &venc_cg_regs, 12, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static const struct mtk_clk_desc vencsys_clks = {
+ .clks = vencsys_gates,
+ .num_clks = ARRAY_SIZE(vencsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_vencsys[] = {
+ { .compatible = "mediatek,mt6735-vencsys", .data = &vencsys_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_vencsys);
+
+static struct platform_driver clk_mt6735_vencsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vencsys",
+ .of_match_table = of_match_mt6735_vencsys,
+ },
+};
+module_platform_driver(clk_mt6735_vencsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 vencsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 931a0598e598..a4ea5e20efa2 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -75,6 +75,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
+ GATE_AUDIO1(CLK_AUDIO_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
index 7500bd25387f..9b029fdd584e 100644
--- a/drivers/clk/mediatek/clk-mt8188-cam.c
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
#define GATE_CAM(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define CAM_SYS_SMI_LARB_RST_OFF (0xA0)
+
static const struct mtk_gate cam_main_clks[] = {
GATE_CAM(CLK_CAM_MAIN_LARB13, "cam_main_larb13", "top_cam", 0),
GATE_CAM(CLK_CAM_MAIN_LARB14, "cam_main_larb14", "top_cam", 1),
@@ -72,6 +74,17 @@ static const struct mtk_gate cam_yuvb_clks[] = {
GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
};
+/* Reset for SMI larb 16a/16b/17a/17b */
+static u16 cam_sys_rst_ofs[] = {
+ CAM_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc cam_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = cam_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(cam_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc cam_main_desc = {
.clks = cam_main_clks,
.num_clks = ARRAY_SIZE(cam_main_clks),
@@ -80,21 +93,25 @@ static const struct mtk_clk_desc cam_main_desc = {
static const struct mtk_clk_desc cam_rawa_desc = {
.clks = cam_rawa_clks,
.num_clks = ARRAY_SIZE(cam_rawa_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_rawb_desc = {
.clks = cam_rawb_clks,
.num_clks = ARRAY_SIZE(cam_rawb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuva_desc = {
.clks = cam_yuva_clks,
.num_clks = ARRAY_SIZE(cam_yuva_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuvb_desc = {
.clks = cam_yuvb_clks,
.num_clks = ARRAY_SIZE(cam_yuvb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_cam[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
index cb2fbd4136b9..d44bfbd8308a 100644
--- a/drivers/clk/mediatek/clk-mt8188-img.c
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs imgsys_cg_regs = {
#define GATE_IMGSYS(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &imgsys_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IMG_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate imgsys_main_clks[] = {
GATE_IMGSYS(CLK_IMGSYS_MAIN_LARB9, "imgsys_main_larb9", "top_img", 0),
GATE_IMGSYS(CLK_IMGSYS_MAIN_TRAW0, "imgsys_main_traw0", "top_img", 1),
@@ -58,6 +60,17 @@ static const struct mtk_gate imgsys1_dip_nr_clks[] = {
GATE_IMGSYS(CLK_IMGSYS1_DIP_NR_DIP_NR, "imgsys1_dip_nr_dip_nr", "top_img", 1),
};
+/* Reset for SMI larb 10/11a/11b/11c/15 */
+static u16 img_sys_rst_ofs[] = {
+ IMG_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc img_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = img_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(img_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc imgsys_main_desc = {
.clks = imgsys_main_clks,
.num_clks = ARRAY_SIZE(imgsys_main_clks),
@@ -66,26 +79,31 @@ static const struct mtk_clk_desc imgsys_main_desc = {
static const struct mtk_clk_desc imgsys_wpe1_desc = {
.clks = imgsys_wpe1_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe1_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe2_desc = {
.clks = imgsys_wpe2_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe2_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe3_desc = {
.clks = imgsys_wpe3_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe3_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_top_desc = {
.clks = imgsys1_dip_top_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_top_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_nr_desc = {
.clks = imgsys1_dip_nr_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_nr_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_imgsys_main[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
index 8f1933b71e28..70a011c1f9ce 100644
--- a/drivers/clk/mediatek/clk-mt8188-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs ipe_cg_regs = {
#define GATE_IPE(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IPE_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
@@ -28,9 +30,21 @@ static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
};
+/* Reset for SMI larb 12 */
+static u16 ipe_sys_rst_ofs[] = {
+ IPE_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc ipe_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = ipe_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ipe_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc ipe_desc = {
.clks = ipe_clks,
.num_clks = ARRAY_SIZE(ipe_clks),
+ .rst_desc = &ipe_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_ipe[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index c4baf4076ed6..6b07abe9a8f5 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -342,11 +342,14 @@ static const char * const dsp7_parents[] = {
"univpll_d3"
};
+/*
+ * MFG can also be parented to "univpll_d6" and "univpll_d7":
+ * these have been removed from the parents list to let us
+ * achieve GPU DVFS without any special clock handlers.
+ */
static const char * const mfg_core_tmp_parents[] = {
"clk26m",
- "mainpll_d5_d2",
- "univpll_d6",
- "univpll_d7"
+ "mainpll_d5_d2"
};
static const char * const camtg_parents[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
index 4fa355f8f0c2..f715d45e545e 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -43,6 +43,12 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
.sta_ofs = 0x140,
};
+static const struct mtk_gate_regs vdo1_5_cg_regs = {
+ .set_ofs = 0x400,
+ .clr_ofs = 0x400,
+ .sta_ofs = 0x400,
+};
+
#define GATE_VDO1_0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
@@ -62,6 +68,9 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
#define GATE_VDO1_4(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
static const struct mtk_gate vdo1_clks[] = {
/* VDO1_0 */
GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
@@ -129,6 +138,8 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf_ck", "top_vpp", 17),
/* VDO1_4 */
GATE_VDO1_4(CLK_VDO1_26M_SLOW, "vdo1_26m_slow_ck", "clk26m", 8),
+ /* VDO1_5 */
+ GATE_VDO1_5(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
static const struct mtk_clk_desc vdo1_desc = {
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index bb648a88e43a..ad47fdb23460 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
/* INFRA_AO1 */
- GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
diff --git a/drivers/clk/mediatek/clk-mt8196-apmixedsys.c b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
new file mode 100644
index 000000000000..617f5449b88b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+/* APMIXEDSYS PLL control register offsets */
+#define MAINPLL_CON0 0x250
+#define MAINPLL_CON1 0x254
+#define UNIVPLL_CON0 0x264
+#define UNIVPLL_CON1 0x268
+#define MSDCPLL_CON0 0x278
+#define MSDCPLL_CON1 0x27c
+#define ADSPPLL_CON0 0x28c
+#define ADSPPLL_CON1 0x290
+#define EMIPLL_CON0 0x2a0
+#define EMIPLL_CON1 0x2a4
+#define EMIPLL2_CON0 0x2b4
+#define EMIPLL2_CON1 0x2b8
+#define NET1PLL_CON0 0x2c8
+#define NET1PLL_CON1 0x2cc
+#define SGMIIPLL_CON0 0x2dc
+#define SGMIIPLL_CON1 0x2e0
+
+/* APMIXEDSYS_GP2 PLL control register offsets */
+#define MAINPLL2_CON0 0x250
+#define MAINPLL2_CON1 0x254
+#define UNIVPLL2_CON0 0x264
+#define UNIVPLL2_CON1 0x268
+#define MMPLL2_CON0 0x278
+#define MMPLL2_CON1 0x27c
+#define IMGPLL_CON0 0x28c
+#define IMGPLL_CON1 0x290
+#define TVDPLL1_CON0 0x2a0
+#define TVDPLL1_CON1 0x2a4
+#define TVDPLL2_CON0 0x2b4
+#define TVDPLL2_CON1 0x2b8
+#define TVDPLL3_CON0 0x2c8
+#define TVDPLL3_CON1 0x2cc
+
+#define PLLEN_ALL 0x080
+#define PLLEN_ALL_SET 0x084
+#define PLLEN_ALL_CLR 0x088
+
+#define FENC_STATUS_CON0 0x03c
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = PLLEN_ALL, \
+ .en_set_reg = PLLEN_ALL_SET, \
+ .en_clr_reg = PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+struct mtk_pll_desc {
+ const struct mtk_pll_data *clks;
+ size_t num_clks;
+};
+
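+/*
+ * All PLLs below are enabled through the shared PLLEN_ALL set/clear
+ * registers and report their state via per-PLL bits in FENC_STATUS_CON0.
+ */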
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL_FENC(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, FENC_STATUS_CON0,
+ 7, PLL_AO, MAINPLL_CON1, 24, MAINPLL_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, FENC_STATUS_CON0,
+ 6, 0, UNIVPLL_CON1, 24, UNIVPLL_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, FENC_STATUS_CON0,
+ 5, 0, MSDCPLL_CON1, 24, MSDCPLL_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED_ADSPPLL, "adsppll", ADSPPLL_CON0, FENC_STATUS_CON0,
+ 4, 0, ADSPPLL_CON1, 24, ADSPPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED_EMIPLL, "emipll", EMIPLL_CON0, FENC_STATUS_CON0, 3,
+ PLL_AO, EMIPLL_CON1, 24, EMIPLL_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED_EMIPLL2, "emipll2", EMIPLL2_CON0, FENC_STATUS_CON0,
+ 2, PLL_AO, EMIPLL2_CON1, 24, EMIPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED_NET1PLL, "net1pll", NET1PLL_CON0, FENC_STATUS_CON0,
+ 1, 0, NET1PLL_CON1, 24, NET1PLL_CON1, 0, 22, 6),
+ PLL_FENC(CLK_APMIXED_SGMIIPLL, "sgmiipll", SGMIIPLL_CON0, FENC_STATUS_CON0,
+ 0, 0, SGMIIPLL_CON1, 24, SGMIIPLL_CON1, 0, 22, 7),
+};
+
+static const struct mtk_pll_desc apmixed_desc = {
+ .clks = apmixed_plls,
+ .num_clks = ARRAY_SIZE(apmixed_plls),
+};
+
+static const struct mtk_pll_data apmixed2_plls[] = {
+ PLL_FENC(CLK_APMIXED2_MAINPLL2, "mainpll2", MAINPLL2_CON0, FENC_STATUS_CON0,
+ 6, 0, MAINPLL2_CON1, 24, MAINPLL2_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED2_UNIVPLL2, "univpll2", UNIVPLL2_CON0, FENC_STATUS_CON0,
+ 5, 0, UNIVPLL2_CON1, 24, UNIVPLL2_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED2_MMPLL2, "mmpll2", MMPLL2_CON0, FENC_STATUS_CON0,
+ 4, 0, MMPLL2_CON1, 24, MMPLL2_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED2_IMGPLL, "imgpll", IMGPLL_CON0, FENC_STATUS_CON0,
+ 3, 0, IMGPLL_CON1, 24, IMGPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED2_TVDPLL1, "tvdpll1", TVDPLL1_CON0, FENC_STATUS_CON0,
+ 2, 0, TVDPLL1_CON1, 24, TVDPLL1_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED2_TVDPLL2, "tvdpll2", TVDPLL2_CON0, FENC_STATUS_CON0,
+ 1, 0, TVDPLL2_CON1, 24, TVDPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED2_TVDPLL3, "tvdpll3", TVDPLL3_CON0, FENC_STATUS_CON0,
+ 0, 0, TVDPLL3_CON1, 24, TVDPLL3_CON1, 0, 22, 6),
+};
+
+static const struct mtk_pll_desc apmixed2_desc = {
+ .clks = apmixed2_plls,
+ .num_clks = ARRAY_SIZE(apmixed2_plls),
+};
+
+static int clk_mt8196_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct mtk_pll_desc *mcd;
+ int r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static void clk_mt8196_apmixed_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_desc *mcd = device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_apmixed[] = {
+ { .compatible = "mediatek,mt8196-apmixedsys", .data = &apmixed_desc },
+ { .compatible = "mediatek,mt8196-apmixedsys-gp2",
+ .data = &apmixed2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_apmixed);
+
+static struct platform_driver clk_mt8196_apmixed_drv = {
+ .probe = clk_mt8196_apmixed_probe,
+ .remove = clk_mt8196_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8196-apmixed",
+ .of_match_table = of_match_clk_mt8196_apmixed,
+ },
+};
+module_platform_driver(clk_mt8196_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp0.c b/drivers/clk/mediatek/clk-mt8196-disp0.c
new file mode 100644
index 000000000000..9474aad26e92
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp0.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm0_hwv_regs = {
+ .set_ofs = 0x0020,
+ .clr_ofs = 0x0024,
+ .sta_ofs = 0x2c10,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm1_hwv_regs = {
+ .set_ofs = 0x0028,
+ .clr_ofs = 0x002c,
+ .sta_ofs = 0x2c14,
+};
+
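+/*
+ * Gates declared with GATE_HWV_* are additionally arbitrated through the
+ * hardware voter (HWV) registers, on top of the regular CG set/clear bits.
+ */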
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .hwv_regs = &mm0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .hwv_regs = &mm1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_HWV_MM0(CLK_MM_CONFIG, "mm_config", "disp", 0),
+ GATE_HWV_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp", 1),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp", 2),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL1, "mm_disp_aal1", "disp", 3),
+ GATE_MM0(CLK_MM_DISP_C3D0, "mm_disp_c3d0", "disp", 4),
+ GATE_MM0(CLK_MM_DISP_C3D1, "mm_disp_c3d1", "disp", 5),
+ GATE_MM0(CLK_MM_DISP_C3D2, "mm_disp_c3d2", "disp", 6),
+ GATE_MM0(CLK_MM_DISP_C3D3, "mm_disp_c3d3", "disp", 7),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR1, "mm_disp_ccorr1", "disp", 9),
+ GATE_MM0(CLK_MM_DISP_CCORR2, "mm_disp_ccorr2", "disp", 10),
+ GATE_MM0(CLK_MM_DISP_CCORR3, "mm_disp_ccorr3", "disp", 11),
+ GATE_MM0(CLK_MM_DISP_CHIST0, "mm_disp_chist0", "disp", 12),
+ GATE_MM0(CLK_MM_DISP_CHIST1, "mm_disp_chist1", "disp", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp", 14),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "disp", 15),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp", 16),
+ GATE_MM0(CLK_MM_DISP_DITHER1, "mm_disp_dither1", "disp", 17),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC0, "mm_disp_dli_async0", "disp", 18),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC1, "mm_disp_dli_async1", "disp", 19),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC2, "mm_disp_dli_async2", "disp", 20),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC3, "mm_disp_dli_async3", "disp", 21),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC4, "mm_disp_dli_async4", "disp", 22),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC5, "mm_disp_dli_async5", "disp", 23),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC6, "mm_disp_dli_async6", "disp", 24),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC7, "mm_disp_dli_async7", "disp", 25),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC8, "mm_disp_dli_async8", "disp", 26),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC9, "mm_disp_dli_async9", "disp", 27),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC10, "mm_disp_dli_async10", "disp", 28),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC11, "mm_disp_dli_async11", "disp", 29),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC12, "mm_disp_dli_async12", "disp", 30),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC13, "mm_disp_dli_async13", "disp", 31),
+ /* MM1 */
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC14, "mm_disp_dli_async14", "disp", 0),
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC15, "mm_disp_dli_async15", "disp", 1),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC0, "mm_disp_dlo_async0", "disp", 2),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC1, "mm_disp_dlo_async1", "disp", 3),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC2, "mm_disp_dlo_async2", "disp", 4),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC3, "mm_disp_dlo_async3", "disp", 5),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC4, "mm_disp_dlo_async4", "disp", 6),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC5, "mm_disp_dlo_async5", "disp", 7),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC6, "mm_disp_dlo_async6", "disp", 8),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC7, "mm_disp_dlo_async7", "disp", 9),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC8, "mm_disp_dlo_async8", "disp", 10),
+ GATE_MM1(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp", 11),
+ GATE_MM1(CLK_MM_DISP_GAMMA1, "mm_disp_gamma1", "disp", 12),
+ GATE_MM1(CLK_MM_MDP_AAL0, "mm_mdp_aal0", "disp", 13),
+ GATE_MM1(CLK_MM_MDP_AAL1, "mm_mdp_aal1", "disp", 14),
+ GATE_HWV_MM1(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "disp", 15),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp", 16),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK1, "mm_disp_postmask1", "disp", 17),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "disp", 18),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "disp", 19),
+ GATE_HWV_MM1(CLK_MM_DISP_SPR0, "mm_disp_spr0", "disp", 20),
+ GATE_MM1(CLK_MM_DISP_TDSHP0, "mm_disp_tdshp0", "disp", 21),
+ GATE_MM1(CLK_MM_DISP_TDSHP1, "mm_disp_tdshp1", "disp", 22),
+ GATE_HWV_MM1(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp", 23),
+ GATE_HWV_MM1(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp", 24),
+ GATE_HWV_MM1(CLK_MM_SMI_SUB_COMM0, "mm_ssc", "disp", 25),
+ GATE_HWV_MM1(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp", 26),
+};
+
+static const struct mtk_clk_desc mm_mcd = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp0_id_table[] = {
+ { .name = "clk-mt8196-disp0", .driver_data = (kernel_ulong_t)&mm_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp0_id_table);
+
+static struct platform_driver clk_mt8196_disp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp0",
+ },
+ .id_table = clk_mt8196_disp0_id_table,
+};
+module_platform_driver(clk_mt8196_disp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp1.c b/drivers/clk/mediatek/clk-mt8196-disp1.c
new file mode 100644
index 000000000000..3bbec79a7010
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp1.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm10_hwv_regs = {
+ .set_ofs = 0x0010,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x2c08,
+};
+
+static const struct mtk_gate_regs mm11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm11_hwv_regs = {
+ .set_ofs = 0x0018,
+ .clr_ofs = 0x001c,
+ .sta_ofs = 0x2c0c,
+};
+
+#define GATE_MM10(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .hwv_regs = &mm10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM11(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .hwv_regs = &mm11_hwv_regs, \
+ .shift = _shift, \
+	.ops = &mtk_clk_gate_hwv_ops_setclr,		\
+	.flags = CLK_OPS_PARENT_ENABLE,			\
+ }
+
+static const struct mtk_gate mm1_clks[] = {
+ /* MM10 */
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_CONFIG, "mm1_dispsys1_config", "disp", 0),
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_S_CONFIG, "mm1_dispsys1_s_config", "disp", 1),
+ GATE_HWV_MM10(CLK_MM1_DISP_MUTEX0, "mm1_disp_mutex0", "disp", 2),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC20, "mm1_disp_dli_async20", "disp", 3),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC21, "mm1_disp_dli_async21", "disp", 4),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC22, "mm1_disp_dli_async22", "disp", 5),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC23, "mm1_disp_dli_async23", "disp", 6),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC24, "mm1_disp_dli_async24", "disp", 7),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC25, "mm1_disp_dli_async25", "disp", 8),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC26, "mm1_disp_dli_async26", "disp", 9),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC27, "mm1_disp_dli_async27", "disp", 10),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC28, "mm1_disp_dli_async28", "disp", 11),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY0, "mm1_disp_relay0", "disp", 12),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY1, "mm1_disp_relay1", "disp", 13),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY2, "mm1_disp_relay2", "disp", 14),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY3, "mm1_disp_relay3", "disp", 15),
+	GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF0, "mm1_disp_dp_intf0", "disp", 16),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF1, "mm1_disp_dp_intf1", "disp", 17),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP0, "mm1_disp_dsc_wrap0", "disp", 18),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP1, "mm1_disp_dsc_wrap1", "disp", 19),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP2, "mm1_disp_dsc_wrap2", "disp", 20),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP3, "mm1_disp_dsc_wrap3", "disp", 21),
+	GATE_HWV_MM10(CLK_MM1_DISP_DSI0, "mm1_disp_dsi0", "disp", 22),
+	GATE_HWV_MM10(CLK_MM1_DISP_DSI1, "mm1_disp_dsi1", "disp", 23),
+	GATE_HWV_MM10(CLK_MM1_DISP_DSI2, "mm1_disp_dsi2", "disp", 24),
+ GATE_HWV_MM10(CLK_MM1_DISP_DVO0, "mm1_disp_dvo0", "disp", 25),
+ GATE_HWV_MM10(CLK_MM1_DISP_GDMA0, "mm1_disp_gdma0", "disp", 26),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE0, "mm1_disp_merge0", "disp", 27),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE1, "mm1_disp_merge1", "disp", 28),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE2, "mm1_disp_merge2", "disp", 29),
+ GATE_HWV_MM10(CLK_MM1_DISP_ODDMR0, "mm1_disp_oddmr0", "disp", 30),
+ GATE_HWV_MM10(CLK_MM1_DISP_POSTALIGN0, "mm1_disp_postalign0", "disp", 31),
+ /* MM11 */
+ GATE_HWV_MM11(CLK_MM1_DISP_DITHER2, "mm1_disp_dither2", "disp", 0),
+ GATE_HWV_MM11(CLK_MM1_DISP_R2Y0, "mm1_disp_r2y0", "disp", 1),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER0, "mm1_disp_splitter0", "disp", 2),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER1, "mm1_disp_splitter1", "disp", 3),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER2, "mm1_disp_splitter2", "disp", 4),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER3, "mm1_disp_splitter3", "disp", 5),
+ GATE_HWV_MM11(CLK_MM1_DISP_VDCM0, "mm1_disp_vdcm0", "disp", 6),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA1, "mm1_disp_wdma1", "disp", 7),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA2, "mm1_disp_wdma2", "disp", 8),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA3, "mm1_disp_wdma3", "disp", 9),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA4, "mm1_disp_wdma4", "disp", 10),
+ GATE_HWV_MM11(CLK_MM1_MDP_RDMA1, "mm1_mdp_rdma1", "disp", 11),
+ GATE_HWV_MM11(CLK_MM1_SMI_LARB0, "mm1_smi_larb0", "disp", 12),
+ GATE_HWV_MM11(CLK_MM1_MOD1, "mm1_mod1", "clk26m", 13),
+ GATE_HWV_MM11(CLK_MM1_MOD2, "mm1_mod2", "clk26m", 14),
+ GATE_HWV_MM11(CLK_MM1_MOD3, "mm1_mod3", "clk26m", 15),
+ GATE_HWV_MM11(CLK_MM1_MOD4, "mm1_mod4", "dp0", 16),
+ GATE_HWV_MM11(CLK_MM1_MOD5, "mm1_mod5", "dp1", 17),
+ GATE_HWV_MM11(CLK_MM1_MOD6, "mm1_mod6", "dp1", 18),
+ GATE_HWV_MM11(CLK_MM1_CG0, "mm1_cg0", "disp", 20),
+ GATE_HWV_MM11(CLK_MM1_CG1, "mm1_cg1", "disp", 21),
+ GATE_HWV_MM11(CLK_MM1_CG2, "mm1_cg2", "disp", 22),
+ GATE_HWV_MM11(CLK_MM1_CG3, "mm1_cg3", "disp", 23),
+ GATE_HWV_MM11(CLK_MM1_CG4, "mm1_cg4", "disp", 24),
+ GATE_HWV_MM11(CLK_MM1_CG5, "mm1_cg5", "disp", 25),
+ GATE_HWV_MM11(CLK_MM1_CG6, "mm1_cg6", "disp", 26),
+ GATE_HWV_MM11(CLK_MM1_CG7, "mm1_cg7", "disp", 27),
+ GATE_HWV_MM11(CLK_MM1_F26M, "mm1_f26m_ck", "clk26m", 28),
+};
+
+static const struct mtk_clk_desc mm1_mcd = {
+ .clks = mm1_clks,
+ .num_clks = ARRAY_SIZE(mm1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp1_id_table[] = {
+ { .name = "clk-mt8196-disp1", .driver_data = (kernel_ulong_t)&mm1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp1_id_table);
+
+static struct platform_driver clk_mt8196_disp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp1",
+ },
+ .id_table = clk_mt8196_disp1_id_table,
+};
+module_platform_driver(clk_mt8196_disp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
new file mode 100644
index 000000000000..a63241671650
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate impc_clks[] = {
+ GATE_IMP(CLK_IMPC_I2C11, "impc_i2c11", "i2c_p", 0),
+ GATE_IMP(CLK_IMPC_I2C12, "impc_i2c12", "i2c_p", 1),
+ GATE_IMP(CLK_IMPC_I2C13, "impc_i2c13", "i2c_p", 2),
+ GATE_IMP(CLK_IMPC_I2C14, "impc_i2c14", "i2c_p", 3),
+};
+
+static const struct mtk_clk_desc impc_mcd = {
+ .clks = impc_clks,
+ .num_clks = ARRAY_SIZE(impc_clks),
+};
+
+static const struct mtk_gate impe_clks[] = {
+ GATE_IMP(CLK_IMPE_I2C5, "impe_i2c5", "i2c_east", 0),
+};
+
+static const struct mtk_clk_desc impe_mcd = {
+ .clks = impe_clks,
+ .num_clks = ARRAY_SIZE(impe_clks),
+};
+
+static const struct mtk_gate_regs impn_hwv_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x2c00,
+};
+
+#define GATE_HWV_IMPN(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .hwv_regs = &impn_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
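+/* Only I2C7 is additionally arbitrated through the hardware voter. */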
+static const struct mtk_gate impn_clks[] = {
+ GATE_IMP(CLK_IMPN_I2C1, "impn_i2c1", "i2c_north", 0),
+ GATE_IMP(CLK_IMPN_I2C2, "impn_i2c2", "i2c_north", 1),
+ GATE_IMP(CLK_IMPN_I2C4, "impn_i2c4", "i2c_north", 2),
+ GATE_HWV_IMPN(CLK_IMPN_I2C7, "impn_i2c7", "i2c_north", 3),
+ GATE_IMP(CLK_IMPN_I2C8, "impn_i2c8", "i2c_north", 4),
+ GATE_IMP(CLK_IMPN_I2C9, "impn_i2c9", "i2c_north", 5),
+};
+
+static const struct mtk_clk_desc impn_mcd = {
+ .clks = impn_clks,
+ .num_clks = ARRAY_SIZE(impn_clks),
+};
+
+static const struct mtk_gate impw_clks[] = {
+ GATE_IMP(CLK_IMPW_I2C0, "impw_i2c0", "i2c_west", 0),
+ GATE_IMP(CLK_IMPW_I2C3, "impw_i2c3", "i2c_west", 1),
+ GATE_IMP(CLK_IMPW_I2C6, "impw_i2c6", "i2c_west", 2),
+ GATE_IMP(CLK_IMPW_I2C10, "impw_i2c10", "i2c_west", 3),
+};
+
+static const struct mtk_clk_desc impw_mcd = {
+ .clks = impw_clks,
+ .num_clks = ARRAY_SIZE(impw_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-c", .data = &impc_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-e", .data = &impe_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-n", .data = &impn_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-w", .data = &impw_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_imp_iic_wrap);
+
+static struct platform_driver clk_mt8196_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8196_imp_iic_wrap,
+ },
+};
+module_platform_driver(clk_mt8196_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 I2C Wrapper clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mcu.c b/drivers/clk/mediatek/clk-mt8196-mcu.c
new file mode 100644
index 000000000000..5cbcc411ae73
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mcu.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define ARMPLL_LL_CON0 0x008
+#define ARMPLL_LL_CON1 0x00c
+#define ARMPLL_LL_CON2 0x010
+#define ARMPLL_LL_CON3 0x014
+#define ARMPLL_BL_CON0 0x008
+#define ARMPLL_BL_CON1 0x00c
+#define ARMPLL_BL_CON2 0x010
+#define ARMPLL_BL_CON3 0x014
+#define ARMPLL_B_CON0 0x008
+#define ARMPLL_B_CON1 0x00c
+#define ARMPLL_B_CON2 0x010
+#define ARMPLL_B_CON3 0x014
+#define CCIPLL_CON0 0x008
+#define CCIPLL_CON1 0x00c
+#define CCIPLL_CON2 0x010
+#define CCIPLL_CON3 0x014
+#define PTPPLL_CON0 0x008
+#define PTPPLL_CON1 0x00c
+#define PTPPLL_CON2 0x010
+#define PTPPLL_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data cpu_bl_plls[] = {
+ PLL(CLK_CPBL_ARMPLL_BL, "armpll-bl", ARMPLL_BL_CON0, ARMPLL_BL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_BL_CON1, 24, 0, 0, 0, ARMPLL_BL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_b_plls[] = {
+ PLL(CLK_CPB_ARMPLL_B, "armpll-b", ARMPLL_B_CON0, ARMPLL_B_CON0, 0, 0,
+ PLL_AO, BIT(0), ARMPLL_B_CON1, 24, 0, 0, 0, ARMPLL_B_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_ll_plls[] = {
+ PLL(CLK_CPLL_ARMPLL_LL, "armpll-ll", ARMPLL_LL_CON0, ARMPLL_LL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_LL_CON1, 24, 0, 0, 0, ARMPLL_LL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cci_plls[] = {
+ PLL(CLK_CCIPLL, "ccipll", CCIPLL_CON0, CCIPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), CCIPLL_CON1, 24, 0, 0, 0, CCIPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data ptp_plls[] = {
+ PLL(CLK_PTPPLL, "ptppll", PTPPLL_CON0, PTPPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), PTPPLL_CON1, 24, 0, 0, 0, PTPPLL_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mcu[] = {
+ { .compatible = "mediatek,mt8196-armpll-bl-pll-ctrl",
+ .data = &cpu_bl_plls },
+ { .compatible = "mediatek,mt8196-armpll-b-pll-ctrl",
+ .data = &cpu_b_plls },
+ { .compatible = "mediatek,mt8196-armpll-ll-pll-ctrl",
+ .data = &cpu_ll_plls },
+ { .compatible = "mediatek,mt8196-ccipll-pll-ctrl", .data = &cci_plls },
+ { .compatible = "mediatek,mt8196-ptppll-pll-ctrl", .data = &ptp_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mcu);
+
+static int clk_mt8196_mcu_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mcu_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mcu_drv = {
+ .probe = clk_mt8196_mcu_probe,
+ .remove = clk_mt8196_mcu_remove,
+ .driver = {
+ .name = "clk-mt8196-mcu",
+ .of_match_table = of_match_clk_mt8196_mcu,
+ },
+};
+module_platform_driver(clk_mt8196_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 mcusys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mdpsys.c b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
new file mode 100644
index 000000000000..7667d88f0eb0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
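+/*
+ * Each gate bank is controlled through dedicated set/clear registers
+ * and read back through a separate status register.
+ */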
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp2_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mdp1_clks[] = {
+ /* MDP1-0 */
+ GATE_MDP0(CLK_MDP1_MDP_MUTEX0, "mdp1_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP1_SMI0, "mdp1_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP1_APB_BUS, "mdp1_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA0, "mdp1_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA1, "mdp1_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA2, "mdp1_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP1_MDP_BIRSZ0, "mdp1_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP1_MDP_HDR0, "mdp1_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP1_MDP_AAL0, "mdp1_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ0, "mdp1_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ2, "mdp1_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP1_MDP_TDSHP0, "mdp1_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP1_MDP_COLOR0, "mdp1_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP1_MDP_WROT0, "mdp1_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP1_MDP_WROT1, "mdp1_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP1_MDP_WROT2, "mdp1_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP1_MDP_FAKE_ENG0, "mdp1_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP1_APB_DB, "mdp1_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC0, "mdp1_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC1, "mdp1_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC0, "mdp1_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC1, "mdp1_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC2, "mdp1_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC2, "mdp1_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC3, "mdp1_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP1_IMG_DL_ASYNC0, "mdp1_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP1_MDP_RROT0, "mdp1_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP1_MDP_MERGE0, "mdp1_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP1_MDP_C3D0, "mdp1_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP1_MDP_FG0, "mdp1_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP1_MDP_CLA2, "mdp1_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC4, "mdp1_mdp_dlo_async4", "mdp", 31),
+ /* MDP1-1 */
+ GATE_MDP1(CLK_MDP1_VPP_RSZ0, "mdp1_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP1_VPP_RSZ1, "mdp1_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP1_MDP_DLO_ASYNC5, "mdp1_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP1_IMG0, "mdp1_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP1_F26M, "mdp1_f26m", "clk26m", 27),
+ /* MDP1-2 */
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY0, "mdp1_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY1, "mdp1_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp1_mcd = {
+ .clks = mdp1_clks,
+ .num_clks = ARRAY_SIZE(mdp1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP_MDP_RDMA0, "mdp_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP_MDP_RDMA1, "mdp_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP_MDP_RDMA2, "mdp_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP_MDP_BIRSZ0, "mdp_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP_MDP_HDR0, "mdp_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP_MDP_AAL0, "mdp_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP_MDP_RSZ0, "mdp_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP_MDP_RSZ2, "mdp_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP_MDP_COLOR0, "mdp_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP_MDP_WROT0, "mdp_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP_MDP_WROT1, "mdp_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP_MDP_WROT2, "mdp_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP_APB_DB, "mdp_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC0, "mdp_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC1, "mdp_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC0, "mdp_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC1, "mdp_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC2, "mdp_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC2, "mdp_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC3, "mdp_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP_MDP_RROT0, "mdp_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP_MDP_MERGE0, "mdp_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP_MDP_C3D0, "mdp_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP_MDP_FG0, "mdp_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP_MDP_CLA2, "mdp_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC4, "mdp_mdp_dlo_async4", "mdp", 31),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_VPP_RSZ0, "mdp_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP_VPP_RSZ1, "mdp_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP_MDP_DLO_ASYNC5, "mdp_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP_IMG0, "mdp_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP_F26M, "mdp_f26m", "clk26m", 27),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0, "mdp_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1, "mdp_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp_mcd = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_mdpsys[] = {
+ { .compatible = "mediatek,mt8196-mdpsys0", .data = &mdp_mcd },
+ { .compatible = "mediatek,mt8196-mdpsys1", .data = &mdp1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mdpsys);
+
+static struct platform_driver clk_mt8196_mdpsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-mdpsys",
+ .of_match_table = of_match_clk_mt8196_mdpsys,
+ },
+};
+module_platform_driver(clk_mt8196_mdpsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Multimedia Data Path clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mfg.c b/drivers/clk/mediatek/clk-mt8196-mfg.c
new file mode 100644
index 000000000000..ae1eb9de79ae
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mfg.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MFGPLL_CON0 0x008
+#define MFGPLL_CON1 0x00c
+#define MFGPLL_CON2 0x010
+#define MFGPLL_CON3 0x014
+#define MFGPLL_SC0_CON0 0x008
+#define MFGPLL_SC0_CON1 0x00c
+#define MFGPLL_SC0_CON2 0x010
+#define MFGPLL_SC0_CON3 0x014
+#define MFGPLL_SC1_CON0 0x008
+#define MFGPLL_SC1_CON1 0x00c
+#define MFGPLL_SC1_CON2 0x010
+#define MFGPLL_SC1_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
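+/* One PLL per pll-ctrl instance: the main MFGPLL plus two SC PLLs */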
+static const struct mtk_pll_data mfg_ao_plls[] = {
+ PLL(CLK_MFG_AO_MFGPLL, "mfgpll", MFGPLL_CON0, MFGPLL_CON0, 0, 0, 0,
+ BIT(0), MFGPLL_CON1, 24, 0, 0, 0,
+ MFGPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc0_ao_plls[] = {
+ PLL(CLK_MFGSC0_AO_MFGPLL_SC0, "mfgpll-sc0", MFGPLL_SC0_CON0,
+ MFGPLL_SC0_CON0, 0, 0, 0, BIT(0), MFGPLL_SC0_CON1, 24, 0, 0, 0,
+ MFGPLL_SC0_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc1_ao_plls[] = {
+ PLL(CLK_MFGSC1_AO_MFGPLL_SC1, "mfgpll-sc1", MFGPLL_SC1_CON0,
+ MFGPLL_SC1_CON0, 0, 0, 0, BIT(0), MFGPLL_SC1_CON1, 24, 0, 0, 0,
+ MFGPLL_SC1_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mfg[] = {
+ { .compatible = "mediatek,mt8196-mfgpll-pll-ctrl",
+ .data = &mfg_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc0-pll-ctrl",
+ .data = &mfgsc0_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc1-pll-ctrl",
+ .data = &mfgsc1_ao_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mfg);
+
+static int clk_mt8196_mfg_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mfg_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mfg_drv = {
+ .probe = clk_mt8196_mfg_probe,
+ .remove = clk_mt8196_mfg_remove,
+ .driver = {
+ .name = "clk-mt8196-mfg",
+ .of_match_table = of_match_clk_mt8196_mfg,
+ },
+};
+module_platform_driver(clk_mt8196_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 GPU mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl0.c b/drivers/clk/mediatek/clk-mt8196-ovl0.c
new file mode 100644
index 000000000000..d4affd14d2c4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl0.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
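+/*
+ * Hardware-voter (HWV) banks backing the OVL gates: enable requests
+ * go through the HWV set/clear registers and completion is polled
+ * through the corresponding done/status offset.
+ */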
+static const struct mtk_gate_regs ovl0_hwv_regs = {
+ .set_ofs = 0x0060,
+ .clr_ofs = 0x0064,
+ .sta_ofs = 0x2c30,
+};
+
+static const struct mtk_gate_regs ovl1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl1_hwv_regs = {
+ .set_ofs = 0x0068,
+ .clr_ofs = 0x006c,
+ .sta_ofs = 0x2c34,
+};
+
+#define GATE_HWV_OVL0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl0_cg_regs, \
+ .hwv_regs = &ovl0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl1_cg_regs, \
+ .hwv_regs = &ovl1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl_clks[] = {
+ /* OVL0 */
+ GATE_HWV_OVL0(CLK_OVLSYS_CONFIG, "ovlsys_config", "disp", 0),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG0, "ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG1, "ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL0(CLK_OVL_MUTEX0, "ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA0, "ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA1, "ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA2, "ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA3, "ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA4, "ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA5, "ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA6, "ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA7, "ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA8, "ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA9, "ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER0, "ovl_blender0", "disp", 14),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER1, "ovl_blender1", "disp", 15),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER2, "ovl_blender2", "disp", 16),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER3, "ovl_blender3", "disp", 17),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER4, "ovl_blender4", "disp", 18),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER5, "ovl_blender5", "disp", 19),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER6, "ovl_blender6", "disp", 20),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER7, "ovl_blender7", "disp", 21),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER8, "ovl_blender8", "disp", 22),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER9, "ovl_blender9", "disp", 23),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC0, "ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC1, "ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC2, "ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC3, "ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC4, "ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC5, "ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ0, "ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ1, "ovl_mdp_rsz1", "disp", 31),
+ /* OVL1 */
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA0, "ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA1, "ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL1(CLK_OVL_UFBC_WDMA0, "ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA0, "ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA1, "ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL1(CLK_OVL_BWM0, "ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL1(CLK_OVL_DLI0, "ovl_dli0", "disp", 6),
+ GATE_HWV_OVL1(CLK_OVL_DLI1, "ovl_dli1", "disp", 7),
+ GATE_HWV_OVL1(CLK_OVL_DLI2, "ovl_dli2", "disp", 8),
+ GATE_HWV_OVL1(CLK_OVL_DLI3, "ovl_dli3", "disp", 9),
+ GATE_HWV_OVL1(CLK_OVL_DLI4, "ovl_dli4", "disp", 10),
+ GATE_HWV_OVL1(CLK_OVL_DLI5, "ovl_dli5", "disp", 11),
+ GATE_HWV_OVL1(CLK_OVL_DLI6, "ovl_dli6", "disp", 12),
+ GATE_HWV_OVL1(CLK_OVL_DLI7, "ovl_dli7", "disp", 13),
+ GATE_HWV_OVL1(CLK_OVL_DLI8, "ovl_dli8", "disp", 14),
+ GATE_HWV_OVL1(CLK_OVL_DLO0, "ovl_dlo0", "disp", 15),
+ GATE_HWV_OVL1(CLK_OVL_DLO1, "ovl_dlo1", "disp", 16),
+ GATE_HWV_OVL1(CLK_OVL_DLO2, "ovl_dlo2", "disp", 17),
+ GATE_HWV_OVL1(CLK_OVL_DLO3, "ovl_dlo3", "disp", 18),
+ GATE_HWV_OVL1(CLK_OVL_DLO4, "ovl_dlo4", "disp", 19),
+ GATE_HWV_OVL1(CLK_OVL_DLO5, "ovl_dlo5", "disp", 20),
+ GATE_HWV_OVL1(CLK_OVL_DLO6, "ovl_dlo6", "disp", 21),
+ GATE_HWV_OVL1(CLK_OVL_DLO7, "ovl_dlo7", "disp", 22),
+ GATE_HWV_OVL1(CLK_OVL_DLO8, "ovl_dlo8", "disp", 23),
+ GATE_HWV_OVL1(CLK_OVL_DLO9, "ovl_dlo9", "disp", 24),
+ GATE_HWV_OVL1(CLK_OVL_DLO10, "ovl_dlo10", "disp", 25),
+ GATE_HWV_OVL1(CLK_OVL_DLO11, "ovl_dlo11", "disp", 26),
+ GATE_HWV_OVL1(CLK_OVL_DLO12, "ovl_dlo12", "disp", 27),
+ GATE_HWV_OVL1(CLK_OVLSYS_RELAY0, "ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL1(CLK_OVL_INLINEROT0, "ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL1(CLK_OVL_SMI, "ovl_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl_mcd = {
+ .clks = ovl_clks,
+ .num_clks = ARRAY_SIZE(ovl_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl0_id_table[] = {
+ { .name = "clk-mt8196-ovl0", .driver_data = (kernel_ulong_t)&ovl_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl0_id_table);
+
+static struct platform_driver clk_mt8196_ovl0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl0",
+ },
+ .id_table = clk_mt8196_ovl0_id_table,
+};
+module_platform_driver(clk_mt8196_ovl0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl1.c b/drivers/clk/mediatek/clk-mt8196-ovl1.c
new file mode 100644
index 000000000000..c8843d0d3ede
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl10_hwv_regs = {
+ .set_ofs = 0x0050,
+ .clr_ofs = 0x0054,
+ .sta_ofs = 0x2c28,
+};
+
+static const struct mtk_gate_regs ovl11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl11_hwv_regs = {
+ .set_ofs = 0x0058,
+ .clr_ofs = 0x005c,
+ .sta_ofs = 0x2c2c,
+};
+
+#define GATE_HWV_OVL10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl10_cg_regs, \
+ .hwv_regs = &ovl10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl11_cg_regs, \
+ .hwv_regs = &ovl11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl1_clks[] = {
+ /* OVL10 */
+ GATE_HWV_OVL10(CLK_OVL1_OVLSYS_CONFIG, "ovl1_ovlsys_config", "disp", 0),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG0, "ovl1_ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG1, "ovl1_ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MUTEX0, "ovl1_ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA0, "ovl1_ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA1, "ovl1_ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA2, "ovl1_ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA3, "ovl1_ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA4, "ovl1_ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA5, "ovl1_ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA6, "ovl1_ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA7, "ovl1_ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA8, "ovl1_ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA9, "ovl1_ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER0, "ovl1_ovl_blender0", "disp", 14),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER1, "ovl1_ovl_blender1", "disp", 15),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER2, "ovl1_ovl_blender2", "disp", 16),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER3, "ovl1_ovl_blender3", "disp", 17),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER4, "ovl1_ovl_blender4", "disp", 18),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER5, "ovl1_ovl_blender5", "disp", 19),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER6, "ovl1_ovl_blender6", "disp", 20),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER7, "ovl1_ovl_blender7", "disp", 21),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER8, "ovl1_ovl_blender8", "disp", 22),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER9, "ovl1_ovl_blender9", "disp", 23),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC0, "ovl1_ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC1, "ovl1_ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC2, "ovl1_ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC3, "ovl1_ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC4, "ovl1_ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC5, "ovl1_ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ0, "ovl1_ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ1, "ovl1_ovl_mdp_rsz1", "disp", 31),
+ /* OVL11 */
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA0, "ovl1_ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA1, "ovl1_ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_UFBC_WDMA0, "ovl1_ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA0, "ovl1_ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA1, "ovl1_ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_BWM0, "ovl1_ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL11(CLK_OVL1_DLI0, "ovl1_dli0", "disp", 6),
+ GATE_HWV_OVL11(CLK_OVL1_DLI1, "ovl1_dli1", "disp", 7),
+ GATE_HWV_OVL11(CLK_OVL1_DLI2, "ovl1_dli2", "disp", 8),
+ GATE_HWV_OVL11(CLK_OVL1_DLI3, "ovl1_dli3", "disp", 9),
+ GATE_HWV_OVL11(CLK_OVL1_DLI4, "ovl1_dli4", "disp", 10),
+ GATE_HWV_OVL11(CLK_OVL1_DLI5, "ovl1_dli5", "disp", 11),
+ GATE_HWV_OVL11(CLK_OVL1_DLI6, "ovl1_dli6", "disp", 12),
+ GATE_HWV_OVL11(CLK_OVL1_DLI7, "ovl1_dli7", "disp", 13),
+ GATE_HWV_OVL11(CLK_OVL1_DLI8, "ovl1_dli8", "disp", 14),
+ GATE_HWV_OVL11(CLK_OVL1_DLO0, "ovl1_dlo0", "disp", 15),
+ GATE_HWV_OVL11(CLK_OVL1_DLO1, "ovl1_dlo1", "disp", 16),
+ GATE_HWV_OVL11(CLK_OVL1_DLO2, "ovl1_dlo2", "disp", 17),
+ GATE_HWV_OVL11(CLK_OVL1_DLO3, "ovl1_dlo3", "disp", 18),
+ GATE_HWV_OVL11(CLK_OVL1_DLO4, "ovl1_dlo4", "disp", 19),
+ GATE_HWV_OVL11(CLK_OVL1_DLO5, "ovl1_dlo5", "disp", 20),
+ GATE_HWV_OVL11(CLK_OVL1_DLO6, "ovl1_dlo6", "disp", 21),
+ GATE_HWV_OVL11(CLK_OVL1_DLO7, "ovl1_dlo7", "disp", 22),
+ GATE_HWV_OVL11(CLK_OVL1_DLO8, "ovl1_dlo8", "disp", 23),
+ GATE_HWV_OVL11(CLK_OVL1_DLO9, "ovl1_dlo9", "disp", 24),
+ GATE_HWV_OVL11(CLK_OVL1_DLO10, "ovl1_dlo10", "disp", 25),
+ GATE_HWV_OVL11(CLK_OVL1_DLO11, "ovl1_dlo11", "disp", 26),
+ GATE_HWV_OVL11(CLK_OVL1_DLO12, "ovl1_dlo12", "disp", 27),
+ GATE_HWV_OVL11(CLK_OVL1_OVLSYS_RELAY0, "ovl1_ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_INLINEROT0, "ovl1_ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL11(CLK_OVL1_SMI, "ovl1_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl1_mcd = {
+ .clks = ovl1_clks,
+ .num_clks = ARRAY_SIZE(ovl1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl1_id_table[] = {
+ { .name = "clk-mt8196-ovl1", .driver_data = (kernel_ulong_t)&ovl1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl1_id_table);
+
+static struct platform_driver clk_mt8196_ovl1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl1",
+ },
+ .id_table = clk_mt8196_ovl1_id_table,
+};
+module_platform_driver(clk_mt8196_ovl1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-peri_ao.c b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
new file mode 100644
index 000000000000..f227a86c5d60
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs peri_ao1_cg_regs = {
+ .set_ofs = 0x2c,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x14,
+};
+
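+/* PERI_AO1 gates can also be driven through the hardware voter */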
+static const struct mtk_gate_regs peri_ao1_hwv_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x2c04,
+};
+
+static const struct mtk_gate_regs peri_ao2_cg_regs = {
+ .set_ofs = 0x34,
+ .clr_ofs = 0x38,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_HWV_PERI_AO1(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .hwv_regs = &peri_ao1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+#define GATE_PERI_AO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_ao_clks[] = {
+ /* PERI_AO0 */
+ GATE_PERI_AO0(CLK_PERI_AO_UART0_BCLK, "peri_ao_uart0_bclk", "uart", 0),
+ GATE_PERI_AO0(CLK_PERI_AO_UART1_BCLK, "peri_ao_uart1_bclk", "uart", 1),
+ GATE_PERI_AO0(CLK_PERI_AO_UART2_BCLK, "peri_ao_uart2_bclk", "uart", 2),
+ GATE_PERI_AO0(CLK_PERI_AO_UART3_BCLK, "peri_ao_uart3_bclk", "uart", 3),
+ GATE_PERI_AO0(CLK_PERI_AO_UART4_BCLK, "peri_ao_uart4_bclk", "uart", 4),
+ GATE_PERI_AO0(CLK_PERI_AO_UART5_BCLK, "peri_ao_uart5_bclk", "uart", 5),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_HCLK, "peri_ao_pwm_x16w", "p_axi", 12),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_BCLK, "peri_ao_pwm_x16w_bclk", "pwm", 13),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK0, "peri_ao_pwm_pwm_bclk0", "pwm", 14),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK1, "peri_ao_pwm_pwm_bclk1", "pwm", 15),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK2, "peri_ao_pwm_pwm_bclk2", "pwm", 16),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK3, "peri_ao_pwm_pwm_bclk3", "pwm", 17),
+ /* PERI_AO1 */
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI0_BCLK, "peri_ao_spi0_bclk", "spi0_b", 0),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI1_BCLK, "peri_ao_spi1_bclk", "spi1_b", 2),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI2_BCLK, "peri_ao_spi2_bclk", "spi2_b", 3),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI3_BCLK, "peri_ao_spi3_bclk", "spi3_b", 4),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI4_BCLK, "peri_ao_spi4_bclk", "spi4_b", 5),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI5_BCLK, "peri_ao_spi5_bclk", "spi5_b", 6),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI6_BCLK, "peri_ao_spi6_bclk", "spi6_b", 7),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI7_BCLK, "peri_ao_spi7_bclk", "spi7_b", 8),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "peri_ao_flashif_27m",
+ 18),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_27M, "peri_ao_flashif_27m", "sflash", 19),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_DRAM, "peri_ao_flashif_dram", "p_axi", 20),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_AXI, "peri_ao_flashif_axi", "peri_ao_flashif_dram", 21),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_BCLK, "peri_ao_flashif_bclk", "p_axi", 22),
+ GATE_PERI_AO1(CLK_PERI_AO_AP_DMA_X32W_BCLK, "peri_ao_ap_dma_x32w_bclk", "p_axi", 26),
+ /* PERI_AO2 */
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_MSDC_SRC, "peri_ao_msdc1_msdc_src", "msdc30_1", 1),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK, "peri_ao_msdc1", "peri_ao_msdc1_axi", 2),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_AXI, "peri_ao_msdc1_axi", "p_axi", 3),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK_WRAP, "peri_ao_msdc1_h_wrap", "peri_ao_msdc1", 4),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_MSDC_SRC, "peri_ao_msdc2_msdc_src", "msdc30_2", 10),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK, "peri_ao_msdc2", "peri_ao_msdc2_axi", 11),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_AXI, "peri_ao_msdc2_axi", "p_axi", 12),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK_WRAP, "peri_ao_msdc2_h_wrap", "peri_ao_msdc2", 13),
+};
+
+static const struct mtk_clk_desc peri_ao_mcd = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_peri_ao[] = {
+ { .compatible = "mediatek,mt8196-pericfg-ao", .data = &peri_ao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_peri_ao);
+
+static struct platform_driver clk_mt8196_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-peri-ao",
+ .of_match_table = of_match_clk_mt8196_peri_ao,
+ },
+};
+module_platform_driver(clk_mt8196_peri_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 pericfg_ao clock controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-pextp.c b/drivers/clk/mediatek/clk-mt8196-pextp.c
new file mode 100644
index 000000000000..3e505ecc4b6e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-pextp.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define MT8196_PEXTP_RST0_SET_OFFSET 0x8
+
+static const struct mtk_gate_regs pext_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_PEXT(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pext_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+static const struct mtk_gate pext_clks[] = {
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_TL, "pext_pm0_tl", "tl", 0),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_REF, "pext_pm0_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_MCU_BUS, "pext_pp0_mcu_bus", "clk26m", 6),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF, "pext_pp0_pextp_ref", "clk26m", 7),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AXI_250, "pext_pm0_axi_250", "ufs_pexpt0_mem_sub", 12),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AHB_APB, "pext_pm0_ahb_apb", "ufs_pextp0_axi", 13),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_PL_P, "pext_pm0_pl_p", "clk26m", 14),
+ GATE_PEXT(CLK_PEXT_PEXTP_VLP_AO_P0_LP, "pext_pextp_vlp_ao_p0_lp", "clk26m", 19),
+};
+
+static u16 pext_rst_ofs[] = { MT8196_PEXTP_RST0_SET_OFFSET };
+
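+/* Map DT reset indices onto their bit positions in the reset bank */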
+static u16 pext_rst_idx_map[] = {
+ [MT8196_PEXTP0_RST0_PCIE0_MAC] = 0,
+ [MT8196_PEXTP0_RST0_PCIE0_PHY] = 1,
+};
+
+static const struct mtk_clk_rst_desc pext_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext_mcd = {
+ .clks = pext_clks,
+ .num_clks = ARRAY_SIZE(pext_clks),
+ .rst_desc = &pext_rst_desc,
+};
+
+static const struct mtk_gate pext1_clks[] = {
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_TL, "pext1_pm1_tl", "tl_p1", 0),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_REF, "pext1_pm1_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_TL, "pext1_pm2_tl", "tl_p2", 2),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_REF, "pext1_pm2_ref", "clk26m", 3),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS, "pext1_pp1_mcu_bus", "clk26m", 8),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF, "pext1_pp1_pextp_ref", "clk26m", 9),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS, "pext1_pp2_mcu_bus", "clk26m", 10),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF, "pext1_pp2_pextp_ref", "clk26m", 11),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AXI_250, "pext1_pm1_axi_250",
+ "pextp1_usb_axi", 16),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AHB_APB, "pext1_pm1_ahb_apb",
+ "pextp1_usb_mem_sub", 17),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_PL_P, "pext1_pm1_pl_p", "clk26m", 18),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AXI_250, "pext1_pm2_axi_250",
+ "pextp1_usb_axi", 19),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AHB_APB, "pext1_pm2_ahb_apb",
+ "pextp1_usb_mem_sub", 20),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_PL_P, "pext1_pm2_pl_p", "clk26m", 21),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P1_LP, "pext1_pextp_vlp_ao_p1_lp", "clk26m", 26),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P2_LP, "pext1_pextp_vlp_ao_p2_lp", "clk26m", 27),
+};
+
+static u16 pext1_rst_idx_map[] = {
+ [MT8196_PEXTP1_RST0_PCIE1_MAC] = 0,
+ [MT8196_PEXTP1_RST0_PCIE1_PHY] = 1,
+ [MT8196_PEXTP1_RST0_PCIE2_MAC] = 8,
+ [MT8196_PEXTP1_RST0_PCIE2_PHY] = 9,
+};
+
+static const struct mtk_clk_rst_desc pext1_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext1_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext1_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext1_mcd = {
+ .clks = pext1_clks,
+ .num_clks = ARRAY_SIZE(pext1_clks),
+ .rst_desc = &pext1_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_pextp[] = {
+ { .compatible = "mediatek,mt8196-pextp0cfg-ao", .data = &pext_mcd },
+ { .compatible = "mediatek,mt8196-pextp1cfg-ao", .data = &pext1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_pextp);
+
+static struct platform_driver clk_mt8196_pextp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-pextp",
+ .of_match_table = of_match_clk_mt8196_pextp,
+ },
+};
+module_platform_driver(clk_mt8196_pextp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 PCIe transmit phy clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen.c b/drivers/clk/mediatek/clk-mt8196-topckgen.c
new file mode 100644
index 000000000000..6ace11ef6b69
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen.c
@@ -0,0 +1,995 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CLK_CFG_UPDATE 0x0004
+#define CLK_CFG_UPDATE1 0x0008
+#define CLK_CFG_UPDATE2 0x000c
+#define CLK_CFG_0 0x0010
+#define CLK_CFG_0_SET 0x0014
+#define CLK_CFG_0_CLR 0x0018
+#define CLK_CFG_1 0x0020
+#define CLK_CFG_1_SET 0x0024
+#define CLK_CFG_1_CLR 0x0028
+#define CLK_CFG_2 0x0030
+#define CLK_CFG_2_SET 0x0034
+#define CLK_CFG_2_CLR 0x0038
+#define CLK_CFG_3 0x0040
+#define CLK_CFG_3_SET 0x0044
+#define CLK_CFG_3_CLR 0x0048
+#define CLK_CFG_4 0x0050
+#define CLK_CFG_4_SET 0x0054
+#define CLK_CFG_4_CLR 0x0058
+#define CLK_CFG_5 0x0060
+#define CLK_CFG_5_SET 0x0064
+#define CLK_CFG_5_CLR 0x0068
+#define CLK_CFG_6 0x0070
+#define CLK_CFG_6_SET 0x0074
+#define CLK_CFG_6_CLR 0x0078
+#define CLK_CFG_7 0x0080
+#define CLK_CFG_7_SET 0x0084
+#define CLK_CFG_7_CLR 0x0088
+#define CLK_CFG_8 0x0090
+#define CLK_CFG_8_SET 0x0094
+#define CLK_CFG_8_CLR 0x0098
+#define CLK_CFG_9 0x00a0
+#define CLK_CFG_9_SET 0x00a4
+#define CLK_CFG_9_CLR 0x00a8
+#define CLK_CFG_10 0x00b0
+#define CLK_CFG_10_SET 0x00b4
+#define CLK_CFG_10_CLR 0x00b8
+#define CLK_CFG_11 0x00c0
+#define CLK_CFG_11_SET 0x00c4
+#define CLK_CFG_11_CLR 0x00c8
+#define CLK_CFG_12 0x00d0
+#define CLK_CFG_12_SET 0x00d4
+#define CLK_CFG_12_CLR 0x00d8
+#define CLK_CFG_13 0x00e0
+#define CLK_CFG_13_SET 0x00e4
+#define CLK_CFG_13_CLR 0x00e8
+#define CLK_CFG_14 0x00f0
+#define CLK_CFG_14_SET 0x00f4
+#define CLK_CFG_14_CLR 0x00f8
+#define CLK_CFG_15 0x0100
+#define CLK_CFG_15_SET 0x0104
+#define CLK_CFG_15_CLR 0x0108
+#define CLK_CFG_16 0x0110
+#define CLK_CFG_16_SET 0x0114
+#define CLK_CFG_16_CLR 0x0118
+#define CLK_CFG_17 0x0120
+#define CLK_CFG_17_SET 0x0124
+#define CLK_CFG_17_CLR 0x0128
+#define CLK_CFG_18 0x0130
+#define CLK_CFG_18_SET 0x0134
+#define CLK_CFG_18_CLR 0x0138
+#define CLK_CFG_19 0x0140
+#define CLK_CFG_19_SET 0x0144
+#define CLK_CFG_19_CLR 0x0148
+#define CLK_AUDDIV_0 0x020c
+#define CLK_FENC_STATUS_MON_0 0x0270
+#define CLK_FENC_STATUS_MON_1 0x0274
+#define CLK_FENC_STATUS_MON_2 0x0278
+
+/* MUX SHIFT */
+#define TOP_MUX_AXI_SHIFT 0
+#define TOP_MUX_MEM_SUB_SHIFT 1
+#define TOP_MUX_IO_NOC_SHIFT 2
+#define TOP_MUX_PERI_AXI_SHIFT 3
+#define TOP_MUX_UFS_PEXTP0_AXI_SHIFT 4
+#define TOP_MUX_PEXTP1_USB_AXI_SHIFT 5
+#define TOP_MUX_PERI_FMEM_SUB_SHIFT 6
+#define TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT 7
+#define TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT 8
+#define TOP_MUX_PERI_NOC_SHIFT 9
+#define TOP_MUX_EMI_N_SHIFT 10
+#define TOP_MUX_EMI_S_SHIFT 11
+#define TOP_MUX_AP2CONN_HOST_SHIFT 14
+#define TOP_MUX_ATB_SHIFT 15
+#define TOP_MUX_CIRQ_SHIFT 16
+#define TOP_MUX_PBUS_156M_SHIFT 17
+#define TOP_MUX_EFUSE_SHIFT 20
+#define TOP_MUX_MCU_L3GIC_SHIFT 21
+#define TOP_MUX_MCU_INFRA_SHIFT 22
+#define TOP_MUX_DSP_SHIFT 23
+#define TOP_MUX_MFG_REF_SHIFT 24
+#define TOP_MUX_MFG_EB_SHIFT 26
+#define TOP_MUX_UART_SHIFT 27
+#define TOP_MUX_SPI0_BCLK_SHIFT 28
+#define TOP_MUX_SPI1_BCLK_SHIFT 29
+#define TOP_MUX_SPI2_BCLK_SHIFT 30
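+/* Update bits below live in CLK_CFG_UPDATE1 */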
+#define TOP_MUX_SPI3_BCLK_SHIFT 0
+#define TOP_MUX_SPI4_BCLK_SHIFT 1
+#define TOP_MUX_SPI5_BCLK_SHIFT 2
+#define TOP_MUX_SPI6_BCLK_SHIFT 3
+#define TOP_MUX_SPI7_BCLK_SHIFT 4
+#define TOP_MUX_MSDC30_1_SHIFT 7
+#define TOP_MUX_MSDC30_2_SHIFT 8
+#define TOP_MUX_DISP_PWM_SHIFT 9
+#define TOP_MUX_USB_TOP_1P_SHIFT 10
+#define TOP_MUX_SSUSB_XHCI_1P_SHIFT 11
+#define TOP_MUX_SSUSB_FMCNT_P1_SHIFT 12
+#define TOP_MUX_I2C_PERI_SHIFT 13
+#define TOP_MUX_I2C_EAST_SHIFT 14
+#define TOP_MUX_I2C_WEST_SHIFT 15
+#define TOP_MUX_I2C_NORTH_SHIFT 16
+#define TOP_MUX_AES_UFSFDE_SHIFT 17
+#define TOP_MUX_UFS_SHIFT 18
+#define TOP_MUX_AUD_1_SHIFT 21
+#define TOP_MUX_AUD_2_SHIFT 22
+#define TOP_MUX_ADSP_SHIFT 23
+#define TOP_MUX_ADSP_UARTHUB_B_SHIFT 24
+#define TOP_MUX_DPMAIF_MAIN_SHIFT 25
+#define TOP_MUX_PWM_SHIFT 26
+#define TOP_MUX_MCUPM_SHIFT 27
+#define TOP_MUX_SFLASH_SHIFT 28
+#define TOP_MUX_IPSEAST_SHIFT 29
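+/* Update bits below live in CLK_CFG_UPDATE2 */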
+#define TOP_MUX_TL_SHIFT 0
+#define TOP_MUX_TL_P1_SHIFT 1
+#define TOP_MUX_TL_P2_SHIFT 2
+#define TOP_MUX_EMI_INTERFACE_546_SHIFT 3
+#define TOP_MUX_SDF_SHIFT 4
+#define TOP_MUX_UARTHUB_BCLK_SHIFT 5
+#define TOP_MUX_DPSW_CMP_26M_SHIFT 6
+#define TOP_MUX_SMAPCK_SHIFT 7
+#define TOP_MUX_SSR_PKA_SHIFT 8
+#define TOP_MUX_SSR_DMA_SHIFT 9
+#define TOP_MUX_SSR_KDF_SHIFT 10
+#define TOP_MUX_SSR_RNG_SHIFT 11
+#define TOP_MUX_SPU0_SHIFT 12
+#define TOP_MUX_SPU1_SHIFT 13
+#define TOP_MUX_DXCC_SHIFT 14
+
+/* CKSTA REG */
+#define CKSTA_REG 0x01c8
+#define CKSTA_REG1 0x01cc
+#define CKSTA_REG2 0x01d0
+
+/* DIVIDER REG */
+#define CLK_AUDDIV_2 0x0214
+#define CLK_AUDDIV_3 0x0220
+#define CLK_AUDDIV_4 0x0224
+#define CLK_AUDDIV_5 0x0228
+
+/* HW Voter REG */
+#define HWV_CG_0_SET 0x0000
+#define HWV_CG_0_CLR 0x0004
+#define HWV_CG_0_DONE 0x2c00
+#define HWV_CG_1_SET 0x0008
+#define HWV_CG_1_CLR 0x000c
+#define HWV_CG_1_DONE 0x2c04
+#define HWV_CG_2_SET 0x0010
+#define HWV_CG_2_CLR 0x0014
+#define HWV_CG_2_DONE 0x2c08
+#define HWV_CG_3_SET 0x0018
+#define HWV_CG_3_CLR 0x001c
+#define HWV_CG_3_DONE 0x2c0c
+#define HWV_CG_4_SET 0x0020
+#define HWV_CG_4_CLR 0x0024
+#define HWV_CG_4_DONE 0x2c10
+#define HWV_CG_5_SET 0x0028
+#define HWV_CG_5_CLR 0x002c
+#define HWV_CG_5_DONE 0x2c14
+#define HWV_CG_6_SET 0x0030
+#define HWV_CG_6_CLR 0x0034
+#define HWV_CG_6_DONE 0x2c18
+#define HWV_CG_7_SET 0x0038
+#define HWV_CG_7_CLR 0x003c
+#define HWV_CG_7_DONE 0x2c1c
+#define HWV_CG_8_SET 0x0040
+#define HWV_CG_8_CLR 0x0044
+#define HWV_CG_8_DONE 0x2c20
+
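+/* Fixed post-dividers on the PLL and ULPOSC outputs */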
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll", 1, 56),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll", 1, 416),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll", 1, 130),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D3, "osc_d3", "ulposc", 1, 3),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D5, "osc_d5", "ulposc", 1, 5),
+ FACTOR(CLK_TOP_OSC_D7, "osc_d7", "ulposc", 1, 7),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D14, "osc_d14", "ulposc", 1, 14),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_OSC_D32, "osc_d32", "ulposc", 1, 32),
+ FACTOR(CLK_TOP_OSC_D40, "osc_d40", "ulposc", 1, 40),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mem_sub_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d4",
+ "univpll_d4_d4",
+ "osc_d3",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const io_noc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d6_d2",
+ "mainpll_d9"
+};
+
+static const char * const shared_axi_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "mainpll_d5_d8",
+ "osc_d8",
+ "mainpll_d7_d4",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const shared_sub_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const p_noc_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const emi_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "emipll1_ck"
+};
+
+static const char * const ap2conn_host_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6"
+};
+
+static const char * const cirq_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4"
+};
+
+static const char * const pbus_156m_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "osc_d2",
+ "mainpll_d7"
+};
+
+static const char * const efuse_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const mcu_l3gic_parents[] = {
+ "clk26m",
+ "osc_d8",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mcu_infra_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d9",
+ "mainpll_d6"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "osc_d5",
+ "osc_d4",
+ "osc_d3",
+ "univpll_d6_d2",
+ "osc_d2",
+ "univpll_d5",
+ "osc"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const mfg_eb_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const spi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_192m",
+ "univpll_d6_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "osc_d32",
+ "osc_d8",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "osc_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const usb_1p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4"
+};
+
+static const char * const usb_fmcnt_p1_parents[] = {
+ "clk26m",
+ "univpll_192m_d4"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d4"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "vlp_apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "vlp_apll2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "adsppll"
+};
+
+static const char * const adsp_uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const ipseast_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2"
+};
+
+static const char * const md_emi_parents[] = {
+ "clk26m",
+ "mainpll_d4"
+};
+
+static const char * const sdf_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpsw_cmp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const smapparents[] = {
+ "clk26m",
+ "mainpll_d4_d8"
+};
+
+static const char * const ssr_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const ssr_kdf_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7"
+};
+
+static const char * const ssr_rng_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const spu_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2"
+};
+
+static const char * const apll_m_parents[] = {
+ "aud_1",
+ "aud_2"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
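+/*
+ * Topckgen muxes: selector fields live in the CLK_CFG_n registers,
+ * are programmed through the matching _SET/_CLR pair and latched by
+ * a bit in CLK_CFG_UPDATE*. GATE variants add an enable bit, FENC
+ * variants report state in CLK_FENC_STATUS_MON_* and HWV variants
+ * enable/disable through the hardware voter (HWV_CG_*).
+ */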
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_CLR_SET_UPD(CLK_TOP_AXI, "axi",
+ axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SUB, "mem_sub",
+ mem_sub_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_IO_NOC, "io_noc",
+ io_noc_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_IO_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_AXI, "p_axi",
+ shared_axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_AXI_SHIFT),
+ /* CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_TOP_UFS_PEXTP0_AXI, "ufs_pextp0_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXTP0_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_AXI, "pextp1_usb_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 8, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_FMEM_SUB, "p_fmem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_FMEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXPT0_MEM_SUB, "ufs_pexpt0_mem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 24, 4,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT),
+ /* CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_MEM_SUB, "pextp1_usb_mem_sub",
+ shared_sub_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_NOC, "p_noc",
+ p_noc_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_N, "emi_n",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_N_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_S, "emi_s",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_S_SHIFT),
+ /* CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_TOP_AP2CONN_HOST, "ap2conn_host",
+ ap2conn_host_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 1,
+ CLK_CFG_UPDATE, TOP_MUX_AP2CONN_HOST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_ATB, "atb",
+ atb_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 24, 2,
+ CLK_CFG_UPDATE, TOP_MUX_ATB_SHIFT),
+ /* CLK_CFG_4 */
+ MUX_CLR_SET_UPD(CLK_TOP_CIRQ, "cirq",
+ cirq_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2,
+ CLK_CFG_UPDATE, TOP_MUX_CIRQ_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PBUS_156M, "pbus_156m",
+ pbus_156m_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_PBUS_156M_SHIFT),
+ /* CLK_CFG_5 */
+ MUX_CLR_SET_UPD(CLK_TOP_EFUSE, "efuse",
+ efuse_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 0, 1,
+ CLK_CFG_UPDATE, TOP_MUX_EFUSE_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCL3GIC, "mcu_l3gic",
+ mcu_l3gic_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_L3GIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCINFRA, "mcu_infra",
+ mcu_infra_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_INFRA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_DSP, "dsp",
+ dsp_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_DSP_SHIFT),
+ /* CLK_CFG_6 */
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(CLK_TOP_MFG_REF, "mfg_ref", mfg_ref_parents,
+ NULL, ARRAY_SIZE(mfg_ref_parents),
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE, TOP_MUX_MFG_REF_SHIFT,
+ CLK_FENC_STATUS_MON_0, 7, CLK_IGNORE_UNUSED),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_EB, "mfg_eb",
+ mfg_eb_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 16, 2,
+ 23, CLK_CFG_UPDATE, TOP_MUX_MFG_EB_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UART, "uart", uart_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ HWV_CG_3_DONE, HWV_CG_3_SET, HWV_CG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, TOP_MUX_UART_SHIFT,
+ CLK_FENC_STATUS_MON_0, 4),
+ /* CLK_CFG_7 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI0_BCLK, "spi0_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE, TOP_MUX_SPI0_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 3),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI1_BCLK, "spi1_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, TOP_MUX_SPI1_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 2),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI2_BCLK, "spi2_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE, TOP_MUX_SPI2_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 1),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI3_BCLK, "spi3_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI3_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI4_BCLK, "spi4_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_SPI4_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI5_BCLK, "spi5_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_SPI5_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI6_BCLK, "spi6_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_SPI6_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI7_BCLK, "spi7_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI7_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 28),
+ /* CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_2, "msdc30_2", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 24),
+ /* CLK_CFG_10 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disp_pwm", disp_pwm_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_DISP_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 23),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "usb_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 22),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_XHCI_1P, "usb_xhci_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 21),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_FMCNT_P1, "usb_fmcnt_p1", usb_fmcnt_p1_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 24, 1, 31, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_FMCNT_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 20),
+ /* CLK_CFG_11 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_P, "i2c_p", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_I2C_PERI_SHIFT,
+ CLK_FENC_STATUS_MON_1, 19),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_EAST, "i2c_east", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_I2C_EAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 18),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_WEST, "i2c_west", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_I2C_WEST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 17),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_I2C_NORTH, "i2c_north", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ HWV_CG_6_DONE, HWV_CG_6_SET, HWV_CG_6_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_I2C_NORTH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "aes_ufsfde", aes_ufsfde_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_AES_UFSFDE_SHIFT,
+ CLK_FENC_STATUS_MON_1, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_UFS, "ufs", ufs_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_UFS_SHIFT,
+ CLK_FENC_STATUS_MON_1, 14),
+ /* CLK_CFG_13 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1", aud_1_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE1, TOP_MUX_AUD_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 11),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2", aud_2_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_AUD_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 10),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_ADSP, "adsp", adsp_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_ADSP_SHIFT,
+ CLK_FENC_STATUS_MON_1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_UARTHUB_B, "adsp_uarthub_b",
+ adsp_uarthub_b_parents, CLK_CFG_13, CLK_CFG_13_SET,
+ CLK_CFG_13_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE1, TOP_MUX_ADSP_UARTHUB_B_SHIFT),
+ /* CLK_CFG_14 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "dpmaif_main", dpmaif_main_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 0, 4, 7, CLK_CFG_UPDATE1, TOP_MUX_DPMAIF_MAIN_SHIFT,
+ CLK_FENC_STATUS_MON_1, 7),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_PWM, "pwm", pwm_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE1, TOP_MUX_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 6),
+ MUX_CLR_SET_UPD(CLK_TOP_MCUPM, "mcupm",
+ mcupm_parents, CLK_CFG_14, CLK_CFG_14_SET,
+ CLK_CFG_14_CLR, 16, 3,
+ CLK_CFG_UPDATE1, TOP_MUX_MCUPM_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_SFLASH, "sflash", sflash_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE1, TOP_MUX_SFLASH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 4),
+ /* CLK_CFG_15 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_IPSEAST, "ipseast", ipseast_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_IPSEAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 3),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL, "tl", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 16, 2, 23, CLK_CFG_UPDATE2, TOP_MUX_TL_SHIFT,
+ CLK_FENC_STATUS_MON_1, 1),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P1, "tl_p1", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_TL_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 0),
+ /* CLK_CFG_16 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P2, "tl_p2", tl_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE2, TOP_MUX_TL_P2_SHIFT,
+ CLK_FENC_STATUS_MON_2, 31),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_INTERFACE_546, "emi_interface_546",
+ md_emi_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_EMI_INTERFACE_546_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SDF, "sdf",
+ sdf_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SDF_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UARTHUB_BCLK, "uarthub_b", uarthub_b_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ HWV_CG_7_DONE, HWV_CG_7_SET, HWV_CG_7_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_UARTHUB_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_2, 28),
+ /* CLK_CFG_17 */
+ MUX_CLR_SET_UPD(CLK_TOP_DPSW_CMP_26M, "dpsw_cmp_26m",
+ dpsw_cmp_26m_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 0, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_DPSW_CMP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SMAP, "smap",
+ smap_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_SMAPCK_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_PKA, "ssr_pka",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_PKA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_DMA, "ssr_dma",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_DMA_SHIFT),
+ /* CLK_CFG_18 */
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_KDF, "ssr_kdf",
+ ssr_kdf_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_KDF_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_RNG, "ssr_rng",
+ ssr_rng_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 8, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_RNG_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU0, "spu0",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU0_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU1, "spu1",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU1_SHIFT),
+ /* CLK_CFG_19 */
+ MUX_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc",
+ dxcc_parents, CLK_CFG_19, CLK_CFG_19_SET,
+ CLK_CFG_19_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_DXCC_SHIFT),
+};
+
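+/*
+ * The audio clocks are composites: MUX_DIV_GATE() chains a 1-bit parent
+ * mux, an 8-bit divider and a gate in the CLK_AUDDIV_* registers, while
+ * DIV_GATE() adds only a divider and gate under an already-muxed parent
+ * (here "apll_tdmout_m").
+ */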
+static const struct mtk_composite top_aud_divs[] = {
+ /* CLK_AUDDIV_2 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN0, "apll_i2sin0_m", apll_m_parents,
+ CLK_AUDDIV_0, 16, 1, CLK_AUDDIV_2, 0, 8, CLK_AUDDIV_0, 0),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN1, "apll_i2sin1_m", apll_m_parents,
+ CLK_AUDDIV_0, 17, 1, CLK_AUDDIV_2, 8, 8, CLK_AUDDIV_0, 1),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN2, "apll_i2sin2_m", apll_m_parents,
+ CLK_AUDDIV_0, 18, 1, CLK_AUDDIV_2, 16, 8, CLK_AUDDIV_0, 2),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN3, "apll_i2sin3_m", apll_m_parents,
+ CLK_AUDDIV_0, 19, 1, CLK_AUDDIV_2, 24, 8, CLK_AUDDIV_0, 3),
+ /* CLK_AUDDIV_3 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN4, "apll_i2sin4_m", apll_m_parents,
+ CLK_AUDDIV_0, 20, 1, CLK_AUDDIV_3, 0, 8, CLK_AUDDIV_0, 4),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN6, "apll_i2sin6_m", apll_m_parents,
+ CLK_AUDDIV_0, 21, 1, CLK_AUDDIV_3, 8, 8, CLK_AUDDIV_0, 5),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT0, "apll_i2sout0_m", apll_m_parents,
+ CLK_AUDDIV_0, 22, 1, CLK_AUDDIV_3, 16, 8, CLK_AUDDIV_0, 6),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT1, "apll_i2sout1_m", apll_m_parents,
+ CLK_AUDDIV_0, 23, 1, CLK_AUDDIV_3, 24, 8, CLK_AUDDIV_0, 7),
+ /* CLK_AUDDIV_4 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT2, "apll_i2sout2_m", apll_m_parents,
+ CLK_AUDDIV_0, 24, 1, CLK_AUDDIV_4, 0, 8, CLK_AUDDIV_0, 8),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT3, "apll_i2sout3_m", apll_m_parents,
+ CLK_AUDDIV_0, 25, 1, CLK_AUDDIV_4, 8, 8, CLK_AUDDIV_0, 9),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT4, "apll_i2sout4_m", apll_m_parents,
+ CLK_AUDDIV_0, 26, 1, CLK_AUDDIV_4, 16, 8, CLK_AUDDIV_0, 10),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT6, "apll_i2sout6_m", apll_m_parents,
+ CLK_AUDDIV_0, 27, 1, CLK_AUDDIV_4, 24, 8, CLK_AUDDIV_0, 11),
+ /* CLK_AUDDIV_5 */
+ MUX_DIV_GATE(CLK_TOP_APLL_FMI2S, "apll_fmi2s_m", apll_m_parents,
+ CLK_AUDDIV_0, 28, 1, CLK_AUDDIV_5, 0, 8, CLK_AUDDIV_0, 12),
+ MUX(CLK_TOP_APLL_TDMOUT, "apll_tdmout_m",
+ apll_m_parents, CLK_AUDDIV_0, 29, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_M, "apll12_div_tdmout_m",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 13, CLK_AUDDIV_5, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_B, "apll12_div_tdmout_b",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 14, CLK_AUDDIV_5, 8, 16),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs)
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen2.c b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
new file mode 100644
index 000000000000..6df93d7fbf91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CKSYS2_CLK_CFG_UPDATE 0x0004
+#define CKSYS2_CLK_CFG_0 0x0010
+#define CKSYS2_CLK_CFG_0_SET 0x0014
+#define CKSYS2_CLK_CFG_0_CLR 0x0018
+#define CKSYS2_CLK_CFG_1 0x0020
+#define CKSYS2_CLK_CFG_1_SET 0x0024
+#define CKSYS2_CLK_CFG_1_CLR 0x0028
+#define CKSYS2_CLK_CFG_2 0x0030
+#define CKSYS2_CLK_CFG_2_SET 0x0034
+#define CKSYS2_CLK_CFG_2_CLR 0x0038
+#define CKSYS2_CLK_CFG_3 0x0040
+#define CKSYS2_CLK_CFG_3_SET 0x0044
+#define CKSYS2_CLK_CFG_3_CLR 0x0048
+#define CKSYS2_CLK_CFG_4 0x0050
+#define CKSYS2_CLK_CFG_4_SET 0x0054
+#define CKSYS2_CLK_CFG_4_CLR 0x0058
+#define CKSYS2_CLK_CFG_5 0x0060
+#define CKSYS2_CLK_CFG_5_SET 0x0064
+#define CKSYS2_CLK_CFG_5_CLR 0x0068
+#define CKSYS2_CLK_CFG_6 0x0070
+#define CKSYS2_CLK_CFG_6_SET 0x0074
+#define CKSYS2_CLK_CFG_6_CLR 0x0078
+#define CKSYS2_CLK_FENC_STATUS_MON_0 0x0174
+
+/* MUX SHIFT */
+#define TOP_MUX_SENINF0_SHIFT 0
+#define TOP_MUX_SENINF1_SHIFT 1
+#define TOP_MUX_SENINF2_SHIFT 2
+#define TOP_MUX_SENINF3_SHIFT 3
+#define TOP_MUX_SENINF4_SHIFT 4
+#define TOP_MUX_SENINF5_SHIFT 5
+#define TOP_MUX_IMG1_SHIFT 6
+#define TOP_MUX_IPE_SHIFT 7
+#define TOP_MUX_CAM_SHIFT 8
+#define TOP_MUX_CAMTM_SHIFT 9
+#define TOP_MUX_DPE_SHIFT 10
+#define TOP_MUX_VDEC_SHIFT 11
+#define TOP_MUX_CCUSYS_SHIFT 12
+#define TOP_MUX_CCUTM_SHIFT 13
+#define TOP_MUX_VENC_SHIFT 14
+#define TOP_MUX_DVO_SHIFT 15
+#define TOP_MUX_DVO_FAVT_SHIFT 16
+#define TOP_MUX_DP1_SHIFT 17
+#define TOP_MUX_DP0_SHIFT 18
+#define TOP_MUX_DISP_SHIFT 19
+#define TOP_MUX_MDP_SHIFT 20
+#define TOP_MUX_MMINFRA_SHIFT 21
+#define TOP_MUX_MMINFRA_SNOC_SHIFT 22
+#define TOP_MUX_MMUP_SHIFT 23
+#define TOP_MUX_MMINFRA_AO_SHIFT 26
+
+/* HW Voter REG */
+#define HWV_CG_30_SET 0x0058
+#define HWV_CG_30_CLR 0x005c
+#define HWV_CG_30_DONE 0x2c2c
+
+#define MM_HWV_CG_30_SET 0x00f0
+#define MM_HWV_CG_30_CLR 0x00f4
+#define MM_HWV_CG_30_DONE 0x2c78
+#define MM_HWV_CG_31_SET 0x00f8
+#define MM_HWV_CG_31_CLR 0x00fc
+#define MM_HWV_CG_31_DONE 0x2c7c
+#define MM_HWV_CG_32_SET 0x0100
+#define MM_HWV_CG_32_CLR 0x0104
+#define MM_HWV_CG_32_DONE 0x2c80
+#define MM_HWV_CG_33_SET 0x0108
+#define MM_HWV_CG_33_CLR 0x010c
+#define MM_HWV_CG_33_DONE 0x2c84
+#define MM_HWV_CG_34_SET 0x0110
+#define MM_HWV_CG_34_CLR 0x0114
+#define MM_HWV_CG_34_DONE 0x2c88
+#define MM_HWV_CG_35_SET 0x0118
+#define MM_HWV_CG_35_CLR 0x011c
+#define MM_HWV_CG_35_DONE 0x2c8c
+#define MM_HWV_CG_36_SET 0x0120
+#define MM_HWV_CG_36_CLR 0x0124
+#define MM_HWV_CG_36_DONE 0x2c90
+#define MM_HWV_MUX_UPDATE_31_0 0x0240
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP2_MAINPLL2_D2, "mainpll2_d2", "mainpll2", 1, 2),
+ FACTOR(CLK_TOP2_MAINPLL2_D3, "mainpll2_d3", "mainpll2", 1, 3),
+ FACTOR(CLK_TOP2_MAINPLL2_D4, "mainpll2_d4", "mainpll2", 1, 4),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D2, "mainpll2_d4_d2", "mainpll2", 1, 8),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D4, "mainpll2_d4_d4", "mainpll2", 1, 16),
+ FACTOR(CLK_TOP2_MAINPLL2_D5, "mainpll2_d5", "mainpll2", 1, 5),
+ FACTOR(CLK_TOP2_MAINPLL2_D5_D2, "mainpll2_d5_d2", "mainpll2", 1, 10),
+ FACTOR(CLK_TOP2_MAINPLL2_D6, "mainpll2_d6", "mainpll2", 1, 6),
+ FACTOR(CLK_TOP2_MAINPLL2_D6_D2, "mainpll2_d6_d2", "mainpll2", 1, 12),
+ FACTOR(CLK_TOP2_MAINPLL2_D7, "mainpll2_d7", "mainpll2", 1, 7),
+ FACTOR(CLK_TOP2_MAINPLL2_D7_D2, "mainpll2_d7_d2", "mainpll2", 1, 14),
+ FACTOR(CLK_TOP2_MAINPLL2_D9, "mainpll2_d9", "mainpll2", 1, 9),
+ FACTOR(CLK_TOP2_UNIVPLL2_D3, "univpll2_d3", "univpll2", 1, 3),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4, "univpll2_d4", "univpll2", 1, 4),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4_D2, "univpll2_d4_d2", "univpll2", 1, 8),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5, "univpll2_d5", "univpll2", 1, 5),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5_D2, "univpll2_d5_d2", "univpll2", 1, 10),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6, "univpll2_d6", "univpll2", 1, 6),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D2, "univpll2_d6_d2", "univpll2", 1, 12),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D4, "univpll2_d6_d4", "univpll2", 1, 24),
+ FACTOR(CLK_TOP2_UNIVPLL2_D7, "univpll2_d7", "univpll2", 1, 7),
+ FACTOR(CLK_TOP2_IMGPLL_D2, "imgpll_d2", "imgpll", 1, 2),
+ FACTOR(CLK_TOP2_IMGPLL_D4, "imgpll_d4", "imgpll", 1, 4),
+ FACTOR(CLK_TOP2_IMGPLL_D5, "imgpll_d5", "imgpll", 1, 5),
+ FACTOR(CLK_TOP2_IMGPLL_D5_D2, "imgpll_d5_d2", "imgpll", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D3, "mmpll2_d3", "mmpll2", 1, 3),
+ FACTOR(CLK_TOP2_MMPLL2_D4, "mmpll2_d4", "mmpll2", 1, 4),
+ FACTOR(CLK_TOP2_MMPLL2_D4_D2, "mmpll2_d4_d2", "mmpll2", 1, 8),
+ FACTOR(CLK_TOP2_MMPLL2_D5, "mmpll2_d5", "mmpll2", 1, 5),
+ FACTOR(CLK_TOP2_MMPLL2_D5_D2, "mmpll2_d5_d2", "mmpll2", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D6, "mmpll2_d6", "mmpll2", 1, 6),
+ FACTOR(CLK_TOP2_MMPLL2_D6_D2, "mmpll2_d6_d2", "mmpll2", 1, 12),
+ FACTOR(CLK_TOP2_MMPLL2_D7, "mmpll2_d7", "mmpll2", 1, 7),
+ FACTOR(CLK_TOP2_MMPLL2_D9, "mmpll2_d9", "mmpll2", 1, 9),
+ FACTOR(CLK_TOP2_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 92, 1473),
+ FACTOR(CLK_TOP2_TVDPLL3_D2, "tvdpll3_d2", "tvdpll3", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL3_D4, "tvdpll3_d4", "tvdpll3", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL3_D8, "tvdpll3_d8", "tvdpll3", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL3_D16, "tvdpll3_d16", "tvdpll3", 92, 1473),
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d8",
+ "ck_osc_d5",
+ "ck_osc_d4",
+ "univpll2_d6_d2",
+ "mainpll2_d9",
+ "ck_osc_d2",
+ "mainpll2_d4_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "univpll2_d7",
+ "mainpll2_d6",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "univpll2_d5"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "mmpll2_d6_d2",
+ "ck_osc_d2",
+ "imgpll_d5_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "imgpll_d5",
+ "ck_mainpll_d4",
+ "mmpll2_d5",
+ "imgpll_d4"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "univpll2_d7",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d4_d4",
+ "mainpll2_d7_d2",
+ "mainpll2_d6_d2",
+ "mainpll2_d5_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mainpll2_d4",
+ "imgpll_d2"
+};
+
+static const char * const ccusys_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const ccutm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mainpll2_d5_d2",
+ "univpll2_d5_d2",
+ "mainpll2_d4_d2",
+ "mmpll2_d9",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "univpll2_d3"
+};
+
+static const char * const dp1_parents[] = {
+ "clk26m",
+ "tvdpll2_d16",
+ "tvdpll2_d8",
+ "tvdpll2_d4",
+ "tvdpll2_d2"
+};
+
+static const char * const dp0_parents[] = {
+ "clk26m",
+ "tvdpll1_d16",
+ "tvdpll1_d8",
+ "tvdpll1_d4",
+ "ck_tvdpll1_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d4_d2",
+ "ck_mainpll_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d5_d2",
+ "mmpll2_d6_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d9",
+ "mmpll2_d6_d2",
+ "mainpll2_d4_d2",
+ "ck_mainpll_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const mminfra_snoc_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d9",
+ "ck_mainpll_d7",
+ "ck_mainpll_d6",
+ "mmpll2_d4_d2",
+ "ck_mainpll_d5",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mmpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3",
+ "mmpll2_d3",
+ "mainpll2_d2"
+};
+
+static const char * const mmup_parents[] = {
+ "clk26m",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "ck_osc_d2",
+ "ck_osc",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_ao_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d3"
+};
+
+static const char * const dvo_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "tvdpll3_d2"
+};
+
+static const char * const dvo_favt_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "vlp_apll1",
+ "vlp_apll2",
+ "tvdpll3_d2"
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CKSYS2_CLK_CFG_0 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF0, "seninf0", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF1, "seninf1", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF2, "seninf2", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF2_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF3, "seninf3", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF3_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 28),
+ /* CKSYS2_CLK_CFG_1 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF4, "seninf4", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF4_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 27),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF5, "seninf5", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF5_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 26),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IMG1, "img1", img1_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IMG1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 25),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IPE, "ipe", ipe_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 24),
+ /* CKSYS2_CLK_CFG_2 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAM, "cam", cam_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 23),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAMTM, "camtm", camtm_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAMTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 22),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DPE, "dpe", dpe_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 21),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VDEC, "vdec", vdec_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VDEC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 20),
+ /* CKSYS2_CLK_CFG_3 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUSYS, "ccusys", ccusys_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUSYS_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 19),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUTM, "ccutm", ccutm_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 18),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VENC, "venc", venc_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VENC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 17),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO, "dvo", dvo_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 16),
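+ /* CKSYS2_CLK_CFG_4 */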
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO_FAVT, "dvo_favt", dvo_favt_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 0, 3, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_FAVT_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP1, "dp1", dp1_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 14),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP0, "dp0", dp0_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 16, 3, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DISP, "disp", disp_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ MM_HWV_CG_34_DONE, MM_HWV_CG_34_SET, MM_HWV_CG_34_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DISP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 12),
+ /* CKSYS2_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MDP, "mdp", mdp_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MDP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA, "mminfra", mminfra_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_SNOC, "mminfra_snoc", mminfra_snoc_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SNOC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 9),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_MMUP, "mmup", mmup_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMUP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 8),
+ /* CKSYS2_CLK_CFG_6 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_AO, "mminfra_ao", mminfra_ao_parents,
+ CKSYS2_CLK_CFG_6, CKSYS2_CLK_CFG_6_SET, CKSYS2_CLK_CFG_6_CLR,
+ MM_HWV_CG_36_DONE, MM_HWV_CG_36_SET, MM_HWV_CG_36_CLR,
+ 16, 2, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_AO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 5),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen-gp2", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck2",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 GP2 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ufs_ao.c b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
new file mode 100644
index 000000000000..0c04717b7b4b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define MT8196_UFSAO_RST0_SET_OFFSET 0x48
+#define MT8196_UFSAO_RST1_SET_OFFSET 0x148
+
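+/*
+ * Both banks use mtk_clk_gate_ops_setclr: writing set_ofs asserts the
+ * clock-gate bit (clock off), writing clr_ofs deasserts it, and sta_ofs
+ * reflects the current gate state.
+ */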
+static const struct mtk_gate_regs ufsao0_cg_regs = {
+ .set_ofs = 0x108,
+ .clr_ofs = 0x10c,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs ufsao1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_UFSAO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_UFSAO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ufsao_clks[] = {
+ /* UFSAO0 */
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_UFS, "ufsao_ufshci_ufs", "ufs", 0),
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_AES, "ufsao_ufshci_aes", "aes_ufsfde", 1),
+ /* UFSAO1 */
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_TX_SYM, "ufsao_unipro_tx_sym", "clk26m", 0),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM0, "ufsao_unipro_rx_sym0", "clk26m", 1),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM1, "ufsao_unipro_rx_sym1", "clk26m", 2),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SYS, "ufsao_unipro_sys", "ufs", 3),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SAP, "ufsao_unipro_sap", "clk26m", 4),
+ GATE_UFSAO1(CLK_UFSAO_PHY_SAP, "ufsao_phy_sap", "clk26m", 8),
+};
+
+static u16 ufsao_rst_ofs[] = {
+ MT8196_UFSAO_RST0_SET_OFFSET,
+ MT8196_UFSAO_RST1_SET_OFFSET
+};
+
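+/*
+ * Maps the DT reset indices onto bank-relative bit positions: bank 0
+ * lives at MT8196_UFSAO_RST0_SET_OFFSET and bank 1, RST_NR_PER_BANK
+ * indices later, at MT8196_UFSAO_RST1_SET_OFFSET.
+ */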
+static u16 ufsao_rst_idx_map[] = {
+ [MT8196_UFSAO_RST0_UFS_MPHY] = 8,
+ [MT8196_UFSAO_RST1_UFS_UNIPRO] = 1 * RST_NR_PER_BANK + 0,
+ [MT8196_UFSAO_RST1_UFS_CRYPTO] = 1 * RST_NR_PER_BANK + 1,
+ [MT8196_UFSAO_RST1_UFSHCI] = 1 * RST_NR_PER_BANK + 2,
+};
+
+static const struct mtk_clk_rst_desc ufsao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = ufsao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ufsao_rst_ofs),
+ .rst_idx_map = ufsao_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ufsao_rst_idx_map),
+};
+
+static const struct mtk_clk_desc ufsao_mcd = {
+ .clks = ufsao_clks,
+ .num_clks = ARRAY_SIZE(ufsao_clks),
+ .rst_desc = &ufsao_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_ufs_ao[] = {
+ { .compatible = "mediatek,mt8196-ufscfg-ao", .data = &ufsao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ufs_ao);
+
+static struct platform_driver clk_mt8196_ufs_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-ufs-ao",
+ .of_match_table = of_match_clk_mt8196_ufs_ao,
+ },
+};
+
+module_platform_driver(clk_mt8196_ufs_ao_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 ufs_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdec.c b/drivers/clk/mediatek/clk-mt8196-vdec.c
new file mode 100644
index 000000000000..f8dcd84a2b58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdec.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vde20_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vde20_hwv_regs = {
+ .set_ofs = 0x0088,
+ .clr_ofs = 0x008c,
+ .sta_ofs = 0x2c44,
+};
+
+static const struct mtk_gate_regs vde21_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde21_hwv_regs = {
+ .set_ofs = 0x0080,
+ .clr_ofs = 0x0084,
+ .sta_ofs = 0x2c40,
+};
+
+static const struct mtk_gate_regs vde22_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs vde22_hwv_regs = {
+ .set_ofs = 0x0078,
+ .clr_ofs = 0x007c,
+ .sta_ofs = 0x2c3c,
+};
+
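+/*
+ * The VDEC gates are hardware-voted: on top of the usual set/clr/sta
+ * bank, hwv_regs provides SET/CLR/DONE voting registers shared with
+ * other voting agents, so the clock is only gated once every voter has
+ * dropped its vote. The _inv ops treat a set bit as "enabled", and
+ * CLK_OPS_PARENT_ENABLE keeps the "vdec" parent running while the gate
+ * registers are accessed.
+ */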
+#define GATE_HWV_VDE20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde21_cg_regs, \
+ .hwv_regs = &vde21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE22(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde22_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde2_clks[] = {
+ /* VDE20 */
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN, "vde2_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_ACTIVE, "vde2_vdec_active", "vdec", 4),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN_ENG, "vde2_vdec_cken_eng", "vdec", 8),
+ /* VDE21 */
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN, "vde2_lat_cken", "vdec", 0),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_ACTIVE, "vde2_lat_active", "vdec", 4),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN_ENG, "vde2_lat_cken_eng", "vdec", 8),
+ /* VDE22 */
+ GATE_HWV_VDE22(CLK_VDE2_LARB1_CKEN, "vde2_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde2_mcd = {
+ .clks = vde2_clks,
+ .num_clks = ARRAY_SIZE(vde2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs vde10_hwv_regs = {
+ .set_ofs = 0x00a0,
+ .clr_ofs = 0x00a4,
+ .sta_ofs = 0x2c50,
+};
+
+static const struct mtk_gate_regs vde11_cg_regs = {
+ .set_ofs = 0x1e0,
+ .clr_ofs = 0x1e0,
+ .sta_ofs = 0x1e0,
+};
+
+static const struct mtk_gate_regs vde11_hwv_regs = {
+ .set_ofs = 0x00b0,
+ .clr_ofs = 0x00b4,
+ .sta_ofs = 0x2c58,
+};
+
+static const struct mtk_gate_regs vde12_cg_regs = {
+ .set_ofs = 0x1ec,
+ .clr_ofs = 0x1ec,
+ .sta_ofs = 0x1ec,
+};
+
+static const struct mtk_gate_regs vde12_hwv_regs = {
+ .set_ofs = 0x00a8,
+ .clr_ofs = 0x00ac,
+ .sta_ofs = 0x2c54,
+};
+
+static const struct mtk_gate_regs vde13_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde13_hwv_regs = {
+ .set_ofs = 0x0098,
+ .clr_ofs = 0x009c,
+ .sta_ofs = 0x2c4c,
+};
+
+static const struct mtk_gate_regs vde14_hwv_regs = {
+ .set_ofs = 0x0090,
+ .clr_ofs = 0x0094,
+ .sta_ofs = 0x2c48,
+};
+
+#define GATE_HWV_VDE10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde11_cg_regs, \
+ .hwv_regs = &vde11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE12(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde12_cg_regs, \
+ .hwv_regs = &vde12_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE13(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde13_cg_regs, \
+ .hwv_regs = &vde13_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE14(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde14_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde1_clks[] = {
+ /* VDE10 */
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN, "vde1_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_ACTIVE, "vde1_vdec_active", "vdec", 4),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN_ENG, "vde1_vdec_cken_eng", "vdec", 8),
+ /* VDE11 */
+ GATE_HWV_VDE11(CLK_VDE1_VDEC_SOC_IPS_EN, "vde1_vdec_soc_ips_en", "vdec", 0),
+ /* VDE12 */
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_EN, "vde1_aptv_en", "ck_tck_26m_mx9_ck", 0),
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_TOP_EN, "vde1_aptv_topen", "ck_tck_26m_mx9_ck", 1),
+ /* VDE13 */
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN, "vde1_lat_cken", "vdec", 0),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_ACTIVE, "vde1_lat_active", "vdec", 4),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN_ENG, "vde1_lat_cken_eng", "vdec", 8),
+ /* VDE14 */
+ GATE_HWV_VDE14(CLK_VDE1_LARB1_CKEN, "vde1_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde1_mcd = {
+ .clks = vde1_clks,
+ .num_clks = ARRAY_SIZE(vde1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdec[] = {
+ { .compatible = "mediatek,mt8196-vdecsys", .data = &vde2_mcd },
+ { .compatible = "mediatek,mt8196-vdecsys-soc", .data = &vde1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdec);
+
+static struct platform_driver clk_mt8196_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-vdec",
+ .of_match_table = of_match_clk_mt8196_vdec,
+ },
+};
+module_platform_driver(clk_mt8196_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Decoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
new file mode 100644
index 000000000000..fddb69d1c3eb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm_v_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm_v_hwv_regs = {
+ .set_ofs = 0x0030,
+ .clr_ofs = 0x0034,
+ .sta_ofs = 0x2c18,
+};
+
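+/*
+ * GATE_MM_AO_V() gates carry CLK_IS_CRITICAL: they feed the always-on
+ * display path, so the core must never disable them, even when no
+ * consumer is registered.
+ */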
+#define GATE_MM_AO_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IS_CRITICAL, \
+ }
+
+#define GATE_HWV_MM_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .hwv_regs = &mm_v_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_v_clks[] = {
+ GATE_HWV_MM_V(CLK_MM_V_DISP_VDISP_AO_CONFIG, "mm_v_disp_vdisp_ao_config", "disp", 0),
+ GATE_HWV_MM_V(CLK_MM_V_DISP_DPC, "mm_v_disp_dpc", "disp", 16),
+ GATE_MM_AO_V(CLK_MM_V_SMI_SUB_SOMM0, "mm_v_smi_sub_somm0", "disp", 2),
+};
+
+static const struct mtk_clk_desc mm_v_mcd = {
+ .clks = mm_v_clks,
+ .num_clks = ARRAY_SIZE(mm_v_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdisp_ao[] = {
+ { .compatible = "mediatek,mt8196-vdisp-ao", .data = &mm_v_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdisp_ao);
+
+static struct platform_driver clk_mt8196_vdisp_ao_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-vdisp-ao",
+ .of_match_table = of_match_clk_mt8196_vdisp_ao,
+ },
+};
+module_platform_driver(clk_mt8196_vdisp_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 vdisp_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-venc.c b/drivers/clk/mediatek/clk-mt8196-venc.c
new file mode 100644
index 000000000000..13e2e36e945f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-venc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ven10_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ven10_hwv_regs = {
+ .set_ofs = 0x00b8,
+ .clr_ofs = 0x00bc,
+ .sta_ofs = 0x2c5c,
+};
+
+static const struct mtk_gate_regs ven11_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ven11_hwv_regs = {
+ .set_ofs = 0x00c0,
+ .clr_ofs = 0x00c4,
+ .sta_ofs = 0x2c60,
+};
+
+#define GATE_VEN10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = (_flags) | \
+ CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN10(_id, _name, _parent, _shift) \
+ GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_HWV_VEN11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven1_clks[] = {
+ /* VEN10 */
+ GATE_HWV_VEN10(CLK_VEN1_CKE0_LARB, "ven1_larb", "venc", 0),
+ GATE_HWV_VEN10(CLK_VEN1_CKE1_VENC, "ven1_venc", "venc", 4),
+ GATE_VEN10(CLK_VEN1_CKE2_JPGENC, "ven1_jpgenc", "venc", 8),
+ GATE_VEN10(CLK_VEN1_CKE3_JPGDEC, "ven1_jpgdec", "venc", 12),
+ GATE_VEN10(CLK_VEN1_CKE4_JPGDEC_C1, "ven1_jpgdec_c1", "venc", 16),
+ GATE_HWV_VEN10(CLK_VEN1_CKE5_GALS, "ven1_gals", "venc", 28),
+ GATE_HWV_VEN10(CLK_VEN1_CKE29_VENC_ADAB_CTRL, "ven1_venc_adab_ctrl",
+ "venc", 29),
+ GATE_HWV_VEN10_FLAGS(CLK_VEN1_CKE29_VENC_XPC_CTRL,
+ "ven1_venc_xpc_ctrl", "venc", 30,
+ CLK_IGNORE_UNUSED),
+ GATE_HWV_VEN10(CLK_VEN1_CKE6_GALS_SRAM, "ven1_gals_sram", "venc", 31),
+ /* VEN11 */
+ GATE_HWV_VEN11(CLK_VEN1_RES_FLAT, "ven1_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven1_mcd = {
+ .clks = ven1_clks,
+ .num_clks = ARRAY_SIZE(ven1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven20_hwv_regs = {
+ .set_ofs = 0x00c8,
+ .clr_ofs = 0x00cc,
+ .sta_ofs = 0x2c64,
+};
+
+static const struct mtk_gate_regs ven21_hwv_regs = {
+ .set_ofs = 0x00d0,
+ .clr_ofs = 0x00d4,
+ .sta_ofs = 0x2c68,
+};
+
+#define GATE_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven2_clks[] = {
+ /* VEN20 */
+ GATE_HWV_VEN20(CLK_VEN2_CKE0_LARB, "ven2_larb", "venc", 0),
+ GATE_HWV_VEN20(CLK_VEN2_CKE1_VENC, "ven2_venc", "venc", 4),
+ GATE_VEN20(CLK_VEN2_CKE2_JPGENC, "ven2_jpgenc", "venc", 8),
+ GATE_VEN20(CLK_VEN2_CKE3_JPGDEC, "ven2_jpgdec", "venc", 12),
+ GATE_HWV_VEN20(CLK_VEN2_CKE5_GALS, "ven2_gals", "venc", 28),
+ GATE_HWV_VEN20(CLK_VEN2_CKE29_VENC_XPC_CTRL, "ven2_venc_xpc_ctrl", "venc", 30),
+ GATE_HWV_VEN20(CLK_VEN2_CKE6_GALS_SRAM, "ven2_gals_sram", "venc", 31),
+ /* VEN21 */
+ GATE_HWV_VEN21(CLK_VEN2_RES_FLAT, "ven2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven2_mcd = {
+ .clks = ven2_clks,
+ .num_clks = ARRAY_SIZE(ven2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven_c20_hwv_regs = {
+ .set_ofs = 0x00d8,
+ .clr_ofs = 0x00dc,
+ .sta_ofs = 0x2c6c,
+};
+
+static const struct mtk_gate_regs ven_c21_hwv_regs = {
+ .set_ofs = 0x00e0,
+ .clr_ofs = 0x00e4,
+ .sta_ofs = 0x2c70,
+};
+
+#define GATE_HWV_VEN_C20(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven_c20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN_C21(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven_c21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven_c2_clks[] = {
+ /* VEN_C20 */
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE0_LARB, "ven_c2_larb", "venc", 0),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE1_VENC, "ven_c2_venc", "venc", 4),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE5_GALS, "ven_c2_gals", "venc", 28),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE29_VENC_XPC_CTRL, "ven_c2_venc_xpc_ctrl",
+ "venc", 30),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE6_GALS_SRAM, "ven_c2_gals_sram", "venc", 31),
+ /* VEN_C21 */
+ GATE_HWV_VEN_C21(CLK_VEN_C2_RES_FLAT, "ven_c2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven_c2_mcd = {
+ .clks = ven_c2_clks,
+ .num_clks = ARRAY_SIZE(ven_c2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_venc[] = {
+ { .compatible = "mediatek,mt8196-vencsys", .data = &ven1_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c1", .data = &ven2_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c2", .data = &ven_c2_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_venc);
+
+static struct platform_driver clk_mt8196_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-venc",
+ .of_match_table = of_match_clk_mt8196_venc,
+ },
+};
+module_platform_driver(clk_mt8196_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Encoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vlpckgen.c b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
new file mode 100644
index 000000000000..d59a8a9d9855
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+/* MUX SEL REG */
+#define VLP_CLK_CFG_UPDATE 0x0004
+#define VLP_CLK_CFG_UPDATE1 0x0008
+#define VLP_CLK_CFG_0 0x0010
+#define VLP_CLK_CFG_0_SET 0x0014
+#define VLP_CLK_CFG_0_CLR 0x0018
+#define VLP_CLK_CFG_1 0x0020
+#define VLP_CLK_CFG_1_SET 0x0024
+#define VLP_CLK_CFG_1_CLR 0x0028
+#define VLP_CLK_CFG_2 0x0030
+#define VLP_CLK_CFG_2_SET 0x0034
+#define VLP_CLK_CFG_2_CLR 0x0038
+#define VLP_CLK_CFG_3 0x0040
+#define VLP_CLK_CFG_3_SET 0x0044
+#define VLP_CLK_CFG_3_CLR 0x0048
+#define VLP_CLK_CFG_4 0x0050
+#define VLP_CLK_CFG_4_SET 0x0054
+#define VLP_CLK_CFG_4_CLR 0x0058
+#define VLP_CLK_CFG_5 0x0060
+#define VLP_CLK_CFG_5_SET 0x0064
+#define VLP_CLK_CFG_5_CLR 0x0068
+#define VLP_CLK_CFG_6 0x0070
+#define VLP_CLK_CFG_6_SET 0x0074
+#define VLP_CLK_CFG_6_CLR 0x0078
+#define VLP_CLK_CFG_7 0x0080
+#define VLP_CLK_CFG_7_SET 0x0084
+#define VLP_CLK_CFG_7_CLR 0x0088
+#define VLP_CLK_CFG_8 0x0090
+#define VLP_CLK_CFG_8_SET 0x0094
+#define VLP_CLK_CFG_8_CLR 0x0098
+#define VLP_CLK_CFG_9 0x00a0
+#define VLP_CLK_CFG_9_SET 0x00a4
+#define VLP_CLK_CFG_9_CLR 0x00a8
+#define VLP_CLK_CFG_10 0x00b0
+#define VLP_CLK_CFG_10_SET 0x00b4
+#define VLP_CLK_CFG_10_CLR 0x00b8
+#define VLP_OCIC_FENC_STATUS_MON_0 0x039c
+#define VLP_OCIC_FENC_STATUS_MON_1 0x03a0
+
+/* MUX SHIFT */
+#define TOP_MUX_SCP_SHIFT 0
+#define TOP_MUX_SCP_SPI_SHIFT 1
+#define TOP_MUX_SCP_IIC_SHIFT 2
+#define TOP_MUX_SCP_IIC_HS_SHIFT 3
+#define TOP_MUX_PWRAP_ULPOSC_SHIFT 4
+#define TOP_MUX_SPMI_M_TIA_32K_SHIFT 5
+#define TOP_MUX_APXGPT_26M_B_SHIFT 6
+#define TOP_MUX_DPSW_SHIFT 7
+#define TOP_MUX_DPSW_CENTRAL_SHIFT 8
+#define TOP_MUX_SPMI_M_MST_SHIFT 9
+#define TOP_MUX_DVFSRC_SHIFT 10
+#define TOP_MUX_PWM_VLP_SHIFT 11
+#define TOP_MUX_AXI_VLP_SHIFT 12
+#define TOP_MUX_SYSTIMER_26M_SHIFT 13
+#define TOP_MUX_SSPM_SHIFT 14
+#define TOP_MUX_SRCK_SHIFT 15
+#define TOP_MUX_CAMTG0_SHIFT 16
+#define TOP_MUX_CAMTG1_SHIFT 17
+#define TOP_MUX_CAMTG2_SHIFT 18
+#define TOP_MUX_CAMTG3_SHIFT 19
+#define TOP_MUX_CAMTG4_SHIFT 20
+#define TOP_MUX_CAMTG5_SHIFT 21
+#define TOP_MUX_CAMTG6_SHIFT 22
+#define TOP_MUX_CAMTG7_SHIFT 23
+#define TOP_MUX_SSPM_26M_SHIFT 25
+#define TOP_MUX_ULPOSC_SSPM_SHIFT 26
+#define TOP_MUX_VLP_PBUS_26M_SHIFT 27
+#define TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT 28
+#define TOP_MUX_DPMSRDMA_SHIFT 29
+#define TOP_MUX_VLP_PBUS_156M_SHIFT 30
+#define TOP_MUX_SPM_SHIFT 0
+#define TOP_MUX_MMINFRA_VLP_SHIFT 1
+#define TOP_MUX_USB_TOP_SHIFT 2
+#define TOP_MUX_SSUSB_XHCI_SHIFT 3
+#define TOP_MUX_NOC_VLP_SHIFT 4
+#define TOP_MUX_AUDIO_H_SHIFT 5
+#define TOP_MUX_AUD_ENGEN1_SHIFT 6
+#define TOP_MUX_AUD_ENGEN2_SHIFT 7
+#define TOP_MUX_AUD_INTBUS_SHIFT 8
+#define TOP_MUX_SPU_VLP_26M_SHIFT 9
+#define TOP_MUX_SPU0_VLP_SHIFT 10
+#define TOP_MUX_SPU1_VLP_SHIFT 11
+
+/* CKSTA REG */
+#define VLP_CKSTA_REG0 0x0250
+#define VLP_CKSTA_REG1 0x0254
+
+/* HW Voter REG */
+#define HWV_CG_9_SET 0x0048
+#define HWV_CG_9_CLR 0x004c
+#define HWV_CG_9_DONE 0x2c24
+#define HWV_CG_10_SET 0x0050
+#define HWV_CG_10_CLR 0x0054
+#define HWV_CG_10_DONE 0x2c28
+
+/* PLL REG */
+#define VLP_AP_PLL_CON3 0x264
+#define VLP_APLL1_TUNER_CON0 0x2a4
+#define VLP_APLL2_TUNER_CON0 0x2a8
+#define VLP_APLL1_CON0 0x274
+#define VLP_APLL1_CON1 0x278
+#define VLP_APLL1_CON2 0x27c
+#define VLP_APLL1_CON3 0x280
+#define VLP_APLL2_CON0 0x28c
+#define VLP_APLL2_CON1 0x290
+#define VLP_APLL2_CON2 0x294
+#define VLP_APLL2_CON3 0x298
+
+/* vlp apll1 tuner default value */
+#define VLP_APLL1_TUNER_CON0_VALUE 0x6f28bd4d
+/* vlp apll2 tuner default value + 1 */
+#define VLP_APLL2_TUNER_CON0_VALUE 0x78fd5265
+
+#define VLP_PLLEN_ALL 0x080
+#define VLP_PLLEN_ALL_SET 0x084
+#define VLP_PLLEN_ALL_CLR 0x088
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = VLP_PLLEN_ALL, \
+ .en_set_reg = VLP_PLLEN_ALL_SET, \
+ .en_clr_reg = VLP_PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
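+/* Serializes read-modify-write accesses to the shared VLP clock registers. */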
+static DEFINE_SPINLOCK(mt8196_clk_vlp_lock);
+
+static const struct mtk_fixed_factor vlp_divs[] = {
+ FACTOR(CLK_VLP_CLK26M, "vlp_clk26m", "clk26m", 1, 1),
+ FACTOR(CLK_VLP_APLL1_D4, "apll1_d4", "vlp_apll1", 1, 4),
+ FACTOR(CLK_VLP_APLL1_D8, "apll1_d8", "vlp_apll1", 1, 8),
+ FACTOR(CLK_VLP_APLL2_D4, "apll2_d4", "vlp_apll2", 1, 4),
+ FACTOR(CLK_VLP_APLL2_D8, "apll2_d8", "vlp_apll2", 1, 8),
+};
+
+static const char * const vlp_scp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d6",
+ "mainpll_d4",
+ "mainpll_d3",
+ "vlp_apll1"
+};
+
+static const char * const vlp_scp_spi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const vlp_scp_iic_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_scp_iic_hs_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_pwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_spmi_32k_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_apxgpt_26m_b_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpsw_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_dpsw_central_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_spmi_m_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_dvfsrc_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_pwm_vlp_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d8",
+ "mainpll_d4_d8"
+};
+
+static const char * const vlp_axi_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4",
+ "osc_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_systimer_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_sspm_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d2",
+ "osc_d2",
+ "mainpll_d6"
+};
+
+static const char * const vlp_srck_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_camtg0_1_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "ulposc3",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_camtg2_7_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_sspm_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_ulposc_sspm_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const vlp_vlp_pbus_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_debug_err_flag_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpmsrdma_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_vlp_pbus_156m_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_spm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_mminfra_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d3"
+};
+
+static const char * const vlp_usb_parents[] = {
+ "clk26m",
+ "mainpll_d9"
+};
+
+static const char * const vlp_noc_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d9"
+};
+
+static const char * const vlp_audio_h_parents[] = {
+ "vlp_clk26m",
+ "vlp_apll1",
+ "vlp_apll2"
+};
+
+static const char * const vlp_aud_engen1_parents[] = {
+ "vlp_clk26m",
+ "apll1_d8",
+ "apll1_d4"
+};
+
+static const char * const vlp_aud_engen2_parents[] = {
+ "vlp_clk26m",
+ "apll2_d8",
+ "apll2_d4"
+};
+
+static const char * const vlp_aud_intbus_parents[] = {
+ "vlp_clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4"
+};
+
+static const u8 vlp_aud_parent_index[] = { 1, 2, 3 };
+
+static const char * const vlp_spvlp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_spu0_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const vlp_spu1_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const struct mtk_mux vlp_muxes[] = {
+ /* VLP_CLK_CFG_0 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_SCP, "vlp_scp", vlp_scp_parents,
+ VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET, VLP_CLK_CFG_0_CLR,
+ 0, 3, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 31),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_SPI, "vlp_scp_spi",
+ vlp_scp_spi_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SPI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC, "vlp_scp_iic",
+ vlp_scp_iic_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC_HS, "vlp_scp_iic_hs",
+ vlp_scp_iic_hs_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 24, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_HS_SHIFT),
+ /* VLP_CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_VLP_PWRAP_ULPOSC, "vlp_pwrap_ulposc",
+ vlp_pwrap_ulposc_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_PWRAP_ULPOSC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_TIA_32K, "vlp_spmi_32k",
+ vlp_spmi_32k_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_TIA_32K_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_APXGPT_26M_B, "vlp_apxgpt_26m_b",
+ vlp_apxgpt_26m_b_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_APXGPT_26M_B_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW, "vlp_dpsw",
+ vlp_dpsw_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_SHIFT),
+ /* VLP_CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW_CENTRAL, "vlp_dpsw_central",
+ vlp_dpsw_central_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_CENTRAL_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_MST, "vlp_spmi_m",
+ vlp_spmi_m_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_MST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DVFSRC, "vlp_dvfsrc",
+ vlp_dvfsrc_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DVFSRC_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_PWM_VLP, "vlp_pwm_vlp", vlp_pwm_vlp_parents,
+ VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET, VLP_CLK_CFG_2_CLR,
+ 24, 3, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_PWM_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 20),
+ /* VLP_CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_VLP_AXI_VLP, "vlp_axi_vlp",
+ vlp_axi_vlp_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 0, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_AXI_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SYSTIMER_26M, "vlp_systimer_26m",
+ vlp_systimer_26m_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SYSTIMER_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM, "vlp_sspm",
+ vlp_sspm_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SRCK, "vlp_srck",
+ vlp_srck_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SRCK_SHIFT),
+ /* VLP_CLK_CFG_4 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG0, "vlp_camtg0", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG0_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 15),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG1, "vlp_camtg1", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 14),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG2, "vlp_camtg2", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG3, "vlp_camtg3", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG3_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 12),
+ /* VLP_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG4, "vlp_camtg4", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG4_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG5, "vlp_camtg5", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG5_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG6, "vlp_camtg6", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG6_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 9),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG7, "vlp_camtg7", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG7_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 8),
+ /* VLP_CLK_CFG_6 */
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM_26M, "vlp_sspm_26m",
+ vlp_sspm_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_ULPOSC_SSPM, "vlp_ulposc_sspm",
+ vlp_ulposc_sspm_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_ULPOSC_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_26M, "vlp_vlp_pbus_26m",
+ vlp_vlp_pbus_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_26M_SHIFT),
+ /* VLP_CLK_CFG_7 */
+ MUX_CLR_SET_UPD(CLK_VLP_DEBUG_ERR_FLAG, "vlp_debug_err_flag",
+ vlp_debug_err_flag_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPMSRDMA, "vlp_dpmsrdma",
+ vlp_dpmsrdma_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPMSRDMA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_156M, "vlp_vlp_pbus_156m",
+ vlp_vlp_pbus_156m_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_156M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPM, "vlp_spm",
+ vlp_spm_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPM_SHIFT),
+ /* VLP_CLK_CFG_8 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_MMINFRA, "vlp_mminfra", vlp_mminfra_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_MMINFRA_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 31),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_TOP, "vlp_usb", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 8, 1, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 30),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_XHCI, "vlp_usb_xhci", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 16, 1, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 29),
+ MUX_CLR_SET_UPD(CLK_VLP_NOC_VLP, "vlp_noc_vlp",
+ vlp_noc_vlp_parents, VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET,
+ VLP_CLK_CFG_8_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_NOC_VLP_SHIFT),
+ /* VLP_CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUDIO_H, "vlp_audio_h",
+ vlp_audio_h_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUDIO_H_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 27),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN1, "vlp_aud_engen1",
+ vlp_aud_engen1_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 8, 2, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 26),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN2, "vlp_aud_engen2",
+ vlp_aud_engen2_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 16, 2, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_INTBUS, "vlp_aud_intbus",
+ vlp_aud_intbus_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 24, 2, 31, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_INTBUS_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 24),
+ /* VLP_CLK_CFG_10 */
+ MUX_CLR_SET_UPD(CLK_VLP_SPVLP_26M, "vlp_spvlp_26m",
+ vlp_spvlp_26m_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU0_VLP, "vlp_spu0_vlp",
+ vlp_spu0_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU0_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU1_VLP, "vlp_spu1_vlp",
+ vlp_spu1_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU1_VLP_SHIFT),
+};
+
+static const struct mtk_pll_data vlp_plls[] = {
+ PLL_FENC(CLK_VLP_APLL1, "vlp_apll1", VLP_APLL1_CON0, 0x0358, 1, 0,
+ VLP_APLL1_CON1, 24, VLP_APLL1_CON2, 0, 32, 0),
+ PLL_FENC(CLK_VLP_APLL2, "vlp_apll2", VLP_APLL2_CON0, 0x0358, 0, 0,
+ VLP_APLL2_CON1, 24, VLP_APLL2_CON2, 0, 32, 1),
+};
+
+static const struct regmap_config vlpckgen_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int clk_mt8196_vlp_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(vlp_muxes) +
+ ARRAY_SIZE(vlp_plls) +
+ ARRAY_SIZE(vlp_divs));
+ if (!clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_clk_data;
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base, &vlpckgen_regmap_config);
+ if (IS_ERR(regmap)) {
+ r = PTR_ERR(regmap);
+ goto free_clk_data;
+ }
+
+ r = mtk_clk_register_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_muxes(dev, vlp_muxes, ARRAY_SIZE(vlp_muxes),
+ node, &mt8196_clk_vlp_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_plls(node, vlp_plls, ARRAY_SIZE(vlp_plls),
+ clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ /* Initialize APLL tuner registers */
+ regmap_write(regmap, VLP_APLL1_TUNER_CON0, VLP_APLL1_TUNER_CON0_VALUE);
+ regmap_write(regmap, VLP_APLL2_TUNER_CON0, VLP_APLL2_TUNER_CON0_VALUE);
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_vlp_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_vlp_ck[] = {
+ { .compatible = "mediatek,mt8196-vlpckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vlp_ck);
+
+static struct platform_driver clk_mt8196_vlp_drv = {
+ .probe = clk_mt8196_vlp_probe,
+ .remove = clk_mt8196_vlp_remove,
+ .driver = {
+ .name = "clk-mt8196-vlpck",
+ .of_match_table = of_match_clk_mt8196_vlp_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 VLP clock generator driver");
+module_platform_driver(clk_mt8196_vlp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index ba1d1c495bc2..19cd27941747 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -685,4 +685,20 @@ void mtk_clk_simple_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
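+
+/*
+ * Fetch the regmap of the block referenced by the optional
+ * "mediatek,hardware-voter" phandle of @node, e.g.
+ * "mediatek,hardware-voter = <&hwv>;" (the label is illustrative only).
+ * Returns NULL when the property is absent, so the voter stays optional.
+ */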
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
+{
+ struct device_node *hwv_node;
+ struct regmap *regmap_hwv;
+
+ hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
+ if (!hwv_node)
+ return NULL;
+
+ regmap_hwv = device_node_to_regmap(hwv_node);
+ of_node_put(hwv_node);
+
+ return regmap_hwv;
+}
+EXPORT_SYMBOL_GPL(mtk_clk_get_hwv_regmap);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c17fe1c2d732..5417b9264e6d 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -20,6 +20,8 @@
#define MHZ (1000 * 1000)
+#define MTK_WAIT_HWV_DONE_US 30
+
struct platform_device;
/*
@@ -173,6 +175,25 @@ struct mtk_composite {
.flags = 0, \
}
+#define MUX_DIV_GATE(_id, _name, _parents, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _div_reg, _div_shift, _div_width, \
+ _gate_reg, _gate_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .mux_width = _mux_width, \
+ .divider_reg = _div_reg, \
+ .divider_shift = _div_shift, \
+ .divider_width = _div_width, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }
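+
+/*
+ * Hypothetical usage sketch (the ID, name and register fields are
+ * illustrative only, not taken from a real SoC):
+ *
+ * MUX_DIV_GATE(CLK_FOO, "foo", foo_parents,
+ * 0x40, 24, 3, <- mux reg/shift/width
+ * 0x40, 16, 7, <- divider reg/shift/width
+ * 0x40, 7) <- gate reg/shift
+ */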
+
int mtk_clk_register_composites(struct device *dev,
const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
@@ -245,5 +266,6 @@ int mtk_clk_pdev_probe(struct platform_device *pdev);
void mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
void mtk_clk_simple_remove(struct platform_device *pdev);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 60990296450b..c5af6dc078a3 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,11 +16,15 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "clk-mtk.h"
#include "clk-mux.h"
+#define MTK_WAIT_FENC_DONE_US 30
+
struct mtk_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
const struct mtk_mux *data;
spinlock_t *lock;
bool reparent;
@@ -30,6 +35,33 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
+static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
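+ /*
+ * Clearing the gate bit ungates the clock; the FENC status monitor
+ * then latches once the clock output is actually running.
+ */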
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return ret;
+}
+
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -70,6 +102,16 @@ static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
BIT(mux->data->gate_shift));
}
+static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
+
+ return !!(val & BIT(mux->data->fenc_shift));
+}
+
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -80,6 +122,41 @@ static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
return (val & BIT(mux->data->gate_shift)) == 0;
}
+static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+ int ret;
+
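+ /*
+ * Cast a vote for the clock through the hardware voter, wait for the
+ * vote to be acknowledged, then wait for the FENC status to confirm
+ * that the clock is running.
+ */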
+ regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, val & BIT(mux->data->gate_shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ return ret;
+}
+
+static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
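+ /* Withdraw the vote and wait for the voter to acknowledge it */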
+ regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, (val & BIT(mux->data->gate_shift)),
+ 0, MTK_WAIT_HWV_DONE_US);
+}
+
static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -146,9 +223,15 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
+ return true;
 
- return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+ return false;
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
@@ -168,9 +251,30 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_fenc_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
+
+const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_hwv_fenc_enable,
+ .disable = mtk_clk_mux_hwv_disable,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
+
static struct clk_hw *mtk_clk_register_mux(struct device *dev,
const struct mtk_mux *mux,
struct regmap *regmap,
+ struct regmap *regmap_hwv,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
@@ -186,8 +290,13 @@ static struct clk_hw *mtk_clk_register_mux(struct device *dev,
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
init.ops = mux->ops;
+ if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv) {
+ kfree(clk_mux);
+ return dev_err_ptr_probe(dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
+ }
clk_mux->regmap = regmap;
+ clk_mux->regmap_hwv = regmap_hwv;
clk_mux->data = mux;
clk_mux->lock = lock;
clk_mux->hw.init = &init;
@@ -220,6 +329,7 @@ int mtk_clk_register_muxes(struct device *dev,
struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
+ struct regmap *regmap_hwv;
struct clk_hw *hw;
int i;
@@ -229,6 +339,12 @@ int mtk_clk_register_muxes(struct device *dev,
return PTR_ERR(regmap);
}
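+ /* A hardware voter is optional; a NULL regmap means none is referenced */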
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
@@ -238,7 +354,7 @@ int mtk_clk_register_muxes(struct device *dev,
continue;
}
- hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 943ad1d7ce4b..151e56dcf884 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -29,10 +29,16 @@ struct mtk_mux {
u32 clr_ofs;
u32 upd_ofs;
+ u32 hwv_set_ofs;
+ u32 hwv_clr_ofs;
+ u32 hwv_sta_ofs;
+ u32 fenc_sta_mon_ofs;
+
u8 mux_shift;
u8 mux_width;
u8 gate_shift;
s8 upd_shift;
+ u8 fenc_shift;
const struct clk_ops *ops;
signed char num_parents;
@@ -77,6 +83,8 @@ struct mtk_mux {
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops;
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -118,6 +126,85 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .hwv_sta_ofs = _hwv_sta_ofs, \
+ .hwv_set_ofs = _hwv_set_ofs, \
+ .hwv_clr_ofs = _hwv_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_hwv_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ ARRAY_SIZE(_paridx), _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
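+
+/*
+ * The _INDEXED variant takes an explicit parent-index table: e.g. the
+ * MT8196 audio muxes pass vlp_aud_parent_index[] = { 1, 2, 3 }, mapping
+ * the listed parents to mux register values 1..3 and skipping value 0.
+ */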
+
int mtk_clk_register_muxes(struct device *dev,
const struct mtk_mux *muxes,
int num, struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ce453e1718e5..cd2b6ce551c6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -37,6 +37,13 @@ int mtk_pll_is_prepared(struct clk_hw *hw)
return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
+static int mtk_pll_fenc_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ return !!(readl(pll->fenc_addr) & BIT(pll->data->fenc_sta_bit));
+}
+
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
@@ -200,16 +207,19 @@ unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
}
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 pcw = 0;
int postdiv;
- mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+ mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate,
+ req->best_parent_rate);
+
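+ /* Report the rate the PLL will actually synthesize for this request */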
+ req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw,
+ postdiv);
- return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+ return 0;
}
int mtk_pll_prepare(struct clk_hw *hw)
@@ -274,14 +284,43 @@ void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
}
+static int mtk_pll_prepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_set_addr);
+
+ /* Wait 20us after enable for the PLL to stabilize */
+ udelay(20);
+
+ return 0;
+}
+
+static void mtk_pll_unprepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_clr_addr);
+}
+
const struct clk_ops mtk_pll_ops = {
.is_prepared = mtk_pll_is_prepared,
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
+ .set_rate = mtk_pll_set_rate,
+};
+
+const struct clk_ops mtk_pll_fenc_clr_set_ops = {
+ .is_prepared = mtk_pll_fenc_is_prepared,
+ .prepare = mtk_pll_prepare_setclr,
+ .unprepare = mtk_pll_unprepare_setclr,
+ .recalc_rate = mtk_pll_recalc_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_pll_set_rate,
};
+EXPORT_SYMBOL_GPL(mtk_pll_fenc_clr_set_ops);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
@@ -308,9 +347,15 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
pll->en_addr = base + data->en_reg;
else
pll->en_addr = pll->base_addr + REG_CON0;
+ if (data->en_set_reg)
+ pll->en_set_addr = base + data->en_set_reg;
+ if (data->en_clr_reg)
+ pll->en_clr_addr = base + data->en_clr_reg;
pll->hw.init = &init;
pll->data = data;
+ pll->fenc_addr = base + data->fenc_sta_ofs;
+
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = pll_ops;
@@ -333,12 +378,13 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
{
struct mtk_clk_pll *pll;
struct clk_hw *hw;
+ const struct clk_ops *pll_ops = data->ops ? data->ops : &mtk_pll_ops;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
+ hw = mtk_clk_register_pll_ops(pll, data, base, pll_ops);
if (IS_ERR(hw))
kfree(pll);
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index 285c8db958b3..d71c150ce83e 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -29,6 +29,7 @@ struct mtk_pll_data {
u32 reg;
u32 pwr_reg;
u32 en_mask;
+ u32 fenc_sta_ofs;
u32 pd_reg;
u32 tuner_reg;
u32 tuner_en_reg;
@@ -47,8 +48,11 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
const char *parent_name;
u32 en_reg;
+ u32 en_set_reg;
+ u32 en_clr_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
u8 pcw_chg_bit;
+ u8 fenc_sta_bit;
};
/*
@@ -68,6 +72,9 @@ struct mtk_clk_pll {
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
void __iomem *en_addr;
+ void __iomem *en_set_addr;
+ void __iomem *en_clr_addr;
+ void __iomem *fenc_addr;
const struct mtk_pll_data *data;
};
@@ -78,6 +85,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
struct clk_hw_onecell_data *clk_data);
extern const struct clk_ops mtk_pll_ops;
+extern const struct clk_ops mtk_pll_fenc_clr_set_ops;
static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
{
@@ -96,8 +104,7 @@ void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin);
int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 094ec8a26d66..83630ee07ee9 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -42,7 +42,7 @@ static const struct clk_ops mtk_pllfh_ops = {
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_fhctl_set_rate,
};
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 78f648c9c97d..71481607a6d5 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -5,6 +5,7 @@ menu "Clock support for Amlogic platforms"
config COMMON_CLK_MESON_REGMAP
tristate
select REGMAP
+ select MFD_SYSCON
config COMMON_CLK_MESON_DUALDIV
tristate
@@ -35,6 +36,8 @@ config COMMON_CLK_MESON_VCLK
select COMMON_CLK_MESON_REGMAP
 config COMMON_CLK_MESON_CLKC_UTILS
 	tristate
+	select REGMAP
+	select MFD_SYSCON
config COMMON_CLK_MESON_AO_CLKC
@@ -43,11 +46,6 @@ config COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_CLKC_UTILS
select RESET_CONTROLLER
-config COMMON_CLK_MESON_EE_CLKC
- tristate
- select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_CLKC_UTILS
-
config COMMON_CLK_MESON_CPU_DYNDIV
tristate
select COMMON_CLK_MESON_REGMAP
@@ -55,7 +53,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV
config COMMON_CLK_MESON8B
bool "Meson8 SoC Clock controller support"
depends on ARM
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
@@ -70,14 +68,14 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -86,13 +84,13 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
tristate "AXG SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -106,6 +104,8 @@ config COMMON_CLK_AXG_AUDIO
select COMMON_CLK_MESON_SCLK_DIV
select COMMON_CLK_MESON_CLKC_UTILS
select REGMAP_MMIO
+ select AUXILIARY_BUS
+ imply RESET_MESON_AUX
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg, Say Y if you want audio subsystem to work.
@@ -135,7 +135,7 @@ config COMMON_CLK_A1_PERIPHERALS
config COMMON_CLK_C3_PLL
tristate "Amlogic C3 PLL clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_CLKC_UTILS
@@ -148,7 +148,7 @@ config COMMON_CLK_C3_PLL
config COMMON_CLK_C3_PERIPHERALS
tristate "Amlogic C3 peripherals clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_CLKC_UTILS
@@ -162,13 +162,13 @@ config COMMON_CLK_C3_PERIPHERALS
config COMMON_CLK_G12A
tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_VCLK
@@ -180,7 +180,7 @@ config COMMON_CLK_G12A
config COMMON_CLK_S4_PLL
tristate "S4 SoC PLL clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -193,7 +193,7 @@ config COMMON_CLK_S4_PLL
config COMMON_CLK_S4_PERIPHERALS
tristate "S4 SoC peripherals clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index bc56a47931c1..c6998e752c68 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_COMMON_CLK_MESON_CLKC_UTILS) += meson-clkc-utils.o
obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON_CPU_DYNDIV) += clk-cpu-dyndiv.o
obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
-obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 7aa6abb2eb1f..5e0d58c01405 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -10,14 +10,43 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-peripherals.h"
#include "clk-dualdiv.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,a1-peripherals-clkc.h>
-static struct clk_regmap xtal_in = {
+#define SYS_OSCIN_CTRL 0x0
+#define RTC_BY_OSCIN_CTRL0 0x4
+#define RTC_BY_OSCIN_CTRL1 0x8
+#define RTC_CTRL 0xc
+#define SYS_CLK_CTRL0 0x10
+#define SYS_CLK_EN0 0x1c
+#define SYS_CLK_EN1 0x20
+#define AXI_CLK_EN 0x24
+#define DSPA_CLK_EN 0x28
+#define DSPB_CLK_EN 0x2c
+#define DSPA_CLK_CTRL0 0x30
+#define DSPB_CLK_CTRL0 0x34
+#define CLK12_24_CTRL 0x38
+#define GEN_CLK_CTRL 0x3c
+#define SAR_ADC_CLK_CTRL 0xc0
+#define PWM_CLK_AB_CTRL 0xc4
+#define PWM_CLK_CD_CTRL 0xc8
+#define PWM_CLK_EF_CTRL 0xcc
+#define SPICC_CLK_CTRL 0xd0
+#define TS_CLK_CTRL 0xd4
+#define SPIFC_CLK_CTRL 0xd8
+#define USB_BUSCLK_CTRL 0xdc
+#define SD_EMMC_CLK_CTRL 0xe0
+#define CECA_CLK_CTRL0 0xe4
+#define CECA_CLK_CTRL1 0xe8
+#define CECB_CLK_CTRL0 0xec
+#define CECB_CLK_CTRL1 0xf0
+#define PSRAM_CLK_CTRL 0xf4
+#define DMC_CLK_CTRL 0xf8
+
+static struct clk_regmap a1_xtal_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 0,
@@ -32,7 +61,7 @@ static struct clk_regmap xtal_in = {
},
};
-static struct clk_regmap fixpll_in = {
+static struct clk_regmap a1_fixpll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 1,
@@ -47,7 +76,7 @@ static struct clk_regmap fixpll_in = {
},
};
-static struct clk_regmap usb_phy_in = {
+static struct clk_regmap a1_usb_phy_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 2,
@@ -62,7 +91,7 @@ static struct clk_regmap usb_phy_in = {
},
};
-static struct clk_regmap usb_ctrl_in = {
+static struct clk_regmap a1_usb_ctrl_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 3,
@@ -77,7 +106,7 @@ static struct clk_regmap usb_ctrl_in = {
},
};
-static struct clk_regmap hifipll_in = {
+static struct clk_regmap a1_hifipll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 4,
@@ -92,7 +121,7 @@ static struct clk_regmap hifipll_in = {
},
};
-static struct clk_regmap syspll_in = {
+static struct clk_regmap a1_syspll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 5,
@@ -107,7 +136,7 @@ static struct clk_regmap syspll_in = {
},
};
-static struct clk_regmap dds_in = {
+static struct clk_regmap a1_dds_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 6,
@@ -122,7 +151,7 @@ static struct clk_regmap dds_in = {
},
};
-static struct clk_regmap rtc_32k_in = {
+static struct clk_regmap a1_rtc_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -137,7 +166,7 @@ static struct clk_regmap rtc_32k_in = {
},
};
-static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param a1_32k_div_table[] = {
{
.dual = 1,
.n1 = 733,
@@ -148,7 +177,7 @@ static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
{}
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap a1_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -175,19 +204,19 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_xtal = {
+static struct clk_regmap a1_rtc_32k_xtal = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL1,
.bit_idx = 24,
@@ -196,13 +225,13 @@ static struct clk_regmap rtc_32k_xtal = {
.name = "rtc_32k_xtal",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_sel = {
+static struct clk_regmap a1_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -213,15 +242,15 @@ static struct clk_regmap rtc_32k_sel = {
.name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_xtal.hw,
- &rtc_32k_div.hw,
+ &a1_rtc_32k_xtal.hw,
+ &a1_rtc_32k_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc = {
+static struct clk_regmap a1_rtc = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -230,38 +259,38 @@ static struct clk_regmap rtc = {
.name = "rtc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_sel.hw
+ &a1_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_sys[] = { 0, 1, 2, 3, 7 };
-static const struct clk_parent_data sys_parents[] = {
+static u32 a1_sys_parents_val_table[] = { 0, 1, 2, 3, 7 };
+static const struct clk_parent_data a1_sys_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap sys_b_sel = {
+static struct clk_regmap a1_sys_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_b_div = {
+static struct clk_regmap a1_sys_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 16,
@@ -271,14 +300,14 @@ static struct clk_regmap sys_b_div = {
.name = "sys_b_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_sel.hw
+ &a1_sys_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_b = {
+static struct clk_regmap a1_sys_b = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 29,
@@ -287,29 +316,29 @@ static struct clk_regmap sys_b = {
.name = "sys_b",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_div.hw
+ &a1_sys_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a_sel = {
+static struct clk_regmap a1_sys_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_a_div = {
+static struct clk_regmap a1_sys_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 0,
@@ -319,14 +348,14 @@ static struct clk_regmap sys_a_div = {
.name = "sys_a_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_sel.hw
+ &a1_sys_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a = {
+static struct clk_regmap a1_sys_a = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 13,
@@ -335,14 +364,14 @@ static struct clk_regmap sys_a = {
.name = "sys_a",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_div.hw
+ &a1_sys_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys = {
+static struct clk_regmap a1_sys = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x1,
@@ -352,8 +381,8 @@ static struct clk_regmap sys = {
.name = "sys",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a.hw,
- &sys_b.hw,
+ &a1_sys_a.hw,
+ &a1_sys_b.hw,
},
.num_parents = 2,
/*
@@ -369,32 +398,32 @@ static struct clk_regmap sys = {
},
};
-static u32 mux_table_dsp_ab[] = { 0, 1, 2, 3, 4, 7 };
-static const struct clk_parent_data dsp_ab_parent_data[] = {
+static u32 a1_dsp_parents_val_table[] = { 0, 1, 2, 3, 4, 7 };
+static const struct clk_parent_data a1_dsp_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap dspa_a_sel = {
+static struct clk_regmap a1_dspa_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_a_div = {
+static struct clk_regmap a1_dspa_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 0,
@@ -404,14 +433,14 @@ static struct clk_regmap dspa_a_div = {
.name = "dspa_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_sel.hw
+ &a1_dspa_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_a = {
+static struct clk_regmap a1_dspa_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 13,
@@ -420,29 +449,29 @@ static struct clk_regmap dspa_a = {
.name = "dspa_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_div.hw
+ &a1_dspa_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b_sel = {
+static struct clk_regmap a1_dspa_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_b_div = {
+static struct clk_regmap a1_dspa_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 16,
@@ -452,14 +481,14 @@ static struct clk_regmap dspa_b_div = {
.name = "dspa_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_sel.hw
+ &a1_dspa_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b = {
+static struct clk_regmap a1_dspa_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 29,
@@ -468,14 +497,14 @@ static struct clk_regmap dspa_b = {
.name = "dspa_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_div.hw
+ &a1_dspa_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_sel = {
+static struct clk_regmap a1_dspa_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x1,
@@ -485,15 +514,15 @@ static struct clk_regmap dspa_sel = {
.name = "dspa_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a.hw,
- &dspa_b.hw,
+ &a1_dspa_a.hw,
+ &a1_dspa_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en = {
+static struct clk_regmap a1_dspa_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 1,
@@ -502,14 +531,14 @@ static struct clk_regmap dspa_en = {
.name = "dspa_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en_nic = {
+static struct clk_regmap a1_dspa_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 0,
@@ -518,29 +547,29 @@ static struct clk_regmap dspa_en_nic = {
.name = "dspa_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a_sel = {
+static struct clk_regmap a1_dspb_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_a_div = {
+static struct clk_regmap a1_dspb_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 0,
@@ -550,14 +579,14 @@ static struct clk_regmap dspb_a_div = {
.name = "dspb_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_sel.hw
+ &a1_dspb_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a = {
+static struct clk_regmap a1_dspb_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 13,
@@ -566,29 +595,29 @@ static struct clk_regmap dspb_a = {
.name = "dspb_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_div.hw
+ &a1_dspb_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b_sel = {
+static struct clk_regmap a1_dspb_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_b_div = {
+static struct clk_regmap a1_dspb_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 16,
@@ -598,14 +627,14 @@ static struct clk_regmap dspb_b_div = {
.name = "dspb_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_sel.hw
+ &a1_dspb_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b = {
+static struct clk_regmap a1_dspb_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 29,
@@ -614,14 +643,14 @@ static struct clk_regmap dspb_b = {
.name = "dspb_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_div.hw
+ &a1_dspb_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_sel = {
+static struct clk_regmap a1_dspb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x1,
@@ -631,15 +660,15 @@ static struct clk_regmap dspb_sel = {
.name = "dspb_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a.hw,
- &dspb_b.hw,
+ &a1_dspb_a.hw,
+ &a1_dspb_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en = {
+static struct clk_regmap a1_dspb_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 1,
@@ -648,14 +677,14 @@ static struct clk_regmap dspb_en = {
.name = "dspb_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en_nic = {
+static struct clk_regmap a1_dspb_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 0,
@@ -664,14 +693,14 @@ static struct clk_regmap dspb_en_nic = {
.name = "dspb_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap clk_24m = {
+static struct clk_regmap a1_24m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -686,20 +715,20 @@ static struct clk_regmap clk_24m = {
},
};
-static struct clk_fixed_factor clk_24m_div2 = {
+static struct clk_fixed_factor a1_24m_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "24m_div2",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m.hw
+ &a1_24m.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap clk_12m = {
+static struct clk_regmap a1_12m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 10,
@@ -708,13 +737,13 @@ static struct clk_regmap clk_12m = {
.name = "12m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m_div2.hw
+ &a1_24m_div2.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2_divn_pre = {
+static struct clk_regmap a1_fclk_div2_divn_pre = {
.data = &(struct clk_regmap_div_data){
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -730,7 +759,7 @@ static struct clk_regmap fclk_div2_divn_pre = {
},
};
-static struct clk_regmap fclk_div2_divn = {
+static struct clk_regmap a1_fclk_div2_divn = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -739,7 +768,7 @@ static struct clk_regmap fclk_div2_divn = {
.name = "fclk_div2_divn",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_divn_pre.hw
+ &a1_fclk_div2_divn_pre.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -750,10 +779,10 @@ static struct clk_regmap fclk_div2_divn = {
* the index 2 is sys_pll_div16, which will be implemented in the CPU clock driver,
* the index 4 is the clock measurement source, which is not supported yet
*/
-static u32 gen_table[] = { 0, 1, 3, 5, 6, 7, 8 };
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 a1_gen_parents_val_table[] = { 0, 1, 3, 5, 6, 7, 8 };
+static const struct clk_parent_data a1_gen_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
{ .fw_name = "hifi_pll", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -761,18 +790,18 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap a1_gen_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = GEN_CLK_CTRL,
.mask = 0xf,
.shift = 12,
- .table = gen_table,
+ .table = a1_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = a1_gen_parents,
+ .num_parents = ARRAY_SIZE(a1_gen_parents),
/*
* The GEN clock can be connected to an external pad, so it
* may be set up directly from the device tree. Additionally,
@@ -784,7 +813,7 @@ static struct clk_regmap gen_sel = {
},
};
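
The gen mux above pairs a sparse value table with its parent list: entry i of
a1_gen_parents corresponds to register field value a1_gen_parents_val_table[i],
which lets the driver skip the unsupported field values 2 and 4 noted in the
comment. A minimal sketch of the translation the regmap mux ops perform,
assuming the usual table semantics (helper names here are illustrative, not
from this patch):

#include <linux/kernel.h>
#include <linux/errno.h>

static const u32 gen_val_table[] = { 0, 1, 3, 5, 6, 7, 8 };

/* parent index -> register field value, e.g. parent 2 -> value 3 */
static u32 gen_index_to_val(unsigned int index)
{
	return gen_val_table[index];
}

/* register field value -> parent index; values 2 and 4 never match */
static int gen_val_to_index(u32 val)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gen_val_table); i++)
		if (gen_val_table[i] == val)
			return i;

	return -EINVAL;
}
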
-static struct clk_regmap gen_div = {
+static struct clk_regmap a1_gen_div = {
.data = &(struct clk_regmap_div_data){
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -794,14 +823,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &a1_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap a1_gen = {
.data = &(struct clk_regmap_gate_data){
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -810,14 +839,14 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &a1_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc_sel = {
+static struct clk_regmap a1_saradc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SAR_ADC_CLK_CTRL,
.mask = 0x1,
@@ -828,13 +857,13 @@ static struct clk_regmap saradc_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &sys.hw, },
+ { .hw = &a1_sys.hw, },
},
.num_parents = 2,
},
};
-static struct clk_regmap saradc_div = {
+static struct clk_regmap a1_saradc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SAR_ADC_CLK_CTRL,
.shift = 0,
@@ -844,14 +873,14 @@ static struct clk_regmap saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
+ &a1_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc = {
+static struct clk_regmap a1_saradc = {
.data = &(struct clk_regmap_gate_data){
.offset = SAR_ADC_CLK_CTRL,
.bit_idx = 8,
@@ -860,20 +889,20 @@ static struct clk_regmap saradc = {
.name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
+ &a1_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_abcd_parents[] = {
+static const struct clk_parent_data a1_pwm_abcd_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
- { .hw = &rtc.hw },
+ { .hw = &a1_sys.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_a_sel = {
+static struct clk_regmap a1_pwm_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -882,12 +911,12 @@ static struct clk_regmap pwm_a_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_a_div = {
+static struct clk_regmap a1_pwm_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 0,
@@ -897,14 +926,14 @@ static struct clk_regmap pwm_a_div = {
.name = "pwm_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_sel.hw
+ &a1_pwm_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_a = {
+static struct clk_regmap a1_pwm_a = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 8,
@@ -913,14 +942,14 @@ static struct clk_regmap pwm_a = {
.name = "pwm_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_div.hw
+ &a1_pwm_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b_sel = {
+static struct clk_regmap a1_pwm_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -929,12 +958,12 @@ static struct clk_regmap pwm_b_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_b_div = {
+static struct clk_regmap a1_pwm_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 16,
@@ -944,14 +973,14 @@ static struct clk_regmap pwm_b_div = {
.name = "pwm_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_sel.hw
+ &a1_pwm_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b = {
+static struct clk_regmap a1_pwm_b = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 24,
@@ -960,14 +989,14 @@ static struct clk_regmap pwm_b = {
.name = "pwm_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_div.hw
+ &a1_pwm_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c_sel = {
+static struct clk_regmap a1_pwm_c_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -976,12 +1005,12 @@ static struct clk_regmap pwm_c_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_c_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_c_div = {
+static struct clk_regmap a1_pwm_c_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 0,
@@ -991,14 +1020,14 @@ static struct clk_regmap pwm_c_div = {
.name = "pwm_c_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_sel.hw
+ &a1_pwm_c_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c = {
+static struct clk_regmap a1_pwm_c = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 8,
@@ -1007,14 +1036,14 @@ static struct clk_regmap pwm_c = {
.name = "pwm_c",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_div.hw
+ &a1_pwm_c_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d_sel = {
+static struct clk_regmap a1_pwm_d_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1023,12 +1052,12 @@ static struct clk_regmap pwm_d_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_d_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_d_div = {
+static struct clk_regmap a1_pwm_d_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 16,
@@ -1038,14 +1067,14 @@ static struct clk_regmap pwm_d_div = {
.name = "pwm_d_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_sel.hw
+ &a1_pwm_d_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d = {
+static struct clk_regmap a1_pwm_d = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 24,
@@ -1054,21 +1083,21 @@ static struct clk_regmap pwm_d = {
.name = "pwm_d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_div.hw
+ &a1_pwm_d_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_ef_parents[] = {
+static const struct clk_parent_data a1_pwm_ef_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div5", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_e_sel = {
+static struct clk_regmap a1_pwm_e_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1077,12 +1106,12 @@ static struct clk_regmap pwm_e_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_e_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_e_div = {
+static struct clk_regmap a1_pwm_e_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 0,
@@ -1092,14 +1121,14 @@ static struct clk_regmap pwm_e_div = {
.name = "pwm_e_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_sel.hw
+ &a1_pwm_e_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_e = {
+static struct clk_regmap a1_pwm_e = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 8,
@@ -1108,14 +1137,14 @@ static struct clk_regmap pwm_e = {
.name = "pwm_e",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_div.hw
+ &a1_pwm_e_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f_sel = {
+static struct clk_regmap a1_pwm_f_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1124,12 +1153,12 @@ static struct clk_regmap pwm_f_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_f_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_f_div = {
+static struct clk_regmap a1_pwm_f_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 16,
@@ -1139,14 +1168,14 @@ static struct clk_regmap pwm_f_div = {
.name = "pwm_f_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_sel.hw
+ &a1_pwm_f_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f = {
+static struct clk_regmap a1_pwm_f = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 24,
@@ -1155,7 +1184,7 @@ static struct clk_regmap pwm_f = {
.name = "pwm_f",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_div.hw
+ &a1_pwm_f_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1171,14 +1200,14 @@ static struct clk_regmap pwm_f = {
* --------------------|/
* 24M
*/
-static const struct clk_parent_data spicc_spifc_parents[] = {
+static const struct clk_parent_data a1_spi_parents[] = {
{ .fw_name = "fclk_div2"},
{ .fw_name = "fclk_div3"},
{ .fw_name = "fclk_div5"},
{ .fw_name = "hifi_pll" },
};
-static struct clk_regmap spicc_sel = {
+static struct clk_regmap a1_spicc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x3,
@@ -1187,12 +1216,12 @@ static struct clk_regmap spicc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spicc_div = {
+static struct clk_regmap a1_spicc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPICC_CLK_CTRL,
.shift = 0,
@@ -1202,14 +1231,14 @@ static struct clk_regmap spicc_div = {
.name = "spicc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel.hw
+ &a1_spicc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spicc_sel2 = {
+static struct clk_regmap a1_spicc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x1,
@@ -1219,7 +1248,7 @@ static struct clk_regmap spicc_sel2 = {
.name = "spicc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spicc_div.hw },
+ { .hw = &a1_spicc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1227,7 +1256,7 @@ static struct clk_regmap spicc_sel2 = {
},
};
-static struct clk_regmap spicc = {
+static struct clk_regmap a1_spicc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPICC_CLK_CTRL,
.bit_idx = 8,
@@ -1236,14 +1265,14 @@ static struct clk_regmap spicc = {
.name = "spicc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel2.hw
+ &a1_spicc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
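
The SPICC branch follows the sel -> div -> sel2 -> gate pattern from the
diagram above: sel2 can bypass the divided branch entirely and feed the 24 MHz
xtal straight to the gate. Because every link carries CLK_SET_RATE_PARENT, a
consumer only ever touches the leaf clock. A hypothetical consumer sketch (the
"core" clock name is assumed, not taken from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int spicc_clk_setup(struct device *dev, unsigned long hz)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * Since every link in gate -> sel2 -> div -> sel carries
	 * CLK_SET_RATE_PARENT, this single call re-programs the
	 * divider (and possibly the muxes) behind the gate.
	 */
	ret = clk_set_rate(clk, hz);
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}
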
-static struct clk_regmap ts_div = {
+static struct clk_regmap a1_ts_div = {
.data = &(struct clk_regmap_div_data){
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1259,7 +1288,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap a1_ts = {
.data = &(struct clk_regmap_gate_data){
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1268,14 +1297,14 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &a1_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel = {
+static struct clk_regmap a1_spifc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x3,
@@ -1284,12 +1313,12 @@ static struct clk_regmap spifc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spifc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spifc_div = {
+static struct clk_regmap a1_spifc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPIFC_CLK_CTRL,
.shift = 0,
@@ -1299,14 +1328,14 @@ static struct clk_regmap spifc_div = {
.name = "spifc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
+ &a1_spifc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel2 = {
+static struct clk_regmap a1_spifc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x1,
@@ -1316,7 +1345,7 @@ static struct clk_regmap spifc_sel2 = {
.name = "spifc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spifc_div.hw },
+ { .hw = &a1_spifc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1324,7 +1353,7 @@ static struct clk_regmap spifc_sel2 = {
},
};
-static struct clk_regmap spifc = {
+static struct clk_regmap a1_spifc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPIFC_CLK_CTRL,
.bit_idx = 8,
@@ -1333,21 +1362,21 @@ static struct clk_regmap spifc = {
.name = "spifc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel2.hw
+ &a1_spifc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data usb_bus_parents[] = {
+static const struct clk_parent_data a1_usb_bus_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap usb_bus_sel = {
+static struct clk_regmap a1_usb_bus_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = USB_BUSCLK_CTRL,
.mask = 0x3,
@@ -1356,13 +1385,13 @@ static struct clk_regmap usb_bus_sel = {
.hw.init = &(struct clk_init_data){
.name = "usb_bus_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = usb_bus_parents,
- .num_parents = ARRAY_SIZE(usb_bus_parents),
+ .parent_data = a1_usb_bus_parents,
+ .num_parents = ARRAY_SIZE(a1_usb_bus_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus_div = {
+static struct clk_regmap a1_usb_bus_div = {
.data = &(struct clk_regmap_div_data){
.offset = USB_BUSCLK_CTRL,
.shift = 0,
@@ -1372,14 +1401,14 @@ static struct clk_regmap usb_bus_div = {
.name = "usb_bus_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_sel.hw
+ &a1_usb_bus_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus = {
+static struct clk_regmap a1_usb_bus = {
.data = &(struct clk_regmap_gate_data){
.offset = USB_BUSCLK_CTRL,
.bit_idx = 8,
@@ -1388,21 +1417,21 @@ static struct clk_regmap usb_bus = {
.name = "usb_bus",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_div.hw
+ &a1_usb_bus_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data sd_emmc_psram_dmc_parents[] = {
+static const struct clk_parent_data a1_sd_emmc_parents[] = {
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
};
-static struct clk_regmap sd_emmc_sel = {
+static struct clk_regmap a1_sd_emmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x3,
@@ -1411,12 +1440,12 @@ static struct clk_regmap sd_emmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap sd_emmc_div = {
+static struct clk_regmap a1_sd_emmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SD_EMMC_CLK_CTRL,
.shift = 0,
@@ -1426,14 +1455,14 @@ static struct clk_regmap sd_emmc_div = {
.name = "sd_emmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel.hw
+ &a1_sd_emmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sd_emmc_sel2 = {
+static struct clk_regmap a1_sd_emmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x1,
@@ -1443,7 +1472,7 @@ static struct clk_regmap sd_emmc_sel2 = {
.name = "sd_emmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &sd_emmc_div.hw },
+ { .hw = &a1_sd_emmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1451,7 +1480,7 @@ static struct clk_regmap sd_emmc_sel2 = {
},
};
-static struct clk_regmap sd_emmc = {
+static struct clk_regmap a1_sd_emmc = {
.data = &(struct clk_regmap_gate_data){
.offset = SD_EMMC_CLK_CTRL,
.bit_idx = 8,
@@ -1460,14 +1489,14 @@ static struct clk_regmap sd_emmc = {
.name = "sd_emmc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel2.hw
+ &a1_sd_emmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel = {
+static struct clk_regmap a1_psram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x3,
@@ -1476,12 +1505,12 @@ static struct clk_regmap psram_sel = {
.hw.init = &(struct clk_init_data){
.name = "psram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap psram_div = {
+static struct clk_regmap a1_psram_div = {
.data = &(struct clk_regmap_div_data){
.offset = PSRAM_CLK_CTRL,
.shift = 0,
@@ -1491,14 +1520,14 @@ static struct clk_regmap psram_div = {
.name = "psram_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel.hw
+ &a1_psram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel2 = {
+static struct clk_regmap a1_psram_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x1,
@@ -1508,7 +1537,7 @@ static struct clk_regmap psram_sel2 = {
.name = "psram_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &psram_div.hw },
+ { .hw = &a1_psram_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1516,7 +1545,7 @@ static struct clk_regmap psram_sel2 = {
},
};
-static struct clk_regmap psram = {
+static struct clk_regmap a1_psram = {
.data = &(struct clk_regmap_gate_data){
.offset = PSRAM_CLK_CTRL,
.bit_idx = 8,
@@ -1525,14 +1554,14 @@ static struct clk_regmap psram = {
.name = "psram",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel2.hw
+ &a1_psram_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel = {
+static struct clk_regmap a1_dmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x3,
@@ -1541,12 +1570,12 @@ static struct clk_regmap dmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "dmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap dmc_div = {
+static struct clk_regmap a1_dmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = DMC_CLK_CTRL,
.shift = 0,
@@ -1556,14 +1585,14 @@ static struct clk_regmap dmc_div = {
.name = "dmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel.hw
+ &a1_dmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel2 = {
+static struct clk_regmap a1_dmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x1,
@@ -1573,7 +1602,7 @@ static struct clk_regmap dmc_sel2 = {
.name = "dmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &dmc_div.hw },
+ { .hw = &a1_dmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1581,7 +1610,7 @@ static struct clk_regmap dmc_sel2 = {
},
};
-static struct clk_regmap dmc = {
+static struct clk_regmap a1_dmc = {
.data = &(struct clk_regmap_gate_data){
.offset = DMC_CLK_CTRL,
.bit_idx = 8,
@@ -1590,14 +1619,14 @@ static struct clk_regmap dmc = {
.name = "dmc",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel2.hw
+ &a1_dmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_in = {
+static struct clk_regmap a1_ceca_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 31,
@@ -1612,7 +1641,7 @@ static struct clk_regmap ceca_32k_in = {
},
};
-static struct clk_regmap ceca_32k_div = {
+static struct clk_regmap a1_ceca_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECA_CLK_CTRL0,
@@ -1639,19 +1668,19 @@ static struct clk_regmap ceca_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "ceca_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_in.hw
+ &a1_ceca_32k_in.hw
},
.num_parents = 1,
},
};
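
The dual divider alternates two integer dividers to approximate 32768 Hz from
the 24 MHz crystal: it spends m1 counts dividing by n1 and m2 counts dividing
by n2. A sketch of the resulting average rate, following the formula used by
meson_clk_dualdiv_ops (the actual coefficients come from a1_32k_div_table,
defined earlier in this file):

#include <linux/math64.h>

/* average output rate of a dual divider: m1 counts of /n1, m2 counts of /n2 */
static unsigned long dualdiv_rate(unsigned long parent,
				  unsigned int n1, unsigned int m1,
				  unsigned int n2, unsigned int m2)
{
	return DIV_ROUND_CLOSEST_ULL((u64)parent * (m1 + m2),
				     n1 * m1 + n2 * m2);
}

For instance, coefficients like n1 = 733, m1 = 8, n2 = 732, m2 = 11 (used by
several meson SoCs) give 24 MHz * 19 / 13916, roughly 32767 Hz.
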
-static struct clk_regmap ceca_32k_sel_pre = {
+static struct clk_regmap a1_ceca_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1662,15 +1691,15 @@ static struct clk_regmap ceca_32k_sel_pre = {
.name = "ceca_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_div.hw,
- &ceca_32k_in.hw,
+ &a1_ceca_32k_div.hw,
+ &a1_ceca_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_sel = {
+static struct clk_regmap a1_ceca_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1681,14 +1710,14 @@ static struct clk_regmap ceca_32k_sel = {
.name = "ceca_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_ceca_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap ceca_32k_out = {
+static struct clk_regmap a1_ceca_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 30,
@@ -1697,14 +1726,14 @@ static struct clk_regmap ceca_32k_out = {
.name = "ceca_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel.hw
+ &a1_ceca_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_in = {
+static struct clk_regmap a1_cecb_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 31,
@@ -1719,7 +1748,7 @@ static struct clk_regmap cecb_32k_in = {
},
};
-static struct clk_regmap cecb_32k_div = {
+static struct clk_regmap a1_cecb_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECB_CLK_CTRL0,
@@ -1746,19 +1775,19 @@ static struct clk_regmap cecb_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "cecb_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_in.hw
+ &a1_cecb_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap cecb_32k_sel_pre = {
+static struct clk_regmap a1_cecb_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1769,15 +1798,15 @@ static struct clk_regmap cecb_32k_sel_pre = {
.name = "cecb_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_div.hw,
- &cecb_32k_in.hw,
+ &a1_cecb_32k_div.hw,
+ &a1_cecb_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_sel = {
+static struct clk_regmap a1_cecb_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1788,14 +1817,14 @@ static struct clk_regmap cecb_32k_sel = {
.name = "cecb_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_cecb_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap cecb_32k_out = {
+static struct clk_regmap a1_cecb_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 30,
@@ -1804,446 +1833,268 @@ static struct clk_regmap cecb_32k_out = {
.name = "cecb_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel.hw
+ &a1_cecb_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &sys.hw)
-
-static MESON_GATE(clktree, SYS_CLK_EN0, 0);
-static MESON_GATE(reset_ctrl, SYS_CLK_EN0, 1);
-static MESON_GATE(analog_ctrl, SYS_CLK_EN0, 2);
-static MESON_GATE(pwr_ctrl, SYS_CLK_EN0, 3);
-static MESON_GATE(pad_ctrl, SYS_CLK_EN0, 4);
-static MESON_GATE(sys_ctrl, SYS_CLK_EN0, 5);
-static MESON_GATE(temp_sensor, SYS_CLK_EN0, 6);
-static MESON_GATE(am2axi_dev, SYS_CLK_EN0, 7);
-static MESON_GATE(spicc_b, SYS_CLK_EN0, 8);
-static MESON_GATE(spicc_a, SYS_CLK_EN0, 9);
-static MESON_GATE(msr, SYS_CLK_EN0, 10);
-static MESON_GATE(audio, SYS_CLK_EN0, 11);
-static MESON_GATE(jtag_ctrl, SYS_CLK_EN0, 12);
-static MESON_GATE(saradc_en, SYS_CLK_EN0, 13);
-static MESON_GATE(pwm_ef, SYS_CLK_EN0, 14);
-static MESON_GATE(pwm_cd, SYS_CLK_EN0, 15);
-static MESON_GATE(pwm_ab, SYS_CLK_EN0, 16);
-static MESON_GATE(cec, SYS_CLK_EN0, 17);
-static MESON_GATE(i2c_s, SYS_CLK_EN0, 18);
-static MESON_GATE(ir_ctrl, SYS_CLK_EN0, 19);
-static MESON_GATE(i2c_m_d, SYS_CLK_EN0, 20);
-static MESON_GATE(i2c_m_c, SYS_CLK_EN0, 21);
-static MESON_GATE(i2c_m_b, SYS_CLK_EN0, 22);
-static MESON_GATE(i2c_m_a, SYS_CLK_EN0, 23);
-static MESON_GATE(acodec, SYS_CLK_EN0, 24);
-static MESON_GATE(otp, SYS_CLK_EN0, 25);
-static MESON_GATE(sd_emmc_a, SYS_CLK_EN0, 26);
-static MESON_GATE(usb_phy, SYS_CLK_EN0, 27);
-static MESON_GATE(usb_ctrl, SYS_CLK_EN0, 28);
-static MESON_GATE(sys_dspb, SYS_CLK_EN0, 29);
-static MESON_GATE(sys_dspa, SYS_CLK_EN0, 30);
-static MESON_GATE(dma, SYS_CLK_EN0, 31);
-static MESON_GATE(irq_ctrl, SYS_CLK_EN1, 0);
-static MESON_GATE(nic, SYS_CLK_EN1, 1);
-static MESON_GATE(gic, SYS_CLK_EN1, 2);
-static MESON_GATE(uart_c, SYS_CLK_EN1, 3);
-static MESON_GATE(uart_b, SYS_CLK_EN1, 4);
-static MESON_GATE(uart_a, SYS_CLK_EN1, 5);
-static MESON_GATE(sys_psram, SYS_CLK_EN1, 6);
-static MESON_GATE(rsa, SYS_CLK_EN1, 8);
-static MESON_GATE(coresight, SYS_CLK_EN1, 9);
-static MESON_GATE(am2axi_vad, AXI_CLK_EN, 0);
-static MESON_GATE(audio_vad, AXI_CLK_EN, 1);
-static MESON_GATE(axi_dmc, AXI_CLK_EN, 3);
-static MESON_GATE(axi_psram, AXI_CLK_EN, 4);
-static MESON_GATE(ramb, AXI_CLK_EN, 5);
-static MESON_GATE(rama, AXI_CLK_EN, 6);
-static MESON_GATE(axi_spifc, AXI_CLK_EN, 7);
-static MESON_GATE(axi_nic, AXI_CLK_EN, 8);
-static MESON_GATE(axi_dma, AXI_CLK_EN, 9);
-static MESON_GATE(cpu_ctrl, AXI_CLK_EN, 10);
-static MESON_GATE(rom, AXI_CLK_EN, 11);
-static MESON_GATE(prod_i2c, AXI_CLK_EN, 12);
+static const struct clk_parent_data a1_pclk_parents = { .hw = &a1_sys.hw };
+
+#define A1_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(a1_##_name, _reg, _bit, &a1_pclk_parents, _flags)
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without the flag and to submit
+ * changes that either:
+ * - remove the flag where it is not necessary,
+ * - replace the flag with a more adequate one, such as CLK_IS_CRITICAL,
+ *   where appropriate, or
+ * - add a comment explaining why CLK_IGNORE_UNUSED is desirable for a
+ *   particular clock.
+ */
+static A1_PCLK(clktree, SYS_CLK_EN0, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(reset_ctrl, SYS_CLK_EN0, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(analog_ctrl, SYS_CLK_EN0, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwr_ctrl, SYS_CLK_EN0, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(pad_ctrl, SYS_CLK_EN0, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_ctrl, SYS_CLK_EN0, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(temp_sensor, SYS_CLK_EN0, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(am2axi_dev, SYS_CLK_EN0, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_b, SYS_CLK_EN0, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_a, SYS_CLK_EN0, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(msr, SYS_CLK_EN0, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio, SYS_CLK_EN0, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(jtag_ctrl, SYS_CLK_EN0, 12, CLK_IGNORE_UNUSED);
+static A1_PCLK(saradc_en, SYS_CLK_EN0, 13, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ef, SYS_CLK_EN0, 14, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_cd, SYS_CLK_EN0, 15, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ab, SYS_CLK_EN0, 16, CLK_IGNORE_UNUSED);
+static A1_PCLK(cec, SYS_CLK_EN0, 17, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_s, SYS_CLK_EN0, 18, CLK_IGNORE_UNUSED);
+static A1_PCLK(ir_ctrl, SYS_CLK_EN0, 19, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_d, SYS_CLK_EN0, 20, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_c, SYS_CLK_EN0, 21, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_b, SYS_CLK_EN0, 22, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_a, SYS_CLK_EN0, 23, CLK_IGNORE_UNUSED);
+static A1_PCLK(acodec, SYS_CLK_EN0, 24, CLK_IGNORE_UNUSED);
+static A1_PCLK(otp, SYS_CLK_EN0, 25, CLK_IGNORE_UNUSED);
+static A1_PCLK(sd_emmc_a, SYS_CLK_EN0, 26, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_phy, SYS_CLK_EN0, 27, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_ctrl, SYS_CLK_EN0, 28, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspb, SYS_CLK_EN0, 29, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspa, SYS_CLK_EN0, 30, CLK_IGNORE_UNUSED);
+static A1_PCLK(dma, SYS_CLK_EN0, 31, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(irq_ctrl, SYS_CLK_EN1, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(nic, SYS_CLK_EN1, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(gic, SYS_CLK_EN1, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_c, SYS_CLK_EN1, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_b, SYS_CLK_EN1, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_a, SYS_CLK_EN1, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_psram, SYS_CLK_EN1, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(rsa, SYS_CLK_EN1, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(coresight, SYS_CLK_EN1, 9, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(am2axi_vad, AXI_CLK_EN, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio_vad, AXI_CLK_EN, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dmc, AXI_CLK_EN, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_psram, AXI_CLK_EN, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(ramb, AXI_CLK_EN, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(rama, AXI_CLK_EN, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_spifc, AXI_CLK_EN, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_nic, AXI_CLK_EN, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dma, AXI_CLK_EN, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(cpu_ctrl, AXI_CLK_EN, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(rom, AXI_CLK_EN, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(prod_i2c, AXI_CLK_EN, 12, CLK_IGNORE_UNUSED);
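
For reference, assuming MESON_PCLK() wraps a plain regmap gate (its definition
lives in the shared meson clk headers, outside this patch), a line such as the
uart_a one above expands to roughly:

static struct clk_regmap a1_uart_a = {
	.data = &(struct clk_regmap_gate_data){
		.offset = SYS_CLK_EN1,
		.bit_idx = 5,
	},
	.hw.init = &(struct clk_init_data){
		.name = "uart_a",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &a1_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};
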
/* Array of all clocks registered by this provider */
-static struct clk_hw *a1_periphs_hw_clks[] = {
- [CLKID_XTAL_IN] = &xtal_in.hw,
- [CLKID_FIXPLL_IN] = &fixpll_in.hw,
- [CLKID_USB_PHY_IN] = &usb_phy_in.hw,
- [CLKID_USB_CTRL_IN] = &usb_ctrl_in.hw,
- [CLKID_HIFIPLL_IN] = &hifipll_in.hw,
- [CLKID_SYSPLL_IN] = &syspll_in.hw,
- [CLKID_DDS_IN] = &dds_in.hw,
- [CLKID_SYS] = &sys.hw,
- [CLKID_CLKTREE] = &clktree.hw,
- [CLKID_RESET_CTRL] = &reset_ctrl.hw,
- [CLKID_ANALOG_CTRL] = &analog_ctrl.hw,
- [CLKID_PWR_CTRL] = &pwr_ctrl.hw,
- [CLKID_PAD_CTRL] = &pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_TEMP_SENSOR] = &temp_sensor.hw,
- [CLKID_AM2AXI_DIV] = &am2axi_dev.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_MSR] = &msr.hw,
- [CLKID_AUDIO] = &audio.hw,
- [CLKID_JTAG_CTRL] = &jtag_ctrl.hw,
- [CLKID_SARADC_EN] = &saradc_en.hw,
- [CLKID_PWM_EF] = &pwm_ef.hw,
- [CLKID_PWM_CD] = &pwm_cd.hw,
- [CLKID_PWM_AB] = &pwm_ab.hw,
- [CLKID_CEC] = &cec.hw,
- [CLKID_I2C_S] = &i2c_s.hw,
- [CLKID_IR_CTRL] = &ir_ctrl.hw,
- [CLKID_I2C_M_D] = &i2c_m_d.hw,
- [CLKID_I2C_M_C] = &i2c_m_c.hw,
- [CLKID_I2C_M_B] = &i2c_m_b.hw,
- [CLKID_I2C_M_A] = &i2c_m_a.hw,
- [CLKID_ACODEC] = &acodec.hw,
- [CLKID_OTP] = &otp.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_USB_PHY] = &usb_phy.hw,
- [CLKID_USB_CTRL] = &usb_ctrl.hw,
- [CLKID_SYS_DSPB] = &sys_dspb.hw,
- [CLKID_SYS_DSPA] = &sys_dspa.hw,
- [CLKID_DMA] = &dma.hw,
- [CLKID_IRQ_CTRL] = &irq_ctrl.hw,
- [CLKID_NIC] = &nic.hw,
- [CLKID_GIC] = &gic.hw,
- [CLKID_UART_C] = &uart_c.hw,
- [CLKID_UART_B] = &uart_b.hw,
- [CLKID_UART_A] = &uart_a.hw,
- [CLKID_SYS_PSRAM] = &sys_psram.hw,
- [CLKID_RSA] = &rsa.hw,
- [CLKID_CORESIGHT] = &coresight.hw,
- [CLKID_AM2AXI_VAD] = &am2axi_vad.hw,
- [CLKID_AUDIO_VAD] = &audio_vad.hw,
- [CLKID_AXI_DMC] = &axi_dmc.hw,
- [CLKID_AXI_PSRAM] = &axi_psram.hw,
- [CLKID_RAMB] = &ramb.hw,
- [CLKID_RAMA] = &rama.hw,
- [CLKID_AXI_SPIFC] = &axi_spifc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_CPU_CTRL] = &cpu_ctrl.hw,
- [CLKID_ROM] = &rom.hw,
- [CLKID_PROC_I2C] = &prod_i2c.hw,
- [CLKID_DSPA_SEL] = &dspa_sel.hw,
- [CLKID_DSPB_SEL] = &dspb_sel.hw,
- [CLKID_DSPA_EN] = &dspa_en.hw,
- [CLKID_DSPA_EN_NIC] = &dspa_en_nic.hw,
- [CLKID_DSPB_EN] = &dspb_en.hw,
- [CLKID_DSPB_EN_NIC] = &dspb_en_nic.hw,
- [CLKID_RTC] = &rtc.hw,
- [CLKID_CECA_32K] = &ceca_32k_out.hw,
- [CLKID_CECB_32K] = &cecb_32k_out.hw,
- [CLKID_24M] = &clk_24m.hw,
- [CLKID_12M] = &clk_12m.hw,
- [CLKID_FCLK_DIV2_DIVN] = &fclk_div2_divn.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_SPICC] = &spicc.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_USB_BUS] = &usb_bus.hw,
- [CLKID_SD_EMMC] = &sd_emmc.hw,
- [CLKID_PSRAM] = &psram.hw,
- [CLKID_DMC] = &dmc.hw,
- [CLKID_SYS_A_SEL] = &sys_a_sel.hw,
- [CLKID_SYS_A_DIV] = &sys_a_div.hw,
- [CLKID_SYS_A] = &sys_a.hw,
- [CLKID_SYS_B_SEL] = &sys_b_sel.hw,
- [CLKID_SYS_B_DIV] = &sys_b_div.hw,
- [CLKID_SYS_B] = &sys_b.hw,
- [CLKID_DSPA_A_SEL] = &dspa_a_sel.hw,
- [CLKID_DSPA_A_DIV] = &dspa_a_div.hw,
- [CLKID_DSPA_A] = &dspa_a.hw,
- [CLKID_DSPA_B_SEL] = &dspa_b_sel.hw,
- [CLKID_DSPA_B_DIV] = &dspa_b_div.hw,
- [CLKID_DSPA_B] = &dspa_b.hw,
- [CLKID_DSPB_A_SEL] = &dspb_a_sel.hw,
- [CLKID_DSPB_A_DIV] = &dspb_a_div.hw,
- [CLKID_DSPB_A] = &dspb_a.hw,
- [CLKID_DSPB_B_SEL] = &dspb_b_sel.hw,
- [CLKID_DSPB_B_DIV] = &dspb_b_div.hw,
- [CLKID_DSPB_B] = &dspb_b.hw,
- [CLKID_RTC_32K_IN] = &rtc_32k_in.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_XTAL] = &rtc_32k_xtal.hw,
- [CLKID_RTC_32K_SEL] = &rtc_32k_sel.hw,
- [CLKID_CECB_32K_IN] = &cecb_32k_in.hw,
- [CLKID_CECB_32K_DIV] = &cecb_32k_div.hw,
- [CLKID_CECB_32K_SEL_PRE] = &cecb_32k_sel_pre.hw,
- [CLKID_CECB_32K_SEL] = &cecb_32k_sel.hw,
- [CLKID_CECA_32K_IN] = &ceca_32k_in.hw,
- [CLKID_CECA_32K_DIV] = &ceca_32k_div.hw,
- [CLKID_CECA_32K_SEL_PRE] = &ceca_32k_sel_pre.hw,
- [CLKID_CECA_32K_SEL] = &ceca_32k_sel.hw,
- [CLKID_DIV2_PRE] = &fclk_div2_divn_pre.hw,
- [CLKID_24M_DIV2] = &clk_24m_div2.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_SPICC_SEL] = &spicc_sel.hw,
- [CLKID_SPICC_DIV] = &spicc_div.hw,
- [CLKID_SPICC_SEL2] = &spicc_sel2.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC_SEL2] = &spifc_sel2.hw,
- [CLKID_USB_BUS_SEL] = &usb_bus_sel.hw,
- [CLKID_USB_BUS_DIV] = &usb_bus_div.hw,
- [CLKID_SD_EMMC_SEL] = &sd_emmc_sel.hw,
- [CLKID_SD_EMMC_DIV] = &sd_emmc_div.hw,
- [CLKID_SD_EMMC_SEL2] = &sd_emmc_sel2.hw,
- [CLKID_PSRAM_SEL] = &psram_sel.hw,
- [CLKID_PSRAM_DIV] = &psram_div.hw,
- [CLKID_PSRAM_SEL2] = &psram_sel2.hw,
- [CLKID_DMC_SEL] = &dmc_sel.hw,
- [CLKID_DMC_DIV] = &dmc_div.hw,
- [CLKID_DMC_SEL2] = &dmc_sel2.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const a1_periphs_regmaps[] = {
- &xtal_in,
- &fixpll_in,
- &usb_phy_in,
- &usb_ctrl_in,
- &hifipll_in,
- &syspll_in,
- &dds_in,
- &sys,
- &clktree,
- &reset_ctrl,
- &analog_ctrl,
- &pwr_ctrl,
- &pad_ctrl,
- &sys_ctrl,
- &temp_sensor,
- &am2axi_dev,
- &spicc_b,
- &spicc_a,
- &msr,
- &audio,
- &jtag_ctrl,
- &saradc_en,
- &pwm_ef,
- &pwm_cd,
- &pwm_ab,
- &cec,
- &i2c_s,
- &ir_ctrl,
- &i2c_m_d,
- &i2c_m_c,
- &i2c_m_b,
- &i2c_m_a,
- &acodec,
- &otp,
- &sd_emmc_a,
- &usb_phy,
- &usb_ctrl,
- &sys_dspb,
- &sys_dspa,
- &dma,
- &irq_ctrl,
- &nic,
- &gic,
- &uart_c,
- &uart_b,
- &uart_a,
- &sys_psram,
- &rsa,
- &coresight,
- &am2axi_vad,
- &audio_vad,
- &axi_dmc,
- &axi_psram,
- &ramb,
- &rama,
- &axi_spifc,
- &axi_nic,
- &axi_dma,
- &cpu_ctrl,
- &rom,
- &prod_i2c,
- &dspa_sel,
- &dspb_sel,
- &dspa_en,
- &dspa_en_nic,
- &dspb_en,
- &dspb_en_nic,
- &rtc,
- &ceca_32k_out,
- &cecb_32k_out,
- &clk_24m,
- &clk_12m,
- &fclk_div2_divn,
- &gen,
- &saradc_sel,
- &saradc,
- &pwm_a,
- &pwm_b,
- &pwm_c,
- &pwm_d,
- &pwm_e,
- &pwm_f,
- &spicc,
- &ts,
- &spifc,
- &usb_bus,
- &sd_emmc,
- &psram,
- &dmc,
- &sys_a_sel,
- &sys_a_div,
- &sys_a,
- &sys_b_sel,
- &sys_b_div,
- &sys_b,
- &dspa_a_sel,
- &dspa_a_div,
- &dspa_a,
- &dspa_b_sel,
- &dspa_b_div,
- &dspa_b,
- &dspb_a_sel,
- &dspb_a_div,
- &dspb_a,
- &dspb_b_sel,
- &dspb_b_div,
- &dspb_b,
- &rtc_32k_in,
- &rtc_32k_div,
- &rtc_32k_xtal,
- &rtc_32k_sel,
- &cecb_32k_in,
- &cecb_32k_div,
- &cecb_32k_sel_pre,
- &cecb_32k_sel,
- &ceca_32k_in,
- &ceca_32k_div,
- &ceca_32k_sel_pre,
- &ceca_32k_sel,
- &fclk_div2_divn_pre,
- &gen_sel,
- &gen_div,
- &saradc_div,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_f_sel,
- &pwm_f_div,
- &spicc_sel,
- &spicc_div,
- &spicc_sel2,
- &ts_div,
- &spifc_sel,
- &spifc_div,
- &spifc_sel2,
- &usb_bus_sel,
- &usb_bus_div,
- &sd_emmc_sel,
- &sd_emmc_div,
- &sd_emmc_sel2,
- &psram_sel,
- &psram_div,
- &psram_sel2,
- &dmc_sel,
- &dmc_div,
- &dmc_sel2,
-};
-
-static const struct regmap_config a1_periphs_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DMC_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data a1_periphs_clks = {
- .hws = a1_periphs_hw_clks,
- .num = ARRAY_SIZE(a1_periphs_hw_clks),
-};
-
-static int meson_a1_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, i, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_periphs_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_periphs_regmaps); i++)
- a1_periphs_regmaps[i]->map = map;
-
- for (clkid = 0; clkid < a1_periphs_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_periphs_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &a1_periphs_clks);
-}
-
-static const struct of_device_id a1_periphs_clkc_match_table[] = {
- { .compatible = "amlogic,a1-peripherals-clkc", },
+static struct clk_hw *a1_peripherals_hw_clks[] = {
+ [CLKID_XTAL_IN] = &a1_xtal_in.hw,
+ [CLKID_FIXPLL_IN] = &a1_fixpll_in.hw,
+ [CLKID_USB_PHY_IN] = &a1_usb_phy_in.hw,
+ [CLKID_USB_CTRL_IN] = &a1_usb_ctrl_in.hw,
+ [CLKID_HIFIPLL_IN] = &a1_hifipll_in.hw,
+ [CLKID_SYSPLL_IN] = &a1_syspll_in.hw,
+ [CLKID_DDS_IN] = &a1_dds_in.hw,
+ [CLKID_SYS] = &a1_sys.hw,
+ [CLKID_CLKTREE] = &a1_clktree.hw,
+ [CLKID_RESET_CTRL] = &a1_reset_ctrl.hw,
+ [CLKID_ANALOG_CTRL] = &a1_analog_ctrl.hw,
+ [CLKID_PWR_CTRL] = &a1_pwr_ctrl.hw,
+ [CLKID_PAD_CTRL] = &a1_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &a1_sys_ctrl.hw,
+ [CLKID_TEMP_SENSOR] = &a1_temp_sensor.hw,
+ [CLKID_AM2AXI_DIV] = &a1_am2axi_dev.hw,
+ [CLKID_SPICC_B] = &a1_spicc_b.hw,
+ [CLKID_SPICC_A] = &a1_spicc_a.hw,
+ [CLKID_MSR] = &a1_msr.hw,
+ [CLKID_AUDIO] = &a1_audio.hw,
+ [CLKID_JTAG_CTRL] = &a1_jtag_ctrl.hw,
+ [CLKID_SARADC_EN] = &a1_saradc_en.hw,
+ [CLKID_PWM_EF] = &a1_pwm_ef.hw,
+ [CLKID_PWM_CD] = &a1_pwm_cd.hw,
+ [CLKID_PWM_AB] = &a1_pwm_ab.hw,
+ [CLKID_CEC] = &a1_cec.hw,
+ [CLKID_I2C_S] = &a1_i2c_s.hw,
+ [CLKID_IR_CTRL] = &a1_ir_ctrl.hw,
+ [CLKID_I2C_M_D] = &a1_i2c_m_d.hw,
+ [CLKID_I2C_M_C] = &a1_i2c_m_c.hw,
+ [CLKID_I2C_M_B] = &a1_i2c_m_b.hw,
+ [CLKID_I2C_M_A] = &a1_i2c_m_a.hw,
+ [CLKID_ACODEC] = &a1_acodec.hw,
+ [CLKID_OTP] = &a1_otp.hw,
+ [CLKID_SD_EMMC_A] = &a1_sd_emmc_a.hw,
+ [CLKID_USB_PHY] = &a1_usb_phy.hw,
+ [CLKID_USB_CTRL] = &a1_usb_ctrl.hw,
+ [CLKID_SYS_DSPB] = &a1_sys_dspb.hw,
+ [CLKID_SYS_DSPA] = &a1_sys_dspa.hw,
+ [CLKID_DMA] = &a1_dma.hw,
+ [CLKID_IRQ_CTRL] = &a1_irq_ctrl.hw,
+ [CLKID_NIC] = &a1_nic.hw,
+ [CLKID_GIC] = &a1_gic.hw,
+ [CLKID_UART_C] = &a1_uart_c.hw,
+ [CLKID_UART_B] = &a1_uart_b.hw,
+ [CLKID_UART_A] = &a1_uart_a.hw,
+ [CLKID_SYS_PSRAM] = &a1_sys_psram.hw,
+ [CLKID_RSA] = &a1_rsa.hw,
+ [CLKID_CORESIGHT] = &a1_coresight.hw,
+ [CLKID_AM2AXI_VAD] = &a1_am2axi_vad.hw,
+ [CLKID_AUDIO_VAD] = &a1_audio_vad.hw,
+ [CLKID_AXI_DMC] = &a1_axi_dmc.hw,
+ [CLKID_AXI_PSRAM] = &a1_axi_psram.hw,
+ [CLKID_RAMB] = &a1_ramb.hw,
+ [CLKID_RAMA] = &a1_rama.hw,
+ [CLKID_AXI_SPIFC] = &a1_axi_spifc.hw,
+ [CLKID_AXI_NIC] = &a1_axi_nic.hw,
+ [CLKID_AXI_DMA] = &a1_axi_dma.hw,
+ [CLKID_CPU_CTRL] = &a1_cpu_ctrl.hw,
+ [CLKID_ROM] = &a1_rom.hw,
+ [CLKID_PROC_I2C] = &a1_prod_i2c.hw,
+ [CLKID_DSPA_SEL] = &a1_dspa_sel.hw,
+ [CLKID_DSPB_SEL] = &a1_dspb_sel.hw,
+ [CLKID_DSPA_EN] = &a1_dspa_en.hw,
+ [CLKID_DSPA_EN_NIC] = &a1_dspa_en_nic.hw,
+ [CLKID_DSPB_EN] = &a1_dspb_en.hw,
+ [CLKID_DSPB_EN_NIC] = &a1_dspb_en_nic.hw,
+ [CLKID_RTC] = &a1_rtc.hw,
+ [CLKID_CECA_32K] = &a1_ceca_32k_out.hw,
+ [CLKID_CECB_32K] = &a1_cecb_32k_out.hw,
+ [CLKID_24M] = &a1_24m.hw,
+ [CLKID_12M] = &a1_12m.hw,
+ [CLKID_FCLK_DIV2_DIVN] = &a1_fclk_div2_divn.hw,
+ [CLKID_GEN] = &a1_gen.hw,
+ [CLKID_SARADC_SEL] = &a1_saradc_sel.hw,
+ [CLKID_SARADC] = &a1_saradc.hw,
+ [CLKID_PWM_A] = &a1_pwm_a.hw,
+ [CLKID_PWM_B] = &a1_pwm_b.hw,
+ [CLKID_PWM_C] = &a1_pwm_c.hw,
+ [CLKID_PWM_D] = &a1_pwm_d.hw,
+ [CLKID_PWM_E] = &a1_pwm_e.hw,
+ [CLKID_PWM_F] = &a1_pwm_f.hw,
+ [CLKID_SPICC] = &a1_spicc.hw,
+ [CLKID_TS] = &a1_ts.hw,
+ [CLKID_SPIFC] = &a1_spifc.hw,
+ [CLKID_USB_BUS] = &a1_usb_bus.hw,
+ [CLKID_SD_EMMC] = &a1_sd_emmc.hw,
+ [CLKID_PSRAM] = &a1_psram.hw,
+ [CLKID_DMC] = &a1_dmc.hw,
+ [CLKID_SYS_A_SEL] = &a1_sys_a_sel.hw,
+ [CLKID_SYS_A_DIV] = &a1_sys_a_div.hw,
+ [CLKID_SYS_A] = &a1_sys_a.hw,
+ [CLKID_SYS_B_SEL] = &a1_sys_b_sel.hw,
+ [CLKID_SYS_B_DIV] = &a1_sys_b_div.hw,
+ [CLKID_SYS_B] = &a1_sys_b.hw,
+ [CLKID_DSPA_A_SEL] = &a1_dspa_a_sel.hw,
+ [CLKID_DSPA_A_DIV] = &a1_dspa_a_div.hw,
+ [CLKID_DSPA_A] = &a1_dspa_a.hw,
+ [CLKID_DSPA_B_SEL] = &a1_dspa_b_sel.hw,
+ [CLKID_DSPA_B_DIV] = &a1_dspa_b_div.hw,
+ [CLKID_DSPA_B] = &a1_dspa_b.hw,
+ [CLKID_DSPB_A_SEL] = &a1_dspb_a_sel.hw,
+ [CLKID_DSPB_A_DIV] = &a1_dspb_a_div.hw,
+ [CLKID_DSPB_A] = &a1_dspb_a.hw,
+ [CLKID_DSPB_B_SEL] = &a1_dspb_b_sel.hw,
+ [CLKID_DSPB_B_DIV] = &a1_dspb_b_div.hw,
+ [CLKID_DSPB_B] = &a1_dspb_b.hw,
+ [CLKID_RTC_32K_IN] = &a1_rtc_32k_in.hw,
+ [CLKID_RTC_32K_DIV] = &a1_rtc_32k_div.hw,
+ [CLKID_RTC_32K_XTAL] = &a1_rtc_32k_xtal.hw,
+ [CLKID_RTC_32K_SEL] = &a1_rtc_32k_sel.hw,
+ [CLKID_CECB_32K_IN] = &a1_cecb_32k_in.hw,
+ [CLKID_CECB_32K_DIV] = &a1_cecb_32k_div.hw,
+ [CLKID_CECB_32K_SEL_PRE] = &a1_cecb_32k_sel_pre.hw,
+ [CLKID_CECB_32K_SEL] = &a1_cecb_32k_sel.hw,
+ [CLKID_CECA_32K_IN] = &a1_ceca_32k_in.hw,
+ [CLKID_CECA_32K_DIV] = &a1_ceca_32k_div.hw,
+ [CLKID_CECA_32K_SEL_PRE] = &a1_ceca_32k_sel_pre.hw,
+ [CLKID_CECA_32K_SEL] = &a1_ceca_32k_sel.hw,
+ [CLKID_DIV2_PRE] = &a1_fclk_div2_divn_pre.hw,
+ [CLKID_24M_DIV2] = &a1_24m_div2.hw,
+ [CLKID_GEN_SEL] = &a1_gen_sel.hw,
+ [CLKID_GEN_DIV] = &a1_gen_div.hw,
+ [CLKID_SARADC_DIV] = &a1_saradc_div.hw,
+ [CLKID_PWM_A_SEL] = &a1_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &a1_pwm_a_div.hw,
+ [CLKID_PWM_B_SEL] = &a1_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &a1_pwm_b_div.hw,
+ [CLKID_PWM_C_SEL] = &a1_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &a1_pwm_c_div.hw,
+ [CLKID_PWM_D_SEL] = &a1_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &a1_pwm_d_div.hw,
+ [CLKID_PWM_E_SEL] = &a1_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &a1_pwm_e_div.hw,
+ [CLKID_PWM_F_SEL] = &a1_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &a1_pwm_f_div.hw,
+ [CLKID_SPICC_SEL] = &a1_spicc_sel.hw,
+ [CLKID_SPICC_DIV] = &a1_spicc_div.hw,
+ [CLKID_SPICC_SEL2] = &a1_spicc_sel2.hw,
+ [CLKID_TS_DIV] = &a1_ts_div.hw,
+ [CLKID_SPIFC_SEL] = &a1_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &a1_spifc_div.hw,
+ [CLKID_SPIFC_SEL2] = &a1_spifc_sel2.hw,
+ [CLKID_USB_BUS_SEL] = &a1_usb_bus_sel.hw,
+ [CLKID_USB_BUS_DIV] = &a1_usb_bus_div.hw,
+ [CLKID_SD_EMMC_SEL] = &a1_sd_emmc_sel.hw,
+ [CLKID_SD_EMMC_DIV] = &a1_sd_emmc_div.hw,
+ [CLKID_SD_EMMC_SEL2] = &a1_sd_emmc_sel2.hw,
+ [CLKID_PSRAM_SEL] = &a1_psram_sel.hw,
+ [CLKID_PSRAM_DIV] = &a1_psram_div.hw,
+ [CLKID_PSRAM_SEL2] = &a1_psram_sel2.hw,
+ [CLKID_DMC_SEL] = &a1_dmc_sel.hw,
+ [CLKID_DMC_DIV] = &a1_dmc_div.hw,
+ [CLKID_DMC_SEL2] = &a1_dmc_sel2.hw,
+};
+
+static const struct meson_clkc_data a1_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = a1_peripherals_hw_clks,
+ .num = ARRAY_SIZE(a1_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id a1_peripherals_clkc_match_table[] = {
+ {
+ .compatible = "amlogic,a1-peripherals-clkc",
+ .data = &a1_peripherals_clkc_data,
+ },
{}
};
-MODULE_DEVICE_TABLE(of, a1_periphs_clkc_match_table);
+MODULE_DEVICE_TABLE(of, a1_peripherals_clkc_match_table);
-static struct platform_driver a1_periphs_clkc_driver = {
- .probe = meson_a1_periphs_probe,
+static struct platform_driver a1_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-peripherals-clkc",
- .of_match_table = a1_periphs_clkc_match_table,
+ .of_match_table = a1_peripherals_clkc_match_table,
},
};
-module_platform_driver(a1_periphs_clkc_driver);
+module_platform_driver(a1_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
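
The driver now delegates to the shared meson_clkc_mmio_probe(), with the
per-SoC data carried on the OF match entry (the PLL driver below gets the same
treatment). A reconstruction of what that helper is expected to do, based on
the bespoke probe deleted above; the in-tree helper lives in the shared meson
clkc utilities and may differ in detail, and the regmap config shown is an
assumption:

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>

static const struct regmap_config clkc_regmap_cfg = {	/* assumed */
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int clkc_mmio_probe_sketch(struct platform_device *pdev)
{
	const struct meson_clkc_data *data;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct regmap *map;
	int i, ret;

	data = device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(dev, base, &clkc_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * The real helper must also hand "map" to every regmap-backed
	 * clock before registration, which is what made the per-driver
	 * a1_*_regmaps[] arrays deleted above redundant; that mechanism
	 * is omitted here.
	 */
	for (i = 0; i < data->hw_clks.num; i++) {
		ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
		if (ret)
			return dev_err_probe(dev, ret,
					     "clock[%d] registration failed\n",
					     i);
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}
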
diff --git a/drivers/clk/meson/a1-peripherals.h b/drivers/clk/meson/a1-peripherals.h
deleted file mode 100644
index 26de8530184a..000000000000
--- a/drivers/clk/meson/a1-peripherals.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 Peripherals Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PERIPHERALS_H
-#define __A1_PERIPHERALS_H
-
-/* peripherals clock controller register offset */
-#define SYS_OSCIN_CTRL 0x0
-#define RTC_BY_OSCIN_CTRL0 0x4
-#define RTC_BY_OSCIN_CTRL1 0x8
-#define RTC_CTRL 0xc
-#define SYS_CLK_CTRL0 0x10
-#define SYS_CLK_EN0 0x1c
-#define SYS_CLK_EN1 0x20
-#define AXI_CLK_EN 0x24
-#define DSPA_CLK_EN 0x28
-#define DSPB_CLK_EN 0x2c
-#define DSPA_CLK_CTRL0 0x30
-#define DSPB_CLK_CTRL0 0x34
-#define CLK12_24_CTRL 0x38
-#define GEN_CLK_CTRL 0x3c
-#define SAR_ADC_CLK_CTRL 0xc0
-#define PWM_CLK_AB_CTRL 0xc4
-#define PWM_CLK_CD_CTRL 0xc8
-#define PWM_CLK_EF_CTRL 0xcc
-#define SPICC_CLK_CTRL 0xd0
-#define TS_CLK_CTRL 0xd4
-#define SPIFC_CLK_CTRL 0xd8
-#define USB_BUSCLK_CTRL 0xdc
-#define SD_EMMC_CLK_CTRL 0xe0
-#define CECA_CLK_CTRL0 0xe4
-#define CECA_CLK_CTRL1 0xe8
-#define CECB_CLK_CTRL0 0xec
-#define CECB_CLK_CTRL1 0xf0
-#define PSRAM_CLK_CTRL 0xf4
-#define DMC_CLK_CTRL 0xf8
-
-#endif /* __A1_PERIPHERALS_H */
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 8e5a42d1afbb..1f82e9c7c14e 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -10,13 +10,23 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-pll.h"
+#include "clk-pll.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
+#define ANACTRL_FIXPLL_CTRL0 0x0
+#define ANACTRL_FIXPLL_CTRL1 0x4
+#define ANACTRL_FIXPLL_STS 0x14
+#define ANACTRL_HIFIPLL_CTRL0 0xc0
+#define ANACTRL_HIFIPLL_CTRL1 0xc4
+#define ANACTRL_HIFIPLL_CTRL2 0xc8
+#define ANACTRL_HIFIPLL_CTRL3 0xcc
+#define ANACTRL_HIFIPLL_CTRL4 0xd0
+#define ANACTRL_HIFIPLL_STS 0xd4
+
#include <dt-bindings/clock/amlogic,a1-pll-clkc.h>
-static struct clk_regmap fixed_pll_dco = {
+static struct clk_regmap a1_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_FIXPLL_CTRL0,
@@ -59,7 +69,7 @@ static struct clk_regmap fixed_pll_dco = {
},
};
-static struct clk_regmap fixed_pll = {
+static struct clk_regmap a1_fixed_pll = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 20,
@@ -68,18 +78,18 @@ static struct clk_regmap fixed_pll = {
.name = "fixed_pll",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll_dco.hw
+ &a1_fixed_pll_dco.hw
},
.num_parents = 1,
},
};
-static const struct pll_mult_range hifi_pll_mult_range = {
+static const struct pll_mult_range a1_hifi_pll_range = {
.min = 32,
.max = 64,
};
-static const struct reg_sequence hifi_init_regs[] = {
+static const struct reg_sequence a1_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x01800000 },
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00001100 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x100a1100 },
@@ -87,7 +97,7 @@ static const struct reg_sequence hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL0, .def = 0x01f18000 },
};
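
The init sequence above is written to the HIFI PLL registers once, before the
PLL is first enabled; the meson PLL ops apply it through the regmap multi-write
helper. A minimal sketch of that step:

#include <linux/regmap.h>

/* apply a PLL init sequence, as the meson PLL init op does internally */
static int apply_pll_init(struct regmap *map,
			  const struct reg_sequence *regs, unsigned int n)
{
	return regmap_multi_reg_write(map, regs, n);
}
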
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap a1_hifi_pll = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -124,9 +134,9 @@ static struct clk_regmap hifi_pll = {
.shift = 6,
.width = 1,
},
- .range = &hifi_pll_mult_range,
- .init_regs = hifi_init_regs,
- .init_count = ARRAY_SIZE(hifi_init_regs),
+ .range = &a1_hifi_pll_range,
+ .init_regs = a1_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(a1_hifi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
@@ -138,20 +148,20 @@ static struct clk_regmap hifi_pll = {
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor a1_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap a1_fclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 21,
@@ -160,7 +170,7 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &a1_fclk_div2_div.hw
},
.num_parents = 1,
/*
@@ -176,20 +186,20 @@ static struct clk_regmap fclk_div2 = {
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor a1_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap a1_fclk_div3 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 22,
@@ -198,7 +208,7 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &a1_fclk_div3_div.hw
},
.num_parents = 1,
/*
@@ -209,20 +219,20 @@ static struct clk_regmap fclk_div3 = {
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor a1_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap a1_fclk_div5 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 23,
@@ -231,7 +241,7 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &a1_fclk_div5_div.hw
},
.num_parents = 1,
/*
@@ -242,20 +252,20 @@ static struct clk_regmap fclk_div5 = {
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor a1_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap a1_fclk_div7 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 24,
@@ -264,7 +274,7 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &a1_fclk_div7_div.hw
},
.num_parents = 1,
},
@@ -272,83 +282,37 @@ static struct clk_regmap fclk_div7 = {
/* Array of all clocks registered by this provider */
static struct clk_hw *a1_pll_hw_clks[] = {
- [CLKID_FIXED_PLL_DCO] = &fixed_pll_dco.hw,
- [CLKID_FIXED_PLL] = &fixed_pll.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
-};
-
-static struct clk_regmap *const a1_pll_regmaps[] = {
- &fixed_pll_dco,
- &fixed_pll,
- &fclk_div2,
- &fclk_div3,
- &fclk_div5,
- &fclk_div7,
- &hifi_pll,
+ [CLKID_FIXED_PLL_DCO] = &a1_fixed_pll_dco.hw,
+ [CLKID_FIXED_PLL] = &a1_fixed_pll.hw,
+ [CLKID_FCLK_DIV2_DIV] = &a1_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &a1_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &a1_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &a1_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2] = &a1_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &a1_fclk_div3.hw,
+ [CLKID_FCLK_DIV5] = &a1_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &a1_fclk_div7.hw,
+ [CLKID_HIFI_PLL] = &a1_hifi_pll.hw,
};
-static const struct regmap_config a1_pll_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HIFIPLL_STS,
-};
-
-static struct meson_clk_hw_data a1_pll_clks = {
- .hws = a1_pll_hw_clks,
- .num = ARRAY_SIZE(a1_pll_hw_clks),
+static const struct meson_clkc_data a1_pll_clkc_data = {
+ .hw_clks = {
+ .hws = a1_pll_hw_clks,
+ .num = ARRAY_SIZE(a1_pll_hw_clks),
+ },
};
-static int meson_a1_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, i, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_pll_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_pll_regmaps); i++)
- a1_pll_regmaps[i]->map = map;
-
- /* Register clocks */
- for (clkid = 0; clkid < a1_pll_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_pll_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &a1_pll_clks);
-}
-
static const struct of_device_id a1_pll_clkc_match_table[] = {
- { .compatible = "amlogic,a1-pll-clkc", },
+ {
+ .compatible = "amlogic,a1-pll-clkc",
+ .data = &a1_pll_clkc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, a1_pll_clkc_match_table);
static struct platform_driver a1_pll_clkc_driver = {
- .probe = meson_a1_pll_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-pll-clkc",
.of_match_table = a1_pll_clkc_match_table,
@@ -356,8 +320,8 @@ static struct platform_driver a1_pll_clkc_driver = {
};
module_platform_driver(a1_pll_clkc_driver);
-MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
+MODULE_DESCRIPTION("Amlogic A1 PLL Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
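
Editor's note: the bespoke probe deleted above is replaced by the shared meson_clkc_mmio_probe(). The sketch below is reconstructed from the removed code as a hedged illustration only; the real helper lives in the shared meson-clkc-utils code and may differ, in particular in how clk_regmap clocks now receive their regmap (the hand-populated ->map loop is gone from this file). The function name suffix and the regmap config are illustrative assumptions.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include "meson-clkc-utils.h"

/* Assumed generic 32-bit MMIO config, modeled on the removed a1_pll_regmap_cfg */
static const struct regmap_config meson_clkc_mmio_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int meson_clkc_mmio_probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct meson_clkc_data *data;
	void __iomem *base;
	struct regmap *map;
	int clkid, err;

	/* Per-SoC data now comes from the of_device_id .data pointer */
	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(dev, base, &meson_clkc_mmio_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * The removed probe populated clk_regmap->map by hand at this point;
	 * the shared helper presumably does the equivalent internally.
	 */

	for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
		err = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
		if (err)
			return dev_err_probe(dev, err,
					     "clock[%d] registration failed\n",
					     clkid);
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}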
diff --git a/drivers/clk/meson/a1-pll.h b/drivers/clk/meson/a1-pll.h
deleted file mode 100644
index 4be17b2bf383..000000000000
--- a/drivers/clk/meson/a1-pll.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 PLL Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PLL_H
-#define __A1_PLL_H
-
-#include "clk-pll.h"
-
-/* PLL register offset */
-#define ANACTRL_FIXPLL_CTRL0 0x0
-#define ANACTRL_FIXPLL_CTRL1 0x4
-#define ANACTRL_FIXPLL_STS 0x14
-#define ANACTRL_HIFIPLL_CTRL0 0xc0
-#define ANACTRL_HIFIPLL_CTRL1 0xc4
-#define ANACTRL_HIFIPLL_CTRL2 0xc8
-#define ANACTRL_HIFIPLL_CTRL3 0xcc
-#define ANACTRL_HIFIPLL_CTRL4 0xd0
-#define ANACTRL_HIFIPLL_STS 0xd4
-
-#endif /* __A1_PLL_H */
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 1dabc81535a6..902fbd34039c 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -34,32 +34,21 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define AXG_AO_GATE(_name, _bit) \
-static struct clk_regmap axg_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (AO_RTI_GEN_CNTL_REG0), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "axg_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data axg_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-AXG_AO_GATE(remote, 0);
-AXG_AO_GATE(i2c_master, 1);
-AXG_AO_GATE(i2c_slave, 2);
-AXG_AO_GATE(uart1, 3);
-AXG_AO_GATE(uart2, 5);
-AXG_AO_GATE(ir_blaster, 6);
-AXG_AO_GATE(saradc, 7);
+#define AXG_AO_GATE(_name, _bit, _flags) \
+ MESON_PCLK(axg_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &axg_ao_pclk_parents, _flags)
-static struct clk_regmap axg_aoclk_cts_oscin = {
+static AXG_AO_GATE(remote, 0, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_master, 1, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart1, 3, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart2, 5, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(ir_blaster, 6, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(saradc, 7, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap axg_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -74,7 +63,7 @@ static struct clk_regmap axg_aoclk_cts_oscin = {
},
};
-static struct clk_regmap axg_aoclk_32k_pre = {
+static struct clk_regmap axg_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -83,7 +72,7 @@ static struct clk_regmap axg_aoclk_32k_pre = {
.name = "axg_ao_32k_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_cts_oscin.hw
+ &axg_ao_cts_oscin.hw
},
.num_parents = 1,
},
@@ -99,7 +88,7 @@ static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
}, {}
};
-static struct clk_regmap axg_aoclk_32k_div = {
+static struct clk_regmap axg_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -132,13 +121,13 @@ static struct clk_regmap axg_aoclk_32k_div = {
.name = "axg_ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_pre.hw
+ &axg_ao_32k_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap axg_aoclk_32k_sel = {
+static struct clk_regmap axg_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -149,15 +138,15 @@ static struct clk_regmap axg_aoclk_32k_sel = {
.name = "axg_ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_div.hw,
- &axg_aoclk_32k_pre.hw,
+ &axg_ao_32k_div.hw,
+ &axg_ao_32k_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_32k = {
+static struct clk_regmap axg_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -166,14 +155,14 @@ static struct clk_regmap axg_aoclk_32k = {
.name = "axg_ao_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_sel.hw
+ &axg_ao_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+static struct clk_regmap axg_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -184,7 +173,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
.name = "axg_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &axg_aoclk_32k.hw },
+ { .hw = &axg_ao_32k.hw },
{ .fw_name = "ext_32k-0", },
},
.num_parents = 2,
@@ -192,7 +181,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap axg_aoclk_clk81 = {
+static struct clk_regmap axg_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -200,68 +189,74 @@ static struct clk_regmap axg_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+		/*
+		 * NOTE: this is one of the infamous clocks the pwm driver
+		 * can request directly by its global name. It's wrong, but
+		 * there is not much we can do about it until support for
+		 * the old pwm bindings is dropped.
+		 */
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &axg_aoclk_cts_rtc_oscin.hw },
+ { .hw = &axg_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_mux = {
+static struct clk_regmap axg_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &axg_aoclk_clk81.hw },
+ { .hw = &axg_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap axg_aoclk_saradc_div = {
+static struct clk_regmap axg_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_mux.hw
+ &axg_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_gate = {
+static struct clk_regmap axg_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_div.hw
+ &axg_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int axg_aoclk_reset[] = {
+static const unsigned int axg_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -270,76 +265,56 @@ static const unsigned int axg_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *axg_aoclk_regmap[] = {
- &axg_aoclk_remote,
- &axg_aoclk_i2c_master,
- &axg_aoclk_i2c_slave,
- &axg_aoclk_uart1,
- &axg_aoclk_uart2,
- &axg_aoclk_ir_blaster,
- &axg_aoclk_saradc,
- &axg_aoclk_cts_oscin,
- &axg_aoclk_32k_pre,
- &axg_aoclk_32k_div,
- &axg_aoclk_32k_sel,
- &axg_aoclk_32k,
- &axg_aoclk_cts_rtc_oscin,
- &axg_aoclk_clk81,
- &axg_aoclk_saradc_mux,
- &axg_aoclk_saradc_div,
- &axg_aoclk_saradc_gate,
-};
-
-static struct clk_hw *axg_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave.hw,
- [CLKID_AO_UART1] = &axg_aoclk_uart1.hw,
- [CLKID_AO_UART2] = &axg_aoclk_uart2.hw,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster.hw,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc.hw,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
- [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
- [CLKID_AO_32K] = &axg_aoclk_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *axg_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &axg_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &axg_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &axg_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &axg_ao_uart1.hw,
+ [CLKID_AO_UART2] = &axg_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &axg_ao_ir_blaster.hw,
+ [CLKID_AO_SAR_ADC] = &axg_ao_saradc.hw,
+ [CLKID_AO_CLK81] = &axg_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data axg_aoclkc_data = {
+static const struct meson_aoclk_data axg_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(axg_aoclk_reset),
- .reset = axg_aoclk_reset,
- .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
- .clks = axg_aoclk_regmap,
- .hw_clks = {
- .hws = axg_aoclk_hw_clks,
- .num = ARRAY_SIZE(axg_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(axg_ao_reset),
+ .reset = axg_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = axg_ao_hw_clks,
+ .num = ARRAY_SIZE(axg_ao_hw_clks),
+ },
},
};
-static const struct of_device_id axg_aoclkc_match_table[] = {
+static const struct of_device_id axg_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-axg-aoclkc",
- .data = &axg_aoclkc_data,
+ .data = &axg_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_ao_clkc_match_table);
-static struct platform_driver axg_aoclkc_driver = {
+static struct platform_driver axg_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
- .name = "axg-aoclkc",
- .of_match_table = axg_aoclkc_match_table,
+ .name = "axg-ao-clkc",
+ .of_match_table = axg_ao_clkc_match_table,
},
};
-module_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
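
Editor's note: the OF match data now points at the clkc_data member embedded in struct meson_aoclk_data (see `.data = &axg_ao_clkc_data.clkc_data` above), so the shared aoclk probe can recover the outer structure with the usual container_of() pattern. A minimal hedged sketch, assuming this is how meson_aoclkc_probe() does it; the real helper may organize this differently.

static int meson_aoclkc_probe_sketch(struct platform_device *pdev)
{
	const struct meson_clkc_data *clkc_data;
	const struct meson_aoclk_data *data;

	clkc_data = of_device_get_match_data(&pdev->dev);
	if (!clkc_data)
		return -EINVAL;

	/* Step back from the embedded member to the aoclk-specific data */
	data = container_of(clkc_data, struct meson_aoclk_data, clkc_data);

	/* Resets would then be set up from data->reset_reg / data->reset */
	return 0;
}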
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index beda86349389..fd7eca652261 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -4,6 +4,7 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
@@ -12,17 +13,70 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "meson-clkc-utils.h"
-#include "axg-audio.h"
#include "clk-regmap.h"
#include "clk-phase.h"
#include "sclk-div.h"
#include <dt-bindings/clock/axg-audio-clkc.h>
+/* Audio clock register offsets */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_PAD_CTRL0 0x01c
+#define AUDIO_MST_PAD_CTRL1 0x020
+#define AUDIO_SW_RESET 0x024
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
+
+/* SM1 introduces new registers and some shifts :( */
+#define AUDIO_CLK_GATE_EN1 0x004
+#define AUDIO_SM1_MCLK_A_CTRL 0x008
+#define AUDIO_SM1_MCLK_B_CTRL 0x00C
+#define AUDIO_SM1_MCLK_C_CTRL 0x010
+#define AUDIO_SM1_MCLK_D_CTRL 0x014
+#define AUDIO_SM1_MCLK_E_CTRL 0x018
+#define AUDIO_SM1_MCLK_F_CTRL 0x01C
+#define AUDIO_SM1_MST_PAD_CTRL0 0x020
+#define AUDIO_SM1_MST_PAD_CTRL1 0x024
+#define AUDIO_SM1_SW_RESET0 0x028
+#define AUDIO_SM1_SW_RESET1 0x02C
+#define AUDIO_CLK81_CTRL 0x030
+#define AUDIO_CLK81_EN 0x034
+#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
+#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
+
#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
@@ -1257,505 +1311,6 @@ static struct clk_hw *sm1_audio_hw_clks[] = {
[AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw,
};
-
-/* Convenience table to populate regmap in .probe(). */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &axg_tdmout_a_sclk,
- &axg_tdmout_b_sclk,
- &axg_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
-};
-
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &spdifout_b,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &g12a_tdm_mclk_pad_0,
- &g12a_tdm_mclk_pad_1,
- &g12a_tdm_lrclk_pad_0,
- &g12a_tdm_lrclk_pad_1,
- &g12a_tdm_lrclk_pad_2,
- &g12a_tdm_sclk_pad_0,
- &g12a_tdm_sclk_pad_1,
- &g12a_tdm_sclk_pad_2,
- &toram,
- &eqdrc,
-};
-
-static struct clk_regmap *const sm1_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &spdifout_b,
- &sm1_mst_a_mclk_sel,
- &sm1_mst_b_mclk_sel,
- &sm1_mst_c_mclk_sel,
- &sm1_mst_d_mclk_sel,
- &sm1_mst_e_mclk_sel,
- &sm1_mst_f_mclk_sel,
- &sm1_mst_a_mclk_div,
- &sm1_mst_b_mclk_div,
- &sm1_mst_c_mclk_div,
- &sm1_mst_d_mclk_div,
- &sm1_mst_e_mclk_div,
- &sm1_mst_f_mclk_div,
- &sm1_mst_a_mclk,
- &sm1_mst_b_mclk,
- &sm1_mst_c_mclk,
- &sm1_mst_d_mclk,
- &sm1_mst_e_mclk,
- &sm1_mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &sm1_tdm_mclk_pad_0,
- &sm1_tdm_mclk_pad_1,
- &sm1_tdm_lrclk_pad_0,
- &sm1_tdm_lrclk_pad_1,
- &sm1_tdm_lrclk_pad_2,
- &sm1_tdm_sclk_pad_0,
- &sm1_tdm_sclk_pad_1,
- &sm1_tdm_sclk_pad_2,
- &sm1_aud_top,
- &toram,
- &eqdrc,
- &resample_b,
- &tovad,
- &locker,
- &spdifin_lb,
- &frddr_d,
- &toddr_d,
- &loopback_b,
- &sm1_clk81_en,
- &sm1_sysclk_a_div,
- &sm1_sysclk_a_en,
- &sm1_sysclk_b_div,
- &sm1_sysclk_b_en,
- &earcrx,
- &sm1_earcrx_cmdc_clk_sel,
- &sm1_earcrx_cmdc_clk_div,
- &sm1_earcrx_cmdc_clk,
- &sm1_earcrx_dmac_clk_sel,
- &sm1_earcrx_dmac_clk_div,
- &sm1_earcrx_dmac_clk,
-};
-
-struct axg_audio_reset_data {
- struct reset_controller_dev rstc;
- struct regmap *map;
- unsigned int offset;
-};
-
-static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst,
- unsigned long id,
- unsigned int *reg,
- unsigned int *bit)
-{
- unsigned int stride = regmap_get_reg_stride(rst->map);
-
- *reg = (id / (stride * BITS_PER_BYTE)) * stride;
- *reg += rst->offset;
- *bit = id % (stride * BITS_PER_BYTE);
-}
-
-static int axg_audio_reset_update(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_update_bits(rst->map, offset, BIT(bit),
- assert ? BIT(bit) : 0);
-
- return 0;
-}
-
-static int axg_audio_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int val, offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_read(rst->map, offset, &val);
-
- return !!(val & BIT(bit));
-}
-
-static int axg_audio_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, true);
-}
-
-static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, false);
-}
-
-static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- int ret;
-
- ret = axg_audio_reset_assert(rcdev, id);
- if (ret)
- return ret;
-
- return axg_audio_reset_deassert(rcdev, id);
-}
-
-static const struct reset_control_ops axg_audio_rstc_ops = {
- .assert = axg_audio_reset_assert,
- .deassert = axg_audio_reset_deassert,
- .reset = axg_audio_reset_toggle,
- .status = axg_audio_reset_status,
-};
-
static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -1763,11 +1318,8 @@ static struct regmap_config axg_audio_regmap_cfg = {
};
struct audioclk_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
struct meson_clk_hw_data hw_clks;
- unsigned int reset_offset;
- unsigned int reset_num;
+ const char *rst_drvname;
unsigned int max_register;
};
@@ -1775,7 +1327,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct audioclk_data *data;
- struct axg_audio_reset_data *rst;
+ struct auxiliary_device *auxdev;
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
@@ -1808,10 +1360,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
/* Take care to skip the registered input clocks */
for (i = AUD_CLKID_DDR_ARB; i < data->hw_clks.num; i++) {
const char *name;
@@ -1834,27 +1382,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Stop here if there is no reset */
- if (!data->reset_num)
- return 0;
-
- rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL);
- if (!rst)
- return -ENOMEM;
-
- rst->map = map;
- rst->offset = data->reset_offset;
- rst->rstc.nr_resets = data->reset_num;
- rst->rstc.ops = &axg_audio_rstc_ops;
- rst->rstc.of_node = dev->of_node;
- rst->rstc.owner = THIS_MODULE;
+ /* Register auxiliary reset driver when applicable */
+ if (data->rst_drvname) {
+ auxdev = __devm_auxiliary_device_create(dev, dev->driver->name,
+ data->rst_drvname, NULL, 0);
+ if (!auxdev)
+ return -ENODEV;
+ }
- return devm_reset_controller_register(dev, &rst->rstc);
+ return 0;
}
static const struct audioclk_data axg_audioclk_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_clks = {
.hws = axg_audio_hw_clks,
.num = ARRAY_SIZE(axg_audio_hw_clks),
@@ -1863,26 +1402,20 @@ static const struct audioclk_data axg_audioclk_data = {
};
static const struct audioclk_data g12a_audioclk_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = g12a_audio_hw_clks,
.num = ARRAY_SIZE(g12a_audio_hw_clks),
},
- .reset_offset = AUDIO_SW_RESET,
- .reset_num = 26,
+ .rst_drvname = "rst-g12a",
.max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
static const struct audioclk_data sm1_audioclk_data = {
- .regmap_clks = sm1_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(sm1_clk_regmaps),
.hw_clks = {
.hws = sm1_audio_hw_clks,
.num = ARRAY_SIZE(sm1_audio_hw_clks),
},
- .reset_offset = AUDIO_SM1_SW_RESET0,
- .reset_num = 39,
+ .rst_drvname = "rst-sm1",
.max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
};
@@ -1912,4 +1445,4 @@ module_platform_driver(axg_audio_driver);
MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
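
Editor's note: with the in-driver reset controller removed, reset support is handed off over the auxiliary bus. The clock driver creates an auxiliary device named from rst_drvname ("rst-g12a" / "rst-sm1"), and a separate reset driver binds to it. Below is a hedged sketch of that consumer side, assuming the usual "<modname>.<devname>" auxiliary-bus matching with the prefix taken from dev->driver->name as passed above; the id strings and probe body are illustrative, and the real meson audio reset driver (which would rebuild the deleted reset_controller_dev from the parent's regmap and the AUDIO_SW_RESET / AUDIO_SM1_SW_RESET0 offsets) lives elsewhere.

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int meson_audio_rst_probe(struct auxiliary_device *adev,
				 const struct auxiliary_device_id *id)
{
	/*
	 * adev->dev.parent is the clock controller device; its regmap
	 * would back the reset operations removed from this file.
	 */
	return 0;
}

static const struct auxiliary_device_id meson_audio_rst_ids[] = {
	{ .name = "axg-audio-clkc.rst-g12a" },	/* assumed modname prefix */
	{ .name = "axg-audio-clkc.rst-sm1" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, meson_audio_rst_ids);

static struct auxiliary_driver meson_audio_rst_driver = {
	.probe = meson_audio_rst_probe,
	.id_table = meson_audio_rst_ids,
};
module_auxiliary_driver(meson_audio_rst_driver);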
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
deleted file mode 100644
index 9e7765b630c9..000000000000
--- a/drivers/clk/meson/axg-audio.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __AXG_AUDIO_CLKC_H
-#define __AXG_AUDIO_CLKC_H
-
-/*
- * Audio Clock register offsets
- *
- * Register offsets from the datasheet must be multiplied by 4 before
- * to get the right offset
- */
-#define AUDIO_CLK_GATE_EN 0x000
-#define AUDIO_MCLK_A_CTRL 0x004
-#define AUDIO_MCLK_B_CTRL 0x008
-#define AUDIO_MCLK_C_CTRL 0x00C
-#define AUDIO_MCLK_D_CTRL 0x010
-#define AUDIO_MCLK_E_CTRL 0x014
-#define AUDIO_MCLK_F_CTRL 0x018
-#define AUDIO_MST_PAD_CTRL0 0x01c
-#define AUDIO_MST_PAD_CTRL1 0x020
-#define AUDIO_SW_RESET 0x024
-#define AUDIO_MST_A_SCLK_CTRL0 0x040
-#define AUDIO_MST_A_SCLK_CTRL1 0x044
-#define AUDIO_MST_B_SCLK_CTRL0 0x048
-#define AUDIO_MST_B_SCLK_CTRL1 0x04C
-#define AUDIO_MST_C_SCLK_CTRL0 0x050
-#define AUDIO_MST_C_SCLK_CTRL1 0x054
-#define AUDIO_MST_D_SCLK_CTRL0 0x058
-#define AUDIO_MST_D_SCLK_CTRL1 0x05C
-#define AUDIO_MST_E_SCLK_CTRL0 0x060
-#define AUDIO_MST_E_SCLK_CTRL1 0x064
-#define AUDIO_MST_F_SCLK_CTRL0 0x068
-#define AUDIO_MST_F_SCLK_CTRL1 0x06C
-#define AUDIO_CLK_TDMIN_A_CTRL 0x080
-#define AUDIO_CLK_TDMIN_B_CTRL 0x084
-#define AUDIO_CLK_TDMIN_C_CTRL 0x088
-#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
-#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
-#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
-#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
-#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
-#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
-#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
-#define AUDIO_CLK_LOCKER_CTRL 0x0A8
-#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
-#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
-#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
-
-/* SM1 introduce new register and some shifts :( */
-#define AUDIO_CLK_GATE_EN1 0x004
-#define AUDIO_SM1_MCLK_A_CTRL 0x008
-#define AUDIO_SM1_MCLK_B_CTRL 0x00C
-#define AUDIO_SM1_MCLK_C_CTRL 0x010
-#define AUDIO_SM1_MCLK_D_CTRL 0x014
-#define AUDIO_SM1_MCLK_E_CTRL 0x018
-#define AUDIO_SM1_MCLK_F_CTRL 0x01C
-#define AUDIO_SM1_MST_PAD_CTRL0 0x020
-#define AUDIO_SM1_MST_PAD_CTRL1 0x024
-#define AUDIO_SM1_SW_RESET0 0x028
-#define AUDIO_SM1_SW_RESET1 0x02C
-#define AUDIO_CLK81_CTRL 0x030
-#define AUDIO_CLK81_EN 0x034
-#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
-#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
-
-#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 757c7a28c53d..0a25c649ef1d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -18,12 +18,95 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "axg.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/axg-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_STS 0x54
+#define HHI_GP0_PLL_CNTL1 0x58
+#define HHI_HIFI_PLL_CNTL 0x80
+#define HHI_HIFI_PLL_CNTL2 0x84
+#define HHI_HIFI_PLL_CNTL3 0x88
+#define HHI_HIFI_PLL_CNTL4 0x8C
+#define HHI_HIFI_PLL_CNTL5 0x90
+#define HHI_HIFI_PLL_STS 0x94
+#define HHI_HIFI_PLL_CNTL1 0x98
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_GCLK2_MPEG0 0xc0
+#define HHI_GCLK2_MPEG1 0xc4
+#define HHI_GCLK2_MPEG2 0xc8
+#define HHI_GCLK2_OTHER 0xd0
+#define HHI_GCLK2_AO 0xd4
+#define HHI_PCIE_PLL_CNTL 0xd8
+#define HHI_PCIE_PLL_CNTL1 0xdC
+#define HHI_PCIE_PLL_CNTL2 0xe0
+#define HHI_PCIE_PLL_CNTL3 0xe4
+#define HHI_PCIE_PLL_CNTL4 0xe8
+#define HHI_PCIE_PLL_CNTL5 0xec
+#define HHI_PCIE_PLL_CNTL6 0xf0
+#define HHI_PCIE_PLL_STS 0xf4
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_VPU_MEM_PD_REG0 0x104
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_SPICC_HCLK_CNTL 0x168
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_VPU_CLK_CNTL 0x1bC
+
+#define HHI_VAPBCLK_CNTL 0x1F4
+
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_NAND_CLK_CNTL 0x25C
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28C
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29C
+#define HHI_MPLL_CNTL9 0x2A0
+#define HHI_MPLL_CNTL10 0x2A4
+
+#define HHI_MPLL3_CNTL0 0x2E0
+#define HHI_MPLL3_CNTL1 0x2E4
+#define HHI_PLL_TOP_MISC 0x2E8
+
+#define HHI_SYS_PLL_CNTL1 0x2FC
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_SYS_PLL_STS 0x314
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31C
static struct clk_regmap axg_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
@@ -250,7 +333,7 @@ static struct clk_regmap axg_gp0_pll = {
},
};
-static const struct reg_sequence axg_hifi_init_regs[] = {
+static const struct reg_sequence axg_hifi_pll_init_regs[] = {
{ .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
@@ -291,8 +374,8 @@ static struct clk_regmap axg_hifi_pll_dco = {
.width = 1,
},
.table = axg_gp0_pll_params_table,
- .init_regs = axg_hifi_init_regs,
- .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .init_regs = axg_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_pll_init_regs),
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -506,7 +589,6 @@ static struct clk_regmap axg_mpll0_div = {
.shift = 0,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -557,7 +639,6 @@ static struct clk_regmap axg_mpll1_div = {
.shift = 1,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -613,7 +694,6 @@ static struct clk_regmap axg_mpll2_div = {
.shift = 2,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -664,7 +744,6 @@ static struct clk_regmap axg_mpll3_div = {
.shift = 3,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -701,7 +780,7 @@ static const struct pll_params_table axg_pcie_pll_params_table[] = {
{ /* sentinel */ },
};
-static const struct reg_sequence axg_pcie_init_regs[] = {
+static const struct reg_sequence axg_pcie_pll_init_regs[] = {
{ .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
{ .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
@@ -744,8 +823,8 @@ static struct clk_regmap axg_pcie_pll_dco = {
.width = 1,
},
.table = axg_pcie_pll_params_table,
- .init_regs = axg_pcie_init_regs,
- .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ .init_regs = axg_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
@@ -856,8 +935,9 @@ static struct clk_regmap axg_pcie_cml_en1 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div7.hw },
{ .hw = &axg_mpll1.hw },
@@ -867,32 +947,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &axg_fclk_div5.hw },
};
-static struct clk_regmap axg_mpeg_clk_sel = {
+static struct clk_regmap axg_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap axg_mpeg_clk_div = {
+static struct clk_regmap axg_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_sel.hw
+ &axg_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -908,14 +988,14 @@ static struct clk_regmap axg_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_div.hw
+ &axg_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data axg_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div2.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -924,7 +1004,7 @@ static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -939,8 +1019,8 @@ static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -989,8 +1069,8 @@ static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1031,7 +1111,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *axg_vpu_parent_hws[] = {
+static const struct clk_hw *axg_vpu_parents[] = {
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
&axg_fclk_div5.hw,
@@ -1047,8 +1127,8 @@ static struct clk_regmap axg_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for VPU clock source, let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1096,8 +1176,8 @@ static struct clk_regmap axg_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for VPU clock source, let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1165,8 +1245,8 @@ static struct clk_regmap axg_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1213,8 +1293,8 @@ static struct clk_regmap axg_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1286,7 +1366,7 @@ static struct clk_regmap axg_vapb = {
/* Video Clocks */
-static const struct clk_hw *axg_vclk_parent_hws[] = {
+static const struct clk_hw *axg_vclk_parents[] = {
&axg_gp0_pll.hw,
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
@@ -1305,8 +1385,8 @@ static struct clk_regmap axg_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1320,8 +1400,8 @@ static struct clk_regmap axg_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1660,8 +1740,8 @@ static struct clk_fixed_factor axg_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *axg_cts_parent_hws[] = {
+static u32 axg_cts_encl_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_encl_parents[] = {
&axg_vclk_div1.hw,
&axg_vclk_div2.hw,
&axg_vclk_div4.hw,
@@ -1679,13 +1759,13 @@ static struct clk_regmap axg_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = axg_cts_encl_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_cts_parent_hws,
- .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .parent_hws = axg_cts_encl_parents,
+ .num_parents = ARRAY_SIZE(axg_cts_encl_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1708,8 +1788,8 @@ static struct clk_regmap axg_cts_encl = {
/* MIPI DSI Host Clock */
-static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
-static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+static u32 axg_vdin_meas_parents_val_table[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div4.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1724,13 +1804,13 @@ static struct clk_regmap axg_vdin_meas_sel = {
.mask = 0x7,
.shift = 21,
.flags = CLK_MUX_ROUND_CLOSEST,
- .table = mux_table_axg_vdin_meas,
+ .table = axg_vdin_meas_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_vdin_meas_parent_data,
- .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .parent_data = axg_vdin_meas_parents,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1766,9 +1846,8 @@ static struct clk_regmap axg_vdin_meas = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_hifi_pll.hw },
{ .hw = &axg_mpll0.hw },
@@ -1787,7 +1866,7 @@ static struct clk_regmap axg_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -1798,8 +1877,8 @@ static struct clk_regmap axg_gen_clk_sel = {
* hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gen_clk_parents),
},
};
@@ -1836,59 +1915,71 @@ static struct clk_regmap axg_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &axg_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(axg_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(axg_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(axg_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(axg_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(axg_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(axg_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(axg_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(axg_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(axg_mipi_dsi_phy, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(axg_spicc_1, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(axg_pcie_a, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(axg_pcie_b, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(axg_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(axg_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(axg_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(axg_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(axg_dma, HHI_GCLK_MPEG0, 27);
-static MESON_GATE(axg_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(axg_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(axg_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(axg_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(axg_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(axg_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(axg_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(axg_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(axg_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(axg_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(axg_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(axg_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(axg_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(axg_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(axg_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(axg_usb0_to_ddr, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static const struct clk_parent_data axg_pclk_parents = { .hw = &axg_clk81.hw };
+
+#define AXG_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(axg_##_name, _reg, _bit, &axg_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate,
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static AXG_PCLK(periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_phy, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_1, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_a, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_b, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static AXG_PCLK(hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static AXG_PCLK(assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(dma, HHI_GCLK_MPEG0, 27, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static AXG_PCLK(reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static AXG_PCLK(efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static AXG_PCLK(boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0_to_ddr, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static AXG_PCLK(vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(axg_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(axg_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(axg_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(axg_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(axg_ao_i2c, HHI_GCLK_AO, 4);
+static AXG_PCLK(ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -1901,8 +1992,8 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_FCLK_DIV5] = &axg_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &axg_fclk_div7.hw,
[CLKID_GP0_PLL] = &axg_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &axg_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &axg_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &axg_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &axg_clk81_div.hw,
[CLKID_CLK81] = &axg_clk81.hw,
[CLKID_MPLL0] = &axg_mpll0.hw,
[CLKID_MPLL1] = &axg_mpll1.hw,
@@ -2031,160 +2122,28 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &axg_clk81,
- &axg_ddr,
- &axg_audio_locker,
- &axg_mipi_dsi_host,
- &axg_isa,
- &axg_pl301,
- &axg_periphs,
- &axg_spicc_0,
- &axg_i2c,
- &axg_rng0,
- &axg_uart0,
- &axg_mipi_dsi_phy,
- &axg_spicc_1,
- &axg_pcie_a,
- &axg_pcie_b,
- &axg_hiu_reg,
- &axg_assist_misc,
- &axg_emmc_b,
- &axg_emmc_c,
- &axg_dma,
- &axg_spi,
- &axg_audio,
- &axg_eth_core,
- &axg_uart1,
- &axg_g2d,
- &axg_usb0,
- &axg_usb1,
- &axg_reset,
- &axg_usb_general,
- &axg_ahb_arb0,
- &axg_efuse,
- &axg_boot_rom,
- &axg_ahb_data_bus,
- &axg_ahb_ctrl_bus,
- &axg_usb1_to_ddr,
- &axg_usb0_to_ddr,
- &axg_mmc_pclk,
- &axg_vpu_intr,
- &axg_sec_ahb_ahb3_bridge,
- &axg_gic,
- &axg_ao_media_cpu,
- &axg_ao_ahb_sram,
- &axg_ao_ahb_bus,
- &axg_ao_iface,
- &axg_ao_i2c,
- &axg_sd_emmc_b_clk0,
- &axg_sd_emmc_c_clk0,
- &axg_mpeg_clk_div,
- &axg_sd_emmc_b_clk0_div,
- &axg_sd_emmc_c_clk0_div,
- &axg_mpeg_clk_sel,
- &axg_sd_emmc_b_clk0_sel,
- &axg_sd_emmc_c_clk0_sel,
- &axg_mpll0,
- &axg_mpll1,
- &axg_mpll2,
- &axg_mpll3,
- &axg_mpll0_div,
- &axg_mpll1_div,
- &axg_mpll2_div,
- &axg_mpll3_div,
- &axg_fixed_pll,
- &axg_sys_pll,
- &axg_gp0_pll,
- &axg_hifi_pll,
- &axg_mpll_prediv,
- &axg_fclk_div2,
- &axg_fclk_div3,
- &axg_fclk_div4,
- &axg_fclk_div5,
- &axg_fclk_div7,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_pcie_pll,
- &axg_pcie_mux,
- &axg_pcie_ref,
- &axg_pcie_cml_en0,
- &axg_pcie_cml_en1,
- &axg_gen_clk_sel,
- &axg_gen_clk_div,
- &axg_gen_clk,
- &axg_fixed_pll_dco,
- &axg_sys_pll_dco,
- &axg_gp0_pll_dco,
- &axg_hifi_pll_dco,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_vpu_0_div,
- &axg_vpu_0_sel,
- &axg_vpu_0,
- &axg_vpu_1_div,
- &axg_vpu_1_sel,
- &axg_vpu_1,
- &axg_vpu,
- &axg_vapb_0_div,
- &axg_vapb_0_sel,
- &axg_vapb_0,
- &axg_vapb_1_div,
- &axg_vapb_1_sel,
- &axg_vapb_1,
- &axg_vapb_sel,
- &axg_vapb,
- &axg_vclk,
- &axg_vclk2,
- &axg_vclk_sel,
- &axg_vclk2_sel,
- &axg_vclk_input,
- &axg_vclk2_input,
- &axg_vclk_div,
- &axg_vclk_div1,
- &axg_vclk2_div,
- &axg_vclk2_div1,
- &axg_vclk_div2_en,
- &axg_vclk_div4_en,
- &axg_vclk_div6_en,
- &axg_vclk_div12_en,
- &axg_vclk2_div2_en,
- &axg_vclk2_div4_en,
- &axg_vclk2_div6_en,
- &axg_vclk2_div12_en,
- &axg_cts_encl_sel,
- &axg_cts_encl,
- &axg_vdin_meas_sel,
- &axg_vdin_meas_div,
- &axg_vdin_meas,
-};
-
-static const struct meson_eeclkc_data axg_clkc_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+static const struct meson_clkc_data axg_clkc_data = {
.hw_clks = {
.hws = axg_hw_clks,
.num = ARRAY_SIZE(axg_hw_clks),
},
};
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id axg_clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_clkc_match_table);
-static struct platform_driver axg_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver axg_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "axg-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = axg_clkc_match_table,
},
};
-module_platform_driver(axg_driver);
+module_platform_driver(axg_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
deleted file mode 100644
index 624d8d3ce7c4..000000000000
--- a/drivers/clk/meson/axg.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2017 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- *
- */
-#ifndef __AXG_H
-#define __AXG_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_GP0_PLL_CNTL 0x40
-#define HHI_GP0_PLL_CNTL2 0x44
-#define HHI_GP0_PLL_CNTL3 0x48
-#define HHI_GP0_PLL_CNTL4 0x4c
-#define HHI_GP0_PLL_CNTL5 0x50
-#define HHI_GP0_PLL_STS 0x54
-#define HHI_GP0_PLL_CNTL1 0x58
-#define HHI_HIFI_PLL_CNTL 0x80
-#define HHI_HIFI_PLL_CNTL2 0x84
-#define HHI_HIFI_PLL_CNTL3 0x88
-#define HHI_HIFI_PLL_CNTL4 0x8C
-#define HHI_HIFI_PLL_CNTL5 0x90
-#define HHI_HIFI_PLL_STS 0x94
-#define HHI_HIFI_PLL_CNTL1 0x98
-
-#define HHI_XTAL_DIVN_CNTL 0xbc
-#define HHI_GCLK2_MPEG0 0xc0
-#define HHI_GCLK2_MPEG1 0xc4
-#define HHI_GCLK2_MPEG2 0xc8
-#define HHI_GCLK2_OTHER 0xd0
-#define HHI_GCLK2_AO 0xd4
-#define HHI_PCIE_PLL_CNTL 0xd8
-#define HHI_PCIE_PLL_CNTL1 0xdC
-#define HHI_PCIE_PLL_CNTL2 0xe0
-#define HHI_PCIE_PLL_CNTL3 0xe4
-#define HHI_PCIE_PLL_CNTL4 0xe8
-#define HHI_PCIE_PLL_CNTL5 0xec
-#define HHI_PCIE_PLL_CNTL6 0xf0
-#define HHI_PCIE_PLL_STS 0xf4
-
-#define HHI_MEM_PD_REG0 0x100
-#define HHI_VPU_MEM_PD_REG0 0x104
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12c
-
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_AO 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_SYS_CPU_RESET_CNTL 0x160
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_SPICC_HCLK_CNTL 0x168
-
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1a0
-#define HHI_VPU_CLK_CNTL 0x1bC
-
-#define HHI_VAPBCLK_CNTL 0x1F4
-
-#define HHI_GEN_CLK_CNTL 0x228
-
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-
-#define HHI_MPLL_CNTL 0x280
-#define HHI_MPLL_CNTL2 0x284
-#define HHI_MPLL_CNTL3 0x288
-#define HHI_MPLL_CNTL4 0x28C
-#define HHI_MPLL_CNTL5 0x290
-#define HHI_MPLL_CNTL6 0x294
-#define HHI_MPLL_CNTL7 0x298
-#define HHI_MPLL_CNTL8 0x29C
-#define HHI_MPLL_CNTL9 0x2A0
-#define HHI_MPLL_CNTL10 0x2A4
-
-#define HHI_MPLL3_CNTL0 0x2E0
-#define HHI_MPLL3_CNTL1 0x2E4
-#define HHI_PLL_TOP_MISC 0x2E8
-
-#define HHI_SYS_PLL_CNTL1 0x2FC
-#define HHI_SYS_PLL_CNTL 0x300
-#define HHI_SYS_PLL_CNTL2 0x304
-#define HHI_SYS_PLL_CNTL3 0x308
-#define HHI_SYS_PLL_CNTL4 0x30c
-#define HHI_SYS_PLL_CNTL5 0x310
-#define HHI_SYS_PLL_STS 0x314
-#define HHI_DPLL_TOP_I 0x318
-#define HHI_DPLL_TOP2_I 0x31C
-
-#endif /* __AXG_H */
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 7dcbf4ebee07..b158756cfee4 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -48,7 +48,16 @@
#define SPIFC_CLK_CTRL 0x1a0
#define NNA_CLK_CTRL 0x220
-static struct clk_regmap rtc_xtal_clkin = {
+#define C3_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(c3_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define C3_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(c3_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define C3_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(c3_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
+static struct clk_regmap c3_rtc_xtal_clkin = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -63,12 +72,12 @@ static struct clk_regmap rtc_xtal_clkin = {
},
};
-static const struct meson_clk_dualdiv_param rtc_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param c3_rtc_32k_div_table[] = {
{ 733, 732, 8, 11, 1 },
{ /* sentinel */ }
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap c3_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data) {
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -95,39 +104,39 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = rtc_32k_div_table,
+ .table = c3_rtc_32k_div_table,
},
.hw.init = &(struct clk_init_data) {
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_xtal_clkin.hw
+ &c3_rtc_xtal_clkin.hw
},
.num_parents = 1,
},
};
-static const struct clk_parent_data rtc_32k_mux_parent_data[] = {
- { .hw = &rtc_32k_div.hw },
- { .hw = &rtc_xtal_clkin.hw }
+static const struct clk_parent_data c3_rtc_32k_parents[] = {
+ { .hw = &c3_rtc_32k_div.hw },
+ { .hw = &c3_rtc_xtal_clkin.hw }
};
-static struct clk_regmap rtc_32k_mux = {
+static struct clk_regmap c3_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_BY_OSCIN_CTRL1,
.mask = 0x1,
.shift = 24,
},
.hw.init = &(struct clk_init_data) {
- .name = "rtc_32k_mux",
+ .name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_32k_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_32k_mux_parent_data),
+ .parent_data = c3_rtc_32k_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_32k_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc_32k = {
+static struct clk_regmap c3_rtc_32k = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -136,20 +145,20 @@ static struct clk_regmap rtc_32k = {
.name = "rtc_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_mux.hw
+ &c3_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data rtc_clk_mux_parent_data[] = {
+static const struct clk_parent_data c3_rtc_clk_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_32k.hw },
+ { .hw = &c3_rtc_32k.hw },
{ .fw_name = "pad_osc" }
};
-static struct clk_regmap rtc_clk = {
+static struct clk_regmap c3_rtc_clk = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -158,62 +167,45 @@ static struct clk_regmap rtc_clk = {
.hw.init = &(struct clk_init_data) {
.name = "rtc_clk",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_clk_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_clk_mux_parent_data),
+ .parent_data = c3_rtc_clk_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-#define C3_CLK_GATE(_name, _reg, _bit, _fw_name, _ops, _flags) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = #_fw_name, \
- }, \
- .num_parents = 1, \
- .flags = (_flags), \
- }, \
-}
-
-#define C3_SYS_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ops, _flags)
-
-#define C3_SYS_GATE_RO(_name, _reg, _bit) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ro_ops, 0)
-
-static C3_SYS_GATE(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
-static C3_SYS_GATE(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
-static C3_SYS_GATE(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
-static C3_SYS_GATE(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
-static C3_SYS_GATE(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
+static const struct clk_parent_data c3_sys_pclk_parents = { .fw_name = "sysclk" };
+
+#define C3_SYS_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, _flags)
+
+#define C3_SYS_PCLK_RO(_name, _reg, _bit) \
+ MESON_PCLK_RO(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, 0)
+
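MESON_PCLK() comes from a shared meson header that is not part of this hunk; judging from the C3_CLK_GATE() macro it replaces above, one invocation presumably still expands to a plain clk_regmap gate, roughly as below (a sketch only, with the c3_ prefix assumed from the c3_##_name token pasting):

static struct clk_regmap c3_sys_reset_ctrl = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = SYS_CLK_EN0_REG0,
		.bit_idx = 1,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "sys_reset_ctrl",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &c3_sys_pclk_parents,
		.num_parents = 1,
		.flags = 0,		/* the macro's _flags argument */
	},
};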
+static C3_SYS_PCLK(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
+static C3_SYS_PCLK(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
+static C3_SYS_PCLK(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
+static C3_SYS_PCLK(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
+static C3_SYS_PCLK(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
/*
* NOTE: sys_dev_arb provides the clock to the ETH and SPICC arbiters that
* access the AXI bus.
*/
-static C3_SYS_GATE(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
+static C3_SYS_PCLK(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
/*
 * FIXME: sys_mmc_pclk provides the clock for the DDR PHY. The DDR is only
 * initialized in bl2, so this clock must not be touched in Linux.
*/
-static C3_SYS_GATE_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
+static C3_SYS_PCLK_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
/*
 * NOTE: sys_cpu_ctrl provides the clock for the CPU controller. After the
 * clock is disabled, cpu_clk and other key CPU-related configurations cannot
 * take effect.
*/
-static C3_SYS_GATE(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
-static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
+static C3_SYS_PCLK(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
+static C3_SYS_PCLK(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
/*
 * NOTE: sys_irq_ctrl provides the clock for the IRQ controller. The IRQ controller
@@ -221,18 +213,18 @@ static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
 * AOCPU. If the clock is disabled, interrupt-related functions will raise an
 * exception.
*/
-static C3_SYS_GATE(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
-static C3_SYS_GATE(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
-static C3_SYS_GATE(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
-static C3_SYS_GATE(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
-static C3_SYS_GATE(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
-static C3_SYS_GATE(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
-static C3_SYS_GATE(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
-static C3_SYS_GATE(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
-static C3_SYS_GATE(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
-static C3_SYS_GATE(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
-static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
+static C3_SYS_PCLK(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
+static C3_SYS_PCLK(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
+static C3_SYS_PCLK(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
+static C3_SYS_PCLK(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
+static C3_SYS_PCLK(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
+static C3_SYS_PCLK(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
+static C3_SYS_PCLK(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
+static C3_SYS_PCLK(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
+static C3_SYS_PCLK(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
+static C3_SYS_PCLK(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
+static C3_SYS_PCLK(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
/*
 * FIXME: sys_gic provides the clock for the GIC (Generic Interrupt Controller).
@@ -240,8 +232,8 @@ static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
 * used by our GIC is the generic driver in the kernel, and there is no clock
 * management in that driver.
*/
-static C3_SYS_GATE(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
+static C3_SYS_PCLK(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
/*
 * NOTE: sys_big_nic provides the clock to the control bus of the NIC (Network
@@ -249,84 +241,85 @@ static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
* SPIFC, CAPU, JTAG, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC) in the
 * system. After the clock is disabled, the NIC cannot work.
*/
-static C3_SYS_GATE(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
-static C3_SYS_GATE(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
-static C3_SYS_GATE(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
-static C3_SYS_GATE(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
-static C3_SYS_GATE(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
-static C3_SYS_GATE(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
-static C3_SYS_GATE(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
-static C3_SYS_GATE(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
-static C3_SYS_GATE(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
-static C3_SYS_GATE(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
-static C3_SYS_GATE(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
-static C3_SYS_GATE(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
-static C3_SYS_GATE(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
-static C3_SYS_GATE(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
-static C3_SYS_GATE(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
-static C3_SYS_GATE(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
-static C3_SYS_GATE(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
-static C3_SYS_GATE(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
-static C3_SYS_GATE(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
-static C3_SYS_GATE(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
-static C3_SYS_GATE(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
-static C3_SYS_GATE(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
-static C3_SYS_GATE(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
-static C3_SYS_GATE(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
-static C3_SYS_GATE(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
-static C3_SYS_GATE(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
-static C3_SYS_GATE(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
-static C3_SYS_GATE(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
-static C3_SYS_GATE(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
-static C3_SYS_GATE(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
-static C3_SYS_GATE(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
-static C3_SYS_GATE(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
-static C3_SYS_GATE(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
-static C3_SYS_GATE(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
-static C3_SYS_GATE(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
-static C3_SYS_GATE(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
-static C3_SYS_GATE(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
-
-#define C3_AXI_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, axiclk, \
- &clk_regmap_gate_ops, _flags)
+static C3_SYS_PCLK(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
+static C3_SYS_PCLK(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
+static C3_SYS_PCLK(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
+static C3_SYS_PCLK(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
+static C3_SYS_PCLK(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
+static C3_SYS_PCLK(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
+static C3_SYS_PCLK(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
+static C3_SYS_PCLK(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
+static C3_SYS_PCLK(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
+static C3_SYS_PCLK(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
+static C3_SYS_PCLK(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
+static C3_SYS_PCLK(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
+static C3_SYS_PCLK(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
+static C3_SYS_PCLK(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
+static C3_SYS_PCLK(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
+static C3_SYS_PCLK(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
+static C3_SYS_PCLK(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
+static C3_SYS_PCLK(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
+static C3_SYS_PCLK(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
+static C3_SYS_PCLK(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
+static C3_SYS_PCLK(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
+static C3_SYS_PCLK(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
+static C3_SYS_PCLK(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
+static C3_SYS_PCLK(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
+static C3_SYS_PCLK(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
+static C3_SYS_PCLK(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
+static C3_SYS_PCLK(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
+static C3_SYS_PCLK(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
+static C3_SYS_PCLK(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
+static C3_SYS_PCLK(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
+static C3_SYS_PCLK(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
+static C3_SYS_PCLK(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
+static C3_SYS_PCLK(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
+static C3_SYS_PCLK(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
+static C3_SYS_PCLK(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
+
+static const struct clk_parent_data c3_axi_pclk_parents = { .fw_name = "axiclk" };
+
+#define C3_AXI_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_axi_pclk_parents, _flags)
/*
 * NOTE: axi_sys_nic provides the clock to the AXI bus of the system NIC. After
 * the clock is disabled, the NIC cannot work.
*/
-static C3_AXI_GATE(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_isp_nic, AXI_CLK_EN0, 3, 0);
-static C3_AXI_GATE(axi_cve_nic, AXI_CLK_EN0, 4, 0);
-static C3_AXI_GATE(axi_ramb, AXI_CLK_EN0, 5, 0);
-static C3_AXI_GATE(axi_rama, AXI_CLK_EN0, 6, 0);
+static C3_AXI_PCLK(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_isp_nic, AXI_CLK_EN0, 3, 0);
+static C3_AXI_PCLK(axi_cve_nic, AXI_CLK_EN0, 4, 0);
+static C3_AXI_PCLK(axi_ramb, AXI_CLK_EN0, 5, 0);
+static C3_AXI_PCLK(axi_rama, AXI_CLK_EN0, 6, 0);
/*
 * NOTE: axi_cpu_dmc provides the clock to the AXI bus where the CPU accesses
 * the DDR. After the clock is disabled, the CPU will not have access to the
 * DDR.
*/
-static C3_AXI_GATE(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_nic, AXI_CLK_EN0, 8, 0);
-static C3_AXI_GATE(axi_dma, AXI_CLK_EN0, 9, 0);
+static C3_AXI_PCLK(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_nic, AXI_CLK_EN0, 8, 0);
+static C3_AXI_PCLK(axi_dma, AXI_CLK_EN0, 9, 0);
/*
 * NOTE: axi_mux_nic provides the clock to the NIC's AXI bus for the NN (Neural
 * Network) and other devices (CPU, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC)
* to access RAM space.
*/
-static C3_AXI_GATE(axi_mux_nic, AXI_CLK_EN0, 10, 0);
-static C3_AXI_GATE(axi_cve, AXI_CLK_EN0, 12, 0);
+static C3_AXI_PCLK(axi_mux_nic, AXI_CLK_EN0, 10, 0);
+static C3_AXI_PCLK(axi_cve, AXI_CLK_EN0, 12, 0);
/*
 * NOTE: axi_dev1_dmc provides the clock for the peripherals (EMMC, SDIO,
* sec_top, USB, Audio, ETH, SPICC) to access the AXI bus of the DDR.
*/
-static C3_AXI_GATE(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
-static C3_AXI_GATE(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
-static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
+static C3_AXI_PCLK(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
+static C3_AXI_PCLK(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
+static C3_AXI_PCLK(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
/*
* clk_12_24m model
@@ -335,7 +328,7 @@ static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
* xtal---->| gate |---->| div |------------>| pad |
* |------| |-----| |-----|
*/
-static struct clk_regmap clk_12_24m_in = {
+static struct clk_regmap c3_clk_12_24m_in = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -350,7 +343,7 @@ static struct clk_regmap clk_12_24m_in = {
},
};
-static struct clk_regmap clk_12_24m = {
+static struct clk_regmap c3_clk_12_24m = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 10,
@@ -360,14 +353,14 @@ static struct clk_regmap clk_12_24m = {
.name = "clk_12_24m",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_12_24m_in.hw
+ &c3_clk_12_24m_in.hw
},
.num_parents = 1,
},
};
/* FIXME: a divider value of 0 divides by 2, just like a value of 1 */
-static struct clk_regmap fclk_25m_div = {
+static struct clk_regmap c3_fclk_25m_div = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -383,7 +376,7 @@ static struct clk_regmap fclk_25m_div = {
},
};
-static struct clk_regmap fclk_25m = {
+static struct clk_regmap c3_fclk_25m = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -392,7 +385,7 @@ static struct clk_regmap fclk_25m = {
.name = "fclk_25m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_25m_div.hw
+ &c3_fclk_25m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -404,11 +397,10 @@ static struct clk_regmap fclk_25m = {
 * is managed by the clock measure module. Their hardware is outside the
 * clock tree. Channels 4, 8, 9, 10, 11, 13, 14, 15, 16 and 18 are not
 * connected.
*/
-static u32 gen_parent_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
-
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 c3_gen_parents_val_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24 };
+static const struct clk_parent_data c3_gen_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_clk.hw },
+ { .hw = &c3_rtc_clk.hw },
{ .fw_name = "sysplldiv16" },
{ .fw_name = "gp0" },
{ .fw_name = "gp1" },
@@ -422,22 +414,22 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap c3_gen_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = gen_parent_table,
+ .table = c3_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data) {
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = c3_gen_parents,
+ .num_parents = ARRAY_SIZE(c3_gen_parents),
},
};
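Because several gen clock input channels are unconnected, the mux carries a value table: entry i of c3_gen_parents is programmed as c3_gen_parents_val_table[i] into GEN_CLK_CTRL bits [16:12] rather than as the raw parent index. A worked illustration of the translation (this mirrors what the common clock framework's clk_mux_index_to_val() does when a table is supplied):

/* e.g. selecting the last parent, "fdiv7" (index 12), writes value 24 */
static u32 c3_gen_sel_index_to_val(unsigned int index)
{
	return c3_gen_parents_val_table[index];
}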
-static struct clk_regmap gen_div = {
+static struct clk_regmap c3_gen_div = {
.data = &(struct clk_regmap_div_data) {
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -447,14 +439,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &c3_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap c3_gen = {
.data = &(struct clk_regmap_gate_data) {
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -463,214 +455,86 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &c3_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data saradc_parent_data[] = {
+static const struct clk_parent_data c3_saradc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" }
};
-static struct clk_regmap saradc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SAR_CLK_CTRL0,
- .mask = 0x1,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = saradc_parent_data,
- .num_parents = ARRAY_SIZE(saradc_parent_data),
- },
-};
-
-static struct clk_regmap saradc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SAR_CLK_CTRL0,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap saradc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SAR_CLK_CTRL0,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents);
+static C3_COMP_DIV(saradc, SAR_CLK_CTRL0, 0, 8);
+static C3_COMP_GATE(saradc, SAR_CLK_CTRL0, 8);
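Each open-coded sel/div/gate trio collapses into three macro lines. The MESON_COMP_* macro bodies live in a shared header outside this diff, but they should reproduce the structures deleted above; e.g. C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents) is expected to expand to roughly the following, a sketch reconstructed from the removed saradc_sel:

static struct clk_regmap c3_saradc_sel = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = SAR_CLK_CTRL0,
		.mask = 0x1,
		.shift = 9,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "saradc_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_data = c3_saradc_parents,
		.num_parents = ARRAY_SIZE(c3_saradc_parents),
	},
};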
-static const struct clk_parent_data pwm_parent_data[] = {
+static const struct clk_parent_data c3_pwm_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "gp1" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" }
};
-#define AML_PWM_CLK_MUX(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_mux_data) { \
- .offset = _reg, \
- .mask = 0x3, \
- .shift = _shift, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_sel", \
- .ops = &clk_regmap_mux_ops, \
- .parent_data = pwm_parent_data, \
- .num_parents = ARRAY_SIZE(pwm_parent_data), \
- }, \
-}
-
-#define AML_PWM_CLK_DIV(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_div_data) { \
- .offset = _reg, \
- .shift = _shift, \
- .width = 8, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_div", \
- .ops = &clk_regmap_divider_ops, \
- .parent_names = (const char *[]) { #_name "_sel" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-#define AML_PWM_CLK_GATE(_name, _reg, _bit) { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = _reg, \
- .bit_idx = _bit, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]) { #_name "_div" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static struct clk_regmap pwm_a_sel =
- AML_PWM_CLK_MUX(pwm_a, PWM_CLK_AB_CTRL, 9);
-static struct clk_regmap pwm_a_div =
- AML_PWM_CLK_DIV(pwm_a, PWM_CLK_AB_CTRL, 0);
-static struct clk_regmap pwm_a =
- AML_PWM_CLK_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
-
-static struct clk_regmap pwm_b_sel =
- AML_PWM_CLK_MUX(pwm_b, PWM_CLK_AB_CTRL, 25);
-static struct clk_regmap pwm_b_div =
- AML_PWM_CLK_DIV(pwm_b, PWM_CLK_AB_CTRL, 16);
-static struct clk_regmap pwm_b =
- AML_PWM_CLK_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
-
-static struct clk_regmap pwm_c_sel =
- AML_PWM_CLK_MUX(pwm_c, PWM_CLK_CD_CTRL, 9);
-static struct clk_regmap pwm_c_div =
- AML_PWM_CLK_DIV(pwm_c, PWM_CLK_CD_CTRL, 0);
-static struct clk_regmap pwm_c =
- AML_PWM_CLK_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
-
-static struct clk_regmap pwm_d_sel =
- AML_PWM_CLK_MUX(pwm_d, PWM_CLK_CD_CTRL, 25);
-static struct clk_regmap pwm_d_div =
- AML_PWM_CLK_DIV(pwm_d, PWM_CLK_CD_CTRL, 16);
-static struct clk_regmap pwm_d =
- AML_PWM_CLK_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
-
-static struct clk_regmap pwm_e_sel =
- AML_PWM_CLK_MUX(pwm_e, PWM_CLK_EF_CTRL, 9);
-static struct clk_regmap pwm_e_div =
- AML_PWM_CLK_DIV(pwm_e, PWM_CLK_EF_CTRL, 0);
-static struct clk_regmap pwm_e =
- AML_PWM_CLK_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
-
-static struct clk_regmap pwm_f_sel =
- AML_PWM_CLK_MUX(pwm_f, PWM_CLK_EF_CTRL, 25);
-static struct clk_regmap pwm_f_div =
- AML_PWM_CLK_DIV(pwm_f, PWM_CLK_EF_CTRL, 16);
-static struct clk_regmap pwm_f =
- AML_PWM_CLK_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
-
-static struct clk_regmap pwm_g_sel =
- AML_PWM_CLK_MUX(pwm_g, PWM_CLK_GH_CTRL, 9);
-static struct clk_regmap pwm_g_div =
- AML_PWM_CLK_DIV(pwm_g, PWM_CLK_GH_CTRL, 0);
-static struct clk_regmap pwm_g =
- AML_PWM_CLK_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
-
-static struct clk_regmap pwm_h_sel =
- AML_PWM_CLK_MUX(pwm_h, PWM_CLK_GH_CTRL, 25);
-static struct clk_regmap pwm_h_div =
- AML_PWM_CLK_DIV(pwm_h, PWM_CLK_GH_CTRL, 16);
-static struct clk_regmap pwm_h =
- AML_PWM_CLK_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
-
-static struct clk_regmap pwm_i_sel =
- AML_PWM_CLK_MUX(pwm_i, PWM_CLK_IJ_CTRL, 9);
-static struct clk_regmap pwm_i_div =
- AML_PWM_CLK_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0);
-static struct clk_regmap pwm_i =
- AML_PWM_CLK_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
-
-static struct clk_regmap pwm_j_sel =
- AML_PWM_CLK_MUX(pwm_j, PWM_CLK_IJ_CTRL, 25);
-static struct clk_regmap pwm_j_div =
- AML_PWM_CLK_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16);
-static struct clk_regmap pwm_j =
- AML_PWM_CLK_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
-
-static struct clk_regmap pwm_k_sel =
- AML_PWM_CLK_MUX(pwm_k, PWM_CLK_KL_CTRL, 9);
-static struct clk_regmap pwm_k_div =
- AML_PWM_CLK_DIV(pwm_k, PWM_CLK_KL_CTRL, 0);
-static struct clk_regmap pwm_k =
- AML_PWM_CLK_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
-
-static struct clk_regmap pwm_l_sel =
- AML_PWM_CLK_MUX(pwm_l, PWM_CLK_KL_CTRL, 25);
-static struct clk_regmap pwm_l_div =
- AML_PWM_CLK_DIV(pwm_l, PWM_CLK_KL_CTRL, 16);
-static struct clk_regmap pwm_l =
- AML_PWM_CLK_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
-
-static struct clk_regmap pwm_m_sel =
- AML_PWM_CLK_MUX(pwm_m, PWM_CLK_MN_CTRL, 9);
-static struct clk_regmap pwm_m_div =
- AML_PWM_CLK_DIV(pwm_m, PWM_CLK_MN_CTRL, 0);
-static struct clk_regmap pwm_m =
- AML_PWM_CLK_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
-
-static struct clk_regmap pwm_n_sel =
- AML_PWM_CLK_MUX(pwm_n, PWM_CLK_MN_CTRL, 25);
-static struct clk_regmap pwm_n_div =
- AML_PWM_CLK_DIV(pwm_n, PWM_CLK_MN_CTRL, 16);
-static struct clk_regmap pwm_n =
- AML_PWM_CLK_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
-
-static const struct clk_parent_data spicc_parent_data[] = {
+static C3_COMP_SEL(pwm_a, PWM_CLK_AB_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_a, PWM_CLK_AB_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
+
+static C3_COMP_SEL(pwm_b, PWM_CLK_AB_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_b, PWM_CLK_AB_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
+
+static C3_COMP_SEL(pwm_c, PWM_CLK_CD_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_c, PWM_CLK_CD_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
+
+static C3_COMP_SEL(pwm_d, PWM_CLK_CD_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_d, PWM_CLK_CD_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
+
+static C3_COMP_SEL(pwm_e, PWM_CLK_EF_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_e, PWM_CLK_EF_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
+
+static C3_COMP_SEL(pwm_f, PWM_CLK_EF_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_f, PWM_CLK_EF_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
+
+static C3_COMP_SEL(pwm_g, PWM_CLK_GH_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_g, PWM_CLK_GH_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
+
+static C3_COMP_SEL(pwm_h, PWM_CLK_GH_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_h, PWM_CLK_GH_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
+
+static C3_COMP_SEL(pwm_i, PWM_CLK_IJ_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
+
+static C3_COMP_SEL(pwm_j, PWM_CLK_IJ_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
+
+static C3_COMP_SEL(pwm_k, PWM_CLK_KL_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_k, PWM_CLK_KL_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
+
+static C3_COMP_SEL(pwm_l, PWM_CLK_KL_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_l, PWM_CLK_KL_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
+
+static C3_COMP_SEL(pwm_m, PWM_CLK_MN_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_m, PWM_CLK_MN_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
+
+static C3_COMP_SEL(pwm_n, PWM_CLK_MN_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_n, PWM_CLK_MN_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
+
+static const struct clk_parent_data c3_spicc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" },
{ .fw_name = "fdiv4" },
@@ -681,101 +545,15 @@ static const struct clk_parent_data spicc_parent_data[] = {
{ .fw_name = "gp1" }
};
-static struct clk_regmap spicc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 0,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 16,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_a, SPICC_CLK_CTRL, 7, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_a, SPICC_CLK_CTRL, 0, 6);
+static C3_COMP_GATE(spicc_a, SPICC_CLK_CTRL, 6);
-static struct clk_regmap spicc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_b, SPICC_CLK_CTRL, 23, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_b, SPICC_CLK_CTRL, 16, 6);
+static C3_COMP_GATE(spicc_b, SPICC_CLK_CTRL, 22);
-static const struct clk_parent_data spifc_parent_data[] = {
+static const struct clk_parent_data c3_spifc_parents[] = {
{ .fw_name = "gp0" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -786,54 +564,11 @@ static const struct clk_parent_data spifc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap spifc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPIFC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spifc_parent_data,
- .num_parents = ARRAY_SIZE(spifc_parent_data),
- },
-};
-
-static struct clk_regmap spifc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPIFC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spifc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPIFC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spifc, SPIFC_CLK_CTRL, 9, 0x7, c3_spifc_parents);
+static C3_COMP_DIV(spifc, SPIFC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(spifc, SPIFC_CLK_CTRL, 8);
-static const struct clk_parent_data emmc_parent_data[] = {
+static const struct clk_parent_data c3_sd_emmc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -844,148 +579,19 @@ static const struct clk_parent_data emmc_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap sd_emmc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
+static C3_COMP_SEL(sd_emmc_a, SD_EMMC_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_a, SD_EMMC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_a, SD_EMMC_CLK_CTRL, 7);
-static struct clk_regmap sd_emmc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_b, SD_EMMC_CLK_CTRL, 25, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_b, SD_EMMC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(sd_emmc_b, SD_EMMC_CLK_CTRL, 23);
-static struct clk_regmap sd_emmc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NAND_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NAND_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NAND_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_c, NAND_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_c, NAND_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_c, NAND_CLK_CTRL, 7);
-static struct clk_regmap ts_div = {
+static struct clk_regmap c3_ts_div = {
.data = &(struct clk_regmap_div_data) {
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1001,7 +607,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap c3_ts = {
.data = &(struct clk_regmap_gate_data) {
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1010,29 +616,29 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &c3_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data eth_parent = {
+static const struct clk_parent_data c3_eth_parents = {
.fw_name = "fdiv2",
};
-static struct clk_fixed_factor eth_125m_div = {
+static struct clk_fixed_factor c3_eth_125m_div = {
.mult = 1,
.div = 8,
.hw.init = &(struct clk_init_data) {
.name = "eth_125m_div",
.ops = &clk_fixed_factor_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_125m = {
+static struct clk_regmap c3_eth_125m = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 7,
@@ -1041,14 +647,14 @@ static struct clk_regmap eth_125m = {
.name = "eth_125m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_125m_div.hw
+ &c3_eth_125m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
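The fixed 1/8 factor explains the clock's name; as a worked example, assuming fdiv2 runs at its customary 1 GHz on this family (the 2 GHz fixed PLL divided by 2):

	eth_125m = fdiv2 / 8 = 1000 MHz / 8 = 125 MHz

i.e. the standard RGMII reference rate.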
-static struct clk_regmap eth_rmii_div = {
+static struct clk_regmap c3_eth_rmii_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ETH_CLK_CTRL,
.shift = 0,
@@ -1057,12 +663,12 @@ static struct clk_regmap eth_rmii_div = {
.hw.init = &(struct clk_init_data) {
.name = "eth_rmii_div",
.ops = &clk_regmap_divider_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_rmii = {
+static struct clk_regmap c3_eth_rmii = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 8,
@@ -1071,14 +677,14 @@ static struct clk_regmap eth_rmii = {
.name = "eth_rmii",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_rmii_div.hw
+ &c3_eth_rmii_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
+static const struct clk_parent_data c3_mipi_dsi_meas_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1089,54 +695,11 @@ static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap mipi_dsi_meas_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .mask = 0x7,
- .shift = 21,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = mipi_dsi_meas_parent_data,
- .num_parents = ARRAY_SIZE(mipi_dsi_meas_parent_data),
- },
-};
+static C3_COMP_SEL(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 21, 0x7, c3_mipi_dsi_meas_parents);
+static C3_COMP_DIV(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 12, 7);
+static C3_COMP_GATE(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 20);
-static struct clk_regmap mipi_dsi_meas_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .shift = 12,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap mipi_dsi_meas = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .bit_idx = 20,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dsi_phy_parent_data[] = {
+static const struct clk_parent_data c3_dsi_phy_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "gp0" },
{ .fw_name = "hifi" },
@@ -1147,54 +710,11 @@ static const struct clk_parent_data dsi_phy_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dsi_phy_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .mask = 0x7,
- .shift = 12,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dsi_phy_parent_data,
- .num_parents = ARRAY_SIZE(dsi_phy_parent_data),
- },
-};
-
-static struct clk_regmap dsi_phy_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dsi_phy = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 12, 0x7, c3_dsi_phy_parents);
+static C3_COMP_DIV(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 8);
-static const struct clk_parent_data vout_mclk_parent_data[] = {
+static const struct clk_parent_data c3_vout_mclk_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1205,54 +725,11 @@ static const struct clk_parent_data vout_mclk_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_mclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_mclk_parent_data,
- .num_parents = ARRAY_SIZE(vout_mclk_parent_data),
- },
-};
+static C3_COMP_SEL(vout_mclk, VOUTENC_CLK_CTRL, 9, 0x7, c3_vout_mclk_parents);
+static C3_COMP_DIV(vout_mclk, VOUTENC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vout_mclk, VOUTENC_CLK_CTRL, 8);
-static struct clk_regmap vout_mclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_mclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_enc_parent_data[] = {
+static const struct clk_parent_data c3_vout_enc_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1263,54 +740,11 @@ static const struct clk_parent_data vout_enc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_enc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_enc_parent_data,
- .num_parents = ARRAY_SIZE(vout_enc_parent_data),
- },
-};
-
-static struct clk_regmap vout_enc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_enc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vout_enc, VOUTENC_CLK_CTRL, 25, 0x7, c3_vout_enc_parents);
+static C3_COMP_DIV(vout_enc, VOUTENC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vout_enc, VOUTENC_CLK_CTRL, 24);
-static const struct clk_parent_data hcodec_pre_parent_data[] = {
+static const struct clk_parent_data c3_hcodec_pre_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1321,106 +755,20 @@ static const struct clk_parent_data hcodec_pre_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap hcodec_0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_1_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC3_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_1_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC3_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_0, VDEC_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_0, VDEC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_0, VDEC_CLK_CTRL, 8);
-static struct clk_regmap hcodec_1 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC3_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_1, VDEC3_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_1, VDEC3_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_1, VDEC3_CLK_CTRL, 8);
-static const struct clk_parent_data hcodec_parent_data[] = {
- { .hw = &hcodec_0.hw },
- { .hw = &hcodec_1.hw }
+static const struct clk_parent_data c3_hcodec_parents[] = {
+ { .hw = &c3_hcodec_0.hw },
+ { .hw = &c3_hcodec_1.hw }
};
-static struct clk_regmap hcodec = {
+static struct clk_regmap c3_hcodec = {
.data = &(struct clk_regmap_mux_data) {
.offset = VDEC3_CLK_CTRL,
.mask = 0x1,
@@ -1429,13 +777,13 @@ static struct clk_regmap hcodec = {
.hw.init = &(struct clk_init_data) {
.name = "hcodec",
.ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_parent_data),
+ .parent_data = c3_hcodec_parents,
+ .num_parents = ARRAY_SIZE(c3_hcodec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data vc9000e_parent_data[] = {
+static const struct clk_parent_data c3_vc9000e_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1446,101 +794,15 @@ static const struct clk_parent_data vc9000e_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap vc9000e_aclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_aclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_aclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_aclk, VC9000E_CLK_CTRL, 9, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_aclk, VC9000E_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vc9000e_aclk, VC9000E_CLK_CTRL, 8);
-static struct clk_regmap vc9000e_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_core, VC9000E_CLK_CTRL, 25, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_core, VC9000E_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vc9000e_core, VC9000E_CLK_CTRL, 24);
-static const struct clk_parent_data csi_phy_parent_data[] = {
+static const struct clk_parent_data c3_csi_phy_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1551,54 +813,11 @@ static const struct clk_parent_data csi_phy_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap csi_phy0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = csi_phy_parent_data,
- .num_parents = ARRAY_SIZE(csi_phy_parent_data),
- },
-};
-
-static struct clk_regmap csi_phy0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(csi_phy0, ISP0_CLK_CTRL, 25, 0x7, c3_csi_phy_parents);
+static C3_COMP_DIV(csi_phy0, ISP0_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(csi_phy0, ISP0_CLK_CTRL, 24);
-static struct clk_regmap csi_phy0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dewarpa_parent_data[] = {
+static const struct clk_parent_data c3_dewarpa_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1609,54 +828,11 @@ static const struct clk_parent_data dewarpa_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dewarpa_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = DEWARPA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dewarpa_parent_data,
- .num_parents = ARRAY_SIZE(dewarpa_parent_data),
- },
-};
-
-static struct clk_regmap dewarpa_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = DEWARPA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dewarpa = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = DEWARPA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dewarpa, DEWARPA_CLK_CTRL, 9, 0x7, c3_dewarpa_parents);
+static C3_COMP_DIV(dewarpa, DEWARPA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dewarpa, DEWARPA_CLK_CTRL, 8);
-static const struct clk_parent_data isp_parent_data[] = {
+static const struct clk_parent_data c3_isp_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1667,54 +843,11 @@ static const struct clk_parent_data isp_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap isp0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = isp_parent_data,
- .num_parents = ARRAY_SIZE(isp_parent_data),
- },
-};
-
-static struct clk_regmap isp0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap isp0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(isp0, ISP0_CLK_CTRL, 9, 0x7, c3_isp_parents);
+static C3_COMP_DIV(isp0, ISP0_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(isp0, ISP0_CLK_CTRL, 8);
-static const struct clk_parent_data nna_core_parent_data[] = {
+static const struct clk_parent_data c3_nna_core_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv4" },
@@ -1725,54 +858,11 @@ static const struct clk_parent_data nna_core_parent_data[] = {
{ .fw_name = "hifi" }
};
-static struct clk_regmap nna_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NNA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = nna_core_parent_data,
- .num_parents = ARRAY_SIZE(nna_core_parent_data),
- },
-};
+static C3_COMP_SEL(nna_core, NNA_CLK_CTRL, 9, 0x7, c3_nna_core_parents);
+static C3_COMP_DIV(nna_core, NNA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(nna_core, NNA_CLK_CTRL, 8);
-static struct clk_regmap nna_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NNA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap nna_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NNA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data ge2d_parent_data[] = {
+static const struct clk_parent_data c3_ge2d_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
@@ -1780,57 +870,14 @@ static const struct clk_parent_data ge2d_parent_data[] = {
{ .fw_name = "hifi" },
{ .fw_name = "fdiv5" },
{ .fw_name = "gp0" },
- { .hw = &rtc_clk.hw }
-};
-
-static struct clk_regmap ge2d_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = GE2D_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = ge2d_parent_data,
- .num_parents = ARRAY_SIZE(ge2d_parent_data),
- },
+ { .hw = &c3_rtc_clk.hw }
};
-static struct clk_regmap ge2d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = GE2D_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(ge2d, GE2D_CLK_CTRL, 9, 0x7, c3_ge2d_parents);
+static C3_COMP_DIV(ge2d, GE2D_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(ge2d, GE2D_CLK_CTRL, 8);
-static struct clk_regmap ge2d = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = GE2D_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vapb_parent_data[] = {
+static const struct clk_parent_data c3_vapb_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1841,527 +888,241 @@ static const struct clk_parent_data vapb_parent_data[] = {
{ .fw_name = "oscin" },
};
-static struct clk_regmap vapb_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VAPB_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vapb_parent_data,
- .num_parents = ARRAY_SIZE(vapb_parent_data),
+static C3_COMP_SEL(vapb, VAPB_CLK_CTRL, 9, 0x7, c3_vapb_parents);
+static C3_COMP_DIV(vapb, VAPB_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vapb, VAPB_CLK_CTRL, 8);
+
+static struct clk_hw *c3_peripherals_hw_clks[] = {
+ [CLKID_RTC_XTAL_CLKIN] = &c3_rtc_xtal_clkin.hw,
+ [CLKID_RTC_32K_DIV] = &c3_rtc_32k_div.hw,
+ [CLKID_RTC_32K_MUX] = &c3_rtc_32k_sel.hw,
+ [CLKID_RTC_32K] = &c3_rtc_32k.hw,
+ [CLKID_RTC_CLK] = &c3_rtc_clk.hw,
+ [CLKID_SYS_RESET_CTRL] = &c3_sys_reset_ctrl.hw,
+ [CLKID_SYS_PWR_CTRL] = &c3_sys_pwr_ctrl.hw,
+ [CLKID_SYS_PAD_CTRL] = &c3_sys_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &c3_sys_ctrl.hw,
+ [CLKID_SYS_TS_PLL] = &c3_sys_ts_pll.hw,
+ [CLKID_SYS_DEV_ARB] = &c3_sys_dev_arb.hw,
+ [CLKID_SYS_MMC_PCLK] = &c3_sys_mmc_pclk.hw,
+ [CLKID_SYS_CPU_CTRL] = &c3_sys_cpu_ctrl.hw,
+ [CLKID_SYS_JTAG_CTRL] = &c3_sys_jtag_ctrl.hw,
+ [CLKID_SYS_IR_CTRL] = &c3_sys_ir_ctrl.hw,
+ [CLKID_SYS_IRQ_CTRL] = &c3_sys_irq_ctrl.hw,
+ [CLKID_SYS_MSR_CLK] = &c3_sys_msr_clk.hw,
+ [CLKID_SYS_ROM] = &c3_sys_rom.hw,
+ [CLKID_SYS_UART_F] = &c3_sys_uart_f.hw,
+ [CLKID_SYS_CPU_ARB] = &c3_sys_cpu_apb.hw,
+ [CLKID_SYS_RSA] = &c3_sys_rsa.hw,
+ [CLKID_SYS_SAR_ADC] = &c3_sys_sar_adc.hw,
+ [CLKID_SYS_STARTUP] = &c3_sys_startup.hw,
+ [CLKID_SYS_SECURE] = &c3_sys_secure.hw,
+ [CLKID_SYS_SPIFC] = &c3_sys_spifc.hw,
+ [CLKID_SYS_NNA] = &c3_sys_nna.hw,
+ [CLKID_SYS_ETH_MAC] = &c3_sys_eth_mac.hw,
+ [CLKID_SYS_GIC] = &c3_sys_gic.hw,
+ [CLKID_SYS_RAMA] = &c3_sys_rama.hw,
+ [CLKID_SYS_BIG_NIC] = &c3_sys_big_nic.hw,
+ [CLKID_SYS_RAMB] = &c3_sys_ramb.hw,
+ [CLKID_SYS_AUDIO_PCLK] = &c3_sys_audio_pclk.hw,
+ [CLKID_SYS_PWM_KL] = &c3_sys_pwm_kl.hw,
+ [CLKID_SYS_PWM_IJ] = &c3_sys_pwm_ij.hw,
+ [CLKID_SYS_USB] = &c3_sys_usb.hw,
+ [CLKID_SYS_SD_EMMC_A] = &c3_sys_sd_emmc_a.hw,
+ [CLKID_SYS_SD_EMMC_C] = &c3_sys_sd_emmc_c.hw,
+ [CLKID_SYS_PWM_AB] = &c3_sys_pwm_ab.hw,
+ [CLKID_SYS_PWM_CD] = &c3_sys_pwm_cd.hw,
+ [CLKID_SYS_PWM_EF] = &c3_sys_pwm_ef.hw,
+ [CLKID_SYS_PWM_GH] = &c3_sys_pwm_gh.hw,
+ [CLKID_SYS_SPICC_1] = &c3_sys_spicc_1.hw,
+ [CLKID_SYS_SPICC_0] = &c3_sys_spicc_0.hw,
+ [CLKID_SYS_UART_A] = &c3_sys_uart_a.hw,
+ [CLKID_SYS_UART_B] = &c3_sys_uart_b.hw,
+ [CLKID_SYS_UART_C] = &c3_sys_uart_c.hw,
+ [CLKID_SYS_UART_D] = &c3_sys_uart_d.hw,
+ [CLKID_SYS_UART_E] = &c3_sys_uart_e.hw,
+ [CLKID_SYS_I2C_M_A] = &c3_sys_i2c_m_a.hw,
+ [CLKID_SYS_I2C_M_B] = &c3_sys_i2c_m_b.hw,
+ [CLKID_SYS_I2C_M_C] = &c3_sys_i2c_m_c.hw,
+ [CLKID_SYS_I2C_M_D] = &c3_sys_i2c_m_d.hw,
+ [CLKID_SYS_I2S_S_A] = &c3_sys_i2c_s_a.hw,
+ [CLKID_SYS_RTC] = &c3_sys_rtc.hw,
+ [CLKID_SYS_GE2D] = &c3_sys_ge2d.hw,
+ [CLKID_SYS_ISP] = &c3_sys_isp.hw,
+ [CLKID_SYS_GPV_ISP_NIC] = &c3_sys_gpv_isp_nic.hw,
+ [CLKID_SYS_GPV_CVE_NIC] = &c3_sys_gpv_cve_nic.hw,
+ [CLKID_SYS_MIPI_DSI_HOST] = &c3_sys_mipi_dsi_host.hw,
+ [CLKID_SYS_MIPI_DSI_PHY] = &c3_sys_mipi_dsi_phy.hw,
+ [CLKID_SYS_ETH_PHY] = &c3_sys_eth_phy.hw,
+ [CLKID_SYS_ACODEC] = &c3_sys_acodec.hw,
+ [CLKID_SYS_DWAP] = &c3_sys_dwap.hw,
+ [CLKID_SYS_DOS] = &c3_sys_dos.hw,
+ [CLKID_SYS_CVE] = &c3_sys_cve.hw,
+ [CLKID_SYS_VOUT] = &c3_sys_vout.hw,
+ [CLKID_SYS_VC9000E] = &c3_sys_vc9000e.hw,
+ [CLKID_SYS_PWM_MN] = &c3_sys_pwm_mn.hw,
+ [CLKID_SYS_SD_EMMC_B] = &c3_sys_sd_emmc_b.hw,
+ [CLKID_AXI_SYS_NIC] = &c3_axi_sys_nic.hw,
+ [CLKID_AXI_ISP_NIC] = &c3_axi_isp_nic.hw,
+ [CLKID_AXI_CVE_NIC] = &c3_axi_cve_nic.hw,
+ [CLKID_AXI_RAMB] = &c3_axi_ramb.hw,
+ [CLKID_AXI_RAMA] = &c3_axi_rama.hw,
+ [CLKID_AXI_CPU_DMC] = &c3_axi_cpu_dmc.hw,
+ [CLKID_AXI_NIC] = &c3_axi_nic.hw,
+ [CLKID_AXI_DMA] = &c3_axi_dma.hw,
+ [CLKID_AXI_MUX_NIC] = &c3_axi_mux_nic.hw,
+ [CLKID_AXI_CVE] = &c3_axi_cve.hw,
+ [CLKID_AXI_DEV1_DMC] = &c3_axi_dev1_dmc.hw,
+ [CLKID_AXI_DEV0_DMC] = &c3_axi_dev0_dmc.hw,
+ [CLKID_AXI_DSP_DMC] = &c3_axi_dsp_dmc.hw,
+ [CLKID_12_24M_IN] = &c3_clk_12_24m_in.hw,
+ [CLKID_12M_24M] = &c3_clk_12_24m.hw,
+ [CLKID_FCLK_25M_DIV] = &c3_fclk_25m_div.hw,
+ [CLKID_FCLK_25M] = &c3_fclk_25m.hw,
+ [CLKID_GEN_SEL] = &c3_gen_sel.hw,
+ [CLKID_GEN_DIV] = &c3_gen_div.hw,
+ [CLKID_GEN] = &c3_gen.hw,
+ [CLKID_SARADC_SEL] = &c3_saradc_sel.hw,
+ [CLKID_SARADC_DIV] = &c3_saradc_div.hw,
+ [CLKID_SARADC] = &c3_saradc.hw,
+ [CLKID_PWM_A_SEL] = &c3_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &c3_pwm_a_div.hw,
+ [CLKID_PWM_A] = &c3_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &c3_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &c3_pwm_b_div.hw,
+ [CLKID_PWM_B] = &c3_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &c3_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &c3_pwm_c_div.hw,
+ [CLKID_PWM_C] = &c3_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &c3_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &c3_pwm_d_div.hw,
+ [CLKID_PWM_D] = &c3_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &c3_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &c3_pwm_e_div.hw,
+ [CLKID_PWM_E] = &c3_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &c3_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &c3_pwm_f_div.hw,
+ [CLKID_PWM_F] = &c3_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &c3_pwm_g_sel.hw,
+ [CLKID_PWM_G_DIV] = &c3_pwm_g_div.hw,
+ [CLKID_PWM_G] = &c3_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &c3_pwm_h_sel.hw,
+ [CLKID_PWM_H_DIV] = &c3_pwm_h_div.hw,
+ [CLKID_PWM_H] = &c3_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &c3_pwm_i_sel.hw,
+ [CLKID_PWM_I_DIV] = &c3_pwm_i_div.hw,
+ [CLKID_PWM_I] = &c3_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &c3_pwm_j_sel.hw,
+ [CLKID_PWM_J_DIV] = &c3_pwm_j_div.hw,
+ [CLKID_PWM_J] = &c3_pwm_j.hw,
+ [CLKID_PWM_K_SEL] = &c3_pwm_k_sel.hw,
+ [CLKID_PWM_K_DIV] = &c3_pwm_k_div.hw,
+ [CLKID_PWM_K] = &c3_pwm_k.hw,
+ [CLKID_PWM_L_SEL] = &c3_pwm_l_sel.hw,
+ [CLKID_PWM_L_DIV] = &c3_pwm_l_div.hw,
+ [CLKID_PWM_L] = &c3_pwm_l.hw,
+ [CLKID_PWM_M_SEL] = &c3_pwm_m_sel.hw,
+ [CLKID_PWM_M_DIV] = &c3_pwm_m_div.hw,
+ [CLKID_PWM_M] = &c3_pwm_m.hw,
+ [CLKID_PWM_N_SEL] = &c3_pwm_n_sel.hw,
+ [CLKID_PWM_N_DIV] = &c3_pwm_n_div.hw,
+ [CLKID_PWM_N] = &c3_pwm_n.hw,
+ [CLKID_SPICC_A_SEL] = &c3_spicc_a_sel.hw,
+ [CLKID_SPICC_A_DIV] = &c3_spicc_a_div.hw,
+ [CLKID_SPICC_A] = &c3_spicc_a.hw,
+ [CLKID_SPICC_B_SEL] = &c3_spicc_b_sel.hw,
+ [CLKID_SPICC_B_DIV] = &c3_spicc_b_div.hw,
+ [CLKID_SPICC_B] = &c3_spicc_b.hw,
+ [CLKID_SPIFC_SEL] = &c3_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &c3_spifc_div.hw,
+ [CLKID_SPIFC] = &c3_spifc.hw,
+ [CLKID_SD_EMMC_A_SEL] = &c3_sd_emmc_a_sel.hw,
+ [CLKID_SD_EMMC_A_DIV] = &c3_sd_emmc_a_div.hw,
+ [CLKID_SD_EMMC_A] = &c3_sd_emmc_a.hw,
+ [CLKID_SD_EMMC_B_SEL] = &c3_sd_emmc_b_sel.hw,
+ [CLKID_SD_EMMC_B_DIV] = &c3_sd_emmc_b_div.hw,
+ [CLKID_SD_EMMC_B] = &c3_sd_emmc_b.hw,
+ [CLKID_SD_EMMC_C_SEL] = &c3_sd_emmc_c_sel.hw,
+ [CLKID_SD_EMMC_C_DIV] = &c3_sd_emmc_c_div.hw,
+ [CLKID_SD_EMMC_C] = &c3_sd_emmc_c.hw,
+ [CLKID_TS_DIV] = &c3_ts_div.hw,
+ [CLKID_TS] = &c3_ts.hw,
+ [CLKID_ETH_125M_DIV] = &c3_eth_125m_div.hw,
+ [CLKID_ETH_125M] = &c3_eth_125m.hw,
+ [CLKID_ETH_RMII_DIV] = &c3_eth_rmii_div.hw,
+ [CLKID_ETH_RMII] = &c3_eth_rmii.hw,
+ [CLKID_MIPI_DSI_MEAS_SEL] = &c3_mipi_dsi_meas_sel.hw,
+ [CLKID_MIPI_DSI_MEAS_DIV] = &c3_mipi_dsi_meas_div.hw,
+ [CLKID_MIPI_DSI_MEAS] = &c3_mipi_dsi_meas.hw,
+ [CLKID_DSI_PHY_SEL] = &c3_dsi_phy_sel.hw,
+ [CLKID_DSI_PHY_DIV] = &c3_dsi_phy_div.hw,
+ [CLKID_DSI_PHY] = &c3_dsi_phy.hw,
+ [CLKID_VOUT_MCLK_SEL] = &c3_vout_mclk_sel.hw,
+ [CLKID_VOUT_MCLK_DIV] = &c3_vout_mclk_div.hw,
+ [CLKID_VOUT_MCLK] = &c3_vout_mclk.hw,
+ [CLKID_VOUT_ENC_SEL] = &c3_vout_enc_sel.hw,
+ [CLKID_VOUT_ENC_DIV] = &c3_vout_enc_div.hw,
+ [CLKID_VOUT_ENC] = &c3_vout_enc.hw,
+ [CLKID_HCODEC_0_SEL] = &c3_hcodec_0_sel.hw,
+ [CLKID_HCODEC_0_DIV] = &c3_hcodec_0_div.hw,
+ [CLKID_HCODEC_0] = &c3_hcodec_0.hw,
+ [CLKID_HCODEC_1_SEL] = &c3_hcodec_1_sel.hw,
+ [CLKID_HCODEC_1_DIV] = &c3_hcodec_1_div.hw,
+ [CLKID_HCODEC_1] = &c3_hcodec_1.hw,
+ [CLKID_HCODEC] = &c3_hcodec.hw,
+ [CLKID_VC9000E_ACLK_SEL] = &c3_vc9000e_aclk_sel.hw,
+ [CLKID_VC9000E_ACLK_DIV] = &c3_vc9000e_aclk_div.hw,
+ [CLKID_VC9000E_ACLK] = &c3_vc9000e_aclk.hw,
+ [CLKID_VC9000E_CORE_SEL] = &c3_vc9000e_core_sel.hw,
+ [CLKID_VC9000E_CORE_DIV] = &c3_vc9000e_core_div.hw,
+ [CLKID_VC9000E_CORE] = &c3_vc9000e_core.hw,
+ [CLKID_CSI_PHY0_SEL] = &c3_csi_phy0_sel.hw,
+ [CLKID_CSI_PHY0_DIV] = &c3_csi_phy0_div.hw,
+ [CLKID_CSI_PHY0] = &c3_csi_phy0.hw,
+ [CLKID_DEWARPA_SEL] = &c3_dewarpa_sel.hw,
+ [CLKID_DEWARPA_DIV] = &c3_dewarpa_div.hw,
+ [CLKID_DEWARPA] = &c3_dewarpa.hw,
+ [CLKID_ISP0_SEL] = &c3_isp0_sel.hw,
+ [CLKID_ISP0_DIV] = &c3_isp0_div.hw,
+ [CLKID_ISP0] = &c3_isp0.hw,
+ [CLKID_NNA_CORE_SEL] = &c3_nna_core_sel.hw,
+ [CLKID_NNA_CORE_DIV] = &c3_nna_core_div.hw,
+ [CLKID_NNA_CORE] = &c3_nna_core.hw,
+ [CLKID_GE2D_SEL] = &c3_ge2d_sel.hw,
+ [CLKID_GE2D_DIV] = &c3_ge2d_div.hw,
+ [CLKID_GE2D] = &c3_ge2d.hw,
+ [CLKID_VAPB_SEL] = &c3_vapb_sel.hw,
+ [CLKID_VAPB_DIV] = &c3_vapb_div.hw,
+ [CLKID_VAPB] = &c3_vapb.hw,
+};
+
+static const struct meson_clkc_data c3_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = c3_peripherals_hw_clks,
+ .num = ARRAY_SIZE(c3_peripherals_hw_clks),
},
};
-static struct clk_regmap vapb_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VAPB_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vapb = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VAPB_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_hw *c3_periphs_hw_clks[] = {
- [CLKID_RTC_XTAL_CLKIN] = &rtc_xtal_clkin.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_MUX] = &rtc_32k_mux.hw,
- [CLKID_RTC_32K] = &rtc_32k.hw,
- [CLKID_RTC_CLK] = &rtc_clk.hw,
- [CLKID_SYS_RESET_CTRL] = &sys_reset_ctrl.hw,
- [CLKID_SYS_PWR_CTRL] = &sys_pwr_ctrl.hw,
- [CLKID_SYS_PAD_CTRL] = &sys_pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_SYS_TS_PLL] = &sys_ts_pll.hw,
- [CLKID_SYS_DEV_ARB] = &sys_dev_arb.hw,
- [CLKID_SYS_MMC_PCLK] = &sys_mmc_pclk.hw,
- [CLKID_SYS_CPU_CTRL] = &sys_cpu_ctrl.hw,
- [CLKID_SYS_JTAG_CTRL] = &sys_jtag_ctrl.hw,
- [CLKID_SYS_IR_CTRL] = &sys_ir_ctrl.hw,
- [CLKID_SYS_IRQ_CTRL] = &sys_irq_ctrl.hw,
- [CLKID_SYS_MSR_CLK] = &sys_msr_clk.hw,
- [CLKID_SYS_ROM] = &sys_rom.hw,
- [CLKID_SYS_UART_F] = &sys_uart_f.hw,
- [CLKID_SYS_CPU_ARB] = &sys_cpu_apb.hw,
- [CLKID_SYS_RSA] = &sys_rsa.hw,
- [CLKID_SYS_SAR_ADC] = &sys_sar_adc.hw,
- [CLKID_SYS_STARTUP] = &sys_startup.hw,
- [CLKID_SYS_SECURE] = &sys_secure.hw,
- [CLKID_SYS_SPIFC] = &sys_spifc.hw,
- [CLKID_SYS_NNA] = &sys_nna.hw,
- [CLKID_SYS_ETH_MAC] = &sys_eth_mac.hw,
- [CLKID_SYS_GIC] = &sys_gic.hw,
- [CLKID_SYS_RAMA] = &sys_rama.hw,
- [CLKID_SYS_BIG_NIC] = &sys_big_nic.hw,
- [CLKID_SYS_RAMB] = &sys_ramb.hw,
- [CLKID_SYS_AUDIO_PCLK] = &sys_audio_pclk.hw,
- [CLKID_SYS_PWM_KL] = &sys_pwm_kl.hw,
- [CLKID_SYS_PWM_IJ] = &sys_pwm_ij.hw,
- [CLKID_SYS_USB] = &sys_usb.hw,
- [CLKID_SYS_SD_EMMC_A] = &sys_sd_emmc_a.hw,
- [CLKID_SYS_SD_EMMC_C] = &sys_sd_emmc_c.hw,
- [CLKID_SYS_PWM_AB] = &sys_pwm_ab.hw,
- [CLKID_SYS_PWM_CD] = &sys_pwm_cd.hw,
- [CLKID_SYS_PWM_EF] = &sys_pwm_ef.hw,
- [CLKID_SYS_PWM_GH] = &sys_pwm_gh.hw,
- [CLKID_SYS_SPICC_1] = &sys_spicc_1.hw,
- [CLKID_SYS_SPICC_0] = &sys_spicc_0.hw,
- [CLKID_SYS_UART_A] = &sys_uart_a.hw,
- [CLKID_SYS_UART_B] = &sys_uart_b.hw,
- [CLKID_SYS_UART_C] = &sys_uart_c.hw,
- [CLKID_SYS_UART_D] = &sys_uart_d.hw,
- [CLKID_SYS_UART_E] = &sys_uart_e.hw,
- [CLKID_SYS_I2C_M_A] = &sys_i2c_m_a.hw,
- [CLKID_SYS_I2C_M_B] = &sys_i2c_m_b.hw,
- [CLKID_SYS_I2C_M_C] = &sys_i2c_m_c.hw,
- [CLKID_SYS_I2C_M_D] = &sys_i2c_m_d.hw,
- [CLKID_SYS_I2S_S_A] = &sys_i2c_s_a.hw,
- [CLKID_SYS_RTC] = &sys_rtc.hw,
- [CLKID_SYS_GE2D] = &sys_ge2d.hw,
- [CLKID_SYS_ISP] = &sys_isp.hw,
- [CLKID_SYS_GPV_ISP_NIC] = &sys_gpv_isp_nic.hw,
- [CLKID_SYS_GPV_CVE_NIC] = &sys_gpv_cve_nic.hw,
- [CLKID_SYS_MIPI_DSI_HOST] = &sys_mipi_dsi_host.hw,
- [CLKID_SYS_MIPI_DSI_PHY] = &sys_mipi_dsi_phy.hw,
- [CLKID_SYS_ETH_PHY] = &sys_eth_phy.hw,
- [CLKID_SYS_ACODEC] = &sys_acodec.hw,
- [CLKID_SYS_DWAP] = &sys_dwap.hw,
- [CLKID_SYS_DOS] = &sys_dos.hw,
- [CLKID_SYS_CVE] = &sys_cve.hw,
- [CLKID_SYS_VOUT] = &sys_vout.hw,
- [CLKID_SYS_VC9000E] = &sys_vc9000e.hw,
- [CLKID_SYS_PWM_MN] = &sys_pwm_mn.hw,
- [CLKID_SYS_SD_EMMC_B] = &sys_sd_emmc_b.hw,
- [CLKID_AXI_SYS_NIC] = &axi_sys_nic.hw,
- [CLKID_AXI_ISP_NIC] = &axi_isp_nic.hw,
- [CLKID_AXI_CVE_NIC] = &axi_cve_nic.hw,
- [CLKID_AXI_RAMB] = &axi_ramb.hw,
- [CLKID_AXI_RAMA] = &axi_rama.hw,
- [CLKID_AXI_CPU_DMC] = &axi_cpu_dmc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_AXI_MUX_NIC] = &axi_mux_nic.hw,
- [CLKID_AXI_CVE] = &axi_cve.hw,
- [CLKID_AXI_DEV1_DMC] = &axi_dev1_dmc.hw,
- [CLKID_AXI_DEV0_DMC] = &axi_dev0_dmc.hw,
- [CLKID_AXI_DSP_DMC] = &axi_dsp_dmc.hw,
- [CLKID_12_24M_IN] = &clk_12_24m_in.hw,
- [CLKID_12M_24M] = &clk_12_24m.hw,
- [CLKID_FCLK_25M_DIV] = &fclk_25m_div.hw,
- [CLKID_FCLK_25M] = &fclk_25m.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_PWM_G_SEL] = &pwm_g_sel.hw,
- [CLKID_PWM_G_DIV] = &pwm_g_div.hw,
- [CLKID_PWM_G] = &pwm_g.hw,
- [CLKID_PWM_H_SEL] = &pwm_h_sel.hw,
- [CLKID_PWM_H_DIV] = &pwm_h_div.hw,
- [CLKID_PWM_H] = &pwm_h.hw,
- [CLKID_PWM_I_SEL] = &pwm_i_sel.hw,
- [CLKID_PWM_I_DIV] = &pwm_i_div.hw,
- [CLKID_PWM_I] = &pwm_i.hw,
- [CLKID_PWM_J_SEL] = &pwm_j_sel.hw,
- [CLKID_PWM_J_DIV] = &pwm_j_div.hw,
- [CLKID_PWM_J] = &pwm_j.hw,
- [CLKID_PWM_K_SEL] = &pwm_k_sel.hw,
- [CLKID_PWM_K_DIV] = &pwm_k_div.hw,
- [CLKID_PWM_K] = &pwm_k.hw,
- [CLKID_PWM_L_SEL] = &pwm_l_sel.hw,
- [CLKID_PWM_L_DIV] = &pwm_l_div.hw,
- [CLKID_PWM_L] = &pwm_l.hw,
- [CLKID_PWM_M_SEL] = &pwm_m_sel.hw,
- [CLKID_PWM_M_DIV] = &pwm_m_div.hw,
- [CLKID_PWM_M] = &pwm_m.hw,
- [CLKID_PWM_N_SEL] = &pwm_n_sel.hw,
- [CLKID_PWM_N_DIV] = &pwm_n_div.hw,
- [CLKID_PWM_N] = &pwm_n.hw,
- [CLKID_SPICC_A_SEL] = &spicc_a_sel.hw,
- [CLKID_SPICC_A_DIV] = &spicc_a_div.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_SPICC_B_SEL] = &spicc_b_sel.hw,
- [CLKID_SPICC_B_DIV] = &spicc_b_div.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_SD_EMMC_A_SEL] = &sd_emmc_a_sel.hw,
- [CLKID_SD_EMMC_A_DIV] = &sd_emmc_a_div.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_SD_EMMC_B_SEL] = &sd_emmc_b_sel.hw,
- [CLKID_SD_EMMC_B_DIV] = &sd_emmc_b_div.hw,
- [CLKID_SD_EMMC_B] = &sd_emmc_b.hw,
- [CLKID_SD_EMMC_C_SEL] = &sd_emmc_c_sel.hw,
- [CLKID_SD_EMMC_C_DIV] = &sd_emmc_c_div.hw,
- [CLKID_SD_EMMC_C] = &sd_emmc_c.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_ETH_125M_DIV] = &eth_125m_div.hw,
- [CLKID_ETH_125M] = &eth_125m.hw,
- [CLKID_ETH_RMII_DIV] = &eth_rmii_div.hw,
- [CLKID_ETH_RMII] = &eth_rmii.hw,
- [CLKID_MIPI_DSI_MEAS_SEL] = &mipi_dsi_meas_sel.hw,
- [CLKID_MIPI_DSI_MEAS_DIV] = &mipi_dsi_meas_div.hw,
- [CLKID_MIPI_DSI_MEAS] = &mipi_dsi_meas.hw,
- [CLKID_DSI_PHY_SEL] = &dsi_phy_sel.hw,
- [CLKID_DSI_PHY_DIV] = &dsi_phy_div.hw,
- [CLKID_DSI_PHY] = &dsi_phy.hw,
- [CLKID_VOUT_MCLK_SEL] = &vout_mclk_sel.hw,
- [CLKID_VOUT_MCLK_DIV] = &vout_mclk_div.hw,
- [CLKID_VOUT_MCLK] = &vout_mclk.hw,
- [CLKID_VOUT_ENC_SEL] = &vout_enc_sel.hw,
- [CLKID_VOUT_ENC_DIV] = &vout_enc_div.hw,
- [CLKID_VOUT_ENC] = &vout_enc.hw,
- [CLKID_HCODEC_0_SEL] = &hcodec_0_sel.hw,
- [CLKID_HCODEC_0_DIV] = &hcodec_0_div.hw,
- [CLKID_HCODEC_0] = &hcodec_0.hw,
- [CLKID_HCODEC_1_SEL] = &hcodec_1_sel.hw,
- [CLKID_HCODEC_1_DIV] = &hcodec_1_div.hw,
- [CLKID_HCODEC_1] = &hcodec_1.hw,
- [CLKID_HCODEC] = &hcodec.hw,
- [CLKID_VC9000E_ACLK_SEL] = &vc9000e_aclk_sel.hw,
- [CLKID_VC9000E_ACLK_DIV] = &vc9000e_aclk_div.hw,
- [CLKID_VC9000E_ACLK] = &vc9000e_aclk.hw,
- [CLKID_VC9000E_CORE_SEL] = &vc9000e_core_sel.hw,
- [CLKID_VC9000E_CORE_DIV] = &vc9000e_core_div.hw,
- [CLKID_VC9000E_CORE] = &vc9000e_core.hw,
- [CLKID_CSI_PHY0_SEL] = &csi_phy0_sel.hw,
- [CLKID_CSI_PHY0_DIV] = &csi_phy0_div.hw,
- [CLKID_CSI_PHY0] = &csi_phy0.hw,
- [CLKID_DEWARPA_SEL] = &dewarpa_sel.hw,
- [CLKID_DEWARPA_DIV] = &dewarpa_div.hw,
- [CLKID_DEWARPA] = &dewarpa.hw,
- [CLKID_ISP0_SEL] = &isp0_sel.hw,
- [CLKID_ISP0_DIV] = &isp0_div.hw,
- [CLKID_ISP0] = &isp0.hw,
- [CLKID_NNA_CORE_SEL] = &nna_core_sel.hw,
- [CLKID_NNA_CORE_DIV] = &nna_core_div.hw,
- [CLKID_NNA_CORE] = &nna_core.hw,
- [CLKID_GE2D_SEL] = &ge2d_sel.hw,
- [CLKID_GE2D_DIV] = &ge2d_div.hw,
- [CLKID_GE2D] = &ge2d.hw,
- [CLKID_VAPB_SEL] = &vapb_sel.hw,
- [CLKID_VAPB_DIV] = &vapb_div.hw,
- [CLKID_VAPB] = &vapb.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_periphs_clk_regmaps[] = {
- &rtc_xtal_clkin,
- &rtc_32k_div,
- &rtc_32k_mux,
- &rtc_32k,
- &rtc_clk,
- &sys_reset_ctrl,
- &sys_pwr_ctrl,
- &sys_pad_ctrl,
- &sys_ctrl,
- &sys_ts_pll,
- &sys_dev_arb,
- &sys_mmc_pclk,
- &sys_cpu_ctrl,
- &sys_jtag_ctrl,
- &sys_ir_ctrl,
- &sys_irq_ctrl,
- &sys_msr_clk,
- &sys_rom,
- &sys_uart_f,
- &sys_cpu_apb,
- &sys_rsa,
- &sys_sar_adc,
- &sys_startup,
- &sys_secure,
- &sys_spifc,
- &sys_nna,
- &sys_eth_mac,
- &sys_gic,
- &sys_rama,
- &sys_big_nic,
- &sys_ramb,
- &sys_audio_pclk,
- &sys_pwm_kl,
- &sys_pwm_ij,
- &sys_usb,
- &sys_sd_emmc_a,
- &sys_sd_emmc_c,
- &sys_pwm_ab,
- &sys_pwm_cd,
- &sys_pwm_ef,
- &sys_pwm_gh,
- &sys_spicc_1,
- &sys_spicc_0,
- &sys_uart_a,
- &sys_uart_b,
- &sys_uart_c,
- &sys_uart_d,
- &sys_uart_e,
- &sys_i2c_m_a,
- &sys_i2c_m_b,
- &sys_i2c_m_c,
- &sys_i2c_m_d,
- &sys_i2c_s_a,
- &sys_rtc,
- &sys_ge2d,
- &sys_isp,
- &sys_gpv_isp_nic,
- &sys_gpv_cve_nic,
- &sys_mipi_dsi_host,
- &sys_mipi_dsi_phy,
- &sys_eth_phy,
- &sys_acodec,
- &sys_dwap,
- &sys_dos,
- &sys_cve,
- &sys_vout,
- &sys_vc9000e,
- &sys_pwm_mn,
- &sys_sd_emmc_b,
- &axi_sys_nic,
- &axi_isp_nic,
- &axi_cve_nic,
- &axi_ramb,
- &axi_rama,
- &axi_cpu_dmc,
- &axi_nic,
- &axi_dma,
- &axi_mux_nic,
- &axi_cve,
- &axi_dev1_dmc,
- &axi_dev0_dmc,
- &axi_dsp_dmc,
- &clk_12_24m_in,
- &clk_12_24m,
- &fclk_25m_div,
- &fclk_25m,
- &gen_sel,
- &gen_div,
- &gen,
- &saradc_sel,
- &saradc_div,
- &saradc,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_a,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_b,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_c,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_d,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_e,
- &pwm_f_sel,
- &pwm_f_div,
- &pwm_f,
- &pwm_g_sel,
- &pwm_g_div,
- &pwm_g,
- &pwm_h_sel,
- &pwm_h_div,
- &pwm_h,
- &pwm_i_sel,
- &pwm_i_div,
- &pwm_i,
- &pwm_j_sel,
- &pwm_j_div,
- &pwm_j,
- &pwm_k_sel,
- &pwm_k_div,
- &pwm_k,
- &pwm_l_sel,
- &pwm_l_div,
- &pwm_l,
- &pwm_m_sel,
- &pwm_m_div,
- &pwm_m,
- &pwm_n_sel,
- &pwm_n_div,
- &pwm_n,
- &spicc_a_sel,
- &spicc_a_div,
- &spicc_a,
- &spicc_b_sel,
- &spicc_b_div,
- &spicc_b,
- &spifc_sel,
- &spifc_div,
- &spifc,
- &sd_emmc_a_sel,
- &sd_emmc_a_div,
- &sd_emmc_a,
- &sd_emmc_b_sel,
- &sd_emmc_b_div,
- &sd_emmc_b,
- &sd_emmc_c_sel,
- &sd_emmc_c_div,
- &sd_emmc_c,
- &ts_div,
- &ts,
- &eth_125m,
- &eth_rmii_div,
- &eth_rmii,
- &mipi_dsi_meas_sel,
- &mipi_dsi_meas_div,
- &mipi_dsi_meas,
- &dsi_phy_sel,
- &dsi_phy_div,
- &dsi_phy,
- &vout_mclk_sel,
- &vout_mclk_div,
- &vout_mclk,
- &vout_enc_sel,
- &vout_enc_div,
- &vout_enc,
- &hcodec_0_sel,
- &hcodec_0_div,
- &hcodec_0,
- &hcodec_1_sel,
- &hcodec_1_div,
- &hcodec_1,
- &hcodec,
- &vc9000e_aclk_sel,
- &vc9000e_aclk_div,
- &vc9000e_aclk,
- &vc9000e_core_sel,
- &vc9000e_core_div,
- &vc9000e_core,
- &csi_phy0_sel,
- &csi_phy0_div,
- &csi_phy0,
- &dewarpa_sel,
- &dewarpa_div,
- &dewarpa,
- &isp0_sel,
- &isp0_div,
- &isp0,
- &nna_core_sel,
- &nna_core_div,
- &nna_core,
- &ge2d_sel,
- &ge2d_div,
- &ge2d,
- &vapb_sel,
- &vapb_div,
- &vapb,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = NNA_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data c3_periphs_clks = {
- .hws = c3_periphs_hw_clks,
- .num = ARRAY_SIZE(c3_periphs_hw_clks),
-};
-
-static int c3_peripherals_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_periphs_clk_regmaps); i++)
- c3_periphs_clk_regmaps[i]->map = regmap;
-
- for (clkid = 0; clkid < c3_periphs_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_periphs_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_periphs_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_periphs_clks);
-}
-
static const struct of_device_id c3_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,c3-peripherals-clkc",
+ .data = &c3_peripherals_clkc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, c3_peripherals_clkc_match_table);
-static struct platform_driver c3_peripherals_driver = {
- .probe = c3_peripherals_probe,
+static struct platform_driver c3_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-peripherals-clkc",
.of_match_table = c3_peripherals_clkc_match_table,
},
};
-module_platform_driver(c3_peripherals_driver);
+module_platform_driver(c3_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 32bd2ed9d304..dd047d17488c 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -34,7 +34,7 @@
#define ANACTRL_MPLL_CTRL3 0x18c
#define ANACTRL_MPLL_CTRL4 0x190
-static struct clk_regmap fclk_50m_en = {
+static struct clk_regmap c3_fclk_50m_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 0,
@@ -49,20 +49,20 @@ static struct clk_regmap fclk_50m_en = {
},
};
-static struct clk_fixed_factor fclk_50m = {
+static struct clk_fixed_factor c3_fclk_50m = {
.mult = 1,
.div = 40,
.hw.init = &(struct clk_init_data) {
.name = "fclk_50m",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_50m_en.hw
+ &c3_fclk_50m_en.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor c3_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data) {
@@ -75,7 +75,7 @@ static struct clk_fixed_factor fclk_div2_div = {
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap c3_fclk_div2 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 24,
@@ -84,13 +84,13 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &c3_fclk_div2_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2p5_div = {
+static struct clk_fixed_factor c3_fclk_div2p5_div = {
.mult = 2,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -103,7 +103,7 @@ static struct clk_fixed_factor fclk_div2p5_div = {
},
};
-static struct clk_regmap fclk_div2p5 = {
+static struct clk_regmap c3_fclk_div2p5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 4,
@@ -112,13 +112,13 @@ static struct clk_regmap fclk_div2p5 = {
.name = "fclk_div2p5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2p5_div.hw
+ &c3_fclk_div2p5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor c3_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data) {
@@ -131,7 +131,7 @@ static struct clk_fixed_factor fclk_div3_div = {
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap c3_fclk_div3 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 20,
@@ -140,13 +140,13 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &c3_fclk_div3_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div4_div = {
+static struct clk_fixed_factor c3_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data) {
@@ -159,7 +159,7 @@ static struct clk_fixed_factor fclk_div4_div = {
},
};
-static struct clk_regmap fclk_div4 = {
+static struct clk_regmap c3_fclk_div4 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 21,
@@ -168,13 +168,13 @@ static struct clk_regmap fclk_div4 = {
.name = "fclk_div4",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div4_div.hw
+ &c3_fclk_div4_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor c3_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -187,7 +187,7 @@ static struct clk_fixed_factor fclk_div5_div = {
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap c3_fclk_div5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 22,
@@ -196,13 +196,13 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &c3_fclk_div5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor c3_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data) {
@@ -215,7 +215,7 @@ static struct clk_fixed_factor fclk_div7_div = {
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap c3_fclk_div7 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 23,
@@ -224,13 +224,13 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &c3_fclk_div7_div.hw
},
.num_parents = 1,
},
};
-static const struct reg_sequence c3_gp0_init_regs[] = {
+static const struct reg_sequence c3_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
{ .reg = ANACTRL_GP0PLL_CTRL4, .def = 0x88770290 },
@@ -243,7 +243,7 @@ static const struct pll_mult_range c3_gp0_pll_mult_range = {
.max = 250,
};
-static struct clk_regmap gp0_pll_dco = {
+static struct clk_regmap c3_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_GP0PLL_CTRL0,
@@ -276,8 +276,8 @@ static struct clk_regmap gp0_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_gp0_init_regs,
- .init_count = ARRAY_SIZE(c3_gp0_init_regs),
+ .init_regs = c3_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "gp0_pll_dco",
@@ -300,7 +300,7 @@ static const struct clk_div_table c3_gp0_pll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap gp0_pll = {
+static struct clk_regmap c3_gp0_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_GP0PLL_CTRL0,
.shift = 16,
@@ -311,14 +311,14 @@ static struct clk_regmap gp0_pll = {
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gp0_pll_dco.hw
+ &c3_gp0_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_hifi_init_regs[] = {
+static const struct reg_sequence c3_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -326,7 +326,7 @@ static const struct reg_sequence c3_hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL6, .def = 0x56540000 },
};
-static struct clk_regmap hifi_pll_dco = {
+static struct clk_regmap c3_hifi_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -359,8 +359,9 @@ static struct clk_regmap hifi_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_hifi_init_regs,
- .init_count = ARRAY_SIZE(c3_hifi_init_regs),
+ .init_regs = c3_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_hifi_pll_init_regs),
+ .frac_max = 100000,
},
.hw.init = &(struct clk_init_data) {
.name = "hifi_pll_dco",
@@ -372,7 +373,7 @@ static struct clk_regmap hifi_pll_dco = {
},
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap c3_hifi_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_HIFIPLL_CTRL0,
.shift = 16,
@@ -383,14 +384,14 @@ static struct clk_regmap hifi_pll = {
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &hifi_pll_dco.hw
+ &c3_hifi_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_mclk_init_regs[] = {
+static const struct reg_sequence c3_mclk_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL1, .def = 0x1420500f },
{ .reg = ANACTRL_MPLL_CTRL2, .def = 0x00023041 },
{ .reg = ANACTRL_MPLL_CTRL3, .def = 0x18180000 },
@@ -402,7 +403,7 @@ static const struct pll_mult_range c3_mclk_pll_mult_range = {
.max = 133,
};
-static struct clk_regmap mclk_pll_dco = {
+static struct clk_regmap c3_mclk_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_MPLL_CTRL0,
@@ -430,8 +431,8 @@ static struct clk_regmap mclk_pll_dco = {
.width = 1,
},
.range = &c3_mclk_pll_mult_range,
- .init_regs = c3_mclk_init_regs,
- .init_count = ARRAY_SIZE(c3_mclk_init_regs),
+ .init_regs = c3_mclk_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_mclk_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_dco",
@@ -443,7 +444,7 @@ static struct clk_regmap mclk_pll_dco = {
},
};
-static const struct clk_div_table c3_mpll_od_table[] = {
+static const struct clk_div_table c3_mpll_pll_od_table[] = {
{ 0, 1 },
{ 1, 2 },
{ 2, 4 },
@@ -452,25 +453,25 @@ static const struct clk_div_table c3_mpll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap mclk_pll_od = {
+static struct clk_regmap c3_mclk_pll_od = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL0,
.shift = 12,
.width = 3,
- .table = c3_mpll_od_table,
+ .table = c3_mpll_pll_od_table,
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_od",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_dco.hw },
+ &c3_mclk_pll_dco.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* both values 0 and 1 divide the input rate by one */
-static struct clk_regmap mclk_pll = {
+static struct clk_regmap c3_mclk_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 16,
@@ -481,20 +482,20 @@ static struct clk_regmap mclk_pll = {
.name = "mclk_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_od.hw
+ &c3_mclk_pll_od.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mclk_parent[] = {
- { .hw = &mclk_pll.hw },
+static const struct clk_parent_data c3_mclk_parents[] = {
+ { .hw = &c3_mclk_pll.hw },
{ .fw_name = "mclk" },
- { .hw = &fclk_50m.hw }
+ { .hw = &c3_fclk_50m.hw }
};
-static struct clk_regmap mclk0_sel = {
+static struct clk_regmap c3_mclk0_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -503,12 +504,12 @@ static struct clk_regmap mclk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk0_div_en = {
+static struct clk_regmap c3_mclk0_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 1,
@@ -517,14 +518,14 @@ static struct clk_regmap mclk0_div_en = {
.name = "mclk0_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_sel.hw
+ &c3_mclk0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0_div = {
+static struct clk_regmap c3_mclk0_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 2,
@@ -534,14 +535,14 @@ static struct clk_regmap mclk0_div = {
.name = "mclk0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div_en.hw
+ &c3_mclk0_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0 = {
+static struct clk_regmap c3_mclk0 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 0,
@@ -550,14 +551,14 @@ static struct clk_regmap mclk0 = {
.name = "mclk0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div.hw
+ &c3_mclk0_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_sel = {
+static struct clk_regmap c3_mclk1_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -566,12 +567,12 @@ static struct clk_regmap mclk1_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk1_div_en = {
+static struct clk_regmap c3_mclk1_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 9,
@@ -580,14 +581,14 @@ static struct clk_regmap mclk1_div_en = {
.name = "mclk1_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_sel.hw
+ &c3_mclk1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_div = {
+static struct clk_regmap c3_mclk1_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 10,
@@ -597,14 +598,14 @@ static struct clk_regmap mclk1_div = {
.name = "mclk1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div_en.hw
+ &c3_mclk1_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1 = {
+static struct clk_regmap c3_mclk1 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 8,
@@ -613,7 +614,7 @@ static struct clk_regmap mclk1 = {
.name = "mclk1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div.hw
+ &c3_mclk1_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -621,128 +622,63 @@ static struct clk_regmap mclk1 = {
};
static struct clk_hw *c3_pll_hw_clks[] = {
- [CLKID_FCLK_50M_EN] = &fclk_50m_en.hw,
- [CLKID_FCLK_50M] = &fclk_50m.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV2P5_DIV] = &fclk_div2p5_div.hw,
- [CLKID_FCLK_DIV2P5] = &fclk_div2p5.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV4_DIV] = &fclk_div4_div.hw,
- [CLKID_FCLK_DIV4] = &fclk_div4.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_GP0_PLL_DCO] = &gp0_pll_dco.hw,
- [CLKID_GP0_PLL] = &gp0_pll.hw,
- [CLKID_HIFI_PLL_DCO] = &hifi_pll_dco.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
- [CLKID_MCLK_PLL_DCO] = &mclk_pll_dco.hw,
- [CLKID_MCLK_PLL_OD] = &mclk_pll_od.hw,
- [CLKID_MCLK_PLL] = &mclk_pll.hw,
- [CLKID_MCLK0_SEL] = &mclk0_sel.hw,
- [CLKID_MCLK0_SEL_EN] = &mclk0_div_en.hw,
- [CLKID_MCLK0_DIV] = &mclk0_div.hw,
- [CLKID_MCLK0] = &mclk0.hw,
- [CLKID_MCLK1_SEL] = &mclk1_sel.hw,
- [CLKID_MCLK1_SEL_EN] = &mclk1_div_en.hw,
- [CLKID_MCLK1_DIV] = &mclk1_div.hw,
- [CLKID_MCLK1] = &mclk1.hw
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_pll_clk_regmaps[] = {
- &fclk_50m_en,
- &fclk_div2,
- &fclk_div2p5,
- &fclk_div3,
- &fclk_div4,
- &fclk_div5,
- &fclk_div7,
- &gp0_pll_dco,
- &gp0_pll,
- &hifi_pll_dco,
- &hifi_pll,
- &mclk_pll_dco,
- &mclk_pll_od,
- &mclk_pll,
- &mclk0_sel,
- &mclk0_div_en,
- &mclk0_div,
- &mclk0,
- &mclk1_sel,
- &mclk1_div_en,
- &mclk1_div,
- &mclk1,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_MPLL_CTRL4,
-};
-
-static struct meson_clk_hw_data c3_pll_clks = {
- .hws = c3_pll_hw_clks,
- .num = ARRAY_SIZE(c3_pll_hw_clks),
-};
-
-static int c3_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_pll_clk_regmaps); i++)
- c3_pll_clk_regmaps[i]->map = regmap;
-
- for (clkid = 0; clkid < c3_pll_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_pll_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_pll_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_pll_clks);
-}
+ [CLKID_FCLK_50M_EN] = &c3_fclk_50m_en.hw,
+ [CLKID_FCLK_50M] = &c3_fclk_50m.hw,
+ [CLKID_FCLK_DIV2_DIV] = &c3_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV2] = &c3_fclk_div2.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &c3_fclk_div2p5_div.hw,
+ [CLKID_FCLK_DIV2P5] = &c3_fclk_div2p5.hw,
+ [CLKID_FCLK_DIV3_DIV] = &c3_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV3] = &c3_fclk_div3.hw,
+ [CLKID_FCLK_DIV4_DIV] = &c3_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV4] = &c3_fclk_div4.hw,
+ [CLKID_FCLK_DIV5_DIV] = &c3_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV5] = &c3_fclk_div5.hw,
+ [CLKID_FCLK_DIV7_DIV] = &c3_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV7] = &c3_fclk_div7.hw,
+ [CLKID_GP0_PLL_DCO] = &c3_gp0_pll_dco.hw,
+ [CLKID_GP0_PLL] = &c3_gp0_pll.hw,
+ [CLKID_HIFI_PLL_DCO] = &c3_hifi_pll_dco.hw,
+ [CLKID_HIFI_PLL] = &c3_hifi_pll.hw,
+ [CLKID_MCLK_PLL_DCO] = &c3_mclk_pll_dco.hw,
+ [CLKID_MCLK_PLL_OD] = &c3_mclk_pll_od.hw,
+ [CLKID_MCLK_PLL] = &c3_mclk_pll.hw,
+ [CLKID_MCLK0_SEL] = &c3_mclk0_sel.hw,
+ [CLKID_MCLK0_SEL_EN] = &c3_mclk0_div_en.hw,
+ [CLKID_MCLK0_DIV] = &c3_mclk0_div.hw,
+ [CLKID_MCLK0] = &c3_mclk0.hw,
+ [CLKID_MCLK1_SEL] = &c3_mclk1_sel.hw,
+ [CLKID_MCLK1_SEL_EN] = &c3_mclk1_div_en.hw,
+ [CLKID_MCLK1_DIV] = &c3_mclk1_div.hw,
+ [CLKID_MCLK1] = &c3_mclk1.hw
+};
+
+static const struct meson_clkc_data c3_pll_clkc_data = {
+ .hw_clks = {
+ .hws = c3_pll_hw_clks,
+ .num = ARRAY_SIZE(c3_pll_hw_clks),
+ },
+};
static const struct of_device_id c3_pll_clkc_match_table[] = {
{
.compatible = "amlogic,c3-pll-clkc",
+ .data = &c3_pll_clkc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, c3_pll_clkc_match_table);
-static struct platform_driver c3_pll_driver = {
- .probe = c3_pll_probe,
+static struct platform_driver c3_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-pll-clkc",
.of_match_table = c3_pll_clkc_match_table,
},
};
-module_platform_driver(c3_pll_driver);
+module_platform_driver(c3_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index 6c1f58826e24..83aedbfd2891 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -61,13 +61,14 @@ static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
};
const struct clk_ops meson_clk_cpu_dyndiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_cpu_dyndiv_recalc_rate,
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index 913bf25d3771..787df6cdf841 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -126,19 +126,21 @@ static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops meson_clk_dualdiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, "CLK_MESON");
const struct clk_ops meson_clk_dualdiv_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic dual divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index f639d56f0fd3..7f8dada66e16 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -112,26 +112,15 @@ static int mpll_set_rate(struct clk_hw *hw,
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
unsigned int sdm, n2;
- unsigned long flags = 0;
params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags);
- if (mpll->lock)
- spin_lock_irqsave(mpll->lock, flags);
- else
- __acquire(mpll->lock);
-
/* Set the fractional part */
meson_parm_write(clk->map, &mpll->sdm, sdm);
/* Set the integer divider part */
meson_parm_write(clk->map, &mpll->n2, n2);
- if (mpll->lock)
- spin_unlock_irqrestore(mpll->lock, flags);
- else
- __release(mpll->lock);
-
return 0;
}
@@ -139,6 +128,11 @@ static int mpll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
if (mpll->init_count)
regmap_multi_reg_write(clk->map, mpll->init_regs,
@@ -162,10 +156,11 @@ static int mpll_init(struct clk_hw *hw)
}
const struct clk_ops meson_clk_mpll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, "CLK_MESON");
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
@@ -173,9 +168,9 @@ const struct clk_ops meson_clk_mpll_ops = {
.set_rate = mpll_set_rate,
.init = mpll_init,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic MPLL driver");
MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
index a991d568c43a..4ffd3aeef799 100644
--- a/drivers/clk/meson/clk-mpll.h
+++ b/drivers/clk/meson/clk-mpll.h
@@ -20,7 +20,6 @@ struct meson_clk_mpll_data {
struct parm misc;
const struct reg_sequence *init_regs;
unsigned int init_count;
- spinlock_t *lock;
u8 flags;
};
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index c1526fbfb6c4..58dd982e6878 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -58,10 +58,11 @@ static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
}
const struct clk_ops meson_clk_phase_ops = {
+ .init = clk_regmap_init,
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, "CLK_MESON");
/*
* This is a special clock for the audio controller.
@@ -83,6 +84,11 @@ static int meson_clk_triphase_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase 0 and sync it to phase 1 and 2 */
val = meson_parm_read(clk->map, &tph->ph0);
@@ -123,7 +129,7 @@ const struct clk_ops meson_clk_triphase_ops = {
.get_phase = meson_clk_triphase_get_phase,
.set_phase = meson_clk_triphase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, "CLK_MESON");
/*
* This is a special clock for the audio controller.
@@ -142,6 +148,11 @@ static int meson_sclk_ws_inv_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase and sync the inverted value to ws */
val = meson_parm_read(clk->map, &tph->ph);
@@ -178,9 +189,9 @@ const struct clk_ops meson_sclk_ws_inv_ops = {
.get_phase = meson_sclk_ws_inv_get_phase,
.set_phase = meson_sclk_ws_inv_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index bc570a2ff3a3..1ea6579a760f 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -57,12 +57,13 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
struct meson_clk_pll_data *pll)
{
u64 rate = (u64)parent_rate * m;
+ unsigned int frac_max = pll->frac_max ? pll->frac_max :
+ (1 << pll->frac.width);
if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
u64 frac_rate = (u64)parent_rate * frac;
- rate += DIV_ROUND_UP_ULL(frac_rate,
- (1 << pll->frac.width));
+ rate += DIV_ROUND_UP_ULL(frac_rate, frac_max);
}
return DIV_ROUND_UP_ULL(rate, n);
@@ -100,7 +101,8 @@ static unsigned int __pll_params_with_frac(unsigned long rate,
unsigned int n,
struct meson_clk_pll_data *pll)
{
- unsigned int frac_max = (1 << pll->frac.width);
+ unsigned int frac_max = pll->frac_max ? pll->frac_max :
+ (1 << pll->frac.width);
u64 val = (u64)rate * n;
/* Bail out if we are already over the requested rate */
@@ -309,6 +311,11 @@ static int meson_clk_pll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/*
* Keep the clock running, which was already initialized and enabled
@@ -466,13 +473,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
* the other ops except set_rate since the rate is fixed.
*/
const struct clk_ops meson_clk_pcie_pll_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.determine_rate = meson_clk_pll_determine_rate,
.is_enabled = meson_clk_pll_is_enabled,
.enable = meson_clk_pcie_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
@@ -483,16 +491,17 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic PLL driver");
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
index 7b6b87274073..949157fb7bf5 100644
--- a/drivers/clk/meson/clk-pll.h
+++ b/drivers/clk/meson/clk-pll.h
@@ -43,6 +43,7 @@ struct meson_clk_pll_data {
unsigned int init_count;
const struct pll_params_table *table;
const struct pll_mult_range *range;
+ unsigned int frac_max;
u8 flags;
};
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index 07f7e441b916..1ed56fe63cae 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,9 +4,52 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include "clk-regmap.h"
+int clk_regmap_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct device_node *np, *parent_np;
+ struct device *dev;
+
+	/* Allow the regmap to be preset, as was historically done */
+ if (clk->map)
+ return 0;
+
+ /*
+ * FIXME: what follows couples the controller implementation
+	 * and the clk_regmap clock type. This coupling is undesirable
+	 * but temporary, until the controller can register a hook
+	 * to initialize a clock type.
+ */
+
+ /* Check the usual dev-enabled controller with a basic IO regmap */
+ dev = clk_hw_get_dev(hw);
+ if (dev) {
+ clk->map = dev_get_regmap(dev, NULL);
+ if (clk->map)
+ return 0;
+ }
+
+ /* Move on to early and syscon-based controllers */
+ np = clk_hw_get_of_node(hw);
+ if (np) {
+ parent_np = of_get_parent(np);
+ clk->map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+
+ if (!IS_ERR_OR_NULL(clk->map))
+ return 0;
+ }
+
+ /* Bail out if regmap can't be found */
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(clk_regmap_init, "CLK_MESON");
+
static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_regmap *clk = to_clk_regmap(hw);
@@ -45,16 +88,18 @@ static int clk_regmap_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops clk_regmap_gate_ops = {
+ .init = clk_regmap_init,
.enable = clk_regmap_gate_enable,
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, "CLK_MESON");
const struct clk_ops clk_regmap_gate_ro_ops = {
+ .init = clk_regmap_init,
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, "CLK_MESON");
static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
@@ -121,17 +166,19 @@ static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
/* Would prefer clk_regmap_div_ro_ops but clashes with qcom */
const struct clk_ops clk_regmap_divider_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, "CLK_MESON");
const struct clk_ops clk_regmap_divider_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, "CLK_MESON");
static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
{
@@ -170,18 +217,20 @@ static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_mux_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, "CLK_MESON");
const struct clk_ops clk_regmap_mux_ro_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
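
To illustrate what the new clk_regmap_init() hook buys on the controller side, here is a hedged sketch of a platform driver that only registers an MMIO regmap on its device and lets the .init op find it lazily; every example_* name is hypothetical and not part of this series:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include "clk-regmap.h"

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static struct clk_regmap example_gate = {
	/* .map deliberately left unset: clk_regmap_init() fills it */
	.data = &(struct clk_regmap_gate_data){
		.offset = 0x0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data){
		.name = "example_gate",
		/* clk_regmap_gate_ops now carries .init = clk_regmap_init */
		.ops = &clk_regmap_gate_ops,
		.parent_data = &(const struct clk_parent_data){
			.fw_name = "xtal",
		},
		.num_parents = 1,
	},
};

static int example_probe(struct platform_device *pdev)
{
	struct regmap *map;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Registering the regmap on the device is enough ... */
	map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... clk_regmap_init() will find it via dev_get_regmap() */
	return devm_clk_hw_register(&pdev->dev, &example_gate.hw);
}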
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e365312da54e..8e5c39b023e1 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -7,6 +7,7 @@
#ifndef __CLK_REGMAP_H
#define __CLK_REGMAP_H
+#include <linux/device.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
@@ -31,6 +32,9 @@ static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
return container_of(hw, struct clk_regmap, hw);
}
+/* clk_regmap init op to get and cache regmap from the controllers */
+int clk_regmap_init(struct clk_hw *hw);
+
/**
* struct clk_regmap_gate_data - regmap backed gate specific data
*
@@ -114,24 +118,4 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
-#define __MESON_PCLK(_name, _reg, _bit, _ops, _pname) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_hws = (const struct clk_hw *[]) { _pname }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-}
-
-#define MESON_PCLK(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pname)
-
-#define MESON_PCLK_RO(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pname)
#endif /* __CLK_REGMAP_H */
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index f0a18d8c9fc2..96981da271fa 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -37,46 +37,38 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
+static const struct clk_parent_data g12a_ao_pclk_parents = { .fw_name = "mpeg-clk" };
+
+#define G12A_AO_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(g12a_ao_##_name, _reg, _bit, &g12a_ao_pclk_parents, _flags)
+
/*
- * Like every other peripheral clock gate in Amlogic Clock drivers,
- * we are using CLK_IGNORE_UNUSED here, so we keep the state of the
- * bootloader. The goal is to remove this flag at some point.
- * Actually removing it will require some extensive test to be done safely.
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
*/
-#define AXG_AO_GATE(_name, _reg, _bit) \
-static struct clk_regmap g12a_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "g12a_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static G12A_AO_PCLK(ahb, AO_CLK_GATE0, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_in, AO_CLK_GATE0, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_m0, AO_CLK_GATE0, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_s0, AO_CLK_GATE0, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart, AO_CLK_GATE0, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(prod_i2c, AO_CLK_GATE0, 5, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart2, AO_CLK_GATE0, 6, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_out, AO_CLK_GATE0, 7, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(saradc, AO_CLK_GATE0, 8, CLK_IGNORE_UNUSED);
-AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
-AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
-AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
-AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
-AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
-AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
-AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
-AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
-AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
-AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
-AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
-AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
-AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
-AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
-AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+static G12A_AO_PCLK(mailbox, AO_CLK_GATE0_SP, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m3, AO_CLK_GATE0_SP, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ahb_sram, AO_CLK_GATE0_SP, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(rti, AO_CLK_GATE0_SP, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_fclk, AO_CLK_GATE0_SP, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_hclk, AO_CLK_GATE0_SP, 5, CLK_IGNORE_UNUSED);
-static struct clk_regmap g12a_aoclk_cts_oscin = {
+static struct clk_regmap g12a_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -103,22 +95,22 @@ static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
/* 32k_by_oscin clock */
-static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+static struct clk_regmap g12a_ao_32k_by_oscin_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_pre",
+ .name = "ao_32k_by_oscin_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+static struct clk_regmap g12a_ao_32k_by_oscin_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -148,16 +140,16 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_div",
+ .name = "ao_32k_by_oscin_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_pre.hw
+ &g12a_ao_32k_by_oscin_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+static struct clk_regmap g12a_ao_32k_by_oscin_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -165,27 +157,27 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_sel",
+ .name = "ao_32k_by_oscin_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_div.hw,
- &g12a_aoclk_32k_by_oscin_pre.hw,
+ &g12a_ao_32k_by_oscin_div.hw,
+ &g12a_ao_32k_by_oscin_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+static struct clk_regmap g12a_ao_32k_by_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin",
+ .name = "ao_32k_by_oscin",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_sel.hw
+ &g12a_ao_32k_by_oscin_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -194,22 +186,22 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin = {
/* cec clock */
-static struct clk_regmap g12a_aoclk_cec_pre = {
+static struct clk_regmap g12a_ao_cec_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_pre",
+ .name = "ao_cec_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_div = {
+static struct clk_regmap g12a_ao_cec_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_CEC_CLK_CNTL_REG0,
@@ -239,16 +231,16 @@ static struct clk_regmap g12a_aoclk_cec_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_div",
+ .name = "ao_cec_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_pre.hw
+ &g12a_ao_cec_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_sel = {
+static struct clk_regmap g12a_ao_cec_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CEC_CLK_CNTL_REG1,
.mask = 0x1,
@@ -256,34 +248,34 @@ static struct clk_regmap g12a_aoclk_cec_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_sel",
+ .name = "ao_cec_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_div.hw,
- &g12a_aoclk_cec_pre.hw,
+ &g12a_ao_cec_div.hw,
+ &g12a_ao_cec_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cec = {
+static struct clk_regmap g12a_ao_cec = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec",
+ .name = "ao_cec",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_sel.hw
+ &g12a_ao_cec_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+static struct clk_regmap g12a_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -291,10 +283,10 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cts_rtc_oscin",
+ .name = "ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &g12a_aoclk_32k_by_oscin.hw },
+ { .hw = &g12a_ao_32k_by_oscin.hw },
{ .fw_name = "ext-32k-0", },
},
.num_parents = 2,
@@ -302,7 +294,7 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap g12a_aoclk_clk81 = {
+static struct clk_regmap g12a_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -310,68 +302,74 @@ static struct clk_regmap g12a_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver
+ * can request directly by its global name. It's wrong, but
+ * there is not much we can do about it until support
+ * for the old pwm bindings is dropped.
+ */
.name = "g12a_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &g12a_aoclk_cts_rtc_oscin.hw },
+ { .hw = &g12a_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_mux = {
+static struct clk_regmap g12a_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_aoclk_clk81.hw },
+ { .hw = &g12a_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap g12a_aoclk_saradc_div = {
+static struct clk_regmap g12a_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_mux.hw
+ &g12a_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_gate = {
+static struct clk_regmap g12a_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_div.hw
+ &g12a_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int g12a_aoclk_reset[] = {
+static const unsigned int g12a_ao_reset[] = {
[RESET_AO_IR_IN] = 16,
[RESET_AO_UART] = 17,
[RESET_AO_I2C_M] = 18,
@@ -381,100 +379,68 @@ static const unsigned int g12a_aoclk_reset[] = {
[RESET_AO_IR_OUT] = 23,
};
-static struct clk_regmap *g12a_aoclk_regmap[] = {
- &g12a_aoclk_ahb,
- &g12a_aoclk_ir_in,
- &g12a_aoclk_i2c_m0,
- &g12a_aoclk_i2c_s0,
- &g12a_aoclk_uart,
- &g12a_aoclk_prod_i2c,
- &g12a_aoclk_uart2,
- &g12a_aoclk_ir_out,
- &g12a_aoclk_saradc,
- &g12a_aoclk_mailbox,
- &g12a_aoclk_m3,
- &g12a_aoclk_ahb_sram,
- &g12a_aoclk_rti,
- &g12a_aoclk_m4_fclk,
- &g12a_aoclk_m4_hclk,
- &g12a_aoclk_cts_oscin,
- &g12a_aoclk_32k_by_oscin_pre,
- &g12a_aoclk_32k_by_oscin_div,
- &g12a_aoclk_32k_by_oscin_sel,
- &g12a_aoclk_32k_by_oscin,
- &g12a_aoclk_cec_pre,
- &g12a_aoclk_cec_div,
- &g12a_aoclk_cec_sel,
- &g12a_aoclk_cec,
- &g12a_aoclk_cts_rtc_oscin,
- &g12a_aoclk_clk81,
- &g12a_aoclk_saradc_mux,
- &g12a_aoclk_saradc_div,
- &g12a_aoclk_saradc_gate,
+static struct clk_hw *g12a_ao_hw_clks[] = {
+ [CLKID_AO_AHB] = &g12a_ao_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_ao_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_ao_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_ao_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_ao_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_ao_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_ao_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_ao_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_ao_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_ao_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_ao_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_ao_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_ao_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_ao_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_ao_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_ao_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_ao_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_ao_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_ao_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_ao_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_ao_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_ao_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_ao_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_ao_cts_rtc_oscin.hw,
};
-static struct clk_hw *g12a_aoclk_hw_clks[] = {
- [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
- [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
- [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
- [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
- [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
- [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
- [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
- [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
- [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
- [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
- [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
- [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
- [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
- [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
- [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
- [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
- [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
- [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
- [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
- [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
- [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
- [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
- [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
-};
-
-static const struct meson_aoclk_data g12a_aoclkc_data = {
+static const struct meson_aoclk_data g12a_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
- .reset = g12a_aoclk_reset,
- .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
- .clks = g12a_aoclk_regmap,
- .hw_clks = {
- .hws = g12a_aoclk_hw_clks,
- .num = ARRAY_SIZE(g12a_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(g12a_ao_reset),
+ .reset = g12a_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = g12a_ao_hw_clks,
+ .num = ARRAY_SIZE(g12a_ao_hw_clks),
+ },
},
};
-static const struct of_device_id g12a_aoclkc_match_table[] = {
+static const struct of_device_id g12a_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-g12a-aoclkc",
- .data = &g12a_aoclkc_data,
+ .data = &g12a_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_ao_clkc_match_table);
-static struct platform_driver g12a_aoclkc_driver = {
+static struct platform_driver g12a_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "g12a-aoclkc",
- .of_match_table = g12a_aoclkc_match_table,
+ .of_match_table = g12a_ao_clkc_match_table,
},
};
-module_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
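
For readers tracking the macro conversion: assuming the relocated MESON_PCLK() helper keeps the shape of the __MESON_PCLK() definition removed from clk-regmap.h above, but takes a clk_parent_data pointer and the flags as parameters (its new definition sits outside this diff), G12A_AO_PCLK(ahb, AO_CLK_GATE0, 0, CLK_IGNORE_UNUSED), together with the leading static at the use site, would expand to roughly:

static struct clk_regmap g12a_ao_ahb = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AO_CLK_GATE0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data){
		.name = "g12a_ao_ahb",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &g12a_ao_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};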
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 02dda57105b1..185b6348251d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -23,12 +23,119 @@
#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
#include "vclk.h"
-#include "meson-eeclk.h"
-#include "g12a.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/g12a-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
+#define HHI_MIPI_CNTL0 0x000
+#define HHI_MIPI_CNTL1 0x004
+#define HHI_MIPI_CNTL2 0x008
+#define HHI_MIPI_STS 0x00c
+#define HHI_GP0_PLL_CNTL0 0x040
+#define HHI_GP0_PLL_CNTL1 0x044
+#define HHI_GP0_PLL_CNTL2 0x048
+#define HHI_GP0_PLL_CNTL3 0x04c
+#define HHI_GP0_PLL_CNTL4 0x050
+#define HHI_GP0_PLL_CNTL5 0x054
+#define HHI_GP0_PLL_CNTL6 0x058
+#define HHI_GP0_PLL_STS 0x05c
+#define HHI_GP1_PLL_CNTL0 0x060
+#define HHI_GP1_PLL_CNTL1 0x064
+#define HHI_GP1_PLL_CNTL2 0x068
+#define HHI_GP1_PLL_CNTL3 0x06c
+#define HHI_GP1_PLL_CNTL4 0x070
+#define HHI_GP1_PLL_CNTL5 0x074
+#define HHI_GP1_PLL_CNTL6 0x078
+#define HHI_GP1_PLL_STS 0x07c
+#define HHI_PCIE_PLL_CNTL0 0x098
+#define HHI_PCIE_PLL_CNTL1 0x09c
+#define HHI_PCIE_PLL_CNTL2 0x0a0
+#define HHI_PCIE_PLL_CNTL3 0x0a4
+#define HHI_PCIE_PLL_CNTL4 0x0a8
+#define HHI_PCIE_PLL_CNTL5 0x0ac
+#define HHI_PCIE_PLL_STS 0x0b8
+#define HHI_HIFI_PLL_CNTL0 0x0d8
+#define HHI_HIFI_PLL_CNTL1 0x0dc
+#define HHI_HIFI_PLL_CNTL2 0x0e0
+#define HHI_HIFI_PLL_CNTL3 0x0e4
+#define HHI_HIFI_PLL_CNTL4 0x0e8
+#define HHI_HIFI_PLL_CNTL5 0x0ec
+#define HHI_HIFI_PLL_CNTL6 0x0f0
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_OTHER2 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLKC_CNTL 0x1b4
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_ISP_CLK_CNTL 0x1c0
+#define HHI_NNA_CLK_CNTL 0x1c8
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+#define HHI_SYS_CPUB_CLK_CNTL1 0x200
+#define HHI_SYS_CPUB_CLK_CNTL 0x208
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_SYS_CPU_CLK_CNTL2 0x210
+#define HHI_SYS_CPU_CLK_CNTL3 0x214
+#define HHI_SYS_CPU_CLK_CNTL4 0x218
+#define HHI_SYS_CPU_CLK_CNTL5 0x21c
+#define HHI_SYS_CPU_CLK_CNTL6 0x220
+#define HHI_GEN_CLK_CNTL 0x228
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+#define HHI_MPLL_CNTL0 0x278
+#define HHI_MPLL_CNTL1 0x27c
+#define HHI_MPLL_CNTL2 0x280
+#define HHI_MPLL_CNTL3 0x284
+#define HHI_MPLL_CNTL4 0x288
+#define HHI_MPLL_CNTL5 0x28c
+#define HHI_MPLL_CNTL6 0x290
+#define HHI_MPLL_CNTL7 0x294
+#define HHI_MPLL_CNTL8 0x298
+#define HHI_FIX_PLL_CNTL0 0x2a0
+#define HHI_FIX_PLL_CNTL1 0x2a4
+#define HHI_FIX_PLL_CNTL3 0x2ac
+#define HHI_SYS_PLL_CNTL0 0x2f4
+#define HHI_SYS_PLL_CNTL1 0x2f8
+#define HHI_SYS_PLL_CNTL2 0x2fc
+#define HHI_SYS_PLL_CNTL3 0x300
+#define HHI_SYS_PLL_CNTL4 0x304
+#define HHI_SYS_PLL_CNTL5 0x308
+#define HHI_SYS_PLL_CNTL6 0x30c
+#define HHI_HDMI_PLL_CNTL0 0x320
+#define HHI_HDMI_PLL_CNTL1 0x324
+#define HHI_HDMI_PLL_CNTL2 0x328
+#define HHI_HDMI_PLL_CNTL3 0x32c
+#define HHI_HDMI_PLL_CNTL4 0x330
+#define HHI_HDMI_PLL_CNTL5 0x334
+#define HHI_HDMI_PLL_CNTL6 0x338
+#define HHI_SPICC_CLK_CNTL 0x3dc
+#define HHI_SYS1_PLL_CNTL0 0x380
+#define HHI_SYS1_PLL_CNTL1 0x384
+#define HHI_SYS1_PLL_CNTL2 0x388
+#define HHI_SYS1_PLL_CNTL3 0x38c
+#define HHI_SYS1_PLL_CNTL4 0x390
+#define HHI_SYS1_PLL_CNTL5 0x394
+#define HHI_SYS1_PLL_CNTL6 0x398
static struct clk_regmap g12a_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
@@ -279,6 +386,451 @@ static struct clk_fixed_factor g12b_sys1_pll_div16 = {
},
};
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 125,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_pll_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_gp0_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP1_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /* This clock feeds the DSU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP1_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_gp1_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_pll_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_pll_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hifi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single-entry table for the recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco_div2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /*
+ * The display driver directly handles the hdmi pll registers ATM; we
+ * need NOCACHE to keep our view of the clock as accurate as possible.
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_fixed_factor g12a_fclk_div2_div = {
.mult = 1,
.div = 2,
@@ -352,36 +904,166 @@ static struct clk_regmap g12a_fclk_div3 = {
},
};
-/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12a_cpu_clk_premux0 = {
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div4_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div7_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2p5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
.data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .mask = 0x3,
- .shift = 0,
- .flags = CLK_MUX_ROUND_CLOSEST,
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ops,
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_mpll_50m_div.hw },
},
- .num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 2,
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12a_cpu_clk_premux1 = {
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
- .shift = 16,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn1_sel",
+ .name = "cpu_clk_dyn0_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -389,13 +1071,12 @@ static struct clk_regmap g12a_cpu_clk_premux1 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
- /* This sub-tree is used a parking clock */
- .flags = CLK_SET_RATE_NO_REPARENT
+ .flags = CLK_SET_RATE_PARENT,
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux0_div = {
+static struct clk_regmap g12a_cpu_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPU_CLK_CNTL0,
@@ -412,7 +1093,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
.name = "cpu_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw
+ &g12a_cpu_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -420,7 +1101,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12a_cpu_clk_postmux0 = {
+static struct clk_regmap g12a_cpu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -431,16 +1112,37 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
.name = "cpu_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw,
- &g12a_cpu_clk_mux0_div.hw,
+ &g12a_cpu_clk_dyn0_sel.hw,
+ &g12a_cpu_clk_dyn0_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
+ .num_parents = 3,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux1_div = {
+static struct clk_regmap g12a_cpu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.shift = 20,
@@ -450,14 +1152,14 @@ static struct clk_regmap g12a_cpu_clk_mux1_div = {
.name = "cpu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw
+ &g12a_cpu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12a_cpu_clk_postmux1 = {
+static struct clk_regmap g12a_cpu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -467,8 +1169,8 @@ static struct clk_regmap g12a_cpu_clk_postmux1 = {
.name = "cpu_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw,
- &g12a_cpu_clk_mux1_div.hw,
+ &g12a_cpu_clk_dyn1_sel.hw,
+ &g12a_cpu_clk_dyn1_div.hw,
},
.num_parents = 2,
/* This sub-tree is used as a parking clock */
@@ -488,8 +1190,8 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
.name = "cpu_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_postmux0.hw,
- &g12a_cpu_clk_postmux1.hw,
+ &g12a_cpu_clk_dyn0.hw,
+ &g12a_cpu_clk_dyn1.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -537,7 +1239,7 @@ static struct clk_regmap g12b_cpu_clk = {
};
/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12b_cpub_clk_premux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -558,7 +1260,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux0_div = {
+static struct clk_regmap g12b_cpub_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPUB_CLK_CNTL,
@@ -575,7 +1277,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
.name = "cpub_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw
+ &g12b_cpub_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -583,7 +1285,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12b_cpub_clk_postmux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -594,8 +1296,8 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
.name = "cpub_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw,
- &g12b_cpub_clk_mux0_div.hw
+ &g12b_cpub_clk_dyn0_sel.hw,
+ &g12b_cpub_clk_dyn0_div.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -603,7 +1305,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
};
/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12b_cpub_clk_premux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -624,7 +1326,7 @@ static struct clk_regmap g12b_cpub_clk_premux1 = {
};
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux1_div = {
+static struct clk_regmap g12b_cpub_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.shift = 20,
@@ -634,14 +1336,14 @@ static struct clk_regmap g12b_cpub_clk_mux1_div = {
.name = "cpub_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw
+ &g12b_cpub_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12b_cpub_clk_postmux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -651,8 +1353,8 @@ static struct clk_regmap g12b_cpub_clk_postmux1 = {
.name = "cpub_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw,
- &g12b_cpub_clk_mux1_div.hw
+ &g12b_cpub_clk_dyn1_sel.hw,
+ &g12b_cpub_clk_dyn1_div.hw
},
.num_parents = 2,
/* This sub-tree is used as a parking clock */
@@ -672,8 +1374,8 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
.name = "cpub_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_postmux0.hw,
- &g12b_cpub_clk_postmux1.hw
+ &g12b_cpub_clk_dyn0.hw,
+ &g12b_cpub_clk_dyn1.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -700,10 +1402,8 @@ static struct clk_regmap g12b_cpub_clk = {
},
};
-static struct clk_regmap sm1_gp1_pll;
-
/* Datasheet names this field as "premux0" */
-static struct clk_regmap sm1_dsu_clk_premux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x3,
@@ -722,28 +1422,8 @@ static struct clk_regmap sm1_dsu_clk_premux0 = {
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap sm1_dsu_clk_premux1 = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL5,
- .mask = 0x3,
- .shift = 16,
- },
- .hw.init = &(struct clk_init_data){
- .name = "dsu_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
- { .hw = &sm1_gp1_pll.hw },
- },
- .num_parents = 4,
- },
-};
-
/* Datasheet names this field as "Mux0_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux0_div = {
+static struct clk_regmap sm1_dsu_clk_dyn0_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 4,
@@ -753,14 +1433,14 @@ static struct clk_regmap sm1_dsu_clk_mux0_div = {
.name = "dsu_clk_dyn0_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw
+ &sm1_dsu_clk_dyn0_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap sm1_dsu_clk_postmux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -770,15 +1450,35 @@ static struct clk_regmap sm1_dsu_clk_postmux0 = {
.name = "dsu_clk_dyn0",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw,
- &sm1_dsu_clk_mux0_div.hw,
+ &sm1_dsu_clk_dyn0_sel.hw,
+ &sm1_dsu_clk_dyn0_div.hw,
},
.num_parents = 2,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap sm1_dsu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL5,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "dsu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &sm1_gp1_pll.hw },
+ },
+ .num_parents = 4,
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux1_div = {
+static struct clk_regmap sm1_dsu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 20,
@@ -788,14 +1488,14 @@ static struct clk_regmap sm1_dsu_clk_mux1_div = {
.name = "dsu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw
+ &sm1_dsu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap sm1_dsu_clk_postmux1 = {
+static struct clk_regmap sm1_dsu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -805,8 +1505,8 @@ static struct clk_regmap sm1_dsu_clk_postmux1 = {
.name = "dsu_clk_dyn1",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw,
- &sm1_dsu_clk_mux1_div.hw,
+ &sm1_dsu_clk_dyn1_sel.hw,
+ &sm1_dsu_clk_dyn1_div.hw,
},
.num_parents = 2,
},
@@ -823,8 +1523,8 @@ static struct clk_regmap sm1_dsu_clk_dyn = {
.name = "dsu_clk_dyn",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_postmux0.hw,
- &sm1_dsu_clk_postmux1.hw,
+ &sm1_dsu_clk_dyn0.hw,
+ &sm1_dsu_clk_dyn1.hw,
},
.num_parents = 2,
},
@@ -936,7 +1636,7 @@ static struct notifier_block g12a_cpu_clk_mux_nb = {
.notifier_call = g12a_cpu_clk_mux_notifier_cb,
};
-struct g12a_cpu_clk_postmux_nb_data {
+struct g12a_cpu_clk_dyn_nb_data {
struct notifier_block nb;
struct clk_hw *xtal;
struct clk_hw *cpu_clk_dyn;
@@ -945,33 +1645,33 @@ struct g12a_cpu_clk_postmux_nb_data {
struct clk_hw *cpu_clk_premux1;
};
-static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int g12a_cpu_clk_dyn_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- struct g12a_cpu_clk_postmux_nb_data *nb_data =
- container_of(nb, struct g12a_cpu_clk_postmux_nb_data, nb);
+ struct g12a_cpu_clk_dyn_nb_data *nb_data =
+ container_of(nb, struct g12a_cpu_clk_dyn_nb_data, nb);
switch (event) {
case PRE_RATE_CHANGE:
/*
- * This notifier means cpu_clk_postmux0 clock will be changed
+ * This notifier means the cpu_clk_dyn0 clock will be changed
* to feed cpu_clk, this is the current path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
- /* Setup cpu_clk_premux1 to xtal */
+ /* Setup cpu_clk_dyn1_sel to xtal */
clk_hw_set_parent(nb_data->cpu_clk_premux1,
nb_data->xtal);
- /* Setup cpu_clk_postmux1 to bypass divider */
+ /* Setup cpu_clk_dyn1 to bypass divider */
clk_hw_set_parent(nb_data->cpu_clk_postmux1,
nb_data->cpu_clk_premux1);
@@ -983,8 +1683,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* Now, cpu_clk is 24MHz in the current path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux1
- * \- cpu_clk_premux1
+ * \- cpu_clk_dyn1
+ * \- cpu_clk_dyn1_sel
* \- xtal
*/
@@ -994,8 +1694,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
case POST_RATE_CHANGE:
/*
- * The cpu_clk_postmux0 has ben updated, now switch back
- * cpu_clk_dyn to cpu_clk_postmux0 and take the changes
+ * The cpu_clk_dyn0 has been updated; now switch cpu_clk_dyn back
+ * to cpu_clk_dyn0 and take the changes
 * into account.
*/
@@ -1007,12 +1707,12 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* new path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
@@ -1025,20 +1725,20 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
}
}
-static struct g12a_cpu_clk_postmux_nb_data g12a_cpu_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12a_cpu_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12a_cpu_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12a_cpu_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12a_cpu_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12a_cpu_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12a_cpu_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12a_cpu_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
-static struct g12a_cpu_clk_postmux_nb_data g12b_cpub_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12b_cpub_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12b_cpub_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12b_cpub_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12b_cpub_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12b_cpub_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12b_cpub_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12b_cpub_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
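
As a side note on wiring: the parking dance described in the comments above runs from a common clk rate-change notifier, so the nb_data blocks end up attached to the dyn0 mux clock. Below is a hedged sketch of the registration call using the generic clk API (example_register_nb is an illustrative name, not from this file):

#include <linux/clk.h>
#include <linux/clk-provider.h>

static int example_register_nb(struct clk_hw *dyn0_hw)
{
	/*
	 * Fire g12a_cpu_clk_dyn_notifier_cb() around rate changes of
	 * cpu_clk_dyn0; assumes dyn0_hw has already been registered
	 * so that dyn0_hw->clk is valid.
	 */
	return clk_notifier_register(dyn0_hw->clk,
				     &g12a_cpu_clk_dyn0_nb_data.nb);
}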
struct g12a_sys_pll_nb_data {
@@ -1139,8 +1839,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk.hw
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * G12A and G12B have different cpu clocks (with
+ * different struct clk_hw). We fall back to the global
+ * naming string mechanism so this clock picks
+ * up the appropriate one. The same goes for the other
+ * clocks using the cpu cluster A clock output and present
+ * on both G12 variants.
+ */
+ .name = "cpu_clk",
+ .index = -1,
},
.num_parents = 1,
/*
@@ -1205,7 +1915,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1239,7 +1952,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1273,7 +1989,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1308,13 +2027,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_data = &(const struct clk_parent_data) {
- /*
- * Note:
- * G12A and G12B have different cpu_clks (with
- * different struct clk_hw). We fallback to the global
- * naming string mechanism so cpu_clk_trace_div picks
- * up the appropriate one.
- */
.name = "cpu_clk",
.index = -1,
},
@@ -1432,27 +2144,29 @@ static struct clk_fixed_factor g12b_cpub_clk_div8 = {
},
};
-static u32 mux_table_cpub[] = { 1, 2, 3, 4, 5, 6, 7 };
+static u32 g12b_cpub_clk_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *g12b_cpub_clk_if_parents[] = {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw,
+};
+
static struct clk_regmap g12b_cpub_clk_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 3,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_apb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1481,21 +2195,13 @@ static struct clk_regmap g12b_cpub_clk_atb_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 6,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_atb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1524,21 +2230,13 @@ static struct clk_regmap g12b_cpub_clk_axi_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 9,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_axi_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1567,21 +2265,13 @@ static struct clk_regmap g12b_cpub_clk_trace_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 20,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_trace_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1605,600 +2295,6 @@ static struct clk_regmap g12b_cpub_clk_trace = {
},
};
-static const struct pll_mult_range g12a_gp0_pll_mult_range = {
- .min = 125,
- .max = 255,
-};
-
-/*
- * Internal gp0 pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_gp0_init_regs[] = {
- { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
- { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
- { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_gp0_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP0_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_gp0_init_regs,
- .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_gp0_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP0_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_gp0_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP1_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /* This clock feeds the DSU, avoid disabling it */
- .flags = CLK_IS_CRITICAL,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP1_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sm1_gp1_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-/*
- * Internal hifi pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_hifi_init_regs[] = {
- { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
- { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
- { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_hifi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HIFI_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_hifi_init_regs,
- .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
- .flags = CLK_MESON_PLL_ROUND_CLOSEST,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_hifi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HIFI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hifi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-/*
- * The Meson G12A PCIE PLL is fined tuned to deliver a very precise
- * 100MHz reference clock for the PCIe Analog PHY, and thus requires
- * a strict register sequence to enable the PLL.
- */
-static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
- { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
- { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
-};
-
-/* Keep a single entry table for recalc/round_rate() ops */
-static const struct pll_params_table g12a_pcie_pll_table[] = {
- PLL_PARAMS(150, 1),
- {0, 0},
-};
-
-static struct clk_regmap g12a_pcie_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_PCIE_PLL_CNTL1,
- .shift = 0,
- .width = 12,
- },
- .l = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .table = g12a_pcie_pll_table,
- .init_regs = g12a_pcie_pll_init_regs,
- .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco",
- .ops = &meson_clk_pcie_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco_div2",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_pcie_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_PCIE_PLL_CNTL0,
- .shift = 16,
- .width = 5,
- .flags = CLK_DIVIDER_ROUND_CLOSEST |
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_od",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco_div2.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_pll",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL1,
- .shift = 0,
- .width = 16,
- },
- .l = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 30,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /*
- * Display directly handle hdmi pll registers ATM, we need
- * NOCACHE to keep our view of the clock as accurate as possible
- */
- .flags = CLK_GET_RATE_NOCACHE,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od2 = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 18,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od2",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 20,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od2.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div4_div = {
- .mult = 1,
- .div = 4,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div4 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 21,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div4_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div7_div = {
- .mult = 1,
- .div = 7,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div7 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div7_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div2p5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div2p5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div2p5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_50m_div = {
- .mult = 1,
- .div = 80,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_mpll_50m = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_FIX_PLL_CNTL3,
- .mask = 0x1,
- .shift = 5,
- },
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_mpll_50m_div.hw },
- },
- .num_parents = 2,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_prediv = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_prediv",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
static const struct reg_sequence g12a_mpll0_init_regs[] = {
{ .reg = HHI_MPLL_CNTL2, .def = 0x40000033 },
};
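
The init_regs pattern used here (and by the PLLs above) leans on regmap's
reg_sequence support: the whole sequence, including any per-entry delay_us,
is applied in one call when the clock hardware is first initialised. A
sketch of that call, illustration only:

	/* Illustration only: apply an init sequence through regmap */
	static int example_apply_init_regs(struct regmap *map)
	{
		return regmap_multi_reg_write(map, g12a_mpll0_init_regs,
					      ARRAY_SIZE(g12a_mpll0_init_regs));
	}
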
@@ -2225,7 +2321,6 @@ static struct clk_regmap g12a_mpll0_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll0_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll0_init_regs),
},
@@ -2279,7 +2374,6 @@ static struct clk_regmap g12a_mpll1_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll1_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll1_init_regs),
},
@@ -2333,7 +2427,6 @@ static struct clk_regmap g12a_mpll2_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll2_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll2_init_regs),
},
@@ -2387,7 +2480,6 @@ static struct clk_regmap g12a_mpll3_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll3_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll3_init_regs),
},
@@ -2415,8 +2507,9 @@ static struct clk_regmap g12a_mpll3 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 g12a_clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data g12a_clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div7.hw },
{ .hw = &g12a_mpll1.hw },
@@ -2426,32 +2519,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &g12a_fclk_div5.hw },
};
-static struct clk_regmap g12a_mpeg_clk_sel = {
+static struct clk_regmap g12a_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = g12a_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = g12a_clk81_parents,
+ .num_parents = ARRAY_SIZE(g12a_clk81_parents),
},
};
-static struct clk_regmap g12a_mpeg_clk_div = {
+static struct clk_regmap g12a_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_sel.hw
+ &g12a_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2467,14 +2560,14 @@ static struct clk_regmap g12a_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_div.hw
+ &g12a_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data g12a_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -2483,7 +2576,7 @@ static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* g12a_ee_core or fclk_div clocks
*/
};
@@ -2498,8 +2591,8 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2547,8 +2640,8 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2596,8 +2689,8 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2659,7 +2752,7 @@ static struct clk_regmap g12a_vid_pll_div = {
},
};
-static const struct clk_hw *g12a_vid_pll_parent_hws[] = {
+static const struct clk_hw *g12a_vid_pll_parents[] = {
&g12a_vid_pll_div.hw,
&g12a_hdmi_pll.hw,
};
@@ -2677,8 +2770,8 @@ static struct clk_regmap g12a_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_hws = g12a_vid_pll_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_hws),
+ .parent_hws = g12a_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2701,7 +2794,7 @@ static struct clk_regmap g12a_vid_pll = {
/* VPU Clock */
-static const struct clk_hw *g12a_vpu_parent_hws[] = {
+static const struct clk_hw *g12a_vpu_parents[] = {
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
&g12a_fclk_div5.hw,
@@ -2721,8 +2814,8 @@ static struct clk_regmap g12a_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2765,8 +2858,8 @@ static struct clk_regmap g12a_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2824,7 +2917,7 @@ static struct clk_regmap g12a_vpu = {
/* VDEC clocks */
-static const struct clk_hw *g12a_vdec_parent_hws[] = {
+static const struct clk_hw *g12a_vdec_parents[] = {
&g12a_fclk_div2p5.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
@@ -2844,8 +2937,8 @@ static struct clk_regmap g12a_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2894,8 +2987,8 @@ static struct clk_regmap g12a_vdec_hevcf_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2944,8 +3037,8 @@ static struct clk_regmap g12a_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2986,7 +3079,7 @@ static struct clk_regmap g12a_vdec_hevc = {
/* VAPB Clock */
-static const struct clk_hw *g12a_vapb_parent_hws[] = {
+static const struct clk_hw *g12a_vapb_parents[] = {
&g12a_fclk_div4.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div5.hw,
@@ -3006,8 +3099,8 @@ static struct clk_regmap g12a_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3054,8 +3147,8 @@ static struct clk_regmap g12a_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3129,7 +3222,7 @@ static struct clk_regmap g12a_vapb = {
},
};
-static const struct clk_hw *g12a_vclk_parent_hws[] = {
+static const struct clk_hw *g12a_vclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3149,8 +3242,8 @@ static struct clk_regmap g12a_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3164,8 +3257,8 @@ static struct clk_regmap g12a_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3528,8 +3621,8 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_parent_hws[] = {
+static u32 g12a_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_cts_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3547,13 +3640,13 @@ static struct clk_regmap g12a_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3563,13 +3656,13 @@ static struct clk_regmap g12a_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3579,13 +3672,13 @@ static struct clk_regmap g12a_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3595,20 +3688,20 @@ static struct clk_regmap g12a_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_hdmi_tx_parent_hws[] = {
+static u32 g12a_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_hdmi_tx_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3626,13 +3719,13 @@ static struct clk_regmap g12a_hdmi_tx_sel = {
.offset = HHI_HDMI_CLK_CNTL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = g12a_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_hws),
+ .parent_hws = g12a_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_tx_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3719,7 +3812,7 @@ static struct clk_regmap g12a_hdmi_tx = {
/* MIPI DSI Host Clocks */
-static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3740,15 +3833,15 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_dsi_pxclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .parent_hws = g12a_mipi_dsi_pxclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
},
};
/*
- * FIXME: Force as bypass by forcing a single /1 table entry, and doensn't on boot value
- * when setting a clock whith this node in the clock path, but doesn't garantee the divider
+ * FIXME: Forced as a bypass by using a single /1 table entry; this doesn't rely on the boot
+ * value when setting a clock with this node in the clock path, but doesn't guarantee the divider
* is at /1 at boot until a rate is set.
*/
static const struct clk_div_table g12a_mipi_dsi_pxclk_div_table[] = {
@@ -3792,7 +3885,7 @@ static struct clk_regmap g12a_mipi_dsi_pxclk = {
/* MIPI ISP Clocks */
-static const struct clk_parent_data g12b_mipi_isp_parent_data[] = {
+static const struct clk_parent_data g12b_mipi_isp_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3812,8 +3905,8 @@ static struct clk_regmap g12b_mipi_isp_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_isp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12b_mipi_isp_parent_data,
- .num_parents = ARRAY_SIZE(g12b_mipi_isp_parent_data),
+ .parent_data = g12b_mipi_isp_parents,
+ .num_parents = ARRAY_SIZE(g12b_mipi_isp_parents),
},
};
@@ -3852,7 +3945,7 @@ static struct clk_regmap g12b_mipi_isp = {
/* HDMI Clocks */
-static const struct clk_parent_data g12a_hdmi_parent_data[] = {
+static const struct clk_parent_data g12a_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -3869,8 +3962,8 @@ static struct clk_regmap g12a_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(g12a_hdmi_parent_data),
+ .parent_data = g12a_hdmi_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3910,7 +4003,7 @@ static struct clk_regmap g12a_hdmi = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
+static const struct clk_parent_data g12a_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3930,8 +4023,8 @@ static struct clk_regmap g12a_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -3984,8 +4077,8 @@ static struct clk_regmap g12a_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4029,11 +4122,6 @@ static struct clk_regmap g12a_mali_1 = {
},
};
-static const struct clk_hw *g12a_mali_parent_hws[] = {
- &g12a_mali_0.hw,
- &g12a_mali_1.hw,
-};
-
static struct clk_regmap g12a_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -4043,7 +4131,10 @@ static struct clk_regmap g12a_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0.hw,
+ &g12a_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -4082,11 +4173,12 @@ static struct clk_regmap g12a_ts = {
/* SPICC SCLK source clock */
-static const struct clk_parent_data spicc_sclk_parent_data[] = {
+static const struct clk_parent_data g12a_spicc_sclk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};
@@ -4100,8 +4192,8 @@ static struct clk_regmap g12a_spicc0_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc0_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4147,8 +4239,8 @@ static struct clk_regmap g12a_spicc1_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc1_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4187,7 +4279,7 @@ static struct clk_regmap g12a_spicc1_sclk = {
/* Neural Network Accelerator source clock */
-static const struct clk_parent_data nna_clk_parent_data[] = {
+static const struct clk_parent_data sm1_nna_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw, },
{ .hw = &g12a_hifi_pll.hw, },
@@ -4207,8 +4299,8 @@ static struct clk_regmap sm1_nna_axi_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_axi_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4254,8 +4346,8 @@ static struct clk_regmap sm1_nna_core_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_core_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4292,89 +4384,101 @@ static struct clk_regmap sm1_nna_core_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
-
-#define MESON_GATE_RO(_name, _reg, _bit) \
- MESON_PCLK_RO(_name, _reg, _bit, &g12a_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
-static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
-static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
-static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
-
-static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
-static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
-static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
-
-static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
-static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17);
-static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(g12b_csi_phy1, HHI_GCLK_MPEG2, 28);
-static MESON_GATE(g12b_csi_phy0, HHI_GCLK_MPEG2, 29);
-static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
-
-static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
-static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
-static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
-static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
-static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
-static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
-static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
-static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
-
-static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
-static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
-static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
-static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
-static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+static const struct clk_parent_data g12a_pclk_parents = { .hw = &g12a_clk81.hw };
+
+#define G12A_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+#define G12A_PCLK_RO(_name, _reg, _bit, _flags) \
+ MESON_PCLK_RO(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary.
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_phy, HHI_GCLK_MPEG0, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sd, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_1, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_codec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_ififo, HHI_GCLK_MPEG1, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_adc, HHI_GCLK_MPEG1, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_comb, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_phy, HHI_GCLK_MPEG1, 27, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_bt656, HHI_GCLK_MPEG2, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy1, HHI_GCLK_MPEG2, 28, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy0, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other, HHI_GCLK_OTHER, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enci, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encp, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enct, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encl, HHI_GCLK_OTHER, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other1, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK_RO(g12a_dma, HHI_GCLK_OTHER2, 0, 0);
+static G12A_PCLK_RO(g12a_efuse, HHI_GCLK_OTHER2, 1, 0);
+static G12A_PCLK_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2, 0);
+static G12A_PCLK_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3, 0);
+static G12A_PCLK_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4, 0);
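
For readers unfamiliar with the helper, each G12A_PCLK() line above
declares a complete clk_regmap gate; a sketch of roughly what the first
gate expands to (the exact MESON_PCLK macro lives in the meson clk headers
and may differ in detail, e.g. in how the clock name is derived):

	/* Sketch only, assuming MESON_PCLK wraps a clk_regmap gate */
	static struct clk_regmap g12a_ddr = {
		.data = &(struct clk_regmap_gate_data){
			.offset = HHI_GCLK_MPEG0,
			.bit_idx = 0,
		},
		.hw.init = &(struct clk_init_data){
			.name = "g12a_ddr",	/* stringified first argument */
			.ops = &clk_regmap_gate_ops,
			.parent_data = &g12a_pclk_parents,
			.num_parents = 1,
			.flags = CLK_IGNORE_UNUSED,
		},
	};
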
/* Array of all clocks provided by this provider */
static struct clk_hw *g12a_hw_clks[] = {
@@ -4387,8 +4491,8 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4560,12 +4664,12 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4614,8 +4718,8 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4787,12 +4891,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12b_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4824,12 +4928,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_SYS1_PLL] = &g12b_sys1_pll.hw,
[CLKID_SYS1_PLL_DIV16_EN] = &g12b_sys1_pll_div16_en.hw,
[CLKID_SYS1_PLL_DIV16] = &g12b_sys1_pll_div16.hw,
- [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_premux0.hw,
- [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_mux0_div.hw,
- [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_postmux0.hw,
- [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_premux1.hw,
- [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_mux1_div.hw,
- [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_postmux1.hw,
+ [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_dyn0_sel.hw,
+ [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_dyn0_div.hw,
+ [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_dyn0.hw,
+ [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_dyn1_sel.hw,
+ [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_dyn1_div.hw,
+ [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_dyn1.hw,
[CLKID_CPUB_CLK_DYN] = &g12b_cpub_clk_dyn.hw,
[CLKID_CPUB_CLK] = &g12b_cpub_clk.hw,
[CLKID_CPUB_CLK_DIV16_EN] = &g12b_cpub_clk_div16_en.hw,
@@ -4882,8 +4986,8 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -5055,12 +5159,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -5090,12 +5194,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_TS] = &g12a_ts.hw,
[CLKID_GP1_PLL_DCO] = &sm1_gp1_pll_dco.hw,
[CLKID_GP1_PLL] = &sm1_gp1_pll.hw,
- [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_premux0.hw,
- [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_premux1.hw,
- [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_mux0_div.hw,
- [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_postmux0.hw,
- [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_mux1_div.hw,
- [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_postmux1.hw,
+ [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_dyn0_sel.hw,
+ [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_dyn0_div.hw,
+ [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_dyn0.hw,
+ [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_dyn1_sel.hw,
+ [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_dyn1_div.hw,
+ [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_dyn1.hw,
[CLKID_DSU_CLK_DYN] = &sm1_dsu_clk_dyn.hw,
[CLKID_DSU_CLK_FINAL] = &sm1_dsu_final_clk.hw,
[CLKID_DSU_CLK] = &sm1_dsu_clk.hw,
@@ -5119,269 +5223,13 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &g12a_clk81,
- &g12a_dos,
- &g12a_ddr,
- &g12a_audio_locker,
- &g12a_mipi_dsi_host,
- &g12a_eth_phy,
- &g12a_isa,
- &g12a_pl301,
- &g12a_periphs,
- &g12a_spicc_0,
- &g12a_i2c,
- &g12a_sana,
- &g12a_sd,
- &g12a_rng0,
- &g12a_uart0,
- &g12a_spicc_1,
- &g12a_hiu_reg,
- &g12a_mipi_dsi_phy,
- &g12a_assist_misc,
- &g12a_emmc_a,
- &g12a_emmc_b,
- &g12a_emmc_c,
- &g12a_audio_codec,
- &g12a_audio,
- &g12a_eth_core,
- &g12a_demux,
- &g12a_audio_ififo,
- &g12a_adc,
- &g12a_uart1,
- &g12a_g2d,
- &g12a_reset,
- &g12a_pcie_comb,
- &g12a_parser,
- &g12a_usb_general,
- &g12a_pcie_phy,
- &g12a_ahb_arb0,
- &g12a_ahb_data_bus,
- &g12a_ahb_ctrl_bus,
- &g12a_htx_hdcp22,
- &g12a_htx_pclk,
- &g12a_bt656,
- &g12a_usb1_to_ddr,
- &g12a_mmc_pclk,
- &g12a_uart2,
- &g12a_vpu_intr,
- &g12a_gic,
- &g12a_sd_emmc_a_clk0,
- &g12a_sd_emmc_b_clk0,
- &g12a_sd_emmc_c_clk0,
- &g12a_mpeg_clk_div,
- &g12a_sd_emmc_a_clk0_div,
- &g12a_sd_emmc_b_clk0_div,
- &g12a_sd_emmc_c_clk0_div,
- &g12a_mpeg_clk_sel,
- &g12a_sd_emmc_a_clk0_sel,
- &g12a_sd_emmc_b_clk0_sel,
- &g12a_sd_emmc_c_clk0_sel,
- &g12a_mpll0,
- &g12a_mpll1,
- &g12a_mpll2,
- &g12a_mpll3,
- &g12a_mpll0_div,
- &g12a_mpll1_div,
- &g12a_mpll2_div,
- &g12a_mpll3_div,
- &g12a_fixed_pll,
- &g12a_sys_pll,
- &g12a_gp0_pll,
- &g12a_hifi_pll,
- &g12a_vclk2_venci0,
- &g12a_vclk2_venci1,
- &g12a_vclk2_vencp0,
- &g12a_vclk2_vencp1,
- &g12a_vclk2_venct0,
- &g12a_vclk2_venct1,
- &g12a_vclk2_other,
- &g12a_vclk2_enci,
- &g12a_vclk2_encp,
- &g12a_dac_clk,
- &g12a_aoclk_gate,
- &g12a_iec958_gate,
- &g12a_enc480p,
- &g12a_rng1,
- &g12a_vclk2_enct,
- &g12a_vclk2_encl,
- &g12a_vclk2_venclmmc,
- &g12a_vclk2_vencl,
- &g12a_vclk2_other1,
- &g12a_fixed_pll_dco,
- &g12a_sys_pll_dco,
- &g12a_gp0_pll_dco,
- &g12a_hifi_pll_dco,
- &g12a_fclk_div2,
- &g12a_fclk_div3,
- &g12a_fclk_div4,
- &g12a_fclk_div5,
- &g12a_fclk_div7,
- &g12a_fclk_div2p5,
- &g12a_dma,
- &g12a_efuse,
- &g12a_rom_boot,
- &g12a_reset_sec,
- &g12a_sec_ahb_apb3,
- &g12a_vpu_0_sel,
- &g12a_vpu_0_div,
- &g12a_vpu_0,
- &g12a_vpu_1_sel,
- &g12a_vpu_1_div,
- &g12a_vpu_1,
- &g12a_vpu,
- &g12a_vapb_0_sel,
- &g12a_vapb_0_div,
- &g12a_vapb_0,
- &g12a_vapb_1_sel,
- &g12a_vapb_1_div,
- &g12a_vapb_1,
- &g12a_vapb_sel,
- &g12a_vapb,
- &g12a_hdmi_pll_dco,
- &g12a_hdmi_pll_od,
- &g12a_hdmi_pll_od2,
- &g12a_hdmi_pll,
- &g12a_vid_pll_div,
- &g12a_vid_pll_sel,
- &g12a_vid_pll,
- &g12a_vclk_sel,
- &g12a_vclk2_sel,
- &g12a_vclk_input,
- &g12a_vclk2_input,
- &g12a_vclk_div,
- &g12a_vclk2_div,
- &g12a_vclk,
- &g12a_vclk2,
- &g12a_vclk_div1,
- &g12a_vclk_div2_en,
- &g12a_vclk_div4_en,
- &g12a_vclk_div6_en,
- &g12a_vclk_div12_en,
- &g12a_vclk2_div1,
- &g12a_vclk2_div2_en,
- &g12a_vclk2_div4_en,
- &g12a_vclk2_div6_en,
- &g12a_vclk2_div12_en,
- &g12a_cts_enci_sel,
- &g12a_cts_encp_sel,
- &g12a_cts_encl_sel,
- &g12a_cts_vdac_sel,
- &g12a_hdmi_tx_sel,
- &g12a_cts_enci,
- &g12a_cts_encp,
- &g12a_cts_encl,
- &g12a_cts_vdac,
- &g12a_hdmi_tx,
- &g12a_hdmi_sel,
- &g12a_hdmi_div,
- &g12a_hdmi,
- &g12a_mali_0_sel,
- &g12a_mali_0_div,
- &g12a_mali_0,
- &g12a_mali_1_sel,
- &g12a_mali_1_div,
- &g12a_mali_1,
- &g12a_mali,
- &g12a_mpll_50m,
- &g12a_sys_pll_div16_en,
- &g12a_cpu_clk_premux0,
- &g12a_cpu_clk_mux0_div,
- &g12a_cpu_clk_postmux0,
- &g12a_cpu_clk_premux1,
- &g12a_cpu_clk_mux1_div,
- &g12a_cpu_clk_postmux1,
- &g12a_cpu_clk_dyn,
- &g12a_cpu_clk,
- &g12a_cpu_clk_div16_en,
- &g12a_cpu_clk_apb_div,
- &g12a_cpu_clk_apb,
- &g12a_cpu_clk_atb_div,
- &g12a_cpu_clk_atb,
- &g12a_cpu_clk_axi_div,
- &g12a_cpu_clk_axi,
- &g12a_cpu_clk_trace_div,
- &g12a_cpu_clk_trace,
- &g12a_pcie_pll_od,
- &g12a_pcie_pll_dco,
- &g12a_vdec_1_sel,
- &g12a_vdec_1_div,
- &g12a_vdec_1,
- &g12a_vdec_hevc_sel,
- &g12a_vdec_hevc_div,
- &g12a_vdec_hevc,
- &g12a_vdec_hevcf_sel,
- &g12a_vdec_hevcf_div,
- &g12a_vdec_hevcf,
- &g12a_ts_div,
- &g12a_ts,
- &g12b_cpu_clk,
- &g12b_sys1_pll_dco,
- &g12b_sys1_pll,
- &g12b_sys1_pll_div16_en,
- &g12b_cpub_clk_premux0,
- &g12b_cpub_clk_mux0_div,
- &g12b_cpub_clk_postmux0,
- &g12b_cpub_clk_premux1,
- &g12b_cpub_clk_mux1_div,
- &g12b_cpub_clk_postmux1,
- &g12b_cpub_clk_dyn,
- &g12b_cpub_clk,
- &g12b_cpub_clk_div16_en,
- &g12b_cpub_clk_apb_sel,
- &g12b_cpub_clk_apb,
- &g12b_cpub_clk_atb_sel,
- &g12b_cpub_clk_atb,
- &g12b_cpub_clk_axi_sel,
- &g12b_cpub_clk_axi,
- &g12b_cpub_clk_trace_sel,
- &g12b_cpub_clk_trace,
- &sm1_gp1_pll_dco,
- &sm1_gp1_pll,
- &sm1_dsu_clk_premux0,
- &sm1_dsu_clk_premux1,
- &sm1_dsu_clk_mux0_div,
- &sm1_dsu_clk_postmux0,
- &sm1_dsu_clk_mux1_div,
- &sm1_dsu_clk_postmux1,
- &sm1_dsu_clk_dyn,
- &sm1_dsu_final_clk,
- &sm1_dsu_clk,
- &sm1_cpu1_clk,
- &sm1_cpu2_clk,
- &sm1_cpu3_clk,
- &g12a_spicc0_sclk_sel,
- &g12a_spicc0_sclk_div,
- &g12a_spicc0_sclk,
- &g12a_spicc1_sclk_sel,
- &g12a_spicc1_sclk_div,
- &g12a_spicc1_sclk,
- &sm1_nna_axi_clk_sel,
- &sm1_nna_axi_clk_div,
- &sm1_nna_axi_clk,
- &sm1_nna_core_clk_sel,
- &sm1_nna_core_clk_div,
- &sm1_nna_core_clk,
- &g12a_mipi_dsi_pxclk_sel,
- &g12a_mipi_dsi_pxclk_div,
- &g12a_mipi_dsi_pxclk,
- &g12b_mipi_isp_sel,
- &g12b_mipi_isp_div,
- &g12b_mipi_isp,
- &g12b_mipi_isp_gate,
- &g12b_csi_phy1,
- &g12b_csi_phy0,
-};
-
static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
#define DVFS_CON_ID "dvfs"
-static int meson_g12a_dvfs_setup_common(struct device *dev,
- struct clk_hw **hws)
+static int g12a_dvfs_setup_common(struct device *dev, struct clk_hw **hws)
{
struct clk *notifier_clk;
struct clk_hw *xtal;
@@ -5390,13 +5238,13 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
/* Setup clock notifier for cpu_clk_postmux0 */
- g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ g12a_cpu_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ &g12a_cpu_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn0 notifier\n");
return ret;
}
@@ -5413,7 +5261,7 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
return 0;
}
-static int meson_g12b_dvfs_setup(struct platform_device *pdev)
+static int g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_clks;
struct device *dev = &pdev->dev;
@@ -5421,7 +5269,7 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5450,18 +5298,19 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Add notifiers for the second CPU cluster */
/* Setup clock notifier for cpub_clk_postmux0 */
- g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ g12b_cpub_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ &g12b_cpub_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw, "dvfs");
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+ DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
&g12a_cpu_clk_mux_nb);
if (ret) {
@@ -5490,14 +5339,14 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
return 0;
}
-static int meson_g12a_dvfs_setup(struct platform_device *pdev)
+static int g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_clks;
struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5522,27 +5371,27 @@ static int meson_g12a_dvfs_setup(struct platform_device *pdev)
return 0;
}
-struct meson_g12a_data {
- const struct meson_eeclkc_data eeclkc_data;
+struct g12a_clkc_data {
+ const struct meson_clkc_data clkc_data;
int (*dvfs_setup)(struct platform_device *pdev);
};
-static int meson_g12a_probe(struct platform_device *pdev)
+static int g12a_clkc_probe(struct platform_device *pdev)
{
- const struct meson_eeclkc_data *eeclkc_data;
- const struct meson_g12a_data *g12a_data;
+ const struct meson_clkc_data *clkc_data;
+ const struct g12a_clkc_data *g12a_data;
int ret;
- eeclkc_data = of_device_get_match_data(&pdev->dev);
- if (!eeclkc_data)
+ clkc_data = of_device_get_match_data(&pdev->dev);
+ if (!clkc_data)
return -EINVAL;
- ret = meson_eeclkc_probe(pdev);
+ ret = meson_clkc_syscon_probe(pdev);
if (ret)
return ret;
- g12a_data = container_of(eeclkc_data, struct meson_g12a_data,
- eeclkc_data);
+ g12a_data = container_of(clkc_data, struct g12a_clkc_data,
+ clkc_data);
if (g12a_data->dvfs_setup)
return g12a_data->dvfs_setup(pdev);
@@ -5550,10 +5399,8 @@ static int meson_g12a_probe(struct platform_device *pdev)
return 0;
}
-static const struct meson_g12a_data g12a_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data g12a_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12a_hw_clks,
.num = ARRAY_SIZE(g12a_hw_clks),
@@ -5561,59 +5408,55 @@ static const struct meson_g12a_data g12a_clkc_data = {
.init_regs = g12a_init_regs,
.init_count = ARRAY_SIZE(g12a_init_regs),
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct meson_g12a_data g12b_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data g12b_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12b_hw_clks,
.num = ARRAY_SIZE(g12b_hw_clks),
},
},
- .dvfs_setup = meson_g12b_dvfs_setup,
+ .dvfs_setup = g12b_dvfs_setup,
};
-static const struct meson_g12a_data sm1_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data sm1_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = sm1_hw_clks,
.num = ARRAY_SIZE(sm1_hw_clks),
},
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id g12a_clkc_match_table[] = {
{
.compatible = "amlogic,g12a-clkc",
- .data = &g12a_clkc_data.eeclkc_data
+ .data = &g12a_clkc_data.clkc_data
},
{
.compatible = "amlogic,g12b-clkc",
- .data = &g12b_clkc_data.eeclkc_data
+ .data = &g12b_clkc_data.clkc_data
},
{
.compatible = "amlogic,sm1-clkc",
- .data = &sm1_clkc_data.eeclkc_data
+ .data = &sm1_clkc_data.clkc_data
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_clkc_match_table);
-static struct platform_driver g12a_driver = {
- .probe = meson_g12a_probe,
+static struct platform_driver g12a_clkc_driver = {
+ .probe = g12a_clkc_probe,
.driver = {
.name = "g12a-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = g12a_clkc_match_table,
},
};
-module_platform_driver(g12a_driver);
+module_platform_driver(g12a_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
deleted file mode 100644
index 27df99c4565a..000000000000
--- a/drivers/clk/meson/g12a.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 Amlogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2018 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- */
-#ifndef __G12A_H
-#define __G12A_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_MIPI_CNTL0 0x000
-#define HHI_MIPI_CNTL1 0x004
-#define HHI_MIPI_CNTL2 0x008
-#define HHI_MIPI_STS 0x00C
-#define HHI_GP0_PLL_CNTL0 0x040
-#define HHI_GP0_PLL_CNTL1 0x044
-#define HHI_GP0_PLL_CNTL2 0x048
-#define HHI_GP0_PLL_CNTL3 0x04C
-#define HHI_GP0_PLL_CNTL4 0x050
-#define HHI_GP0_PLL_CNTL5 0x054
-#define HHI_GP0_PLL_CNTL6 0x058
-#define HHI_GP0_PLL_STS 0x05C
-#define HHI_GP1_PLL_CNTL0 0x060
-#define HHI_GP1_PLL_CNTL1 0x064
-#define HHI_GP1_PLL_CNTL2 0x068
-#define HHI_GP1_PLL_CNTL3 0x06C
-#define HHI_GP1_PLL_CNTL4 0x070
-#define HHI_GP1_PLL_CNTL5 0x074
-#define HHI_GP1_PLL_CNTL6 0x078
-#define HHI_GP1_PLL_STS 0x07C
-#define HHI_PCIE_PLL_CNTL0 0x098
-#define HHI_PCIE_PLL_CNTL1 0x09C
-#define HHI_PCIE_PLL_CNTL2 0x0A0
-#define HHI_PCIE_PLL_CNTL3 0x0A4
-#define HHI_PCIE_PLL_CNTL4 0x0A8
-#define HHI_PCIE_PLL_CNTL5 0x0AC
-#define HHI_PCIE_PLL_STS 0x0B8
-#define HHI_HIFI_PLL_CNTL0 0x0D8
-#define HHI_HIFI_PLL_CNTL1 0x0DC
-#define HHI_HIFI_PLL_CNTL2 0x0E0
-#define HHI_HIFI_PLL_CNTL3 0x0E4
-#define HHI_HIFI_PLL_CNTL4 0x0E8
-#define HHI_HIFI_PLL_CNTL5 0x0EC
-#define HHI_HIFI_PLL_CNTL6 0x0F0
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12C
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_OTHER2 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_AUD_CLK_CNTL 0x178
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1A0
-#define HHI_MALI_CLK_CNTL 0x1b0
-#define HHI_VPU_CLKC_CNTL 0x1b4
-#define HHI_VPU_CLK_CNTL 0x1bC
-#define HHI_ISP_CLK_CNTL 0x1C0
-#define HHI_NNA_CLK_CNTL 0x1C8
-#define HHI_HDMI_CLK_CNTL 0x1CC
-#define HHI_VDEC_CLK_CNTL 0x1E0
-#define HHI_VDEC2_CLK_CNTL 0x1E4
-#define HHI_VDEC3_CLK_CNTL 0x1E8
-#define HHI_VDEC4_CLK_CNTL 0x1EC
-#define HHI_HDCP22_CLK_CNTL 0x1F0
-#define HHI_VAPBCLK_CNTL 0x1F4
-#define HHI_SYS_CPUB_CLK_CNTL1 0x200
-#define HHI_SYS_CPUB_CLK_CNTL 0x208
-#define HHI_VPU_CLKB_CNTL 0x20C
-#define HHI_SYS_CPU_CLK_CNTL2 0x210
-#define HHI_SYS_CPU_CLK_CNTL3 0x214
-#define HHI_SYS_CPU_CLK_CNTL4 0x218
-#define HHI_SYS_CPU_CLK_CNTL5 0x21c
-#define HHI_SYS_CPU_CLK_CNTL6 0x220
-#define HHI_GEN_CLK_CNTL 0x228
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-#define HHI_MPLL_CNTL0 0x278
-#define HHI_MPLL_CNTL1 0x27C
-#define HHI_MPLL_CNTL2 0x280
-#define HHI_MPLL_CNTL3 0x284
-#define HHI_MPLL_CNTL4 0x288
-#define HHI_MPLL_CNTL5 0x28c
-#define HHI_MPLL_CNTL6 0x290
-#define HHI_MPLL_CNTL7 0x294
-#define HHI_MPLL_CNTL8 0x298
-#define HHI_FIX_PLL_CNTL0 0x2A0
-#define HHI_FIX_PLL_CNTL1 0x2A4
-#define HHI_FIX_PLL_CNTL3 0x2AC
-#define HHI_SYS_PLL_CNTL0 0x2f4
-#define HHI_SYS_PLL_CNTL1 0x2f8
-#define HHI_SYS_PLL_CNTL2 0x2fc
-#define HHI_SYS_PLL_CNTL3 0x300
-#define HHI_SYS_PLL_CNTL4 0x304
-#define HHI_SYS_PLL_CNTL5 0x308
-#define HHI_SYS_PLL_CNTL6 0x30c
-#define HHI_HDMI_PLL_CNTL0 0x320
-#define HHI_HDMI_PLL_CNTL1 0x324
-#define HHI_HDMI_PLL_CNTL2 0x328
-#define HHI_HDMI_PLL_CNTL3 0x32c
-#define HHI_HDMI_PLL_CNTL4 0x330
-#define HHI_HDMI_PLL_CNTL5 0x334
-#define HHI_HDMI_PLL_CNTL6 0x338
-#define HHI_SPICC_CLK_CNTL 0x3dc
-#define HHI_SYS1_PLL_CNTL0 0x380
-#define HHI_SYS1_PLL_CNTL1 0x384
-#define HHI_SYS1_PLL_CNTL2 0x388
-#define HHI_SYS1_PLL_CNTL3 0x38c
-#define HHI_SYS1_PLL_CNTL4 0x390
-#define HHI_SYS1_PLL_CNTL5 0x394
-#define HHI_SYS1_PLL_CNTL6 0x398
-
-#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 83b034157b35..c7dfb3a06cb5 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -23,31 +23,20 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define GXBB_AO_GATE(_name, _bit) \
-static struct clk_regmap _name##_ao = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = AO_RTI_GEN_CNTL_REG0, \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_ao", \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data gxbb_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-GXBB_AO_GATE(remote, 0);
-GXBB_AO_GATE(i2c_master, 1);
-GXBB_AO_GATE(i2c_slave, 2);
-GXBB_AO_GATE(uart1, 3);
-GXBB_AO_GATE(uart2, 5);
-GXBB_AO_GATE(ir_blaster, 6);
+#define GXBB_AO_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(gxbb_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &gxbb_ao_pclk_parents, _flags)
-static struct clk_regmap ao_cts_oscin = {
+static GXBB_AO_PCLK(remote, 0, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_master, 1, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart1, 3, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart2, 5, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(ir_blaster, 6, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap gxbb_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 6,
@@ -62,7 +51,7 @@ static struct clk_regmap ao_cts_oscin = {
},
};
-static struct clk_regmap ao_32k_pre = {
+static struct clk_regmap gxbb_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -70,7 +59,7 @@ static struct clk_regmap ao_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_cts_oscin.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_cts_oscin.hw },
.num_parents = 1,
},
};
@@ -85,7 +74,7 @@ static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
}, {}
};
-static struct clk_regmap ao_32k_div = {
+static struct clk_regmap gxbb_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -117,12 +106,12 @@ static struct clk_regmap ao_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_pre.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_pre.hw },
.num_parents = 1,
},
};
-static struct clk_regmap ao_32k_sel = {
+static struct clk_regmap gxbb_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -133,15 +122,15 @@ static struct clk_regmap ao_32k_sel = {
.name = "ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ao_32k_div.hw,
- &ao_32k_pre.hw
+ &gxbb_ao_32k_div.hw,
+ &gxbb_ao_32k_pre.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_32k = {
+static struct clk_regmap gxbb_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -149,13 +138,13 @@ static struct clk_regmap ao_32k = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_sel.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_rtc_oscin = {
+static struct clk_regmap gxbb_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x7,
@@ -170,14 +159,14 @@ static struct clk_regmap ao_cts_rtc_oscin = {
{ .fw_name = "ext-32k-0", },
{ .fw_name = "ext-32k-1", },
{ .fw_name = "ext-32k-2", },
- { .hw = &ao_32k.hw },
+ { .hw = &gxbb_ao_32k.hw },
},
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_clk81 = {
+static struct clk_regmap gxbb_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -189,14 +178,14 @@ static struct clk_regmap ao_clk81 = {
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_cec = {
+static struct clk_regmap gxbb_ao_cts_cec = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CRT_CLK_CNTL1,
.mask = 0x1,
@@ -221,14 +210,14 @@ static struct clk_regmap ao_cts_cec = {
*/
.parent_data = (const struct clk_parent_data []) {
{ .name = "fixme", .index = -1, },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int gxbb_aoclk_reset[] = {
+static const unsigned int gxbb_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -237,70 +226,53 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *gxbb_aoclk[] = {
- &remote_ao,
- &i2c_master_ao,
- &i2c_slave_ao,
- &uart1_ao,
- &uart2_ao,
- &ir_blaster_ao,
- &ao_cts_oscin,
- &ao_32k_pre,
- &ao_32k_div,
- &ao_32k_sel,
- &ao_32k,
- &ao_cts_rtc_oscin,
- &ao_clk81,
- &ao_cts_cec,
-};
-
-static struct clk_hw *gxbb_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &remote_ao.hw,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao.hw,
- [CLKID_AO_UART1] = &uart1_ao.hw,
- [CLKID_AO_UART2] = &uart2_ao.hw,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
- [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
- [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
- [CLKID_AO_32K] = &ao_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
- [CLKID_AO_CLK81] = &ao_clk81.hw,
+static struct clk_hw *gxbb_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &gxbb_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &gxbb_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &gxbb_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &gxbb_ao_uart1.hw,
+ [CLKID_AO_UART2] = &gxbb_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &gxbb_ao_ir_blaster.hw,
+ [CLKID_AO_CEC_32K] = &gxbb_ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &gxbb_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &gxbb_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &gxbb_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &gxbb_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &gxbb_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &gxbb_ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &gxbb_ao_clk81.hw,
};
-static const struct meson_aoclk_data gxbb_aoclkc_data = {
+static const struct meson_aoclk_data gxbb_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
- .reset = gxbb_aoclk_reset,
- .num_clks = ARRAY_SIZE(gxbb_aoclk),
- .clks = gxbb_aoclk,
- .hw_clks = {
- .hws = gxbb_aoclk_hw_clks,
- .num = ARRAY_SIZE(gxbb_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(gxbb_ao_reset),
+ .reset = gxbb_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = gxbb_ao_hw_clks,
+ .num = ARRAY_SIZE(gxbb_ao_hw_clks),
+ },
},
};
-static const struct of_device_id gxbb_aoclkc_match_table[] = {
+static const struct of_device_id gxbb_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
- .data = &gxbb_aoclkc_data,
+ .data = &gxbb_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_ao_clkc_match_table);
-static struct platform_driver gxbb_aoclkc_driver = {
+static struct platform_driver gxbb_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
- .of_match_table = gxbb_aoclkc_match_table,
+ .of_match_table = gxbb_ao_clkc_match_table,
},
};
-module_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index f071faad1ebb..5a229c4ffae1 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -10,80 +10,111 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include "gxbb.h"
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include "vid-pll-div.h"
#include <dt-bindings/clock/gxbb-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
-static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
- PLL_PARAMS(32, 1),
- PLL_PARAMS(33, 1),
- PLL_PARAMS(34, 1),
- PLL_PARAMS(35, 1),
- PLL_PARAMS(36, 1),
- PLL_PARAMS(37, 1),
- PLL_PARAMS(38, 1),
- PLL_PARAMS(39, 1),
- PLL_PARAMS(40, 1),
- PLL_PARAMS(41, 1),
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- { /* sentinel */ },
-};
-
-static const struct pll_params_table gxl_gp0_pll_params_table[] = {
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- PLL_PARAMS(63, 1),
- PLL_PARAMS(64, 1),
- PLL_PARAMS(65, 1),
- PLL_PARAMS(66, 1),
- { /* sentinel */ },
-};
+#define SCR 0x2c
+#define TIMEOUT_VALUE 0x3c
+
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_CNTL1 0x58
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_TIMER90K 0xec
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_MEM_PD_REG1 0x104
+#define HHI_VPU_MEM_PD_REG1 0x108
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_OSCIN_CNTL 0x158
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_AUD_CLK_CNTL3 0x1a4
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_USB_CLK_CNTL 0x220
+#define HHI_32K_CLK_CNTL 0x224
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_PCM_CLK_CNTL 0x258
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
+
+#define HHI_MPLL3_CNTL0 0x2e0
+#define HHI_MPLL3_CNTL1 0x2e4
+#define HHI_VDAC_CNTL0 0x2f4
+#define HHI_VDAC_CNTL1 0x2f8
+
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31c
+#define HHI_HDMI_PLL_CNTL 0x320
+#define HHI_HDMI_PLL_CNTL2 0x324
+#define HHI_HDMI_PLL_CNTL3 0x328
+#define HHI_HDMI_PLL_CNTL4 0x32c
+#define HHI_HDMI_PLL_CNTL5 0x330
+#define HHI_HDMI_PLL_CNTL6 0x334
+#define HHI_HDMI_PLL_CNTL_I 0x338
+#define HHI_HDMI_PLL_CNTL7 0x33c
+
+#define HHI_HDMI_PHY_CNTL0 0x3a0
+#define HHI_HDMI_PHY_CNTL1 0x3a4
+#define HHI_HDMI_PHY_CNTL2 0x3a8
+#define HHI_HDMI_PHY_CNTL3 0x3ac
+
+#define HHI_VID_LOCK_CLK_CNTL 0x3c8
+#define HHI_BT656_CLK_CNTL 0x3d4
+#define HHI_SAR_CLK_CNTL 0x3d8
static struct clk_regmap gxbb_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
@@ -428,7 +459,42 @@ static struct clk_regmap gxbb_sys_pll = {
},
};
-static const struct reg_sequence gxbb_gp0_init_regs[] = {
+static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
+ PLL_PARAMS(32, 1),
+ PLL_PARAMS(33, 1),
+ PLL_PARAMS(34, 1),
+ PLL_PARAMS(35, 1),
+ PLL_PARAMS(36, 1),
+ PLL_PARAMS(37, 1),
+ PLL_PARAMS(38, 1),
+ PLL_PARAMS(39, 1),
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxbb_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
@@ -462,8 +528,8 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.width = 1,
},
.table = gxbb_gp0_pll_params_table,
- .init_regs = gxbb_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
+ .init_regs = gxbb_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -475,7 +541,36 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
},
};
-static const struct reg_sequence gxl_gp0_init_regs[] = {
+static const struct pll_params_table gxl_gp0_pll_params_table[] = {
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxl_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
@@ -516,8 +611,8 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.width = 1,
},
.table = gxl_gp0_pll_params_table,
- .init_regs = gxl_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
+ .init_regs = gxl_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -731,7 +826,6 @@ static struct clk_regmap gxbb_mpll0_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -760,7 +854,6 @@ static struct clk_regmap gxl_mpll0_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -812,7 +905,6 @@ static struct clk_regmap gxbb_mpll1_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
@@ -855,7 +947,6 @@ static struct clk_regmap gxbb_mpll2_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
@@ -881,8 +972,9 @@ static struct clk_regmap gxbb_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div7.hw },
{ .hw = &gxbb_mpll1.hw },
@@ -892,37 +984,37 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &gxbb_fclk_div5.hw },
};
-static struct clk_regmap gxbb_mpeg_clk_sel = {
+static struct clk_regmap gxbb_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap gxbb_mpeg_clk_div = {
+static struct clk_regmap gxbb_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_sel.hw
+ &gxbb_clk81_sel.hw
},
.num_parents = 1,
},
@@ -938,7 +1030,7 @@ static struct clk_regmap gxbb_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_div.hw
+ &gxbb_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -1003,7 +1095,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
+static const struct clk_parent_data gxbb_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_gp0_pll.hw },
{ .hw = &gxbb_mpll2.hw },
@@ -1023,8 +1115,8 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1077,8 +1169,8 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1122,11 +1214,6 @@ static struct clk_regmap gxbb_mali_1 = {
},
};
-static const struct clk_hw *gxbb_mali_parent_hws[] = {
- &gxbb_mali_0.hw,
- &gxbb_mali_1.hw,
-};
-
static struct clk_regmap gxbb_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -1136,29 +1223,35 @@ static struct clk_regmap gxbb_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0.hw,
+ &gxbb_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+static u32 gxbb_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *gxbb_cts_mclk_parents[] = {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+};
+
static struct clk_regmap gxbb_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1201,18 +1294,14 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1272,14 +1361,13 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
-static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+/*
+ * This table skips a clock named 'cts_slow_oscin' in the documentation.
+ * This clock does not exist yet in this controller or the AO one.
+ */
+static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
+static const struct clk_parent_data gxbb_32k_clk_parents[] = {
{ .fw_name = "xtal", },
- /*
- * FIXME: This clock is provided by the ao clock controller but the
- * clock is not yet part of the binding of this controller, so string
- * name must be use to set this parent.
- */
- { .name = "cts_slow_oscin", .index = -1 },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
};
@@ -1289,11 +1377,12 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.offset = HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
- },
+ .table = gxbb_32k_clk_parents_val_table,
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_32k_clk_parent_data,
+ .parent_data = gxbb_32k_clk_parents,
-		.num_parents = 4,
+		.num_parents = ARRAY_SIZE(gxbb_32k_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
@@ -1312,7 +1401,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
&gxbb_32k_clk_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1332,7 +1421,7 @@ static struct clk_regmap gxbb_32k_clk = {
},
};
-static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data gxbb_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div2.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -1341,7 +1430,7 @@ static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -1356,8 +1445,8 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1406,8 +1495,8 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1456,8 +1545,8 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1498,7 +1587,7 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *gxbb_vpu_parent_hws[] = {
+static const struct clk_hw *gxbb_vpu_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1518,8 +1607,8 @@ static struct clk_regmap gxbb_vpu_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1566,8 +1655,8 @@ static struct clk_regmap gxbb_vpu_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1625,7 +1714,7 @@ static struct clk_regmap gxbb_vpu = {
/* VAPB Clock */
-static const struct clk_hw *gxbb_vapb_parent_hws[] = {
+static const struct clk_hw *gxbb_vapb_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1645,8 +1734,8 @@ static struct clk_regmap gxbb_vapb_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1697,8 +1786,8 @@ static struct clk_regmap gxbb_vapb_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1806,7 +1895,7 @@ static struct clk_regmap gxbb_vid_pll_div = {
},
};
-static const struct clk_parent_data gxbb_vid_pll_parent_data[] = {
+static const struct clk_parent_data gxbb_vid_pll_parents[] = {
{ .hw = &gxbb_vid_pll_div.hw },
/*
* Note:
@@ -1831,8 +1920,8 @@ static struct clk_regmap gxbb_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_data = gxbb_vid_pll_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_data),
+ .parent_data = gxbb_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1853,7 +1942,7 @@ static struct clk_regmap gxbb_vid_pll = {
},
};
-static const struct clk_hw *gxbb_vclk_parent_hws[] = {
+static const struct clk_hw *gxbb_vclk_parents[] = {
&gxbb_vid_pll.hw,
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
@@ -1877,8 +1966,8 @@ static struct clk_regmap gxbb_vclk_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1897,8 +1986,8 @@ static struct clk_regmap gxbb_vclk2_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2237,8 +2326,8 @@ static struct clk_fixed_factor gxbb_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_parent_hws[] = {
+static u32 gxbb_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_cts_parents[] = {
&gxbb_vclk_div1.hw,
&gxbb_vclk_div2.hw,
&gxbb_vclk_div4.hw,
@@ -2256,13 +2345,13 @@ static struct clk_regmap gxbb_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2272,13 +2361,13 @@ static struct clk_regmap gxbb_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2288,50 +2377,13 @@ static struct clk_regmap gxbb_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_hdmi_tx_parent_hws[] = {
- &gxbb_vclk_div1.hw,
- &gxbb_vclk_div2.hw,
- &gxbb_vclk_div4.hw,
- &gxbb_vclk_div6.hw,
- &gxbb_vclk_div12.hw,
- &gxbb_vclk2_div1.hw,
- &gxbb_vclk2_div2.hw,
- &gxbb_vclk2_div4.hw,
- &gxbb_vclk2_div6.hw,
- &gxbb_vclk2_div12.hw,
-};
-
-static struct clk_regmap gxbb_hdmi_tx_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_HDMI_CLK_CNTL,
- .mask = 0xf,
- .shift = 16,
- .table = mux_table_hdmi_tx_sel,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_tx_sel",
- .ops = &clk_regmap_mux_ops,
- /*
- * bits 31:28 selects from 12 possible parents:
- * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
- * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
- * cts_tcon
- */
- .parent_hws = gxbb_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2384,6 +2436,43 @@ static struct clk_regmap gxbb_cts_vdac = {
},
};
+/* TOFIX: add support for cts_tcon */
+static u32 gxbb_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_hdmi_tx_parents[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
+};
+
+static struct clk_regmap gxbb_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = gxbb_hdmi_tx_parents_val_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 31:28 selects from 12 possible parents:
+ * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
+ * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
+ * cts_tcon
+ */
+ .parent_hws = gxbb_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_tx_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap gxbb_hdmi_tx = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL2,
@@ -2402,7 +2491,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
-static const struct clk_parent_data gxbb_hdmi_parent_data[] = {
+static const struct clk_parent_data gxbb_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div4.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -2419,8 +2508,8 @@ static struct clk_regmap gxbb_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_data),
+ .parent_data = gxbb_hdmi_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2456,7 +2545,7 @@ static struct clk_regmap gxbb_hdmi = {
/* VDEC clocks */
-static const struct clk_hw *gxbb_vdec_parent_hws[] = {
+static const struct clk_hw *gxbb_vdec_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -2473,8 +2562,8 @@ static struct clk_regmap gxbb_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2523,8 +2612,8 @@ static struct clk_regmap gxbb_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2563,9 +2652,8 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gxbb_gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gxbb_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_vdec_1.hw },
{ .hw = &gxbb_vdec_hevc.hw },
@@ -2584,7 +2672,7 @@ static struct clk_regmap gxbb_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gxbb_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -2595,8 +2683,8 @@ static struct clk_regmap gxbb_gen_clk_sel = {
* vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gxbb_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_gen_clk_parents),
},
};
@@ -2633,100 +2721,118 @@ static struct clk_regmap gxbb_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &gxbb_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(gxbb_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(gxbb_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(gxbb_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(gxbb_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(gxbb_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(gxbb_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
-static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(gxbb_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(gxbb_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(gxbb_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(gxbb_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(gxbb_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(gxbb_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(gxbb_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(gxbb_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(gxbb_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(gxbb_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8);
-static MESON_GATE(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(gxbb_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(gxbb_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(gxbb_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(gxbb_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22);
-static MESON_GATE(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(gxbb_vclk_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(gxbb_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data gxbb_pclk_parents = { .hw = &gxbb_clk81.hw };
+
+#define GXBB_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &gxbb_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static GXBB_PCLK(gxbb_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxl_acodec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sar_adc, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(gxbb_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+static GXBB_PCLK(gxbb_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* AIU gates */
-static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
-static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+static const struct clk_parent_data gxbb_aiu_glue_parents = { .hw = &gxbb_aiu.hw };
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data gxbb_aiu_pclk_parents = { .hw = &gxbb_aiu_glue.hw };
+#define GXBB_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &gxbb_aiu_pclk_parents, _flags)
+
+static GXBB_AIU_PCLK(gxbb_iec958, 7, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_i2s_out, 8, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_amclk, 9, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_aififo2, 10, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer, 11, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_adc, 13, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -2740,8 +2846,8 @@ static struct clk_hw *gxbb_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -2948,8 +3054,8 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3146,429 +3252,36 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_ACODEC] = &gxl_acodec.hw,
};
-static struct clk_regmap *const gxbb_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxbb_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxbb_gp0_pll_dco,
- &gxbb_hdmi_pll,
- &gxbb_hdmi_pll_od,
- &gxbb_hdmi_pll_od2,
- &gxbb_hdmi_pll_dco,
-};
-
-static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxl_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxl_gp0_pll_dco,
- &gxl_hdmi_pll,
- &gxl_hdmi_pll_od,
- &gxl_hdmi_pll_od2,
- &gxl_hdmi_pll_dco,
- &gxl_acodec,
-};
-
-static const struct meson_eeclkc_data gxbb_clkc_data = {
- .regmap_clks = gxbb_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
+static const struct meson_clkc_data gxbb_clkc_data = {
.hw_clks = {
.hws = gxbb_hw_clks,
.num = ARRAY_SIZE(gxbb_hw_clks),
},
};
-static const struct meson_eeclkc_data gxl_clkc_data = {
- .regmap_clks = gxl_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
+static const struct meson_clkc_data gxl_clkc_data = {
.hw_clks = {
.hws = gxl_hw_clks,
.num = ARRAY_SIZE(gxl_hw_clks),
},
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id gxbb_clkc_match_table[] = {
{ .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_clkc_match_table);
-static struct platform_driver gxbb_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver gxbb_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "gxbb-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = gxbb_clkc_match_table,
},
};
-module_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
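For reference, a rough sketch of what the MESON_PCLK() invocation for gxbb_aiu_glue above expands to, based on the __MESON_PCLK() helper added to meson-clkc-utils.h later in this diff (a hand expansion for illustration, not part of the patch itself):

	/* Approximate expansion of:
	 * MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6,
	 *            &gxbb_aiu_glue_parents, CLK_IGNORE_UNUSED);
	 */
	static struct clk_regmap gxbb_aiu_glue = {
		.data = &(struct clk_regmap_gate_data) {
			.offset = HHI_GCLK_MPEG1,
			.bit_idx = 6,
		},
		.hw.init = &(struct clk_init_data) {
			.name = "gxbb_aiu_glue",
			.ops = &clk_regmap_gate_ops,
			.parent_data = &gxbb_aiu_glue_parents,
			.num_parents = 1,
			.flags = CLK_IGNORE_UNUSED,
		},
	};
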
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
deleted file mode 100644
index ba5f39a8d746..000000000000
--- a/drivers/clk/meson/gxbb.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __GXBB_H
-#define __GXBB_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet are listed in comment blocks below.
- * Those offsets must be multiplied by 4 before adding them to the base address
- * to get the right value
- */
-#define SCR 0x2C /* 0x0b offset in data sheet */
-#define TIMEOUT_VALUE 0x3c /* 0x0f offset in data sheet */
-
-#define HHI_GP0_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */
-#define HHI_GP0_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_GP0_PLL_CNTL1 0x58 /* 0x16 offset in data sheet */
-
-#define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */
-#define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */
-
-#define HHI_MEM_PD_REG0 0x100 /* 0x40 offset in data sheet */
-#define HHI_MEM_PD_REG1 0x104 /* 0x41 offset in data sheet */
-#define HHI_VPU_MEM_PD_REG1 0x108 /* 0x42 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_OSCIN_CNTL 0x158 /* 0x56 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_SYS_CPU_RESET_CNTL 0x160 /* 0x58 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
-#define HHI_AUD_CLK_CNTL3 0x1a4 /* 0x69 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bC /* 0x6f offset in data sheet */
-
-#define HHI_HDMI_CLK_CNTL 0x1CC /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1E0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1E4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1E8 /* 0x7a offset in data sheet */
-#define HHI_VDEC4_CLK_CNTL 0x1EC /* 0x7b offset in data sheet */
-#define HHI_HDCP22_CLK_CNTL 0x1F0 /* 0x7c offset in data sheet */
-#define HHI_VAPBCLK_CNTL 0x1F4 /* 0x7d offset in data sheet */
-
-#define HHI_VPU_CLKB_CNTL 0x20C /* 0x83 offset in data sheet */
-#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
-#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-
-#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
-#define HHI_SD_EMMC_CLK_CNTL 0x264 /* 0x99 offset in data sheet */
-
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* MP0, 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* MP1, 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* MP2, 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* MP2, 0xa9 offset in data sheet */
-
-#define HHI_MPLL3_CNTL0 0x2E0 /* 0xb8 offset in data sheet */
-#define HHI_MPLL3_CNTL1 0x2E4 /* 0xb9 offset in data sheet */
-#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL2 0x304 /* 0xc1 offset in data sheet */
-#define HHI_SYS_PLL_CNTL3 0x308 /* 0xc2 offset in data sheet */
-#define HHI_SYS_PLL_CNTL4 0x30c /* 0xc3 offset in data sheet */
-#define HHI_SYS_PLL_CNTL5 0x310 /* 0xc4 offset in data sheet */
-#define HHI_DPLL_TOP_I 0x318 /* 0xc6 offset in data sheet */
-#define HHI_DPLL_TOP2_I 0x31C /* 0xc7 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
-#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_HDMI_PLL_CNTL_I 0x338 /* 0xce offset in data sheet */
-#define HHI_HDMI_PLL_CNTL7 0x33C /* 0xcf offset in data sheet */
-
-#define HHI_HDMI_PHY_CNTL0 0x3A0 /* 0xe8 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL1 0x3A4 /* 0xe9 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL2 0x3A8 /* 0xea offset in data sheet */
-#define HHI_HDMI_PHY_CNTL3 0x3AC /* 0xeb offset in data sheet */
-
-#define HHI_VID_LOCK_CLK_CNTL 0x3C8 /* 0xf2 offset in data sheet */
-#define HHI_BT656_CLK_CNTL 0x3D4 /* 0xf5 offset in data sheet */
-#define HHI_SAR_CLK_CNTL 0x3D8 /* 0xf6 offset in data sheet */
-
-#endif /* __GXBB_H */
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 053940ee8940..8f6bdea18119 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include "meson-aoclk.h"
+#include "clk-regmap.h"
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
@@ -36,15 +37,23 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
- struct meson_aoclk_data *data;
+ const struct meson_clkc_data *clkc_data;
+ const struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *regmap;
- int ret, clkid;
+ int ret;
- data = (struct meson_aoclk_data *) of_device_get_match_data(dev);
- if (!data)
- return -ENODEV;
+ clkc_data = of_device_get_match_data(dev);
+ if (!clkc_data)
+ return -EINVAL;
+
+ ret = meson_clkc_syscon_probe(pdev);
+ if (ret)
+ return ret;
+
+ data = container_of(clkc_data, struct meson_aoclk_data,
+ clkc_data);
rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
@@ -70,26 +79,10 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap */
- for (clkid = 0; clkid < data->num_clks; clkid++)
- data->clks[clkid]->map = regmap;
-
- /* Register all clks */
- for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
- if (!data->hw_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+ return 0;
}
-EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Always-ON Clock Controller helpers");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index 308be3e4814a..2c83e73d3a77 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -20,12 +20,10 @@
#include "meson-clkc-utils.h"
struct meson_aoclk_data {
+ const struct meson_clkc_data clkc_data;
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- const int num_clks;
- struct clk_regmap **clks;
- struct meson_clk_hw_data hw_clks;
};
struct meson_aoclk_reset_controller {
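With meson_clkc_data embedded as the first member, a SoC's match data is expected to look roughly like the sketch below (all identifiers are illustrative, not taken from the patch). The match table entry must point at the embedded clkc_data member so that of_device_get_match_data() in meson_clkc_syscon_probe() and the container_of() in meson_aoclkc_probe() both resolve correctly:

	static struct clk_hw *example_ao_hw_clks[] = { /* ... */ };
	static const unsigned int example_ao_resets[] = { /* ... */ };

	static const struct meson_aoclk_data example_aoclkc_data = {
		.clkc_data = {
			.hw_clks = {
				.hws = example_ao_hw_clks,
				.num = ARRAY_SIZE(example_ao_hw_clks),
			},
		},
		.reset_reg = EXAMPLE_AO_RESET_REG,	/* assumed register */
		.num_reset = ARRAY_SIZE(example_ao_resets),
		.reset = example_ao_resets,
	};

	static const struct of_device_id example_aoclkc_match[] = {
		{ .compatible = "amlogic,example-aoclkc",
		  .data = &example_aoclkc_data.clkc_data },
		{ /* sentinel */ }
	};
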
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index a8cd2c21fab7..870f50548e26 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -3,9 +3,13 @@
* Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
*/
-#include <linux/of_device.h>
#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
#include "meson-clkc-utils.h"
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data)
@@ -20,8 +24,88 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
return data->hws[idx];
}
-EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, "CLK_MESON");
+
+static int meson_clkc_init(struct device *dev, struct regmap *map)
+{
+ const struct meson_clkc_data *data;
+ struct clk_hw *hw;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ if (data->init_count)
+ regmap_multi_reg_write(map, data->init_regs, data->init_count);
+
+ for (i = 0; i < data->hw_clks.num; i++) {
+ hw = data->hw_clks.hws[i];
+
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "registering %s clock failed\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+}
+
+int meson_clkc_syscon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get parent syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_syscon_probe, "CLK_MESON");
+
+int meson_clkc_mmio_probe(struct platform_device *pdev)
+{
+ const struct meson_clkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *map;
+ struct regmap_config regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_cfg.max_register = resource_size(res) - regmap_cfg.reg_stride;
+
+ map = devm_regmap_init_mmio(dev, base, &regmap_cfg);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_mmio_probe, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
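Both helpers are meant to be used directly as a platform driver's probe callback, with the clock data carried in the of_device_id match table. A minimal sketch (driver name and match table are placeholders; the gxbb driver above and the meson8-ddr driver below are the real in-tree users of the syscon and mmio variants respectively):

	static struct platform_driver example_clkc_driver = {
		.probe = meson_clkc_syscon_probe,	/* or meson_clkc_mmio_probe */
		.driver = {
			.name = "example-clkc",
			.of_match_table = example_clkc_match_table,
		},
	};
	module_platform_driver(example_clkc_driver);
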
diff --git a/drivers/clk/meson/meson-clkc-utils.h b/drivers/clk/meson/meson-clkc-utils.h
index fe6f40728949..ddadf14b4923 100644
--- a/drivers/clk/meson/meson-clkc-utils.h
+++ b/drivers/clk/meson/meson-clkc-utils.h
@@ -9,6 +9,8 @@
#include <linux/of_device.h>
#include <linux/clk-provider.h>
+struct platform_device;
+
struct meson_clk_hw_data {
struct clk_hw **hws;
unsigned int num;
@@ -16,4 +18,91 @@ struct meson_clk_hw_data {
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data);
+struct meson_clkc_data {
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ struct meson_clk_hw_data hw_clks;
+};
+
+int meson_clkc_syscon_probe(struct platform_device *pdev);
+int meson_clkc_mmio_probe(struct platform_device *pdev);
+
+#define __MESON_PCLK(_name, _reg, _bit, _ops, _pdata, _flags) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_data = (_pdata), \
+ .num_parents = 1, \
+ .flags = (_flags), \
+ }, \
+}
+
+#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pdata, _flags)
+
+#define MESON_PCLK_RO(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pdata, _flags)
+
+/* Helpers for the usual sel/div/gate composite clocks */
+#define MESON_COMP_SEL(_prefix, _name, _reg, _shift, _mask, _pdata, \
+ _table, _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_sel = { \
+ .data = &(struct clk_regmap_mux_data) { \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ .table = (_table), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #_name "_sel", \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_DIV(_prefix, _name, _reg, _shift, _width, \
+ _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_div = { \
+ .data = &(struct clk_regmap_div_data) { \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name "_div", \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_sel.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_GATE(_prefix, _name, _reg, _bit, _iflags) \
+struct clk_regmap _prefix##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_div.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
#endif
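To see how the three composite helpers chain, here is a hedged sketch of a sel/div/gate trio built with them; the "example_" prefix, the foo clock, its parents and the register layout are all invented for illustration. MESON_COMP_DIV() parents the divider on example_foo_sel and MESON_COMP_GATE() parents the gate on example_foo_div, so only the register fields need to be spelled out:

	static const struct clk_parent_data example_foo_parents[] = {
		{ .fw_name = "xtal", },
		{ .hw = &example_fclk_div4.hw, },
	};

	static MESON_COMP_SEL(example_, foo, EXAMPLE_FOO_REG, 9, 0x3,
			      example_foo_parents, NULL, 0, 0);
	static MESON_COMP_DIV(example_, foo, EXAMPLE_FOO_REG, 0, 7,
			      0, CLK_SET_RATE_PARENT);
	static MESON_COMP_GATE(example_, foo, EXAMPLE_FOO_REG, 8,
			       CLK_SET_RATE_PARENT);

This declares three clk_regmap clocks registered as "foo_sel", "foo_div" and "foo".
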
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
deleted file mode 100644
index 66f79e384fe5..000000000000
--- a/drivers/clk/meson/meson-eeclk.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-#include <linux/module.h>
-
-#include "clk-regmap.h"
-#include "meson-eeclk.h"
-
-int meson_eeclkc_probe(struct platform_device *pdev)
-{
- const struct meson_eeclkc_data *data;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct regmap *map;
- int ret, i;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
-
- /* Get the hhi system controller node */
- np = of_get_parent(dev->of_node);
- map = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- if (data->init_count)
- regmap_multi_reg_write(map, data->init_regs, data->init_count);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
- for (i = 0; i < data->hw_clks.num; i++) {
- /* array might be sparse */
- if (!data->hw_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
-}
-EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, CLK_MESON);
-
-MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
deleted file mode 100644
index 37a48b75c660..000000000000
--- a/drivers/clk/meson/meson-eeclk.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __MESON_CLKC_H
-#define __MESON_CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-#include "meson-clkc-utils.h"
-
-struct platform_device;
-
-struct meson_eeclkc_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- struct meson_clk_hw_data hw_clks;
-};
-
-int meson_eeclkc_probe(struct platform_device *pdev);
-
-#endif /* __MESON_CLKC_H */
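For drivers converted away from the removed header, the field mapping is mechanical. The regmap clock list has no replacement; judging by the removed population loops, clk_regmap clocks obtain their regmap elsewhere in this series, so the list is simply dropped:

	/*
	 *  struct meson_eeclkc_data            struct meson_clkc_data
	 *  ------------------------            ----------------------
	 *  .regmap_clks / .regmap_clk_num  ->  (dropped)
	 *  .init_regs                      ->  .init_regs
	 *  .init_count                     ->  .init_count
	 *  .hw_clks                        ->  .hw_clks
	 */
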
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
index 4b73ea244b63..0f93774f7371 100644
--- a/drivers/clk/meson/meson8-ddr.c
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -12,6 +12,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
+#include "meson-clkc-utils.h"
#define AM_DDR_PLL_CNTL 0x00
#define AM_DDR_PLL_CNTL1 0x04
@@ -77,69 +78,31 @@ static struct clk_regmap meson8_ddr_pll = {
},
};
-static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
- .hws = {
- [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
- [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
- },
- .num = 2,
-};
-
-static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
- &meson8_ddr_pll_dco,
- &meson8_ddr_pll,
+static struct clk_hw *meson8_ddr_hw_clks[] = {
+ [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
+ [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
};
-static const struct regmap_config meson8_ddr_clkc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DDR_CLK_STS,
+static const struct meson_clkc_data meson8_ddr_clkc_data = {
+ .hw_clks = {
+ .hws = meson8_ddr_hw_clks,
+ .num = ARRAY_SIZE(meson8_ddr_hw_clks),
+ },
};
-static int meson8_ddr_clkc_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- void __iomem *base;
- struct clk_hw *hw;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &meson8_ddr_clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap */
- for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
- meson8_ddr_clk_regmaps[i]->map = regmap;
-
- /* Register all clks */
- for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
- hw = meson8_ddr_clk_hw_onecell_data.hws[i];
-
- ret = devm_clk_hw_register(&pdev->dev, hw);
- if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
- &meson8_ddr_clk_hw_onecell_data);
-}
-
static const struct of_device_id meson8_ddr_clkc_match_table[] = {
- { .compatible = "amlogic,meson8-ddr-clkc" },
- { .compatible = "amlogic,meson8b-ddr-clkc" },
+ {
+ .compatible = "amlogic,meson8-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ }, {
+ .compatible = "amlogic,meson8b-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ },
{ /* sentinel */ }
};
static struct platform_driver meson8_ddr_clkc_driver = {
- .probe = meson8_ddr_clkc_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "meson8-ddr-clkc",
.of_match_table = meson8_ddr_clkc_match_table,
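One behavioral detail of this conversion: the old driver capped the regmap at DDR_CLK_STS, while the generic mmio helper derives the bound from the memory resource. For instance, assuming a 0x100-byte register window (size invented for illustration):

	max_register = resource_size(res) - reg_stride
	             = 0x100 - 4
	             = 0xfc

Note also that the old config used .reg_bits = 8 where the helper uses 32; both are wide enough to address these small register blocks.
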
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index b7417ac262d3..95d0b9cbd904 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "meson8b.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include "clk-pll.h"
@@ -25,7 +24,71 @@
#include <dt-bindings/clock/meson8b-clkc.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the HardKernel[0] data sheet must be multiplied
+ * by 4 before adding them to the base address to get the right value
+ *
+ * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
+ */
+#define HHI_GP_PLL_CNTL 0x40
+#define HHI_GP_PLL_CNTL2 0x44
+#define HHI_GP_PLL_CNTL3 0x48
+#define HHI_GP_PLL_CNTL4 0x4C
+#define HHI_GP_PLL_CNTL5 0x50
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_VID_DIVIDER_CNTL 0x198
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_VID_PLL_CNTL 0x320
+#define HHI_VID_PLL_CNTL2 0x324
+#define HHI_VID_PLL_CNTL3 0x328
+#define HHI_VID_PLL_CNTL4 0x32c
+#define HHI_VID_PLL_CNTL5 0x330
+#define HHI_VID_PLL_CNTL6 0x334
+#define HHI_VID2_PLL_CNTL 0x380
+#define HHI_VID2_PLL_CNTL2 0x384
+#define HHI_VID2_PLL_CNTL3 0x388
+#define HHI_VID2_PLL_CNTL4 0x38c
+#define HHI_VID2_PLL_CNTL5 0x390
+#define HHI_VID2_PLL_CNTL6 0x394
+
+/*
+ * MPLL register offsets taken from the S905 datasheet. Vendor kernel sources
+ * confirm these are the same for the S805.
+ */
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
struct meson8b_clk_reset {
struct reset_controller_dev reset;
@@ -151,7 +214,7 @@ static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
{ .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
};
-static const struct pll_params_table hdmi_pll_params_table[] = {
+static const struct pll_params_table meson8b_hdmi_pll_params_table[] = {
PLL_PARAMS(40, 1),
PLL_PARAMS(42, 1),
PLL_PARAMS(44, 1),
@@ -204,7 +267,7 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
- .table = hdmi_pll_params_table,
+ .table = meson8b_hdmi_pll_params_table,
.init_regs = meson8b_hdmi_pll_init_regs,
.init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
@@ -492,7 +555,6 @@ static struct clk_regmap meson8b_mpll0_div = {
.shift = 25,
.width = 1,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -537,7 +599,6 @@ static struct clk_regmap meson8b_mpll1_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
@@ -582,7 +643,6 @@ static struct clk_regmap meson8b_mpll2_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
@@ -610,16 +670,17 @@ static struct clk_regmap meson8b_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 6, 5, 7 };
-static struct clk_regmap meson8b_mpeg_clk_sel = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 meson8b_clk81_parents_val_table[] = { 6, 5, 7 };
+static struct clk_regmap meson8b_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = meson8b_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
@@ -635,17 +696,17 @@ static struct clk_regmap meson8b_mpeg_clk_sel = {
},
};
-static struct clk_regmap meson8b_mpeg_clk_div = {
+static struct clk_regmap meson8b_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_sel.hw
+ &meson8b_clk81_sel.hw
},
.num_parents = 1,
},
@@ -660,7 +721,7 @@ static struct clk_regmap meson8b_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_div.hw
+ &meson8b_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -714,7 +775,7 @@ static struct clk_fixed_factor meson8b_cpu_in_div3 = {
},
};
-static const struct clk_div_table cpu_scale_table[] = {
+static const struct clk_div_table meson8b_cpu_scale_div_table[] = {
{ .val = 1, .div = 4 },
{ .val = 2, .div = 6 },
{ .val = 3, .div = 8 },
@@ -731,7 +792,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
.width = 10,
- .table = cpu_scale_table,
+ .table = meson8b_cpu_scale_div_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
.hw.init = &(struct clk_init_data){
@@ -745,13 +806,13 @@ static struct clk_regmap meson8b_cpu_scale_div = {
},
};
-static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
+static u32 meson8b_cpu_scale_out_parents_val_table[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
- .table = mux_table_cpu_scale_out_sel,
+ .table = meson8b_cpu_scale_out_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
@@ -833,13 +894,13 @@ static struct clk_regmap meson8b_nand_clk_div = {
},
};
-static struct clk_regmap meson8b_nand_clk_gate = {
+static struct clk_regmap meson8b_nand_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_NAND_CLK_CNTL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "nand_clk_gate",
+ .name = "nand_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_nand_clk_div.hw
@@ -940,160 +1001,137 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_apb_clk_sel = {
+static u32 meson8b_cpu_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *meson8b_cpu_if_parents[] = {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+};
+
+static struct clk_regmap meson8b_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_apb,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_sel",
+ .name = "apb_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_apb_clk_gate = {
+static struct clk_regmap meson8b_apb = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_dis",
+ .name = "apb",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_apb_clk_sel.hw
+ &meson8b_apb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_periph_clk_sel = {
+static struct clk_regmap meson8b_periph_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_sel",
+ .name = "periph_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_periph_clk_gate = {
+static struct clk_regmap meson8b_periph = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 17,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_dis",
+ .name = "periph",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_periph_clk_sel.hw
+ &meson8b_periph_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_axi[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_axi_clk_sel = {
+static struct clk_regmap meson8b_axi_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 9,
- .table = mux_table_axi,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_sel",
+ .name = "axi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_axi_clk_gate = {
+static struct clk_regmap meson8b_axi = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 18,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_dis",
+ .name = "axi",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_axi_clk_sel.hw
+ &meson8b_axi_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_l2_dram_clk_sel = {
+static struct clk_regmap meson8b_l2_dram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 12,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_sel",
+ .name = "l2_dram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_l2_dram_clk_gate = {
+static struct clk_regmap meson8b_l2_dram = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 19,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_dis",
+ .name = "l2_dram",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_l2_dram_clk_sel.hw
+ &meson8b_l2_dram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1226,7 +1264,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
};
-static const struct clk_hw *meson8b_vclk_mux_parent_hws[] = {
+static const struct clk_hw *meson8b_vclk_parents[] = {
&meson8b_vid_pll_final_div.hw,
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
@@ -1245,8 +1283,8 @@ static struct clk_regmap meson8b_vclk_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1283,13 +1321,13 @@ static struct clk_regmap meson8b_vclk_en = {
},
};
-static struct clk_regmap meson8b_vclk_div1_gate = {
+static struct clk_regmap meson8b_vclk_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div1_en",
+ .name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1303,7 +1341,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2",
+ .name = "vclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1313,13 +1351,13 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
}
};
-static struct clk_regmap meson8b_vclk_div2_div_gate = {
+static struct clk_regmap meson8b_vclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2_en",
+ .name = "vclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
@@ -1333,7 +1371,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4",
+ .name = "vclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1343,13 +1381,13 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
}
};
-static struct clk_regmap meson8b_vclk_div4_div_gate = {
+static struct clk_regmap meson8b_vclk_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4_en",
+ .name = "vclk_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
@@ -1363,7 +1401,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6",
+ .name = "vclk_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1373,13 +1411,13 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
}
};
-static struct clk_regmap meson8b_vclk_div6_div_gate = {
+static struct clk_regmap meson8b_vclk_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6_en",
+ .name = "vclk_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
@@ -1393,7 +1431,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12",
+ .name = "vclk_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1403,13 +1441,13 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
}
};
-static struct clk_regmap meson8b_vclk_div12_div_gate = {
+static struct clk_regmap meson8b_vclk_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12_en",
+ .name = "vclk_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
@@ -1428,13 +1466,13 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_regmap meson8b_vclk2_clk_in_en = {
+static struct clk_regmap meson8b_vclk2_in_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 16,
@@ -1450,7 +1488,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
};
-static struct clk_regmap meson8b_vclk2_clk_en = {
+static struct clk_regmap meson8b_vclk2_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 19,
@@ -1459,23 +1497,23 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
.name = "vclk2_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_in_en.hw
+ &meson8b_vclk2_in_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_vclk2_div1_gate = {
+static struct clk_regmap meson8b_vclk2_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div1_en",
+ .name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1486,23 +1524,23 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2",
+ .name = "vclk2_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div2_div_gate = {
+static struct clk_regmap meson8b_vclk2_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2_en",
+ .name = "vclk2_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
@@ -1516,23 +1554,23 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4",
+ .name = "vclk2_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div4_div_gate = {
+static struct clk_regmap meson8b_vclk2_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4_en",
+ .name = "vclk2_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
@@ -1546,23 +1584,23 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6",
+ .name = "vclk2_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div6_div_gate = {
+static struct clk_regmap meson8b_vclk2_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6_en",
+ .name = "vclk2_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
@@ -1576,23 +1614,23 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12",
+ .name = "vclk2_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div12_div_gate = {
+static struct clk_regmap meson8b_vclk2_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12_en",
+ .name = "vclk2_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
@@ -1602,12 +1640,12 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
};
-static const struct clk_hw *meson8b_vclk_enc_mux_parent_hws[] = {
- &meson8b_vclk_div1_gate.hw,
- &meson8b_vclk_div2_div_gate.hw,
- &meson8b_vclk_div4_div_gate.hw,
- &meson8b_vclk_div6_div_gate.hw,
- &meson8b_vclk_div12_div_gate.hw,
+static const struct clk_hw *meson8b_vclk_enc_parents[] = {
+ &meson8b_vclk_div1.hw,
+ &meson8b_vclk_div2.hw,
+ &meson8b_vclk_div4.hw,
+ &meson8b_vclk_div6.hw,
+ &meson8b_vclk_div12.hw,
};
static struct clk_regmap meson8b_cts_enct_sel = {
@@ -1619,8 +1657,8 @@ static struct clk_regmap meson8b_cts_enct_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1650,8 +1688,8 @@ static struct clk_regmap meson8b_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1681,8 +1719,8 @@ static struct clk_regmap meson8b_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1712,8 +1750,8 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1734,14 +1772,6 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
};
-static const struct clk_hw *meson8b_vclk2_enc_mux_parent_hws[] = {
- &meson8b_vclk2_div1_gate.hw,
- &meson8b_vclk2_div2_div_gate.hw,
- &meson8b_vclk2_div4_div_gate.hw,
- &meson8b_vclk2_div6_div_gate.hw,
- &meson8b_vclk2_div12_div_gate.hw,
-};
-
static struct clk_regmap meson8b_cts_encl_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VIID_CLK_DIV,
@@ -1751,8 +1781,8 @@ static struct clk_regmap meson8b_cts_encl_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1782,8 +1812,8 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1866,7 +1896,8 @@ static struct clk_regmap meson8b_hdmi_sys = {
* CLK_SET_RATE_GATE is set.
* Meson8 only has mali_0 and no glitch-free mux.
*/
-static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+static u32 meson8b_mali_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data meson8b_mali_parents[] = {
{ .fw_name = "xtal", .name = "xtal", .index = -1, },
{ .hw = &meson8b_mpll2.hw, },
{ .hw = &meson8b_mpll1.hw, },
@@ -1876,20 +1907,18 @@ static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
{ .hw = &meson8b_fclk_div5.hw, },
};
-static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
-
static struct clk_regmap meson8b_mali_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 9,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1938,13 +1967,13 @@ static struct clk_regmap meson8b_mali_1_sel = {
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 25,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -2079,20 +2108,13 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const struct clk_hw *meson8b_vpu_0_1_parent_hws[] = {
+static const struct clk_hw *meson8b_vpu_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
&meson8b_fclk_div7.hw,
};
-static const struct clk_hw *mmeson8m2_vpu_0_1_parent_hws[] = {
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
- &meson8m2_gp_pll.hw,
-};
-
static struct clk_regmap meson8b_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2102,12 +2124,19 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct clk_hw *meson8m2_vpu_parents[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8m2_gp_pll.hw,
+};
+
static struct clk_regmap meson8m2_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2117,8 +2146,8 @@ static struct clk_regmap meson8m2_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+		.parent_hws = meson8m2_vpu_parents,
+		.num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2173,8 +2202,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2188,8 +2217,8 @@ static struct clk_regmap meson8m2_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+		.parent_hws = meson8m2_vpu_parents,
+		.num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2261,7 +2290,7 @@ static struct clk_regmap meson8b_vpu = {
},
};
-static const struct clk_hw *meson8b_vdec_parent_hws[] = {
+static const struct clk_hw *meson8b_vdec_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
@@ -2280,8 +2309,8 @@ static struct clk_regmap meson8b_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2383,8 +2412,8 @@ static struct clk_regmap meson8b_vdec_hcodec_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2433,8 +2462,8 @@ static struct clk_regmap meson8b_vdec_2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2483,8 +2512,8 @@ static struct clk_regmap meson8b_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2543,27 +2572,26 @@ static struct clk_regmap meson8b_vdec_hevc = {
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_amclk_parent_hws[] = {
+static u32 meson8b_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *meson8b_cts_mclk_parents[] = {
&meson8b_mpll0.hw,
&meson8b_mpll1.hw,
&meson8b_mpll2.hw
};
-static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = meson8b_cts_amclk_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_amclk_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2601,28 +2629,19 @@ static struct clk_regmap meson8b_cts_amclk = {
},
};
-/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_mclk_i958_parent_hws[] = {
- &meson8b_mpll0.hw,
- &meson8b_mpll1.hw,
- &meson8b_mpll2.hw
-};
-
-static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_mclk_i958_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = meson8b_cts_mclk_i958_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_mclk_i958_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2682,113 +2701,128 @@ static struct clk_regmap meson8b_cts_i958 = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-
-static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(meson8b_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(meson8b_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(meson8b_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(meson8b_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(meson8b_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(meson8b_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(meson8b_sar_adc, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(meson8b_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(meson8b_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(meson8b_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(meson8b_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(meson8b_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(meson8b_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(meson8b_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(meson8b_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(meson8b_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(meson8b_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(meson8b_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(meson8b_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(meson8b_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(meson8b_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(meson8b_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(meson8b_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(meson8b_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(meson8b_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(meson8b_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(meson8b_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(meson8b_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(meson8b_sana, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8);
-static MESON_GATE(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(meson8b_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(meson8b_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(meson8b_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(meson8b_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22);
-static MESON_GATE(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data meson8b_pclk_parents = { .hw = &meson8b_clk81.hw };
+
+#define MESON8B_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_pclk_parents, _flags)
+
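For readers unfamiliar with the helper: MESON_PCLK (defined elsewhere in this directory) stamps out a clk_regmap gate. The sketch below is an assumed, approximate expansion of one MESON8B_PCLK line, not the literal macro output:

/* Approximate expansion of
 * MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
 * field layout assumed from the clk-regmap conventions used in this file.
 */
static struct clk_regmap meson8b_ddr = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data){
		.name = "meson8b_ddr",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &meson8b_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};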
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sar_adc, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sana, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* AIU gates */
-#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
-
-static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
-static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+static const struct clk_parent_data meson8b_aiu_glue_parents = { .hw = &meson8b_aiu.hw };
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6,
+ &meson8b_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data meson8b_aiu_pclk_parents = { .hw = &meson8b_aiu_glue.hw };
+#define MESON8B_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &meson8b_aiu_pclk_parents, _flags)
+
+static MESON8B_AIU_PCLK(meson8b_iec958, 7, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_i2s_out, 8, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_amclk, 9, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_aififo2, 10, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer, 11, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_adc, 13, CLK_IGNORE_UNUSED);
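The AIU gates above form a small hierarchy: the leaf gates are parented on aiu_glue, which is in turn parented on the aiu pclk. A consumer-side sketch, assuming standard common-clock-framework behaviour; the lookup name "i2s_out" is hypothetical and depends on the DT bindings:

static int enable_aiu_i2s(struct device *dev)
{
	struct clk *i2s = devm_clk_get(dev, "i2s_out");

	if (IS_ERR(i2s))
		return PTR_ERR(i2s);

	/* Enabling the leaf also enables the aiu_glue and aiu gates. */
	return clk_prepare_enable(i2s);
}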
/* Always On (AO) domain gates */
-static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static MESON8B_PCLK(meson8b_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
static struct clk_hw *meson8_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -2885,7 +2919,7 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -2896,14 +2930,14 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -2914,27 +2948,27 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -2981,18 +3015,18 @@ static struct clk_hw *meson8_hw_clks[] = {
};
static struct clk_hw *meson8b_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3089,7 +3123,7 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3100,14 +3134,14 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3118,27 +3152,27 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3196,18 +3230,18 @@ static struct clk_hw *meson8b_hw_clks[] = {
};
static struct clk_hw *meson8m2_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3304,7 +3338,7 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3315,14 +3349,14 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3333,27 +3367,27 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3412,202 +3446,6 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
};
-static struct clk_regmap *const meson8b_clk_regmaps[] = {
- &meson8b_clk81,
- &meson8b_ddr,
- &meson8b_dos,
- &meson8b_isa,
- &meson8b_pl301,
- &meson8b_periphs,
- &meson8b_spicc,
- &meson8b_i2c,
- &meson8b_sar_adc,
- &meson8b_smart_card,
- &meson8b_rng0,
- &meson8b_uart0,
- &meson8b_sdhc,
- &meson8b_stream,
- &meson8b_async_fifo,
- &meson8b_sdio,
- &meson8b_abuf,
- &meson8b_hiu_iface,
- &meson8b_assist_misc,
- &meson8b_spi,
- &meson8b_i2s_spdif,
- &meson8b_eth,
- &meson8b_demux,
- &meson8b_aiu_glue,
- &meson8b_iec958,
- &meson8b_i2s_out,
- &meson8b_amclk,
- &meson8b_aififo2,
- &meson8b_mixer,
- &meson8b_mixer_iface,
- &meson8b_adc,
- &meson8b_blkmv,
- &meson8b_aiu,
- &meson8b_uart1,
- &meson8b_g2d,
- &meson8b_usb0,
- &meson8b_usb1,
- &meson8b_reset,
- &meson8b_nand,
- &meson8b_dos_parser,
- &meson8b_usb,
- &meson8b_vdin1,
- &meson8b_ahb_arb0,
- &meson8b_efuse,
- &meson8b_boot_rom,
- &meson8b_ahb_data_bus,
- &meson8b_ahb_ctrl_bus,
- &meson8b_hdmi_intr_sync,
- &meson8b_hdmi_pclk,
- &meson8b_usb1_ddr_bridge,
- &meson8b_usb0_ddr_bridge,
- &meson8b_mmc_pclk,
- &meson8b_dvin,
- &meson8b_uart2,
- &meson8b_sana,
- &meson8b_vpu_intr,
- &meson8b_sec_ahb_ahb3_bridge,
- &meson8b_clk81_a9,
- &meson8b_vclk2_venci0,
- &meson8b_vclk2_venci1,
- &meson8b_vclk2_vencp0,
- &meson8b_vclk2_vencp1,
- &meson8b_gclk_venci_int,
- &meson8b_gclk_vencp_int,
- &meson8b_dac_clk,
- &meson8b_aoclk_gate,
- &meson8b_iec958_gate,
- &meson8b_enc480p,
- &meson8b_rng1,
- &meson8b_gclk_vencl_int,
- &meson8b_vclk2_venclmcc,
- &meson8b_vclk2_vencl,
- &meson8b_vclk2_other,
- &meson8b_edp,
- &meson8b_ao_media_cpu,
- &meson8b_ao_ahb_sram,
- &meson8b_ao_ahb_bus,
- &meson8b_ao_iface,
- &meson8b_mpeg_clk_div,
- &meson8b_mpeg_clk_sel,
- &meson8b_mpll0,
- &meson8b_mpll1,
- &meson8b_mpll2,
- &meson8b_mpll0_div,
- &meson8b_mpll1_div,
- &meson8b_mpll2_div,
- &meson8b_fixed_pll,
- &meson8b_sys_pll,
- &meson8b_cpu_in_sel,
- &meson8b_cpu_scale_div,
- &meson8b_cpu_scale_out_sel,
- &meson8b_cpu_clk,
- &meson8b_mpll_prediv,
- &meson8b_fclk_div2,
- &meson8b_fclk_div3,
- &meson8b_fclk_div4,
- &meson8b_fclk_div5,
- &meson8b_fclk_div7,
- &meson8b_nand_clk_sel,
- &meson8b_nand_clk_div,
- &meson8b_nand_clk_gate,
- &meson8b_fixed_pll_dco,
- &meson8b_hdmi_pll_dco,
- &meson8b_sys_pll_dco,
- &meson8b_apb_clk_sel,
- &meson8b_apb_clk_gate,
- &meson8b_periph_clk_sel,
- &meson8b_periph_clk_gate,
- &meson8b_axi_clk_sel,
- &meson8b_axi_clk_gate,
- &meson8b_l2_dram_clk_sel,
- &meson8b_l2_dram_clk_gate,
- &meson8b_hdmi_pll_lvds_out,
- &meson8b_hdmi_pll_hdmi_out,
- &meson8b_vid_pll_in_sel,
- &meson8b_vid_pll_in_en,
- &meson8b_vid_pll_pre_div,
- &meson8b_vid_pll_post_div,
- &meson8b_vid_pll,
- &meson8b_vid_pll_final_div,
- &meson8b_vclk_in_sel,
- &meson8b_vclk_in_en,
- &meson8b_vclk_en,
- &meson8b_vclk_div1_gate,
- &meson8b_vclk_div2_div_gate,
- &meson8b_vclk_div4_div_gate,
- &meson8b_vclk_div6_div_gate,
- &meson8b_vclk_div12_div_gate,
- &meson8b_vclk2_in_sel,
- &meson8b_vclk2_clk_in_en,
- &meson8b_vclk2_clk_en,
- &meson8b_vclk2_div1_gate,
- &meson8b_vclk2_div2_div_gate,
- &meson8b_vclk2_div4_div_gate,
- &meson8b_vclk2_div6_div_gate,
- &meson8b_vclk2_div12_div_gate,
- &meson8b_cts_enct_sel,
- &meson8b_cts_enct,
- &meson8b_cts_encp_sel,
- &meson8b_cts_encp,
- &meson8b_cts_enci_sel,
- &meson8b_cts_enci,
- &meson8b_hdmi_tx_pixel_sel,
- &meson8b_hdmi_tx_pixel,
- &meson8b_cts_encl_sel,
- &meson8b_cts_encl,
- &meson8b_cts_vdac0_sel,
- &meson8b_cts_vdac0,
- &meson8b_hdmi_sys_sel,
- &meson8b_hdmi_sys_div,
- &meson8b_hdmi_sys,
- &meson8b_mali_0_sel,
- &meson8b_mali_0_div,
- &meson8b_mali_0,
- &meson8b_mali_1_sel,
- &meson8b_mali_1_div,
- &meson8b_mali_1,
- &meson8b_mali,
- &meson8m2_gp_pll_dco,
- &meson8m2_gp_pll,
- &meson8b_vpu_0_sel,
- &meson8m2_vpu_0_sel,
- &meson8b_vpu_0_div,
- &meson8b_vpu_0,
- &meson8b_vpu_1_sel,
- &meson8m2_vpu_1_sel,
- &meson8b_vpu_1_div,
- &meson8b_vpu_1,
- &meson8b_vpu,
- &meson8b_vdec_1_sel,
- &meson8b_vdec_1_1_div,
- &meson8b_vdec_1_1,
- &meson8b_vdec_1_2_div,
- &meson8b_vdec_1_2,
- &meson8b_vdec_1,
- &meson8b_vdec_hcodec_sel,
- &meson8b_vdec_hcodec_div,
- &meson8b_vdec_hcodec,
- &meson8b_vdec_2_sel,
- &meson8b_vdec_2_div,
- &meson8b_vdec_2,
- &meson8b_vdec_hevc_sel,
- &meson8b_vdec_hevc_div,
- &meson8b_vdec_hevc_en,
- &meson8b_vdec_hevc,
- &meson8b_cts_amclk,
- &meson8b_cts_amclk_sel,
- &meson8b_cts_amclk_div,
- &meson8b_cts_mclk_i958_sel,
- &meson8b_cts_mclk_i958_div,
- &meson8b_cts_mclk_i958,
- &meson8b_cts_i958,
- &meson8b_vid_pll_lvds_en,
-};
-
static const struct meson8b_clk_reset_line {
u32 reg;
u8 bit_idx;
@@ -3702,7 +3540,6 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
container_of(rcdev, struct meson8b_clk_reset, reset);
const struct meson8b_clk_reset_line *reset;
unsigned int value = 0;
- unsigned long flags;
if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
return -EINVAL;
@@ -3712,13 +3549,9 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
if (assert != reset->active_low)
value = BIT(reset->bit_idx);
- spin_lock_irqsave(&meson_clk_lock, flags);
-
regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
BIT(reset->bit_idx), value);
- spin_unlock_irqrestore(&meson_clk_lock, flags);
-
return 0;
}
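A consumer-side usage sketch for the reset path above, assuming a DT phandle to this controller (the NULL id picks the first "resets" entry; the hold time is an illustrative value):

static int example_reset_cycle(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get_exclusive(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* -> meson8b_clk_reset_update(.., true) */
	udelay(10);
	return reset_control_deassert(rst);
}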
@@ -3829,10 +3662,6 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
return;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
- meson8b_clk_regmaps[i]->map = map;
-
/*
* register all clks and start with the first used ID (which is
* CLKID_PLL_FIXED)
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
deleted file mode 100644
index a5b6e67eeefb..000000000000
--- a/drivers/clk/meson/meson8b.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- *
- * Copyright (c) 2016 BayLibre, Inc.
- * Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __MESON8B_H
-#define __MESON8B_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the HardKernel[0] data sheet are listed in comment
- * blocks below. Those offsets must be multiplied by 4 before adding them to
- * the base address to get the right value
- *
- * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
- */
-#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */
-#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */
-#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_VID_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_VID_PLL_CNTL4 0x32c /* 0xcb offset in data sheet */
-#define HHI_VID_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_VID_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_VID2_PLL_CNTL 0x380 /* 0xe0 offset in data sheet */
-#define HHI_VID2_PLL_CNTL2 0x384 /* 0xe1 offset in data sheet */
-#define HHI_VID2_PLL_CNTL3 0x388 /* 0xe2 offset in data sheet */
-#define HHI_VID2_PLL_CNTL4 0x38c /* 0xe3 offset in data sheet */
-#define HHI_VID2_PLL_CNTL5 0x390 /* 0xe4 offset in data sheet */
-#define HHI_VID2_PLL_CNTL6 0x394 /* 0xe5 offset in data sheet */
-
-/*
- * MPLL register offeset taken from the S905 datasheet. Vendor kernel source
- * confirm these are the same for the S805.
- */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* 0xa9 offset in data sheet */
-
-#endif /* __MESON8B_H */
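One detail from the removed header worth keeping in mind: the data sheet counts 32-bit registers while the driver uses byte offsets, hence the multiply-by-4 rule. A quick sanity check against the values above:

/* Word offset (data sheet) * 4 == byte offset (driver), for example: */
static_assert(0x10 * 4 == 0x40);	/* HHI_GP_PLL_CNTL */
static_assert(0x97 * 4 == 0x25c);	/* HHI_NAND_CLK_CNTL */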
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index c930cf0614a0..6d69b132d1e1 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -13,10 +13,64 @@
#include "clk-regmap.h"
#include "vid-pll-div.h"
#include "clk-dualdiv.h"
-#include "s4-peripherals.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-peripherals-clkc.h>
+#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
+#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
+#define CLKCTRL_RTC_CTRL 0x010
+#define CLKCTRL_SYS_CLK_CTRL0 0x040
+#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
+#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
+#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
+#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
+#define CLKCTRL_CECA_CTRL0 0x088
+#define CLKCTRL_CECA_CTRL1 0x08c
+#define CLKCTRL_CECB_CTRL0 0x090
+#define CLKCTRL_CECB_CTRL1 0x094
+#define CLKCTRL_SC_CLK_CTRL 0x098
+#define CLKCTRL_CLK12_24_CTRL 0x0a8
+#define CLKCTRL_VID_CLK_CTRL 0x0c0
+#define CLKCTRL_VID_CLK_CTRL2 0x0c4
+#define CLKCTRL_VID_CLK_DIV 0x0c8
+#define CLKCTRL_VIID_CLK_DIV 0x0cc
+#define CLKCTRL_VIID_CLK_CTRL 0x0d0
+#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
+#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
+#define CLKCTRL_VPU_CLK_CTRL 0x0e8
+#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
+#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
+#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
+#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
+#define CLKCTRL_VAPBCLK_CTRL 0x0fc
+#define CLKCTRL_HDCP22_CTRL 0x100
+#define CLKCTRL_VDEC_CLK_CTRL 0x140
+#define CLKCTRL_VDEC2_CLK_CTRL 0x144
+#define CLKCTRL_VDEC3_CLK_CTRL 0x148
+#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
+#define CLKCTRL_TS_CLK_CTRL 0x158
+#define CLKCTRL_MALI_CLK_CTRL 0x15c
+#define CLKCTRL_NAND_CLK_CTRL 0x168
+#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
+#define CLKCTRL_SPICC_CLK_CTRL 0x174
+#define CLKCTRL_GEN_CLK_CTRL 0x178
+#define CLKCTRL_SAR_CLK_CTRL 0x17c
+#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
+#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
+#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
+#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
+#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
+#define CLKCTRL_DEMOD_CLK_CTRL 0x200
+
+#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(s4_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define S4_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(s4_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define S4_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(s4_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
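Assuming the MESON_COMP_* helpers emit the matching _sel/_div/_gate clk_regmap definitions, a composite clock would then be declared in three lines. The "demo" clock below is hypothetical and only illustrates the intended invocation; the exact macro signature may differ:

static S4_COMP_SEL(demo, CLKCTRL_GEN_CLK_CTRL, 9, 0x7, s4_sysclk_parents);
static S4_COMP_DIV(demo, CLKCTRL_GEN_CLK_CTRL, 0, 8);
static S4_COMP_GATE(demo, CLKCTRL_GEN_CLK_CTRL, 11);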
static struct clk_regmap s4_rtc_32k_by_oscin_clkin = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_RTC_BY_OSCIN_CTRL0,
@@ -137,8 +191,8 @@ static struct clk_regmap s4_rtc_clk = {
};
/* The index 5 is AXI_CLK, which is dedicated to AXI. So skip it. */
-static u32 mux_table_sys_ab_clk_sel[] = { 0, 1, 2, 3, 4, 6, 7 };
-static const struct clk_parent_data sys_ab_clk_parent_data[] = {
+static u32 s4_sysclk_parents_val_table[] = { 0, 1, 2, 3, 4, 6, 7 };
+static const struct clk_parent_data s4_sysclk_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
@@ -160,13 +214,13 @@ static struct clk_regmap s4_sysclk_b_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -206,13 +260,13 @@ static struct clk_regmap s4_sysclk_a_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -478,24 +532,24 @@ static struct clk_regmap s4_cecb_32k_clkout = {
},
};
-static const struct clk_parent_data s4_sc_parent_data[] = {
+static const struct clk_parent_data s4_sc_clk_parents[] = {
{ .fw_name = "fclk_div4" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_sc_clk_mux = {
+static struct clk_regmap s4_sc_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "sc_clk_mux",
+ .name = "sc_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sc_parent_data,
- .num_parents = ARRAY_SIZE(s4_sc_parent_data),
+ .parent_data = s4_sc_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_sc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -510,20 +564,20 @@ static struct clk_regmap s4_sc_clk_div = {
.name = "sc_clk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_sc_clk_mux.hw
+ &s4_sc_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_sc_clk_gate = {
+static struct clk_regmap s4_sc_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "sc_clk_gate",
+ .name = "sc_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_sc_clk_div.hw
@@ -533,13 +587,13 @@ static struct clk_regmap s4_sc_clk_gate = {
},
};
-static struct clk_regmap s4_12_24M_clk_gate = {
+static struct clk_regmap s4_12_24M = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.bit_idx = 11,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m_gate",
+ .name = "12_24M",
.ops = &clk_regmap_gate_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", }
@@ -548,32 +602,32 @@ static struct clk_regmap s4_12_24M_clk_gate = {
},
};
-static struct clk_fixed_factor s4_12M_clk_div = {
+static struct clk_fixed_factor s4_12M_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "12M",
+ .name = "12M_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw
+ &s4_12_24M.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_12_24M_clk = {
+static struct clk_regmap s4_12_24M_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.mask = 0x1,
.shift = 10,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m",
+ .name = "12_24M_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw,
- &s4_12M_clk_div.hw,
+ &s4_12_24M.hw,
+ &s4_12M_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -642,7 +696,7 @@ static struct clk_regmap s4_vid_pll = {
},
};
-static const struct clk_parent_data s4_vclk_parent_data[] = {
+static const struct clk_parent_data s4_vclk_parents[] = {
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -662,8 +716,8 @@ static struct clk_regmap s4_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -677,8 +731,8 @@ static struct clk_regmap s4_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -1026,8 +1080,8 @@ static struct clk_fixed_factor s4_vclk2_div12 = {
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_parent_hws[] = {
+static u32 s4_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_cts_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1045,13 +1099,13 @@ static struct clk_regmap s4_cts_enci_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1061,13 +1115,13 @@ static struct clk_regmap s4_cts_encp_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1077,20 +1131,20 @@ static struct clk_regmap s4_cts_vdac_sel = {
.offset = CLKCTRL_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_hdmi_tx_parent_hws[] = {
+static u32 s4_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_hdmi_tx_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1108,13 +1162,13 @@ static struct clk_regmap s4_hdmi_tx_sel = {
.offset = CLKCTRL_HDMI_CLK_CTRL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = s4_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_hdmi_tx_parent_hws),
+ .parent_hws = s4_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_tx_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1184,7 +1238,7 @@ static struct clk_regmap s4_hdmi_tx = {
};
/* HDMI Clocks */
-static const struct clk_parent_data s4_hdmi_parent_data[] = {
+static const struct clk_parent_data s4_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -1201,8 +1255,8 @@ static struct clk_regmap s4_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(s4_hdmi_parent_data),
+ .parent_data = s4_hdmi_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1253,7 +1307,7 @@ static struct clk_regmap s4_ts_clk_div = {
},
};
-static struct clk_regmap s4_ts_clk_gate = {
+static struct clk_regmap s4_ts_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_TS_CLK_CTRL,
.bit_idx = 8,
@@ -1275,7 +1329,7 @@ static struct clk_regmap s4_ts_clk_gate = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data s4_mali_0_1_parent_data[] = {
+static const struct clk_parent_data s4_mali_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -1295,8 +1349,8 @@ static struct clk_regmap s4_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1349,8 +1403,8 @@ static struct clk_regmap s4_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
.flags = 0,
},
};
@@ -1388,28 +1442,26 @@ static struct clk_regmap s4_mali_1 = {
},
};
-static const struct clk_hw *s4_mali_parent_hws[] = {
- &s4_mali_0.hw,
- &s4_mali_1.hw
-};
-
-static struct clk_regmap s4_mali_mux = {
+static struct clk_regmap s4_mali_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_MALI_CLK_CTRL,
.mask = 1,
.shift = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "mali",
+ .name = "mali_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_mali_0.hw,
+ &s4_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VDEC clocks */
-static const struct clk_parent_data s4_dec_parent_data[] = {
+static const struct clk_parent_data s4_dec_parents[] = {
{ .fw_name = "fclk_div2p5", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
@@ -1420,7 +1472,7 @@ static const struct clk_parent_data s4_dec_parent_data[] = {
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_vdec_p0_mux = {
+static struct clk_regmap s4_vdec_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC_CLK_CTRL,
.mask = 0x7,
@@ -1428,10 +1480,10 @@ static struct clk_regmap s4_vdec_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p0_mux",
+ .name = "vdec_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1447,7 +1499,7 @@ static struct clk_regmap s4_vdec_p0_div = {
.name = "vdec_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p0_mux.hw
+ &s4_vdec_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1470,7 +1522,7 @@ static struct clk_regmap s4_vdec_p0 = {
},
};
-static struct clk_regmap s4_vdec_p1_mux = {
+static struct clk_regmap s4_vdec_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x7,
@@ -1478,10 +1530,10 @@ static struct clk_regmap s4_vdec_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p1_mux",
+ .name = "vdec_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1497,7 +1549,7 @@ static struct clk_regmap s4_vdec_p1_div = {
.name = "vdec_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p1_mux.hw
+ &s4_vdec_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1520,27 +1572,25 @@ static struct clk_regmap s4_vdec_p1 = {
},
};
-static const struct clk_hw *s4_vdec_mux_parent_hws[] = {
- &s4_vdec_p0.hw,
- &s4_vdec_p1.hw
-};
-
-static struct clk_regmap s4_vdec_mux = {
+static struct clk_regmap s4_vdec_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_mux",
+ .name = "vdec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vdec_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vdec_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vdec_p0.hw,
+ &s4_vdec_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hevcf_p0_mux = {
+static struct clk_regmap s4_hevcf_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC2_CLK_CTRL,
.mask = 0x7,
@@ -1548,10 +1598,10 @@ static struct clk_regmap s4_hevcf_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p0_mux",
+ .name = "hevcf_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1567,7 +1617,7 @@ static struct clk_regmap s4_hevcf_p0_div = {
.name = "hevcf_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p0_mux.hw
+ &s4_hevcf_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1580,7 +1630,7 @@ static struct clk_regmap s4_hevcf_p0 = {
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hevcf_p0_gate",
+ .name = "hevcf_p0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hevcf_p0_div.hw
@@ -1590,7 +1640,7 @@ static struct clk_regmap s4_hevcf_p0 = {
},
};
-static struct clk_regmap s4_hevcf_p1_mux = {
+static struct clk_regmap s4_hevcf_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x7,
@@ -1598,10 +1648,10 @@ static struct clk_regmap s4_hevcf_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p1_mux",
+ .name = "hevcf_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1617,7 +1667,7 @@ static struct clk_regmap s4_hevcf_p1_div = {
.name = "hevcf_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p1_mux.hw
+ &s4_hevcf_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1640,28 +1690,26 @@ static struct clk_regmap s4_hevcf_p1 = {
},
};
-static const struct clk_hw *s4_hevcf_mux_parent_hws[] = {
- &s4_hevcf_p0.hw,
- &s4_hevcf_p1.hw
-};
-
-static struct clk_regmap s4_hevcf_mux = {
+static struct clk_regmap s4_hevcf_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf",
+ .name = "hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_hevcf_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_hevcf_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_hevcf_p0.hw,
+ &s4_hevcf_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VPU Clock */
-static const struct clk_parent_data s4_vpu_parent_data[] = {
+static const struct clk_parent_data s4_vpu_parents[] = {
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
@@ -1681,8 +1729,8 @@ static struct clk_regmap s4_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1725,8 +1773,8 @@ static struct clk_regmap s4_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1778,24 +1826,24 @@ static struct clk_regmap s4_vpu = {
},
};
-static const struct clk_parent_data vpu_clkb_tmp_parent_data[] = {
+static const struct clk_parent_data vpu_clkb_tmp_parents[] = {
{ .hw = &s4_vpu.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "fclk_div7", }
};
-static struct clk_regmap s4_vpu_clkb_tmp_mux = {
+static struct clk_regmap s4_vpu_clkb_tmp_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKB_CTRL,
.mask = 0x3,
.shift = 20,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkb_tmp_mux",
+ .name = "vpu_clkb_tmp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = vpu_clkb_tmp_parent_data,
- .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parent_data),
+ .parent_data = vpu_clkb_tmp_parents,
+ .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1810,7 +1858,7 @@ static struct clk_regmap s4_vpu_clkb_tmp_div = {
.name = "vpu_clkb_tmp_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkb_tmp_mux.hw
+ &s4_vpu_clkb_tmp_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1866,7 +1914,7 @@ static struct clk_regmap s4_vpu_clkb = {
},
};
-static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
+static const struct clk_parent_data s4_vpu_clkc_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -1877,17 +1925,17 @@ static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
{ .fw_name = "gp0_pll", },
};
-static struct clk_regmap s4_vpu_clkc_p0_mux = {
+static struct clk_regmap s4_vpu_clkc_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p0_mux",
+ .name = "vpu_clkc_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1902,7 +1950,7 @@ static struct clk_regmap s4_vpu_clkc_p0_div = {
.name = "vpu_clkc_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p0_mux.hw
+ &s4_vpu_clkc_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1925,17 +1973,17 @@ static struct clk_regmap s4_vpu_clkc_p0 = {
},
};
-static struct clk_regmap s4_vpu_clkc_p1_mux = {
+static struct clk_regmap s4_vpu_clkc_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p1_mux",
+ .name = "vpu_clkc_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1950,7 +1998,7 @@ static struct clk_regmap s4_vpu_clkc_p1_div = {
.name = "vpu_clkc_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p1_mux.hw
+ &s4_vpu_clkc_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1973,28 +2021,26 @@ static struct clk_regmap s4_vpu_clkc_p1 = {
},
};
-static const struct clk_hw *s4_vpu_mux_parent_hws[] = {
- &s4_vpu_clkc_p0.hw,
- &s4_vpu_clkc_p1.hw
-};
-
-static struct clk_regmap s4_vpu_clkc_mux = {
+static struct clk_regmap s4_vpu_clkc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x1,
.shift = 31,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_mux",
+ .name = "vpu_clkc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vpu_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vpu_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vpu_clkc_p0.hw,
+ &s4_vpu_clkc_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VAPB Clock */
-static const struct clk_parent_data s4_vapb_parent_data[] = {
+static const struct clk_parent_data s4_vapb_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -2014,8 +2060,8 @@ static struct clk_regmap s4_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2062,8 +2108,8 @@ static struct clk_regmap s4_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2119,13 +2165,13 @@ static struct clk_regmap s4_vapb = {
},
};
-static struct clk_regmap s4_ge2d_gate = {
+static struct clk_regmap s4_ge2d = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VAPBCLK_CTRL,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data) {
- .name = "ge2d_clk",
+ .name = "ge2d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &s4_vapb.hw },
.num_parents = 1,
@@ -2133,24 +2179,24 @@ static struct clk_regmap s4_ge2d_gate = {
},
};
-static const struct clk_parent_data s4_esmclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_esmclk_parents[] = {
{ .fw_name = "fclk_div7", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_esmclk_mux = {
+static struct clk_regmap s4_hdcp22_esmclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_esmclk_mux",
+ .name = "hdcp22_esmclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_esmclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_esmclk_parent_data),
+ .parent_data = s4_hdcp22_esmclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_esmclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2165,20 +2211,20 @@ static struct clk_regmap s4_hdcp22_esmclk_div = {
.name = "hdcp22_esmclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_esmclk_mux.hw
+ &s4_hdcp22_esmclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_esmclk_gate = {
+static struct clk_regmap s4_hdcp22_esmclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_esmclk_gate",
+ .name = "hdcp22_esmclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_esmclk_div.hw
@@ -2188,24 +2234,24 @@ static struct clk_regmap s4_hdcp22_esmclk_gate = {
},
};
-static const struct clk_parent_data s4_skpclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_skpclk_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_skpclk_mux = {
+static struct clk_regmap s4_hdcp22_skpclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_skpclk_mux",
+ .name = "hdcp22_skpclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_skpclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_skpclk_parent_data),
+ .parent_data = s4_hdcp22_skpclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_skpclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2220,20 +2266,20 @@ static struct clk_regmap s4_hdcp22_skpclk_div = {
.name = "hdcp22_skpclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_skpclk_mux.hw
+ &s4_hdcp22_skpclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_skpclk_gate = {
+static struct clk_regmap s4_hdcp22_skpclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 24,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_skpclk_gate",
+ .name = "hdcp22_skpclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_skpclk_div.hw
@@ -2243,7 +2289,7 @@ static struct clk_regmap s4_hdcp22_skpclk_gate = {
},
};
-static const struct clk_parent_data s4_vdin_parent_data[] = {
+static const struct clk_parent_data s4_vdin_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -2251,17 +2297,17 @@ static const struct clk_parent_data s4_vdin_parent_data[] = {
{ .hw = &s4_vid_pll.hw }
};
-static struct clk_regmap s4_vdin_meas_mux = {
+static struct clk_regmap s4_vdin_meas_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdin_meas_mux",
+ .name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vdin_parent_data,
- .num_parents = ARRAY_SIZE(s4_vdin_parent_data),
+ .parent_data = s4_vdin_parents,
+ .num_parents = ARRAY_SIZE(s4_vdin_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2276,20 +2322,20 @@ static struct clk_regmap s4_vdin_meas_div = {
.name = "vdin_meas_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdin_meas_mux.hw
+ &s4_vdin_meas_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_vdin_meas_gate = {
+static struct clk_regmap s4_vdin_meas = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "vdin_meas_gate",
+ .name = "vdin_meas",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_vdin_meas_div.hw
@@ -2300,7 +2346,7 @@ static struct clk_regmap s4_vdin_meas_gate = {
};
/* EMMC/NAND clock */
-static const struct clk_parent_data s4_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data s4_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -2320,8 +2366,8 @@ static struct clk_regmap s4_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2368,8 +2414,8 @@ static struct clk_regmap s4_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2416,8 +2462,8 @@ static struct clk_regmap s4_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2456,7 +2502,7 @@ static struct clk_regmap s4_sd_emmc_b_clk0 = {
};
/* SPICC Clock */
-static const struct clk_parent_data s4_spicc_parent_data[] = {
+static const struct clk_parent_data s4_spicc_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_sys_clk.hw },
{ .fw_name = "fclk_div4", },
@@ -2466,17 +2512,17 @@ static const struct clk_parent_data s4_spicc_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap s4_spicc0_mux = {
+static struct clk_regmap s4_spicc0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.mask = 0x7,
.shift = 7,
},
.hw.init = &(struct clk_init_data) {
- .name = "spicc0_mux",
+ .name = "spicc0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_spicc_parent_data,
- .num_parents = ARRAY_SIZE(s4_spicc_parent_data),
+ .parent_data = s4_spicc_parents,
+ .num_parents = ARRAY_SIZE(s4_spicc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2491,20 +2537,20 @@ static struct clk_regmap s4_spicc0_div = {
.name = "spicc0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_spicc0_mux.hw
+ &s4_spicc0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_spicc0_gate = {
+static struct clk_regmap s4_spicc0_en = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.bit_idx = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "spicc0",
+ .name = "spicc0_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_spicc0_div.hw
@@ -2515,500 +2561,61 @@ static struct clk_regmap s4_spicc0_gate = {
};
/* PWM Clock */
-static const struct clk_parent_data s4_pwm_parent_data[] = {
+static const struct clk_parent_data s4_pwm_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
};
-static struct clk_regmap s4_pwm_a_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 8);
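
A hedged reading of the composite helpers, whose bodies are not part of this hunk: each trio presumably expands to the same sel/div/gate clk_regmap triplet being deleted below, with the "_mux"/"_gate" names dropped in favor of "_sel" and the bare name. For pwm_a, the sel leg would come out as something like:

/* Assumed expansion of S4_COMP_SEL(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 9, 0x3, s4_pwm_parents) */
static struct clk_regmap s4_pwm_a_sel = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = CLKCTRL_PWM_CLK_AB_CTRL,
		.mask = 0x3,
		.shift = 9,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "pwm_a_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_data = s4_pwm_parents,
		.num_parents = ARRAY_SIZE(s4_pwm_parents),
	},
};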
-static struct clk_regmap s4_pwm_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 24);
-static struct clk_regmap s4_pwm_a_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 8);
-static struct clk_regmap s4_pwm_b_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 24);
-static struct clk_regmap s4_pwm_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 8);
-static struct clk_regmap s4_pwm_b_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 24);
-static struct clk_regmap s4_pwm_c_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 8);
-static struct clk_regmap s4_pwm_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_mux.hw
- },
- .num_parents = 1,
- },
-};
+static S4_COMP_SEL(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 24);
-static struct clk_regmap s4_pwm_c_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_e_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 8);
-static struct clk_regmap s4_pwm_e_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 24);
-static struct clk_regmap s4_pwm_f_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_f_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_f_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_g_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_g_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_g_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_h_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_h_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_h_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_i_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_j_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_saradc_mux = {
+static struct clk_regmap s4_saradc_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_mux",
+ .name = "saradc_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -3029,20 +2636,20 @@ static struct clk_regmap s4_saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_saradc_mux.hw
+ &s4_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_saradc_gate = {
+static struct clk_regmap s4_saradc = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_clk",
+ .name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_saradc_div.hw
@@ -3057,9 +2664,8 @@ static struct clk_regmap s4_saradc_gate = {
 * corresponding clock sources are not described in the clock tree and are
 * internal clocks for debug, so they are skipped.
*/
-static u32 s4_gen_clk_mux_table[] = { 0, 4, 5, 7, 19, 21, 22,
- 23, 24, 25, 26, 27, 28 };
-static const struct clk_parent_data s4_gen_clk_parent_data[] = {
+static u32 s4_gen_clk_parents_val_table[] = { 0, 4, 5, 7, 19, 21, 22, 23, 24, 25, 26, 27, 28 };
+static const struct clk_parent_data s4_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
@@ -3080,13 +2686,13 @@ static struct clk_regmap s4_gen_clk_sel = {
.offset = CLKCTRL_GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = s4_gen_clk_mux_table,
+ .table = s4_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(s4_gen_clk_parent_data),
+ .parent_data = s4_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_gen_clk_parents),
/*
	 * The GEN clock can be connected to an external pad
* and may be set up directly from the device tree. Don't
@@ -3129,173 +2735,75 @@ static struct clk_regmap s4_gen_clk = {
},
};
-static const struct clk_parent_data s4_adc_extclk_in_parent_data[] = {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div4", },
- { .fw_name = "fclk_div3", },
- { .fw_name = "fclk_div5", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "mpll2", },
- { .fw_name = "gp0_pll", },
- { .fw_name = "hifi_pll", },
-};
-
-static struct clk_regmap s4_adc_extclk_in_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_adc_extclk_in_parent_data,
- .num_parents = ARRAY_SIZE(s4_adc_extclk_in_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static const struct clk_parent_data s4_pclk_parents = { .hw = &s4_sys_clk.hw };
-static struct clk_regmap s4_demod_core_clk_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "fclk_div4", },
- { .hw = &s4_adc_extclk_in_gate.hw }
- },
- .num_parents = 4,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_demod_core_clk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+#define S4_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &s4_pclk_parents, _flags)
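
Roughly what one of these one-liners declares; a hedged expansion, since the MESON_PCLK() body lives in the shared Meson headers and is not part of this hunk (the name string and field layout are inferred from the gate definitions deleted elsewhere in this diff):

/* Assumed expansion of S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0, CLK_IGNORE_UNUSED) */
static struct clk_regmap s4_ddr = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = CLKCTRL_SYS_CLK_EN0_REG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "s4_ddr",		/* exact name string is a guess */
		.ops = &clk_regmap_gate_ops,
		.parent_data = &s4_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};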
-static struct clk_regmap s4_demod_core_clk_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &s4_sys_clk.hw)
-
-static MESON_GATE(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0);
-static MESON_GATE(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1);
-static MESON_GATE(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4);
-static MESON_GATE(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6);
-static MESON_GATE(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13);
-static MESON_GATE(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14);
-static MESON_GATE(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16);
-static MESON_GATE(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24);
-static MESON_GATE(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25);
-static MESON_GATE(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26);
-static MESON_GATE(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27);
-static MESON_GATE(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28);
-static MESON_GATE(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29);
-static MESON_GATE(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30);
-static MESON_GATE(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31);
-static MESON_GATE(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0);
-static MESON_GATE(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3);
-static MESON_GATE(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5);
-static MESON_GATE(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6);
-static MESON_GATE(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7);
-static MESON_GATE(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8);
-static MESON_GATE(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9);
-static MESON_GATE(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11);
-static MESON_GATE(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15);
-static MESON_GATE(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16);
-static MESON_GATE(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20);
-static MESON_GATE(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21);
-static MESON_GATE(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26);
-static MESON_GATE(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30);
-static MESON_GATE(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31);
-static MESON_GATE(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0);
-static MESON_GATE(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1);
-static MESON_GATE(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2);
-static MESON_GATE(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4);
-static MESON_GATE(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5);
-static MESON_GATE(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8);
-static MESON_GATE(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10);
-static MESON_GATE(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11);
-static MESON_GATE(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18);
-static MESON_GATE(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19);
-static MESON_GATE(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25);
-static MESON_GATE(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27);
-static MESON_GATE(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28);
-static MESON_GATE(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30);
-static MESON_GATE(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7);
-static MESON_GATE(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8);
-static MESON_GATE(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9);
-static MESON_GATE(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10);
-static MESON_GATE(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11);
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11, CLK_IGNORE_UNUSED);
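
As one example of the follow-up the NOTE above invites (hypothetical, not part of this patch): a gate shown to be indispensable for system operation would trade the blanket flag for CLK_IS_CRITICAL, e.g.

/* Hypothetical: if gating the GIC clock hangs the system, say so. */
static S4_PCLK(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30, CLK_IS_CRITICAL);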
/* Array of all clocks provided by this provider */
-static struct clk_hw *s4_periphs_hw_clks[] = {
+static struct clk_hw *s4_peripherals_hw_clks[] = {
[CLKID_RTC_32K_CLKIN] = &s4_rtc_32k_by_oscin_clkin.hw,
[CLKID_RTC_32K_DIV] = &s4_rtc_32k_by_oscin_div.hw,
[CLKID_RTC_32K_SEL] = &s4_rtc_32k_by_oscin_sel.hw,
@@ -3318,12 +2826,12 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_CECB_32K_SEL_PRE] = &s4_cecb_32k_sel_pre.hw,
[CLKID_CECB_32K_SEL] = &s4_cecb_32k_sel.hw,
[CLKID_CECB_32K_CLKOUT] = &s4_cecb_32k_clkout.hw,
- [CLKID_SC_CLK_SEL] = &s4_sc_clk_mux.hw,
+ [CLKID_SC_CLK_SEL] = &s4_sc_clk_sel.hw,
[CLKID_SC_CLK_DIV] = &s4_sc_clk_div.hw,
- [CLKID_SC] = &s4_sc_clk_gate.hw,
- [CLKID_12_24M] = &s4_12_24M_clk_gate.hw,
- [CLKID_12M_CLK_DIV] = &s4_12M_clk_div.hw,
- [CLKID_12_24M_CLK_SEL] = &s4_12_24M_clk.hw,
+ [CLKID_SC] = &s4_sc_clk.hw,
+ [CLKID_12_24M] = &s4_12_24M.hw,
+ [CLKID_12M_CLK_DIV] = &s4_12M_div.hw,
+ [CLKID_12_24M_CLK_SEL] = &s4_12_24M_sel.hw,
[CLKID_VID_PLL_DIV] = &s4_vid_pll_div.hw,
[CLKID_VID_PLL_SEL] = &s4_vid_pll_sel.hw,
[CLKID_VID_PLL] = &s4_vid_pll.hw,
@@ -3365,28 +2873,28 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_HDMI_DIV] = &s4_hdmi_div.hw,
[CLKID_HDMI] = &s4_hdmi.hw,
[CLKID_TS_CLK_DIV] = &s4_ts_clk_div.hw,
- [CLKID_TS] = &s4_ts_clk_gate.hw,
+ [CLKID_TS] = &s4_ts_clk.hw,
[CLKID_MALI_0_SEL] = &s4_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &s4_mali_0_div.hw,
[CLKID_MALI_0] = &s4_mali_0.hw,
[CLKID_MALI_1_SEL] = &s4_mali_1_sel.hw,
[CLKID_MALI_1_DIV] = &s4_mali_1_div.hw,
[CLKID_MALI_1] = &s4_mali_1.hw,
- [CLKID_MALI_SEL] = &s4_mali_mux.hw,
- [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_mux.hw,
+ [CLKID_MALI_SEL] = &s4_mali_sel.hw,
+ [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_sel.hw,
[CLKID_VDEC_P0_DIV] = &s4_vdec_p0_div.hw,
[CLKID_VDEC_P0] = &s4_vdec_p0.hw,
- [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_mux.hw,
+ [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_sel.hw,
[CLKID_VDEC_P1_DIV] = &s4_vdec_p1_div.hw,
[CLKID_VDEC_P1] = &s4_vdec_p1.hw,
- [CLKID_VDEC_SEL] = &s4_vdec_mux.hw,
- [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_mux.hw,
+ [CLKID_VDEC_SEL] = &s4_vdec_sel.hw,
+ [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_sel.hw,
[CLKID_HEVCF_P0_DIV] = &s4_hevcf_p0_div.hw,
[CLKID_HEVCF_P0] = &s4_hevcf_p0.hw,
- [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_mux.hw,
+ [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_sel.hw,
[CLKID_HEVCF_P1_DIV] = &s4_hevcf_p1_div.hw,
[CLKID_HEVCF_P1] = &s4_hevcf_p1.hw,
- [CLKID_HEVCF_SEL] = &s4_hevcf_mux.hw,
+ [CLKID_HEVCF_SEL] = &s4_hevcf_sel.hw,
[CLKID_VPU_0_SEL] = &s4_vpu_0_sel.hw,
[CLKID_VPU_0_DIV] = &s4_vpu_0_div.hw,
[CLKID_VPU_0] = &s4_vpu_0.hw,
@@ -3394,18 +2902,18 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VPU_1_DIV] = &s4_vpu_1_div.hw,
[CLKID_VPU_1] = &s4_vpu_1.hw,
[CLKID_VPU] = &s4_vpu.hw,
- [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_mux.hw,
+ [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_sel.hw,
[CLKID_VPU_CLKB_TMP_DIV] = &s4_vpu_clkb_tmp_div.hw,
[CLKID_VPU_CLKB_TMP] = &s4_vpu_clkb_tmp.hw,
[CLKID_VPU_CLKB_DIV] = &s4_vpu_clkb_div.hw,
[CLKID_VPU_CLKB] = &s4_vpu_clkb.hw,
- [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_mux.hw,
+ [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_sel.hw,
[CLKID_VPU_CLKC_P0_DIV] = &s4_vpu_clkc_p0_div.hw,
[CLKID_VPU_CLKC_P0] = &s4_vpu_clkc_p0.hw,
- [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_mux.hw,
+ [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_sel.hw,
[CLKID_VPU_CLKC_P1_DIV] = &s4_vpu_clkc_p1_div.hw,
[CLKID_VPU_CLKC_P1] = &s4_vpu_clkc_p1.hw,
- [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_mux.hw,
+ [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_sel.hw,
[CLKID_VAPB_0_SEL] = &s4_vapb_0_sel.hw,
[CLKID_VAPB_0_DIV] = &s4_vapb_0_div.hw,
[CLKID_VAPB_0] = &s4_vapb_0.hw,
@@ -3413,10 +2921,10 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VAPB_1_DIV] = &s4_vapb_1_div.hw,
[CLKID_VAPB_1] = &s4_vapb_1.hw,
[CLKID_VAPB] = &s4_vapb.hw,
- [CLKID_GE2D] = &s4_ge2d_gate.hw,
- [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_mux.hw,
+ [CLKID_GE2D] = &s4_ge2d.hw,
+ [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_sel.hw,
[CLKID_VDIN_MEAS_DIV] = &s4_vdin_meas_div.hw,
- [CLKID_VDIN_MEAS] = &s4_vdin_meas_gate.hw,
+ [CLKID_VDIN_MEAS] = &s4_vdin_meas.hw,
[CLKID_SD_EMMC_C_CLK_SEL] = &s4_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK_DIV] = &s4_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C] = &s4_sd_emmc_c_clk0.hw,
@@ -3426,42 +2934,42 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_SD_EMMC_B_CLK_SEL] = &s4_sd_emmc_b_clk0_sel.hw,
[CLKID_SD_EMMC_B_CLK_DIV] = &s4_sd_emmc_b_clk0_div.hw,
[CLKID_SD_EMMC_B] = &s4_sd_emmc_b_clk0.hw,
- [CLKID_SPICC0_SEL] = &s4_spicc0_mux.hw,
+ [CLKID_SPICC0_SEL] = &s4_spicc0_sel.hw,
[CLKID_SPICC0_DIV] = &s4_spicc0_div.hw,
- [CLKID_SPICC0_EN] = &s4_spicc0_gate.hw,
- [CLKID_PWM_A_SEL] = &s4_pwm_a_mux.hw,
+ [CLKID_SPICC0_EN] = &s4_spicc0_en.hw,
+ [CLKID_PWM_A_SEL] = &s4_pwm_a_sel.hw,
[CLKID_PWM_A_DIV] = &s4_pwm_a_div.hw,
- [CLKID_PWM_A] = &s4_pwm_a_gate.hw,
- [CLKID_PWM_B_SEL] = &s4_pwm_b_mux.hw,
+ [CLKID_PWM_A] = &s4_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &s4_pwm_b_sel.hw,
[CLKID_PWM_B_DIV] = &s4_pwm_b_div.hw,
- [CLKID_PWM_B] = &s4_pwm_b_gate.hw,
- [CLKID_PWM_C_SEL] = &s4_pwm_c_mux.hw,
+ [CLKID_PWM_B] = &s4_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &s4_pwm_c_sel.hw,
[CLKID_PWM_C_DIV] = &s4_pwm_c_div.hw,
- [CLKID_PWM_C] = &s4_pwm_c_gate.hw,
- [CLKID_PWM_D_SEL] = &s4_pwm_d_mux.hw,
+ [CLKID_PWM_C] = &s4_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &s4_pwm_d_sel.hw,
[CLKID_PWM_D_DIV] = &s4_pwm_d_div.hw,
- [CLKID_PWM_D] = &s4_pwm_d_gate.hw,
- [CLKID_PWM_E_SEL] = &s4_pwm_e_mux.hw,
+ [CLKID_PWM_D] = &s4_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &s4_pwm_e_sel.hw,
[CLKID_PWM_E_DIV] = &s4_pwm_e_div.hw,
- [CLKID_PWM_E] = &s4_pwm_e_gate.hw,
- [CLKID_PWM_F_SEL] = &s4_pwm_f_mux.hw,
+ [CLKID_PWM_E] = &s4_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &s4_pwm_f_sel.hw,
[CLKID_PWM_F_DIV] = &s4_pwm_f_div.hw,
- [CLKID_PWM_F] = &s4_pwm_f_gate.hw,
- [CLKID_PWM_G_SEL] = &s4_pwm_g_mux.hw,
+ [CLKID_PWM_F] = &s4_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &s4_pwm_g_sel.hw,
[CLKID_PWM_G_DIV] = &s4_pwm_g_div.hw,
- [CLKID_PWM_G] = &s4_pwm_g_gate.hw,
- [CLKID_PWM_H_SEL] = &s4_pwm_h_mux.hw,
+ [CLKID_PWM_G] = &s4_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &s4_pwm_h_sel.hw,
[CLKID_PWM_H_DIV] = &s4_pwm_h_div.hw,
- [CLKID_PWM_H] = &s4_pwm_h_gate.hw,
- [CLKID_PWM_I_SEL] = &s4_pwm_i_mux.hw,
+ [CLKID_PWM_H] = &s4_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &s4_pwm_i_sel.hw,
[CLKID_PWM_I_DIV] = &s4_pwm_i_div.hw,
- [CLKID_PWM_I] = &s4_pwm_i_gate.hw,
- [CLKID_PWM_J_SEL] = &s4_pwm_j_mux.hw,
+ [CLKID_PWM_I] = &s4_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &s4_pwm_j_sel.hw,
[CLKID_PWM_J_DIV] = &s4_pwm_j_div.hw,
- [CLKID_PWM_J] = &s4_pwm_j_gate.hw,
- [CLKID_SARADC_SEL] = &s4_saradc_mux.hw,
+ [CLKID_PWM_J] = &s4_pwm_j.hw,
+ [CLKID_SARADC_SEL] = &s4_saradc_sel.hw,
[CLKID_SARADC_DIV] = &s4_saradc_div.hw,
- [CLKID_SARADC] = &s4_saradc_gate.hw,
+ [CLKID_SARADC] = &s4_saradc.hw,
[CLKID_GEN_SEL] = &s4_gen_clk_sel.hw,
[CLKID_GEN_DIV] = &s4_gen_clk_div.hw,
[CLKID_GEN] = &s4_gen_clk.hw,
@@ -3514,304 +3022,40 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_PWM_EF] = &s4_pwm_ef.hw,
[CLKID_PWM_GH] = &s4_pwm_gh.hw,
[CLKID_PWM_IJ] = &s4_pwm_ij.hw,
- [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_sel.hw,
[CLKID_HDCP22_ESMCLK_DIV] = &s4_hdcp22_esmclk_div.hw,
- [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk_gate.hw,
- [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk.hw,
+ [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_sel.hw,
[CLKID_HDCP22_SKPCLK_DIV] = &s4_hdcp22_skpclk_div.hw,
- [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk_gate.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const s4_periphs_clk_regmaps[] = {
- &s4_rtc_32k_by_oscin_clkin,
- &s4_rtc_32k_by_oscin_div,
- &s4_rtc_32k_by_oscin_sel,
- &s4_rtc_32k_by_oscin,
- &s4_rtc_clk,
- &s4_sysclk_b_sel,
- &s4_sysclk_b_div,
- &s4_sysclk_b,
- &s4_sysclk_a_sel,
- &s4_sysclk_a_div,
- &s4_sysclk_a,
- &s4_sys_clk,
- &s4_ceca_32k_clkin,
- &s4_ceca_32k_div,
- &s4_ceca_32k_sel_pre,
- &s4_ceca_32k_sel,
- &s4_ceca_32k_clkout,
- &s4_cecb_32k_clkin,
- &s4_cecb_32k_div,
- &s4_cecb_32k_sel_pre,
- &s4_cecb_32k_sel,
- &s4_cecb_32k_clkout,
- &s4_sc_clk_mux,
- &s4_sc_clk_div,
- &s4_sc_clk_gate,
- &s4_12_24M_clk_gate,
- &s4_12_24M_clk,
- &s4_vid_pll_div,
- &s4_vid_pll_sel,
- &s4_vid_pll,
- &s4_vclk_sel,
- &s4_vclk2_sel,
- &s4_vclk_input,
- &s4_vclk2_input,
- &s4_vclk_div,
- &s4_vclk2_div,
- &s4_vclk,
- &s4_vclk2,
- &s4_vclk_div1,
- &s4_vclk_div2_en,
- &s4_vclk_div4_en,
- &s4_vclk_div6_en,
- &s4_vclk_div12_en,
- &s4_vclk2_div1,
- &s4_vclk2_div2_en,
- &s4_vclk2_div4_en,
- &s4_vclk2_div6_en,
- &s4_vclk2_div12_en,
- &s4_cts_enci_sel,
- &s4_cts_encp_sel,
- &s4_cts_vdac_sel,
- &s4_hdmi_tx_sel,
- &s4_cts_enci,
- &s4_cts_encp,
- &s4_cts_vdac,
- &s4_hdmi_tx,
- &s4_hdmi_sel,
- &s4_hdmi_div,
- &s4_hdmi,
- &s4_ts_clk_div,
- &s4_ts_clk_gate,
- &s4_mali_0_sel,
- &s4_mali_0_div,
- &s4_mali_0,
- &s4_mali_1_sel,
- &s4_mali_1_div,
- &s4_mali_1,
- &s4_mali_mux,
- &s4_vdec_p0_mux,
- &s4_vdec_p0_div,
- &s4_vdec_p0,
- &s4_vdec_p1_mux,
- &s4_vdec_p1_div,
- &s4_vdec_p1,
- &s4_vdec_mux,
- &s4_hevcf_p0_mux,
- &s4_hevcf_p0_div,
- &s4_hevcf_p0,
- &s4_hevcf_p1_mux,
- &s4_hevcf_p1_div,
- &s4_hevcf_p1,
- &s4_hevcf_mux,
- &s4_vpu_0_sel,
- &s4_vpu_0_div,
- &s4_vpu_0,
- &s4_vpu_1_sel,
- &s4_vpu_1_div,
- &s4_vpu_1,
- &s4_vpu,
- &s4_vpu_clkb_tmp_mux,
- &s4_vpu_clkb_tmp_div,
- &s4_vpu_clkb_tmp,
- &s4_vpu_clkb_div,
- &s4_vpu_clkb,
- &s4_vpu_clkc_p0_mux,
- &s4_vpu_clkc_p0_div,
- &s4_vpu_clkc_p0,
- &s4_vpu_clkc_p1_mux,
- &s4_vpu_clkc_p1_div,
- &s4_vpu_clkc_p1,
- &s4_vpu_clkc_mux,
- &s4_vapb_0_sel,
- &s4_vapb_0_div,
- &s4_vapb_0,
- &s4_vapb_1_sel,
- &s4_vapb_1_div,
- &s4_vapb_1,
- &s4_vapb,
- &s4_ge2d_gate,
- &s4_hdcp22_esmclk_mux,
- &s4_hdcp22_esmclk_div,
- &s4_hdcp22_esmclk_gate,
- &s4_hdcp22_skpclk_mux,
- &s4_hdcp22_skpclk_div,
- &s4_hdcp22_skpclk_gate,
- &s4_vdin_meas_mux,
- &s4_vdin_meas_div,
- &s4_vdin_meas_gate,
- &s4_sd_emmc_c_clk0_sel,
- &s4_sd_emmc_c_clk0_div,
- &s4_sd_emmc_c_clk0,
- &s4_sd_emmc_a_clk0_sel,
- &s4_sd_emmc_a_clk0_div,
- &s4_sd_emmc_a_clk0,
- &s4_sd_emmc_b_clk0_sel,
- &s4_sd_emmc_b_clk0_div,
- &s4_sd_emmc_b_clk0,
- &s4_spicc0_mux,
- &s4_spicc0_div,
- &s4_spicc0_gate,
- &s4_pwm_a_mux,
- &s4_pwm_a_div,
- &s4_pwm_a_gate,
- &s4_pwm_b_mux,
- &s4_pwm_b_div,
- &s4_pwm_b_gate,
- &s4_pwm_c_mux,
- &s4_pwm_c_div,
- &s4_pwm_c_gate,
- &s4_pwm_d_mux,
- &s4_pwm_d_div,
- &s4_pwm_d_gate,
- &s4_pwm_e_mux,
- &s4_pwm_e_div,
- &s4_pwm_e_gate,
- &s4_pwm_f_mux,
- &s4_pwm_f_div,
- &s4_pwm_f_gate,
- &s4_pwm_g_mux,
- &s4_pwm_g_div,
- &s4_pwm_g_gate,
- &s4_pwm_h_mux,
- &s4_pwm_h_div,
- &s4_pwm_h_gate,
- &s4_pwm_i_mux,
- &s4_pwm_i_div,
- &s4_pwm_i_gate,
- &s4_pwm_j_mux,
- &s4_pwm_j_div,
- &s4_pwm_j_gate,
- &s4_saradc_mux,
- &s4_saradc_div,
- &s4_saradc_gate,
- &s4_gen_clk_sel,
- &s4_gen_clk_div,
- &s4_gen_clk,
- &s4_ddr,
- &s4_dos,
- &s4_ethphy,
- &s4_mali,
- &s4_aocpu,
- &s4_aucpu,
- &s4_cec,
- &s4_sdemmca,
- &s4_sdemmcb,
- &s4_nand,
- &s4_smartcard,
- &s4_acodec,
- &s4_spifc,
- &s4_msr_clk,
- &s4_ir_ctrl,
- &s4_audio,
- &s4_eth,
- &s4_uart_a,
- &s4_uart_b,
- &s4_uart_c,
- &s4_uart_d,
- &s4_uart_e,
- &s4_aififo,
- &s4_ts_ddr,
- &s4_ts_pll,
- &s4_g2d,
- &s4_spicc0,
- &s4_usb,
- &s4_i2c_m_a,
- &s4_i2c_m_b,
- &s4_i2c_m_c,
- &s4_i2c_m_d,
- &s4_i2c_m_e,
- &s4_hdmitx_apb,
- &s4_i2c_s_a,
- &s4_usb1_to_ddr,
- &s4_hdcp22,
- &s4_mmc_apb,
- &s4_rsa,
- &s4_cpu_debug,
- &s4_vpu_intr,
- &s4_demod,
- &s4_sar_adc,
- &s4_gic,
- &s4_pwm_ab,
- &s4_pwm_cd,
- &s4_pwm_ef,
- &s4_pwm_gh,
- &s4_pwm_ij,
- &s4_demod_core_clk_mux,
- &s4_demod_core_clk_div,
- &s4_demod_core_clk_gate,
- &s4_adc_extclk_in_mux,
- &s4_adc_extclk_in_div,
- &s4_adc_extclk_in_gate,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = CLKCTRL_DEMOD_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data s4_periphs_clks = {
- .hws = s4_periphs_hw_clks,
- .num = ARRAY_SIZE(s4_periphs_hw_clks),
-};
-
-static int meson_s4_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_periphs_clk_regmaps); i++)
- s4_periphs_clk_regmaps[i]->map = regmap;
-
- for (i = 0; i < s4_periphs_clks.num; i++) {
- /* array might be sparse */
- if (!s4_periphs_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_periphs_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &s4_periphs_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+ [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk.hw,
+};
+
+static const struct meson_clkc_data s4_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = s4_peripherals_hw_clks,
+ .num = ARRAY_SIZE(s4_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id s4_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,s4-peripherals-clkc",
+ .data = &s4_peripherals_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_peripherals_clkc_match_table);
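
The per-driver probe deleted above collapses into the shared meson_clkc_mmio_probe() referenced by the new platform_driver below. A hedged sketch of the shape such a helper plausibly has (the real implementation lives in meson-clkc-utils and may well differ, e.g. in how the regmap is wired into the regmap-backed clocks):

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* max_register elided: a shared helper must derive it or do without */
static const struct regmap_config sketch_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int meson_clkc_mmio_probe_sketch(struct platform_device *pdev)
{
	const struct meson_clkc_data *data;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct regmap *map;
	int i, ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(dev, base, &sketch_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * Attaching 'map' to each clk_regmap is assumed to happen here;
	 * the old driver did it via an explicit s4_periphs_clk_regmaps[].
	 */

	for (i = 0; i < data->hw_clks.num; i++) {
		if (!data->hw_clks.hws[i])	/* the id space may be sparse */
			continue;

		ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}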
-static struct platform_driver s4_driver = {
- .probe = meson_s4_periphs_probe,
+static struct platform_driver s4_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
- .name = "s4-periphs-clkc",
- .of_match_table = clkc_match_table,
+ .name = "s4-peripherals-clkc",
+ .of_match_table = s4_peripherals_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
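Both S4 drivers in this patch drop their hand-rolled probe in favour of the shared meson_clkc_mmio_probe(), keyed off the new meson_clkc_data attached to the OF match entry. The helper itself is not part of this hunk; the following is a sketch of what it presumably does, reconstructed from the per-driver probes this series deletes (s4-peripherals above, s4-pll further down). Local names and the regmap_config handling are illustrative assumptions:

	#include <linux/clk-provider.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>
	#include <linux/regmap.h>

	/* Sketch only -- the real helper lives in meson-clkc-utils. */
	static int meson_clkc_mmio_probe_sketch(struct platform_device *pdev)
	{
		static const struct regmap_config cfg = {
			.reg_bits = 32,
			.val_bits = 32,
			.reg_stride = 4,
			/* max_register presumably derived per device; glossed over here */
		};
		const struct meson_clkc_data *data;
		struct device *dev = &pdev->dev;
		struct regmap *regmap;
		void __iomem *base;
		int ret, i;

		data = device_get_match_data(dev);
		if (!data)
			return -EINVAL;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		regmap = devm_regmap_init_mmio(dev, base, &cfg);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* One-shot register setup, e.g. s4_pll_init_regs further down */
		if (data->init_count) {
			ret = regmap_multi_reg_write(regmap, data->init_regs,
						     data->init_count);
			if (ret)
				return ret;
		}

		for (i = 0; i < data->hw_clks.num; i++) {
			if (!data->hw_clks.hws[i])	/* array may be sparse */
				continue;

			ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
			if (ret)
				return ret;
		}

		return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
						   (void *)&data->hw_clks);
	}

Note that the per-driver clk_regmap arrays (s4_periphs_clk_regmaps above, s4_pll_clk_regmaps below) disappear without a replacement loop in sight; judging by the .init = clk_regmap_init hooks added to the ops later in this patch, each clk_regmap now resolves its regmap when the clock itself is initialised.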
diff --git a/drivers/clk/meson/s4-peripherals.h b/drivers/clk/meson/s4-peripherals.h
deleted file mode 100644
index 1e298713c2b2..000000000000
--- a/drivers/clk/meson/s4-peripherals.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PERIPHERALS_H__
-#define __MESON_S4_PERIPHERALS_H__
-
-#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
-#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
-#define CLKCTRL_RTC_CTRL 0x010
-#define CLKCTRL_SYS_CLK_CTRL0 0x040
-#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
-#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
-#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
-#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
-#define CLKCTRL_CECA_CTRL0 0x088
-#define CLKCTRL_CECA_CTRL1 0x08c
-#define CLKCTRL_CECB_CTRL0 0x090
-#define CLKCTRL_CECB_CTRL1 0x094
-#define CLKCTRL_SC_CLK_CTRL 0x098
-#define CLKCTRL_CLK12_24_CTRL 0x0a8
-#define CLKCTRL_VID_CLK_CTRL 0x0c0
-#define CLKCTRL_VID_CLK_CTRL2 0x0c4
-#define CLKCTRL_VID_CLK_DIV 0x0c8
-#define CLKCTRL_VIID_CLK_DIV 0x0cc
-#define CLKCTRL_VIID_CLK_CTRL 0x0d0
-#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
-#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
-#define CLKCTRL_VPU_CLK_CTRL 0x0e8
-#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
-#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
-#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
-#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
-#define CLKCTRL_VAPBCLK_CTRL 0x0fc
-#define CLKCTRL_HDCP22_CTRL 0x100
-#define CLKCTRL_VDEC_CLK_CTRL 0x140
-#define CLKCTRL_VDEC2_CLK_CTRL 0x144
-#define CLKCTRL_VDEC3_CLK_CTRL 0x148
-#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
-#define CLKCTRL_TS_CLK_CTRL 0x158
-#define CLKCTRL_MALI_CLK_CTRL 0x15c
-#define CLKCTRL_NAND_CLK_CTRL 0x168
-#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
-#define CLKCTRL_SPICC_CLK_CTRL 0x174
-#define CLKCTRL_GEN_CLK_CTRL 0x178
-#define CLKCTRL_SAR_CLK_CTRL 0x17c
-#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
-#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
-#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
-#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
-#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
-#define CLKCTRL_DEMOD_CLK_CTRL 0x200
-
-#endif /* __MESON_S4_PERIPHERALS_H__ */
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index b0258933fb9d..56ce6f566e53 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -13,11 +13,36 @@
#include "clk-mpll.h"
#include "clk-pll.h"
#include "clk-regmap.h"
-#include "s4-pll.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-pll-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
+#define ANACTRL_FIXPLL_CTRL0 0x040
+#define ANACTRL_FIXPLL_CTRL1 0x044
+#define ANACTRL_FIXPLL_CTRL3 0x04c
+#define ANACTRL_GP0PLL_CTRL0 0x080
+#define ANACTRL_GP0PLL_CTRL1 0x084
+#define ANACTRL_GP0PLL_CTRL2 0x088
+#define ANACTRL_GP0PLL_CTRL3 0x08c
+#define ANACTRL_GP0PLL_CTRL4 0x090
+#define ANACTRL_GP0PLL_CTRL5 0x094
+#define ANACTRL_GP0PLL_CTRL6 0x098
+#define ANACTRL_HIFIPLL_CTRL0 0x100
+#define ANACTRL_HIFIPLL_CTRL1 0x104
+#define ANACTRL_HIFIPLL_CTRL2 0x108
+#define ANACTRL_HIFIPLL_CTRL3 0x10c
+#define ANACTRL_HIFIPLL_CTRL4 0x110
+#define ANACTRL_HIFIPLL_CTRL5 0x114
+#define ANACTRL_HIFIPLL_CTRL6 0x118
+#define ANACTRL_MPLL_CTRL0 0x180
+#define ANACTRL_MPLL_CTRL1 0x184
+#define ANACTRL_MPLL_CTRL2 0x188
+#define ANACTRL_MPLL_CTRL3 0x18c
+#define ANACTRL_MPLL_CTRL4 0x190
+#define ANACTRL_MPLL_CTRL5 0x194
+#define ANACTRL_MPLL_CTRL6 0x198
+#define ANACTRL_MPLL_CTRL7 0x19c
+#define ANACTRL_MPLL_CTRL8 0x1a0
+#define ANACTRL_HDMIPLL_CTRL0 0x1c0
/*
* These clock are a fixed value (fixed_pll is 2GHz) that is initialized by ROMcode.
@@ -256,7 +281,7 @@ static const struct pll_mult_range s4_gp0_pll_mult_range = {
/*
* Internal gp0 pll emulation configuration parameters
*/
-static const struct reg_sequence s4_gp0_init_regs[] = {
+static const struct reg_sequence s4_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL1, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
@@ -293,8 +318,8 @@ static struct clk_regmap s4_gp0_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_gp0_init_regs,
- .init_count = ARRAY_SIZE(s4_gp0_init_regs),
+ .init_regs = s4_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -328,8 +353,7 @@ static struct clk_regmap s4_gp0_pll = {
/*
* Internal hifi pll emulation configuration parameters
*/
-static const struct reg_sequence s4_hifi_init_regs[] = {
- { .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x00010e56 },
+static const struct reg_sequence s4_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -354,6 +378,11 @@ static struct clk_regmap s4_hifi_pll_dco = {
.shift = 10,
.width = 5,
},
+ .frac = {
+ .reg_off = ANACTRL_HIFIPLL_CTRL1,
+ .shift = 0,
+ .width = 17,
+ },
.l = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
.shift = 31,
@@ -365,8 +394,9 @@ static struct clk_regmap s4_hifi_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_hifi_init_regs,
- .init_count = ARRAY_SIZE(s4_hifi_init_regs),
+ .init_regs = s4_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_hifi_pll_init_regs),
+ .frac_max = 100000,
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -542,7 +572,6 @@ static struct clk_regmap s4_mpll0_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll0_init_regs,
.init_count = ARRAY_SIZE(s4_mpll0_init_regs),
},
@@ -596,7 +625,6 @@ static struct clk_regmap s4_mpll1_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll1_init_regs,
.init_count = ARRAY_SIZE(s4_mpll1_init_regs),
},
@@ -650,7 +678,6 @@ static struct clk_regmap s4_mpll2_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll2_init_regs,
.init_count = ARRAY_SIZE(s4_mpll2_init_regs),
},
@@ -704,7 +731,6 @@ static struct clk_regmap s4_mpll3_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll3_init_regs,
.init_count = ARRAY_SIZE(s4_mpll3_init_regs),
},
@@ -768,109 +794,38 @@ static struct clk_hw *s4_pll_hw_clks[] = {
[CLKID_MPLL3] = &s4_mpll3.hw,
};
-static struct clk_regmap *const s4_pll_clk_regmaps[] = {
- &s4_fixed_pll_dco,
- &s4_fixed_pll,
- &s4_fclk_div2,
- &s4_fclk_div3,
- &s4_fclk_div4,
- &s4_fclk_div5,
- &s4_fclk_div7,
- &s4_fclk_div2p5,
- &s4_gp0_pll_dco,
- &s4_gp0_pll,
- &s4_hifi_pll_dco,
- &s4_hifi_pll,
- &s4_hdmi_pll_dco,
- &s4_hdmi_pll_od,
- &s4_hdmi_pll,
- &s4_mpll_50m,
- &s4_mpll0_div,
- &s4_mpll0,
- &s4_mpll1_div,
- &s4_mpll1,
- &s4_mpll2_div,
- &s4_mpll2,
- &s4_mpll3_div,
- &s4_mpll3,
-};
-
-static const struct reg_sequence s4_init_regs[] = {
+static const struct reg_sequence s4_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HDMIPLL_CTRL0,
-};
-
-static struct meson_clk_hw_data s4_pll_clks = {
- .hws = s4_pll_hw_clks,
- .num = ARRAY_SIZE(s4_pll_hw_clks),
+static const struct meson_clkc_data s4_pll_clkc_data = {
+ .hw_clks = {
+ .hws = s4_pll_hw_clks,
+ .num = ARRAY_SIZE(s4_pll_hw_clks),
+ },
+ .init_regs = s4_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_pll_init_regs),
};
-static int meson_s4_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- ret = regmap_multi_reg_write(regmap, s4_init_regs, ARRAY_SIZE(s4_init_regs));
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to init registers\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_pll_clk_regmaps); i++)
- s4_pll_clk_regmaps[i]->map = regmap;
-
- /* Register clocks */
- for (i = 0; i < s4_pll_clks.num; i++) {
- /* array might be sparse */
- if (!s4_pll_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_pll_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &s4_pll_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_pll_clkc_match_table[] = {
{
.compatible = "amlogic,s4-pll-clkc",
+ .data = &s4_pll_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_pll_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_pll_probe,
+static struct platform_driver s4_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "s4-pll-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = s4_pll_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
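Two functional changes hide among the renames above. First, the HIFI PLL gains a fractional descriptor (a 17-bit frac field in ANACTRL_HIFIPLL_CTRL1 with frac_max = 100000), and the fixed CTRL1 write of 0x00010e56 is dropped from the init sequence, since the fractional part is now programmed at runtime. Assuming the usual meson-pll convention (not shown in this hunk), the output relation is

	Fvco = Fref * (M + frac / frac_max) / N

where frac_max = 100000 makes the field a decimal fraction rather than the power-of-two one that would otherwise be derived from its width. Second, the four MPLL dividers drop their .lock pointer along with the meson_clk_lock definition, so serialisation is presumably handled inside the shared MPLL code now.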
diff --git a/drivers/clk/meson/s4-pll.h b/drivers/clk/meson/s4-pll.h
deleted file mode 100644
index ff7d58302f2a..000000000000
--- a/drivers/clk/meson/s4-pll.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PLL_H__
-#define __MESON_S4_PLL_H__
-
-#define ANACTRL_FIXPLL_CTRL0 0x040
-#define ANACTRL_FIXPLL_CTRL1 0x044
-#define ANACTRL_FIXPLL_CTRL3 0x04c
-#define ANACTRL_GP0PLL_CTRL0 0x080
-#define ANACTRL_GP0PLL_CTRL1 0x084
-#define ANACTRL_GP0PLL_CTRL2 0x088
-#define ANACTRL_GP0PLL_CTRL3 0x08c
-#define ANACTRL_GP0PLL_CTRL4 0x090
-#define ANACTRL_GP0PLL_CTRL5 0x094
-#define ANACTRL_GP0PLL_CTRL6 0x098
-#define ANACTRL_HIFIPLL_CTRL0 0x100
-#define ANACTRL_HIFIPLL_CTRL1 0x104
-#define ANACTRL_HIFIPLL_CTRL2 0x108
-#define ANACTRL_HIFIPLL_CTRL3 0x10c
-#define ANACTRL_HIFIPLL_CTRL4 0x110
-#define ANACTRL_HIFIPLL_CTRL5 0x114
-#define ANACTRL_HIFIPLL_CTRL6 0x118
-#define ANACTRL_MPLL_CTRL0 0x180
-#define ANACTRL_MPLL_CTRL1 0x184
-#define ANACTRL_MPLL_CTRL2 0x188
-#define ANACTRL_MPLL_CTRL3 0x18c
-#define ANACTRL_MPLL_CTRL4 0x190
-#define ANACTRL_MPLL_CTRL5 0x194
-#define ANACTRL_MPLL_CTRL6 0x198
-#define ANACTRL_MPLL_CTRL7 0x19c
-#define ANACTRL_MPLL_CTRL8 0x1a0
-#define ANACTRL_HDMIPLL_CTRL0 0x1c0
-
-#endif /* __MESON_S4_PLL_H__ */
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index ae03b048182f..4ba3d82810e8 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -222,6 +222,11 @@ static int sclk_div_init(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
val = meson_parm_read(clk->map, &sclk->div);
@@ -247,9 +252,9 @@ const struct clk_ops meson_sclk_div_ops = {
.set_duty_cycle = sclk_div_set_duty_cycle,
.init = sclk_div_init,
};
-EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
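The sclk_div_init() hunk shows how the removed clk_regmaps[] probe loops are compensated for: an op that already had an .init hook chains clk_regmap_init() first, so clk->map is resolved before meson_parm_read() dereferences it, and the helper's error code is propagated. Ops with no init work of their own (vclk and vid-pll-div below) simply set .init = clk_regmap_init directly.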
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index 36f637d2d01b..009bd1193042 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -45,11 +45,12 @@ static int meson_vclk_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_gate_ops = {
+ .init = clk_regmap_init,
.enable = meson_vclk_gate_enable,
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, "CLK_MESON");
/* The VCLK Divider has supplementary reset & enable bits */
@@ -127,6 +128,7 @@ static int meson_vclk_div_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_div_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vclk_div_recalc_rate,
.determine_rate = meson_vclk_div_determine_rate,
.set_rate = meson_vclk_div_set_rate,
@@ -134,9 +136,9 @@ const struct clk_ops meson_vclk_div_ops = {
.disable = meson_vclk_div_disable,
.is_enabled = meson_vclk_div_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic vclk clock driver");
MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 486cf68fc97a..2a3cdbe6d86a 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -90,11 +90,12 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops meson_vid_pll_div_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic video pll divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
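The EXPORT_SYMBOL_NS_GPL()/MODULE_IMPORT_NS() churn repeated in every meson file is mechanical: the namespace argument becomes a string literal ("CLK_MESON" instead of the bare CLK_MESON token), in line with the kernel-wide conversion of symbol namespaces to strings. No symbol moves namespaces; only the spelling of the macro argument changes.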
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 1b4f023cdc8b..b34348d491f3 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -155,11 +155,13 @@ static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
return parent_rate / pbclk_read_pbdiv(pb);
}
-static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pbclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate,
- PB_DIV_MAX, PB_DIV_MIN);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ PB_DIV_MAX, PB_DIV_MIN);
+
+ return 0;
}
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +209,7 @@ const struct clk_ops pic32_pbclk_ops = {
.disable = pbclk_disable,
.is_enabled = pbclk_is_enabled,
.recalc_rate = pbclk_recalc_rate,
- .round_rate = pbclk_round_rate,
+ .determine_rate = pbclk_determine_rate,
.set_rate = pbclk_set_rate,
};
@@ -326,7 +328,7 @@ static void roclk_calc_div_trim(unsigned long rate,
* i.e. fout = fin / 2 * DIV
* whereas DIV = rodiv + (rotrim / 512)
*
- * Since kernel does not perform floating-point arithmatic so
+ * Since kernel does not perform floating-point arithmetic so
* (rotrim/512) will be zero. And DIV & rodiv will result same.
*
* ie. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
@@ -372,18 +374,6 @@ static unsigned long roclk_recalc_rate(struct clk_hw *hw,
return roclk_calc_rate(parent_rate, rodiv, rotrim);
}
-static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u32 rotrim, rodiv;
-
- /* calculate dividers for new rate */
- roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
-
- /* caclulate new rate (rounding) based on new rodiv & rotrim */
- return roclk_calc_rate(*parent_rate, rodiv, rotrim);
-}
-
static int roclk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -394,6 +384,8 @@ static int roclk_determine_rate(struct clk_hw *hw,
/* find a parent which can generate nearest clkrate >= rate */
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ u32 rotrim, rodiv;
+
/* get parent */
parent_clk = clk_hw_get_parent_by_index(hw, i);
if (!parent_clk)
@@ -404,7 +396,12 @@ static int roclk_determine_rate(struct clk_hw *hw,
if (req->rate > parent_rate)
continue;
- nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+ /* calculate dividers for new rate */
+ roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim);
+
+ /* calculate new rate (rounding) based on new rodiv & rotrim */
+ nearest_rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim);
+
delta = abs(nearest_rate - req->rate);
if ((nearest_rate >= req->rate) && (delta < best_delta)) {
best_parent_clk = parent_clk;
@@ -665,12 +662,15 @@ static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
return rate64;
}
-static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int spll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct pic32_sys_pll *pll = clkhw_to_spll(hw);
- return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+ req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
+ NULL, NULL);
+
+ return 0;
}
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -725,7 +725,7 @@ static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* SPLL clock operation */
const struct clk_ops pic32_spll_ops = {
.recalc_rate = spll_clk_recalc_rate,
- .round_rate = spll_clk_round_rate,
+ .determine_rate = spll_clk_determine_rate,
.set_rate = spll_clk_set_rate,
};
@@ -780,10 +780,13 @@ static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
return parent_rate / div;
}
-static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ SLEW_SYSDIV, 1);
+
+ return 0;
}
static int sclk_set_rate(struct clk_hw *hw,
@@ -909,7 +912,7 @@ static int sclk_init(struct clk_hw *hw)
const struct clk_ops pic32_sclk_ops = {
.get_parent = sclk_get_parent,
.set_parent = sclk_set_parent,
- .round_rate = sclk_round_rate,
+ .determine_rate = sclk_determine_rate,
.set_rate = sclk_set_rate,
.recalc_rate = sclk_get_rate,
.init = sclk_init,
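clk-core.c is the first of many drivers below converted from .round_rate to .determine_rate. The mechanical shape of the conversion, distilled from the hunks above, looks like this (a sketch; foo_calc_rate() is a hypothetical stand-in for whatever the driver computes):

	/*
	 * Old contract: long round_rate(hw, rate, *parent_rate), returning the
	 * rounded rate (negative long on error). New contract: fill req->rate
	 * in place and return 0, or a negative errno.
	 */
	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		req->rate = foo_calc_rate(req->rate, req->best_parent_rate);

		return 0;
	}

roclk goes a step further and folds the old round_rate body into its existing determine_rate parent loop rather than keeping a dead helper around.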
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 28ec0da88cb3..c22632a7439c 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -443,4 +443,4 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_IMPORT_NS(MCHP_CLK_MPFS);
+MODULE_IMPORT_NS("MCHP_CLK_MPFS");
diff --git a/drivers/clk/mmp/Kconfig b/drivers/clk/mmp/Kconfig
new file mode 100644
index 000000000000..b0d2fea3cda5
--- /dev/null
+++ b/drivers/clk/mmp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_PXA1908
+ bool "Clock driver for Marvell PXA1908"
+ depends on ARCH_MMP || COMPILE_TEST
+ depends on OF
+ default y if ARCH_MMP && ARM64
+ select AUXILIARY_BUS
+ help
+ This driver supports the Marvell PXA1908 SoC clocks.
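COMMON_CLK_PXA1908 selects AUXILIARY_BUS because the APMU part of the driver (clk-pxa1908-apmu.c, added below) registers a "power" auxiliary device for the matching power-domain driver to bind against.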
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 441bf83080a1..0a94f2f08563 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -11,4 +11,7 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
-obj-y += clk-of-pxa1928.o
+obj-$(CONFIG_COMMON_CLK_PXA1908) += clk-pxa1908-apbc.o clk-pxa1908-apbcp.o \
+ clk-pxa1908-mpmu.o clk-pxa1908-apmu.o
+
+obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index 88d798d510cd..ed27fc796c94 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -164,23 +164,23 @@ static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int audio_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int prediv;
unsigned int postdiv;
long rounded = 0;
for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
- if (predivs[prediv].parent_rate != *parent_rate)
+ if (predivs[prediv].parent_rate != req->best_parent_rate)
continue;
for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
long freq = predivs[prediv].freq_vco;
freq /= postdivs[postdiv].divisor;
- if (freq == rate)
- return rate;
- if (freq < rate)
+ if (freq == req->rate)
+ return 0;
+ if (freq < req->rate)
continue;
if (rounded && freq > rounded)
continue;
@@ -188,7 +188,9 @@ static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -228,7 +230,7 @@ static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops audio_pll_ops = {
.recalc_rate = audio_pll_recalc_rate,
- .round_rate = audio_pll_round_rate,
+ .determine_rate = audio_pll_determine_rate,
.set_rate = audio_pll_set_rate,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 1b90867b60c4..0b1bb01346f0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -21,30 +21,32 @@
#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
u64 rate = 0, prev_rate;
+ struct u32_fract *d;
int i;
for (i = 0; i < factor->ftbl_cnt; i++) {
- prev_rate = rate;
- rate = *prate;
- rate *= factor->ftbl[i].den;
- do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+ d = &factor->ftbl[i];
- if (rate > drate)
+ prev_rate = rate;
+ rate = (u64)(req->best_parent_rate) * d->denominator;
+ do_div(rate, d->numerator * factor->masks->factor);
+ if (rate > req->rate)
break;
}
- if ((i == 0) || (i == factor->ftbl_cnt)) {
- return rate;
- } else {
- if ((drate - prev_rate) > (rate - drate))
- return rate;
- else
- return prev_rate;
- }
+
+ if ((i == 0) || (i == factor->ftbl_cnt))
+ req->rate = rate;
+ else if ((req->rate - prev_rate) > (rate - req->rate))
+ req->rate = rate;
+ else
+ req->rate = prev_rate;
+
+ return 0;
}
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -52,23 +54,22 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
- unsigned int val, num, den;
+ struct u32_fract d;
+ unsigned int val;
u64 rate;
val = readl_relaxed(factor->base);
/* calculate numerator */
- num = (val >> masks->num_shift) & masks->num_mask;
+ d.numerator = (val >> masks->num_shift) & masks->num_mask;
/* calculate denominator */
- den = (val >> masks->den_shift) & masks->den_mask;
-
- if (!den)
+ d.denominator = (val >> masks->den_shift) & masks->den_mask;
+ if (!d.denominator)
return 0;
- rate = parent_rate;
- rate *= den;
- do_div(rate, num * factor->masks->factor);
+ rate = (u64)parent_rate * d.denominator;
+ do_div(rate, d.numerator * factor->masks->factor);
return rate;
}
@@ -82,18 +83,18 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
int i;
unsigned long val;
unsigned long flags = 0;
+ struct u32_fract *d;
u64 rate = 0;
for (i = 0; i < factor->ftbl_cnt; i++) {
- rate = prate;
- rate *= factor->ftbl[i].den;
- do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+ d = &factor->ftbl[i];
+ rate = (u64)prate * d->denominator;
+ do_div(rate, d->numerator * factor->masks->factor);
if (rate > drate)
break;
}
- if (i > 0)
- i--;
+ d = i ? &factor->ftbl[i - 1] : &factor->ftbl[0];
if (factor->lock)
spin_lock_irqsave(factor->lock, flags);
@@ -101,10 +102,10 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
val = readl_relaxed(factor->base);
val &= ~(masks->num_mask << masks->num_shift);
- val |= (factor->ftbl[i].num & masks->num_mask) << masks->num_shift;
+ val |= (d->numerator & masks->num_mask) << masks->num_shift;
val &= ~(masks->den_mask << masks->den_shift);
- val |= (factor->ftbl[i].den & masks->den_mask) << masks->den_shift;
+ val |= (d->denominator & masks->den_mask) << masks->den_shift;
writel_relaxed(val, factor->base);
@@ -118,7 +119,8 @@ static int clk_factor_init(struct clk_hw *hw)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
- u32 val, num, den;
+ struct u32_fract d;
+ u32 val;
int i;
unsigned long flags = 0;
@@ -128,23 +130,22 @@ static int clk_factor_init(struct clk_hw *hw)
val = readl(factor->base);
/* calculate numerator */
- num = (val >> masks->num_shift) & masks->num_mask;
+ d.numerator = (val >> masks->num_shift) & masks->num_mask;
/* calculate denominator */
- den = (val >> masks->den_shift) & masks->den_mask;
+ d.denominator = (val >> masks->den_shift) & masks->den_mask;
for (i = 0; i < factor->ftbl_cnt; i++)
- if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
+ if (d.denominator == factor->ftbl[i].denominator &&
+ d.numerator == factor->ftbl[i].numerator)
break;
if (i >= factor->ftbl_cnt) {
val &= ~(masks->num_mask << masks->num_shift);
- val |= (factor->ftbl[0].num & masks->num_mask) <<
- masks->num_shift;
+ val |= (factor->ftbl[0].numerator & masks->num_mask) << masks->num_shift;
val &= ~(masks->den_mask << masks->den_shift);
- val |= (factor->ftbl[0].den & masks->den_mask) <<
- masks->den_shift;
+ val |= (factor->ftbl[0].denominator & masks->den_mask) << masks->den_shift;
}
if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) {
@@ -160,7 +161,7 @@ static int clk_factor_init(struct clk_hw *hw)
static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.init = clk_factor_init,
};
@@ -168,8 +169,7 @@ static const struct clk_ops clk_factor_ops = {
struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
unsigned long flags, void __iomem *base,
struct mmp_clk_factor_masks *masks,
- struct mmp_clk_factor_tbl *ftbl,
- unsigned int ftbl_cnt, spinlock_t *lock)
+ struct u32_fract *ftbl, unsigned int ftbl_cnt, spinlock_t *lock)
{
struct mmp_clk_factor *factor;
struct clk_init_data init;
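The clk-frac.c rework replaces the driver-private {num, den} table with the generic struct u32_fract from <linux/math.h>. All three paths (determine_rate, recalc_rate, set_rate) now share one relation, written out here as a standalone helper for clarity (a sketch, not code from the patch):

	#include <linux/math.h>
	#include <linux/math64.h>

	/*
	 * Rate relation used throughout clk-frac.c. Note the inversion: given
	 * how the register is laid out, the output rate scales with the
	 * *denominator* and is divided by numerator * factor.
	 */
	static unsigned long factor_rate(unsigned long parent_rate,
					 const struct u32_fract *d,
					 unsigned int factor)
	{
		u64 rate = (u64)parent_rate * d->denominator;

		do_div(rate, d->numerator * factor);	/* 32-bit divisor, as in the driver */

		return rate;
	}

clk_factor_set_rate's `d = i ? &factor->ftbl[i - 1] : &factor->ftbl[0]` is behaviourally the same as the old `if (i > 0) i--` clamp, just spelled explicitly.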
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 350eeb3e9e25..6855815ee8be 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -15,7 +15,7 @@
#include "clk.h"
/*
- * Some clocks will have mutiple bits to enable the clocks, and
+ * Some clocks will have multiple bits to enable the clocks, and
* the bits to disable the clock is not same as enabling bits.
*/
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index eaad36ee323d..a4f15cee630e 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -143,9 +143,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
- {.num = 3521, .den = 689}, /*19.23MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHz */
+ { .numerator = 3521, .denominator = 689 }, /* 19.23MHz */
};
static struct mmp_clk_factor_masks i2s_factor_masks = {
@@ -157,16 +157,16 @@ static struct mmp_clk_factor_masks i2s_factor_masks = {
.enable_mask = 0xd0000000,
};
-static struct mmp_clk_factor_tbl i2s_factor_tbl[] = {
- {.num = 24868, .den = 511}, /* 2.0480 MHz */
- {.num = 28003, .den = 793}, /* 2.8224 MHz */
- {.num = 24941, .den = 1025}, /* 4.0960 MHz */
- {.num = 28003, .den = 1586}, /* 5.6448 MHz */
- {.num = 31158, .den = 2561}, /* 8.1920 MHz */
- {.num = 16288, .den = 1845}, /* 11.2896 MHz */
- {.num = 20772, .den = 2561}, /* 12.2880 MHz */
- {.num = 8144, .den = 1845}, /* 22.5792 MHz */
- {.num = 10386, .den = 2561}, /* 24.5760 MHz */
+static struct u32_fract i2s_factor_tbl[] = {
+ { .numerator = 24868, .denominator = 511 }, /* 2.0480 MHz */
+ { .numerator = 28003, .denominator = 793 }, /* 2.8224 MHz */
+ { .numerator = 24941, .denominator = 1025 }, /* 4.0960 MHz */
+ { .numerator = 28003, .denominator = 1586 }, /* 5.6448 MHz */
+ { .numerator = 31158, .denominator = 2561 }, /* 8.1920 MHz */
+ { .numerator = 16288, .denominator = 1845 }, /* 11.2896 MHz */
+ { .numerator = 20772, .denominator = 2561 }, /* 12.2880 MHz */
+ { .numerator = 8144, .denominator = 1845 }, /* 22.5792 MHz */
+ { .numerator = 10386, .denominator = 2561 }, /* 24.5760 MHz */
};
static DEFINE_SPINLOCK(acgr_lock);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index c5a7ba1deaa3..5f250427e60d 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -106,8 +106,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHz */
};
static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index 9def4b5f10e9..ebb6e278eda3 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -61,9 +61,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 832, .den = 234}, /*58.5MHZ */
- {.num = 1, .den = 1}, /*26MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 832, .denominator = 234 }, /* 58.5MHz */
+ { .numerator = 1, .denominator = 1 }, /* 26MHz */
};
static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 7a38c424782e..fe65e7bdb411 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -86,8 +86,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHz */
};
static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c
new file mode 100644
index 000000000000..3fd7b5e644f3
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apbc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APBC_UART0 0x0
+#define APBC_UART1 0x4
+#define APBC_GPIO 0x8
+#define APBC_PWM0 0xc
+#define APBC_PWM1 0x10
+#define APBC_PWM2 0x14
+#define APBC_PWM3 0x18
+#define APBC_SSP0 0x1c
+#define APBC_SSP1 0x20
+#define APBC_IPC_RST 0x24
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x30
+#define APBC_SWJTAG 0x40
+#define APBC_SSP2 0x4c
+#define APBC_TWSI1 0x60
+#define APBC_THERMAL 0x6c
+#define APBC_TWSI3 0x70
+
+#define APBC_NR_CLKS 19
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(pwm0_lock);
+static DEFINE_SPINLOCK(pwm2_lock);
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+
+static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"};
+static const char * const ssp_parent_names[] = {"pll1_d16", "pll1_d48", "pll1_d24", "pll1_d12"};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+ {PXA1908_CLK_TWSI0, "twsi0_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_TWSI1, "twsi1_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_TWSI3, "twsi3_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 3, 0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA1908_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA1908_CLK_PWM0, "pwm0_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM0, 0x2, 2, 0, 0, &pwm0_lock},
+ {PXA1908_CLK_PWM1, "pwm1_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM1, 0x6, 2, 0, 0, NULL},
+ {PXA1908_CLK_PWM2, "pwm2_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM2, 0x2, 2, 0, 0, NULL},
+ {PXA1908_CLK_PWM3, "pwm3_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM3, 0x6, 2, 0, 0, NULL},
+ {PXA1908_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 3, 0, 0, &uart0_lock},
+ {PXA1908_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 3, 0, 0, &uart1_lock},
+ {PXA1908_CLK_THERMAL, "thermal_clk", NULL, 0, APBC_THERMAL, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_IPC_RST, "ipc_clk", NULL, 0, APBC_IPC_RST, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_SSP0, "ssp0_clk", "ssp0_mux", 0, APBC_SSP0, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_SSP2, "ssp2_clk", "ssp2_mux", 0, APBC_SSP2, 0x7, 3, 0, 0, NULL},
+};
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+ {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+ {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP0, 4, 3, 0, NULL},
+ {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP2, 4, 3, 0, NULL},
+};
+
+static void pxa1908_apb_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+ struct clk *clk;
+
+ mmp_clk_register_gate(NULL, "pwm01_apb_share", "pll1_d48",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + APBC_PWM0,
+ 0x5, 1, 0, 0, &pwm0_lock);
+ mmp_clk_register_gate(NULL, "pwm23_apb_share", "pll1_d48",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + APBC_PWM2,
+ 0x5, 1, 0, 0, &pwm2_lock);
+ clk = mmp_clk_register_apbc("swjtag", NULL,
+ pxa_unit->base + APBC_SWJTAG, 10, 0, NULL);
+ mmp_clk_add(unit, PXA1908_CLK_SWJTAG, clk);
+ mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->base,
+ ARRAY_SIZE(apbc_mux_clks));
+ mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apbc_gate_clks));
+}
+
+static int pxa1908_apbc_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBC_NR_CLKS);
+
+ pxa1908_apb_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apbc_match_table[] = {
+ { .compatible = "marvell,pxa1908-apbc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apbc_match_table);
+
+static struct platform_driver pxa1908_apbc_driver = {
+ .probe = pxa1908_apbc_probe,
+ .driver = {
+ .name = "pxa1908-apbc",
+ .of_match_table = pxa1908_apbc_match_table
+ }
+};
+module_platform_driver(pxa1908_apbc_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APBC Clock Driver");
+MODULE_LICENSE("GPL");
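The gate tables in the three new PXA1908 files are dense positional initialisers. One entry annotated for orientation; the field names are assumed from struct mmp_param_gate_clk in drivers/clk/mmp/clk.h, and the APBCLK/FNCLK/RST bit meanings follow the usual MMP APBC register layout (both assumptions, not spelled out in this patch):

	/* {PXA1908_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT,
	 *  APBC_UART0, 0x7, 3, 0, 0, &uart0_lock} reads as: */
	{
		.id		= PXA1908_CLK_UART0,	/* index from dt-bindings */
		.name		= "uart0_clk",
		.parent_name	= "uart0_mux",		/* gate sits behind the rate mux */
		.flags		= CLK_SET_RATE_PARENT,
		.offset		= APBC_UART0,		/* register offset in the block */
		.mask		= 0x7,			/* RST | FNCLK | APBCLK */
		.val_enable	= 0x3,			/* both clocks on, reset deasserted */
		.val_disable	= 0x0,
		.gate_flags	= 0,
		.lock		= &uart0_lock,		/* register shared with uart0_mux */
	},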
diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c
new file mode 100644
index 000000000000..f638d7e89b47
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APBCP_UART2 0x1c
+#define APBCP_TWSI2 0x28
+#define APBCP_AICER 0x38
+
+#define APBCP_NR_CLKS 4
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(uart2_lock);
+
+static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"};
+
+static struct mmp_param_gate_clk apbcp_gate_clks[] = {
+ {PXA1908_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
+ {PXA1908_CLK_TWSI2, "twsi2_clk", "pll1_32", CLK_SET_RATE_PARENT, APBCP_TWSI2, 0x7, 0x3, 0x0, 0, NULL},
+ {PXA1908_CLK_AICER, "ripc_clk", NULL, 0, APBCP_AICER, 0x7, 0x2, 0x0, 0, NULL},
+};
+
+static struct mmp_param_mux_clk apbcp_mux_clks[] = {
+ {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
+};
+
+static void pxa1908_apb_p_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->base,
+ ARRAY_SIZE(apbcp_mux_clks));
+ mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apbcp_gate_clks));
+}
+
+static int pxa1908_apbcp_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBCP_NR_CLKS);
+
+ pxa1908_apb_p_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apbcp_match_table[] = {
+ { .compatible = "marvell,pxa1908-apbcp" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apbcp_match_table);
+
+static struct platform_driver pxa1908_apbcp_driver = {
+ .probe = pxa1908_apbcp_probe,
+ .driver = {
+ .name = "pxa1908-apbcp",
+ .of_match_table = pxa1908_apbcp_match_table
+ }
+};
+module_platform_driver(pxa1908_apbcp_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APBCP Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
new file mode 100644
index 000000000000..7594a495a009
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APMU_CLK_GATE_CTRL 0x40
+#define APMU_CCIC1 0x24
+#define APMU_ISP 0x38
+#define APMU_DSI1 0x44
+#define APMU_DISP1 0x4c
+#define APMU_CCIC0 0x50
+#define APMU_SDH0 0x54
+#define APMU_SDH1 0x58
+#define APMU_USB 0x5c
+#define APMU_NF 0x60
+#define APMU_VPU 0xa4
+#define APMU_GC 0xcc
+#define APMU_SDH2 0xe0
+#define APMU_GC2D 0xf4
+#define APMU_TRACE 0x108
+#define APMU_DVC_DFC_DEBUG 0x140
+
+#define APMU_NR_CLKS 17
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(pll1_lock);
+static struct mmp_param_general_gate_clk pll1_gate_clks[] = {
+ {PXA1908_CLK_PLL1_D2_GATE, "pll1_d2_gate", "pll1_d2", 0, APMU_CLK_GATE_CTRL, 29, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_416_GATE, "pll1_416_gate", "pll1_416", 0, APMU_CLK_GATE_CTRL, 27, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_624_GATE, "pll1_624_gate", "pll1_624", 0, APMU_CLK_GATE_CTRL, 26, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_832_GATE, "pll1_832_gate", "pll1_832", 0, APMU_CLK_GATE_CTRL, 30, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_1248_GATE, "pll1_1248_gate", "pll1_1248", 0, APMU_CLK_GATE_CTRL, 28, 0, &pll1_lock},
+};
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static DEFINE_SPINLOCK(sdh2_lock);
+
+static const char * const sdh_parent_names[] = {"pll1_416", "pll1_624"};
+
+static struct mmp_clk_mix_config sdh_mix_config = {
+ .reg_info = DEFINE_MIX_REG_INFO(3, 8, 2, 6, 11),
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+ {PXA1908_CLK_USB, "usb_clk", NULL, 0, APMU_USB, 0x9, 0x9, 0x1, 0, NULL},
+ {PXA1908_CLK_SDH0, "sdh0_clk", "sdh0_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH0, 0x12, 0x12, 0x0, 0, &sdh0_lock},
+ {PXA1908_CLK_SDH1, "sdh1_clk", "sdh1_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH1, 0x12, 0x12, 0x0, 0, &sdh1_lock},
+ {PXA1908_CLK_SDH2, "sdh2_clk", "sdh2_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH2, 0x12, 0x12, 0x0, 0, &sdh2_lock}
+};
+
+static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_general_gate_clks(unit, pll1_gate_clks,
+ pxa_unit->base, ARRAY_SIZE(pll1_gate_clks));
+
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH0;
+ mmp_clk_register_mix(NULL, "sdh0_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh0_lock);
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH1;
+ mmp_clk_register_mix(NULL, "sdh1_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh1_lock);
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH2;
+ mmp_clk_register_mix(NULL, "sdh2_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh2_lock);
+
+ mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apmu_gate_clks));
+}
+
+static int pxa1908_apmu_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+ struct auxiliary_device *adev;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ adev = devm_auxiliary_device_create(&pdev->dev, "power", NULL);
+ if (IS_ERR(adev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adev),
+ "Failed to register power controller\n");
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS);
+
+ pxa1908_axi_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apmu_match_table[] = {
+ { .compatible = "marvell,pxa1908-apmu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apmu_match_table);
+
+static struct platform_driver pxa1908_apmu_driver = {
+ .probe = pxa1908_apmu_probe,
+ .driver = {
+ .name = "pxa1908-apmu",
+ .of_match_table = pxa1908_apmu_match_table
+ }
+};
+module_platform_driver(pxa1908_apmu_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APMU Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c
new file mode 100644
index 000000000000..90b4b2488574
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define MPMU_UART_PLL 0x14
+
+#define MPMU_NR_CLKS 39
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+ {PXA1908_CLK_CLK32, "clk32", NULL, 0, 32768},
+ {PXA1908_CLK_VCTCXO, "vctcxo", NULL, 0, 26 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_624, "pll1_624", NULL, 0, 624 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_416, "pll1_416", NULL, 0, 416 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_499, "pll1_499", NULL, 0, 499 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_832, "pll1_832", NULL, 0, 832 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_1248, "pll1_1248", NULL, 0, 1248 * HZ_PER_MHZ},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+ {PXA1908_CLK_PLL1_D2, "pll1_d2", "pll1_624", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D4, "pll1_d4", "pll1_d2", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D6, "pll1_d6", "pll1_d2", 1, 3, 0},
+ {PXA1908_CLK_PLL1_D8, "pll1_d8", "pll1_d4", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D12, "pll1_d12", "pll1_d6", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D13, "pll1_d13", "pll1_624", 1, 13, 0},
+ {PXA1908_CLK_PLL1_D16, "pll1_d16", "pll1_d8", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D24, "pll1_d24", "pll1_d12", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D48, "pll1_d48", "pll1_d24", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D96, "pll1_d96", "pll1_d48", 1, 2, 0},
+ {PXA1908_CLK_PLL1_32, "pll1_32", "pll1_d13", 2, 3, 0},
+ {PXA1908_CLK_PLL1_208, "pll1_208", "pll1_d2", 2, 3, 0},
+ {PXA1908_CLK_PLL1_117, "pll1_117", "pll1_624", 3, 16, 0},
+};
+
+static struct u32_fract uart_factor_tbl[] = {
+ {.numerator = 8125, .denominator = 1536}, /* 14.745MHz */
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+ .factor = 2,
+ .num_mask = GENMASK(12, 0),
+ .den_mask = GENMASK(12, 0),
+ .num_shift = 16,
+ .den_shift = 0,
+};
+
+static void pxa1908_pll_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+ ARRAY_SIZE(fixed_rate_clks));
+
+ mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ mmp_clk_register_factor("uart_pll", "pll1_d4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + MPMU_UART_PLL,
+ &uart_factor_masks, uart_factor_tbl,
+ ARRAY_SIZE(uart_factor_tbl), NULL);
+}
+
+static int pxa1908_mpmu_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, MPMU_NR_CLKS);
+
+ pxa1908_pll_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_mpmu_match_table[] = {
+ { .compatible = "marvell,pxa1908-mpmu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_mpmu_match_table);
+
+static struct platform_driver pxa1908_mpmu_driver = {
+ .probe = pxa1908_mpmu_probe,
+ .driver = {
+ .name = "pxa1908-mpmu",
+ .of_match_table = pxa1908_mpmu_match_table
+ }
+};
+module_platform_driver(pxa1908_mpmu_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 MPMU Clock Driver");
+MODULE_LICENSE("GPL");
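A quick sanity check of the factor table against the clk-frac.c relation above: uart_pll's parent is pll1_d4 = 624 MHz / 4 = 156 MHz, so

	uart_pll = 156000000 * 1536 / (8125 * 2) = 14745600 Hz

i.e. 14.7456 MHz, the canonical UART reference clock (16 x 921600).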
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 55ac05379781..c83cec169ddc 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -3,6 +3,7 @@
#define __MACH_MMP_CLK_H
#include <linux/clk-provider.h>
+#include <linux/math.h>
#include <linux/pm_domain.h>
#include <linux/clkdev.h>
@@ -20,16 +21,11 @@ struct mmp_clk_factor_masks {
unsigned int enable_mask;
};
-struct mmp_clk_factor_tbl {
- unsigned int num;
- unsigned int den;
-};
-
struct mmp_clk_factor {
struct clk_hw hw;
void __iomem *base;
struct mmp_clk_factor_masks *masks;
- struct mmp_clk_factor_tbl *ftbl;
+ struct u32_fract *ftbl;
unsigned int ftbl_cnt;
spinlock_t *lock;
};
@@ -37,7 +33,7 @@ struct mmp_clk_factor {
extern struct clk *mmp_clk_register_factor(const char *name,
const char *parent_name, unsigned long flags,
void __iomem *base, struct mmp_clk_factor_masks *masks,
- struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
+ struct u32_fract *ftbl, unsigned int ftbl_cnt,
spinlock_t *lock);
/* Clock type "mix" */
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
index edaa2433a472..eaf5d2c5e593 100644
--- a/drivers/clk/mmp/pwr-island.c
+++ b/drivers/clk/mmp/pwr-island.c
@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
pm_domain->flags = flags;
pm_domain->lock = lock;
- pm_genpd_init(&pm_domain->genpd, NULL, true);
pm_domain->genpd.name = name;
pm_domain->genpd.power_on = mmp_pm_domain_power_on;
pm_domain->genpd.power_off = mmp_pm_domain_power_off;
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
return &pm_domain->genpd;
}
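The pwr-island.c hunk is an ordering fix rather than churn: pm_genpd_init() moves after the name and power_on/power_off callbacks are assigned, presumably because the genpd core consumes those fields during initialisation (the name, at least, ends up in the debugfs entry), so the old order registered a partially populated domain.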
diff --git a/drivers/clk/mstar/clk-msc313-cpupll.c b/drivers/clk/mstar/clk-msc313-cpupll.c
index a93e2dba09d3..3e643be02fe2 100644
--- a/drivers/clk/mstar/clk-msc313-cpupll.c
+++ b/drivers/clk/mstar/clk-msc313-cpupll.c
@@ -140,20 +140,22 @@ static unsigned long msc313_cpupll_recalc_rate(struct clk_hw *hw, unsigned long
parent_rate);
}
-static long msc313_cpupll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int msc313_cpupll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 reg = msc313_cpupll_regforfrequecy(rate, *parent_rate);
- long rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ u32 reg = msc313_cpupll_regforfrequecy(req->rate, req->best_parent_rate);
+ long rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
/*
* This is my poor attempt at making sure the resulting
* rate doesn't overshoot the requested rate.
*/
- for (; rounded >= rate && reg > 0; reg--)
- rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ for (; rounded >= req->rate && reg > 0; reg--)
+ rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -168,7 +170,7 @@ static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops msc313_cpupll_ops = {
.recalc_rate = msc313_cpupll_recalc_rate,
- .round_rate = msc313_cpupll_round_rate,
+ .determine_rate = msc313_cpupll_determine_rate,
.set_rate = msc313_cpupll_set_rate,
};
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 677cc3514849..1e44ace7d951 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -210,19 +210,21 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ap_cpu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int divider = *parent_rate / rate;
+ int divider = req->best_parent_rate / req->rate;
divider = min(divider, APN806_MAX_DIVIDER);
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops ap_cpu_clk_ops = {
.recalc_rate = ap_cpu_clk_recalc_rate,
- .round_rate = ap_cpu_clk_round_rate,
+ .determine_rate = ap_cpu_clk_determine_rate,
.set_rate = ap_cpu_clk_set_rate,
};
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 13906e31bef8..bd0bc8e7b1e7 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -454,12 +454,12 @@ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
-static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pm_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
- unsigned int div = *parent_rate / rate;
+ unsigned int div = req->best_parent_rate / req->rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
@@ -474,13 +474,16 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
- if (val == div)
+ if (val == div) {
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+ }
}
/* We didn't find any valid divider */
@@ -600,7 +603,7 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
- .round_rate = clk_pm_cpu_round_rate,
+ .determine_rate = clk_pm_cpu_determine_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
};
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 45665655a258..8d31a595a27c 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -7,7 +7,6 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
- *
*/
#include <linux/kernel.h>
@@ -19,8 +18,8 @@
/*
* Core Clocks
*
- * Armada XP Sample At Reset is a 64 bit bitfiled split in two
- * register of 32 bits
+ * Armada XP Sample At Reset is a 64 bit bitfield split in two
+ * registers of 32 bits
*/
#define SARL 0 /* Low part [0:31] */
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 818b175391fa..628032341cbb 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -135,19 +135,21 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_corediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div < 4)
div = 4;
else if (div > 6)
div = 8;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -199,7 +201,7 @@ static const struct clk_corediv_soc_desc armada370_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -215,7 +217,7 @@ static const struct clk_corediv_soc_desc armada380_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -228,7 +230,7 @@ static const struct clk_corediv_soc_desc armada375_corediv_soc = {
.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -240,7 +242,7 @@ static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(10),
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index db2b38c21304..0de7660e73d2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -56,19 +56,21 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratios are 1:1, 1:2 and 1:3 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div == 0)
div = 1;
else if (div > 3)
div = 3;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -159,7 +161,7 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 0a90452ee808..47cc49e4cd99 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -108,23 +108,23 @@ static unsigned long dove_recalc_rate(struct clk_hw *hw, unsigned long parent)
return rate;
}
-static long dove_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent)
+static int dove_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dove_clk *dc = to_dove_clk(hw);
- unsigned long parent_rate = *parent;
+ unsigned long parent_rate = req->best_parent_rate;
int divider;
- divider = dove_calc_divider(dc, rate, parent_rate, false);
+ divider = dove_calc_divider(dc, req->rate, parent_rate, false);
if (divider < 0)
return divider;
- rate = DIV_ROUND_CLOSEST(parent_rate, divider);
+ req->rate = DIV_ROUND_CLOSEST(parent_rate, divider);
pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n",
- __func__, dc->name, divider, parent_rate, rate);
+ __func__, dc->name, divider, parent_rate, req->rate);
- return rate;
+ return 0;
}
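/*
 * Editor's note: dove_determine_rate() can return the negative error
 * from dove_calc_divider() as-is; the int return of .determine_rate
 * carries errnos cleanly, where the old long return had to overload
 * the rate value to report them.
 */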
static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
@@ -154,7 +154,7 @@ static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops dove_divider_ops = {
.set_rate = dove_set_clock,
- .round_rate = dove_round_rate,
+ .determine_rate = dove_determine_rate,
.recalc_rate = dove_recalc_rate,
};
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 928e8b1c46a1..8afe1a9c1552 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -16,7 +16,7 @@
* @busy: busy bit shift
*
* The mxs divider clock is a subclass of basic clk_divider with an
- * addtional busy bit.
+ * additional busy bit.
*/
struct clk_div {
struct clk_divider divider;
@@ -40,12 +40,12 @@ static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
return div->ops->recalc_rate(&div->divider.hw, parent_rate);
}
-static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_div *div = to_clk_div(hw);
- return div->ops->round_rate(&div->divider.hw, rate, prate);
+ return div->ops->determine_rate(&div->divider.hw, req);
}
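/*
 * Editor's note: div->ops points at the generic clk_divider_ops, so
 * this wrapper now relies on the basic divider exposing
 * .determine_rate, which the common clk-divider code provides; the
 * busy-bit handling in set_rate is untouched.
 */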
static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +63,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
- .round_rate = clk_div_round_rate,
+ .determine_rate = clk_div_determine_rate,
.set_rate = clk_div_set_rate,
};
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index bba0d840dd76..73f514fb84ff 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -44,18 +44,18 @@ static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
return tmp_rate >> frac->width;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 div;
u64 tmp, tmp_rate, result;
- if (rate > parent_rate)
+ if (req->rate > parent_rate)
return -EINVAL;
- tmp = rate;
+ tmp = req->rate;
tmp <<= frac->width;
do_div(tmp, parent_rate);
div = tmp;
@@ -67,7 +67,9 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
result = tmp_rate >> frac->width;
if ((result << frac->width) < tmp_rate)
result += 1;
- return result;
+ req->rate = result;
+
+ return 0;
}
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -103,7 +105,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 2297259da89a..a99ee4cd2ece 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -57,22 +57,24 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_ref_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u64 tmp = parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = clamp(tmp, 18, 35);
tmp = parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
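/*
 * Editor's worked example (assuming the usual 480 MHz PLL parent):
 * a request for 320 MHz gives tmp = (480 MHz * 18 + 160 MHz) / 320 MHz
 * = 27 after truncation, inside [18, 35], so req->rate becomes
 * 480 MHz * 18 / 27 = 320 MHz exactly.
 */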
static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -104,7 +106,7 @@ static const struct clk_ops clk_ref_ops = {
.enable = clk_ref_enable,
.disable = clk_ref_disable,
.recalc_rate = clk_ref_recalc_rate,
- .round_rate = clk_ref_round_rate,
+ .determine_rate = clk_ref_determine_rate,
.set_rate = clk_ref_set_rate,
};
diff --git a/drivers/clk/nuvoton/Kconfig b/drivers/clk/nuvoton/Kconfig
index fe4b7f62f467..e7019b69ea74 100644
--- a/drivers/clk/nuvoton/Kconfig
+++ b/drivers/clk/nuvoton/Kconfig
@@ -4,7 +4,7 @@
config COMMON_CLK_NUVOTON
bool "Nuvoton clock controller common support"
depends on ARCH_MA35 || COMPILE_TEST
- default y
+ default ARCH_MA35
help
 Say y here to enable the common clock controller for Nuvoton platforms.
@@ -12,7 +12,7 @@ if COMMON_CLK_NUVOTON
config CLK_MA35D1
bool "Nuvoton MA35D1 clock controller support"
- default y
+ default ARCH_MA35
help
Build the clock controller driver for MA35D1 SoC.
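(Editor's note: because these options also depend on COMPILE_TEST, the old "default y" enabled them in every configuration; "default ARCH_MA35" keeps them default-on only where the MA35 hardware can actually exist.)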
diff --git a/drivers/clk/nuvoton/clk-ma35d1-divider.c b/drivers/clk/nuvoton/clk-ma35d1-divider.c
index bb8c23d2b895..e39f53d5bf45 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-divider.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-divider.c
@@ -39,12 +39,16 @@ static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ dclk->table, dclk->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
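/*
 * Editor's note: divider_round_rate() keeps the legacy long-returning
 * shape, so its result is stored into the request here; the core also
 * offers a request-based divider_determine_rate() helper that could
 * take req directly.
 */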
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -71,7 +75,7 @@ static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops ma35d1_adc_clkdiv_ops = {
.recalc_rate = ma35d1_clkdiv_recalc_rate,
- .round_rate = ma35d1_clkdiv_round_rate,
+ .determine_rate = ma35d1_clkdiv_determine_rate,
.set_rate = ma35d1_clkdiv_set_rate,
};
diff --git a/drivers/clk/nuvoton/clk-ma35d1-pll.c b/drivers/clk/nuvoton/clk-ma35d1-pll.c
index ff3fb8b87c24..4620acfe47e8 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-pll.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-pll.c
@@ -244,35 +244,43 @@ static unsigned long ma35d1_clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
return 0;
}
-static long ma35d1_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ma35d1_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_clk_pll *pll = to_ma35d1_clk_pll(hw);
u32 reg_ctl[3] = { 0 };
unsigned long pll_freq;
long ret;
- if (*parent_rate < PLL_FREF_MIN_FREQ || *parent_rate > PLL_FREF_MAX_FREQ)
+ if (req->best_parent_rate < PLL_FREF_MIN_FREQ || req->best_parent_rate > PLL_FREF_MAX_FREQ)
return -EINVAL;
- ret = ma35d1_pll_find_closest(pll, rate, *parent_rate, reg_ctl, &pll_freq);
+ ret = ma35d1_pll_find_closest(pll, req->rate, req->best_parent_rate,
+ reg_ctl, &pll_freq);
if (ret < 0)
return ret;
switch (pll->id) {
case CAPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
- pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
case DDRPLL:
case APLL:
case EPLL:
case VPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
reg_ctl[1] = readl_relaxed(pll->ctl1_base);
- pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
}
+
+ req->rate = 0;
+
return 0;
}
@@ -311,12 +319,12 @@ static const struct clk_ops ma35d1_clk_pll_ops = {
.unprepare = ma35d1_clk_pll_unprepare,
.set_rate = ma35d1_clk_pll_set_rate,
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
static const struct clk_ops ma35d1_clk_fixed_pll_ops = {
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
struct clk_hw *ma35d1_reg_clk_pll(struct device *dev, u32 id, u8 u8mode, const char *name,
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index ddb28b38f549..751b786d73f8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -148,7 +148,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
val |= LPC18XX_CCU_RUN;
} else {
/*
- * To safely disable a branch clock a squence of two separate
+ * To safely disable a branch clock a sequence of two separate
* writes must be used. First write should set the AUTO bit
* and the next write should clear the RUN bit.
*/
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81efa885069b..b9e204d63a97 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long m;
- if (*prate < rate) {
+ if (req->best_parent_rate < req->rate) {
pr_warn("%s: pll dividers not supported\n", __func__);
return -EINVAL;
}
- m = DIV_ROUND_UP_ULL(*prate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
- pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
return -EINVAL;
}
- return 2 * *prate * m;
+ req->rate = 2 * req->best_parent_rate * m;
+
+ return 0;
}
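/*
 * Editor's note: the old guard (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX)
 * could never fire, because an unsigned m cannot be zero and above
 * the maximum at the same time; the (m == 0 || m > ...) form used
 * here and in lpc18xx_pll0_set_rate() below rejects both cases.
 */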
static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
}
m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
pr_warn("%s: unable to support rate %lu\n", __func__, rate);
return -EINVAL;
}
@@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc18xx_pll0_ops = {
.recalc_rate = lpc18xx_pll0_recalc_rate,
- .round_rate = lpc18xx_pll0_round_rate,
+ .determine_rate = lpc18xx_pll0_determine_rate,
.set_rate = lpc18xx_pll0_set_rate,
};
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index e00f270bc6aa..23f980cf6a2b 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -68,7 +68,6 @@ static const struct regmap_config lpc32xx_scb_regmap_config = {
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.max_register = 0x114,
- .fast_io = true,
};
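/*
 * Editor's note: dropping fast_io switches this regmap from
 * spinlock-based locking to the default mutex, so accesses may sleep;
 * the Qualcomm PLL regmaps later in this series get the same change.
 */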
static struct regmap *clk_regmap;
@@ -579,17 +578,17 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return regmap_update_bits(clk_regmap, clk->reg, 0x1FFFF, val);
}
-static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_hclk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = req->rate, i = req->best_parent_rate, d = (u64)req->rate << 6;
u64 m = 0, n = 0, p = 0;
int p_i, n_i;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, req->rate);
- if (rate > 266500000)
+ if (req->rate > 266500000)
return -EINVAL;
/* Have to check all 20 possibilities to find the minimal M */
@@ -614,9 +613,9 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- if (d == (u64)rate << 6) {
+ if (d == (u64)req->rate << 6) {
pr_err("%s: %lu: no valid PLL parameters are found\n",
- clk_hw_get_name(hw), rate);
+ clk_hw_get_name(hw), req->rate);
return -EINVAL;
}
@@ -634,22 +633,25 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (!d)
pr_debug("%s: %lu: found exact match: %llu/%llu/%llu\n",
- clk_hw_get_name(hw), rate, m, n, p);
+ clk_hw_get_name(hw), req->rate, m, n, p);
else
pr_debug("%s: %lu: found closest: %llu/%llu/%llu - %llu\n",
- clk_hw_get_name(hw), rate, m, n, p, o);
+ clk_hw_get_name(hw), req->rate, m, n, p, o);
- return o;
+ req->rate = o;
+
+ return 0;
}
-static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_usb_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
struct clk_hw *usb_div_hw, *osc_hw;
u64 d_i, n_i, m, o;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate,
+ req->rate);
/*
* The only supported USB clock is 48MHz, with PLL internal constraints
@@ -657,7 +659,7 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* and post-divider must be 4, this slightly simplifies calculation of
* USB divider, USB PLL N and M parameters.
*/
- if (rate != 48000000)
+ if (req->rate != 48000000)
return -EINVAL;
/* USB divider clock */
@@ -685,30 +687,30 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
clk->m_div = m;
clk->p_div = 2;
clk->mode = PLL_NON_INTEGER;
- *parent_rate = div64_u64(o, d_i);
+ req->best_parent_rate = div64_u64(o, d_i);
- return rate;
+ return 0;
}
}
return -EINVAL;
}
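/*
 * Editor's note: where round_rate() reported a required parent rate
 * through the *parent_rate out-parameter, determine_rate() passes it
 * back in req->best_parent_rate, which the core honours when the
 * clock is allowed to re-rate its parent (CLK_SET_RATE_PARENT).
 */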
-#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _rr) \
+#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _dr) \
static const struct clk_ops clk_ ##_name ## _ops = { \
.enable = clk_pll_enable, \
.disable = clk_pll_disable, \
.is_enabled = clk_pll_is_enabled, \
.recalc_rate = _rc, \
.set_rate = _sr, \
- .round_rate = _rr, \
+ .determine_rate = _dr, \
}
LPC32XX_DEFINE_PLL_OPS(pll_397x, clk_pll_397x_recalc_rate, NULL, NULL);
LPC32XX_DEFINE_PLL_OPS(hclk_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_hclk_pll_round_rate);
+ clk_pll_set_rate, clk_hclk_pll_determine_rate);
LPC32XX_DEFINE_PLL_OPS(usb_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_usb_pll_round_rate);
+ clk_pll_set_rate, clk_usb_pll_determine_rate);
static int clk_ddram_is_enabled(struct clk_hw *hw)
{
@@ -955,8 +957,8 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_clk_div *divider = to_lpc32xx_div(hw);
unsigned int bestdiv;
@@ -968,11 +970,15 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
- return DIV_ROUND_UP(*prate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -991,7 +997,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc32xx_clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index 025b9df76cdb..d05337915e2b 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -139,19 +139,23 @@ pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
return NULL;
}
-static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
unsigned int i;
for (i = 0; i < pll->nr_rates; i++) {
- if (i > 0 && pll->rates[i].fref == *parent_rate &&
- pll->rates[i].fout <= rate)
- return pll->rates[i - 1].fout;
+ if (i > 0 && pll->rates[i].fref == req->best_parent_rate &&
+ pll->rates[i].fout <= req->rate) {
+ req->rate = pll->rates[i - 1].fout;
+
+ return 0;
+ }
}
- return pll->rates[0].fout;
+ req->rate = pll->rates[0].fout;
+
+ return 0;
}
static int pll_gf40lp_frac_enable(struct clk_hw *hw)
@@ -300,7 +304,7 @@ static const struct clk_ops pll_gf40lp_frac_ops = {
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
.recalc_rate = pll_gf40lp_frac_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_frac_set_rate,
};
@@ -432,7 +436,7 @@ static const struct clk_ops pll_gf40lp_laint_ops = {
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
.recalc_rate = pll_gf40lp_laint_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_laint_set_rate,
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index a3e2a09e2105..78a303842613 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -19,6 +19,33 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_GLYMUR_DISPCC
+ tristate "GLYMUR Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_GLYMUR_GCC
+ help
+ Support for the display clock controllers on Qualcomm
+ Technologies, Inc. GLYMUR devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config CLK_GLYMUR_GCC
+ tristate "GLYMUR Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
+config CLK_GLYMUR_TCSRCC
+ tristate "GLYMUR TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as USB/PCIe/EDP.
+
config CLK_X1E80100_CAMCC
tristate "X1E80100 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -64,6 +91,15 @@ config CLK_X1E80100_TCSRCC
Support for the TCSR clock controller on X1E80100 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config CLK_X1P42100_GPUCC
+ tristate "X1P42100 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the graphics clock controller on X1P42100 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config CLK_QCM2290_GPUCC
tristate "QCM2290 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -178,6 +214,15 @@ config IPQ_APSS_PLL
Say Y if you want to support CPU frequency scaling on ipq based
devices.
+config IPQ_APSS_5424
+ tristate "IPQ APSS Clock Controller"
+ select IPQ_APSS_PLL
+ default y if IPQ_GCC_5424
+ help
+ Support for the APSS clock controller on the Qualcomm IPQ5424 platform.
+ Say Y if you want to support CPU frequency scaling on ipq based
+ devices.
+
config IPQ_APSS_6018
tristate "IPQ APSS Clock Controller"
select IPQ_APSS_PLL
@@ -190,6 +235,15 @@ config IPQ_APSS_6018
Say Y if you want to support CPU frequency scaling on
ipq based devices.
+config IPQ_CMN_PLL
+ tristate "IPQ CMN PLL Clock Controller"
+ help
+ Support for CMN PLL clock controller on IPQ platform. The
+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
+ the output clocks to the networking hardware and GCC blocks.
+ Say Y or M if you want to support CMN PLL clock on the IPQ
+ based devices.
+
config IPQ_GCC_4019
tristate "IPQ4019 Global Clock Controller"
help
@@ -199,7 +253,7 @@ config IPQ_GCC_4019
config IPQ_GCC_5018
tristate "IPQ5018 Global Clock Controller"
- depends on ARM64 || COMPILE_TEST
+ depends on ARM || ARM64 || COMPILE_TEST
help
Support for global clock controller on ipq5018 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -213,6 +267,14 @@ config IPQ_GCC_5332
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config IPQ_GCC_5424
+ tristate "IPQ5424 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the global clock controller on ipq5424 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
config IPQ_GCC_6018
tristate "IPQ6018 Global Clock Controller"
help
@@ -255,6 +317,13 @@ config IPQ_GCC_9574
i2c, USB, SD/eMMC, etc. Select this for the root clock
of ipq9574.
+config IPQ_NSSCC_9574
+ tristate "IPQ9574 NSS Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on IPQ_GCC_9574
+ help
+ Support for NSS clock controller on ipq9574 devices.
+
config IPQ_NSSCC_QCA8K
tristate "QCA8K(QCA8386 or QCA8084) NSS Clock Controller"
depends on MDIO_BUS
@@ -290,12 +359,12 @@ config MSM_GCC_8916
SD/eMMC, display, graphics, camera etc.
config MSM_GCC_8917
- tristate "MSM8917/QM215 Global Clock Controller"
+ tristate "MSM89(17/37)/QM215 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on msm8917 and qm215
- devices.
+ Support for the global clock controller on msm8917, msm8937
+ and qm215 devices.
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
@@ -460,6 +529,25 @@ config QCM_DISPCC_2290
Say Y if you want to support display devices and functionality such as
splash screen.
+config QCS_DISPCC_615
+ tristate "QCS615 Display Clock Controller"
+ select QCS_GCC_615
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config QCS_CAMCC_615
+ tristate "QCS615 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -467,6 +555,51 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SA_CAMCC_8775P
+ tristate "SA8775P Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SA8775P devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
+config QCS_GCC_8300
+ tristate "QCS8300 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc
+ QCS8300 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
+config QCS_GCC_615
+ tristate "QCS615 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCS615 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+
+config QCS_GPUCC_615
+ tristate "QCS615 Graphics clock controller"
+ select QCS_GCC_615
+ help
+ Support for the graphics clock controller on QCS615 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config QCS_VIDEOCC_615
+ tristate "QCS615 Video Clock Controller"
+ select QCS_GCC_615
+ help
+ Support for the video clock controller on QCS615 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -487,6 +620,16 @@ config SC_CAMCC_7280
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_8180X
+ tristate "SC8180X Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SC_GCC_8180X
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SC8180X devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_CAMCC_8280XP
tristate "SC8280XP Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -497,6 +640,16 @@ config SC_CAMCC_8280XP
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SA_DISPCC_8775P
+ tristate "SA8775P Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ help
+ Support for the two display clock controllers on Qualcomm
+ Technologies, Inc. SA8775P devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -545,6 +698,24 @@ config SA_GPUCC_8775P
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SAR_GCC_2130P
+ tristate "SAR2130P Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SAR2130P devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, SDCC, etc.
+
+config SAR_GPUCC_2130P
+ tristate "SAR2130P Graphics clock controller"
+ select QCOM_GDSC
+ select SAR_GCC_2130P
+ help
+ Support for the graphics clock controller on SAR2130P devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
@@ -834,6 +1005,14 @@ config SM_CAMCC_7150
Support for the camera clock controller on SM7150 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_MILOS
+ tristate "Milos Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the camera clock controller on Milos devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_8150
tristate "SM8150 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -857,7 +1036,7 @@ config SM_CAMCC_8450
depends on ARM64 || COMPILE_TEST
select SM_GCC_8450
help
- Support for the camera clock controller on SM8450 devices.
+ Support for the camera clock controller on SM8450 or SM8475 devices.
Say Y if you want to support camera devices and camera functionality.
config SM_CAMCC_8550
@@ -946,23 +1125,44 @@ config SM_DISPCC_6375
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_MILOS
+ tristate "Milos Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_MILOS
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ Milos devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_8450
tristate "SM8450 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
depends on SM_GCC_8450
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8450 devices.
+ SM8450 or SM8475 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550 || SM_GCC_8650
+ depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SAR2130P, SM8550 or SM8650 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config SM_DISPCC_8750
+ tristate "SM8750 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_8750
+ select QCOM_GDSC
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8550 or SM8650 devices.
+ SM8750 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -987,6 +1187,7 @@ config SM_GCC_6115
config SM_GCC_6125
tristate "SM6125 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM6125 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1019,9 +1220,19 @@ config SM_GCC_7150
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_MILOS
+ tristate "Milos Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Milos devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM8150 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1050,7 +1261,8 @@ config SM_GCC_8450
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on SM8450 devices.
+ Support for the global clock controller on SM8450 or SM8475
+ devices.
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
@@ -1072,6 +1284,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8750
+ tristate "SM8750 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_4450
tristate "SM4450 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1117,6 +1338,15 @@ config SM_GPUCC_6350
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_MILOS
+ tristate "Milos Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the graphics clock controller on Milos devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1149,7 +1379,8 @@ config SM_GPUCC_8450
depends on ARM64 || COMPILE_TEST
select SM_GCC_8450
help
- Support for the graphics clock controller on SM8450 devices.
+ Support for the graphics clock controller on SM8450 or SM8475
+ devices.
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
@@ -1171,6 +1402,15 @@ config SM_GPUCC_8650
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_LPASSCC_6115
+ tristate "SM6115 Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_6115
+ help
+ Support for the LPASS clock controller on SM6115 devices.
+ Say Y if you want to toggle LPASS-adjacent resets within
+ this clock controller to reset the LPASS subsystem.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1187,6 +1427,34 @@ config SM_TCSRCC_8650
Support for the TCSR clock controller on SM8650 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config SM_TCSRCC_8750
+ tristate "SM8750 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UFS/USB/PCIe.
+
+config SA_VIDEOCC_8775P
+ tristate "SA8775P Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SA8775P devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
+config SM_VIDEOCC_6350
+ tristate "SM6350 Video Clock Controller"
+ select SM_GCC_6350
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM6350 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SM_VIDEOCC_7150
tristate "SM7150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1197,6 +1465,17 @@ config SM_VIDEOCC_7150
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SM_VIDEOCC_MILOS
+ tristate "Milos Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ Milos devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,11 +1509,10 @@ config SM_VIDEOCC_8350
config SM_VIDEOCC_8550
tristate "SM8550 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
- select SM_GCC_8550
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8550 devices.
+ SM8550 or SM8650 or X1E80100 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
@@ -1283,7 +1561,7 @@ config SM_VIDEOCC_8450
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8450 devices.
+ SM8450 or SM8475 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 2b378667a63f..8051d481c439 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,21 +21,29 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_GLYMUR_DISPCC) += dispcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_GCC) += gcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_TCSRCC) += tcsrcc-glymur.o
obj-$(CONFIG_CLK_X1E80100_CAMCC) += camcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GPUCC) += gpucc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
+obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+obj-$(CONFIG_IPQ_APSS_5424) += apss-ipq5424.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
+obj-$(CONFIG_IPQ_GCC_5424) += gcc-ipq5424.o
obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
@@ -69,19 +77,31 @@ obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
+obj-$(CONFIG_QCS_DISPCC_615) += dispcc-qcs615.o
+obj-$(CONFIG_QCS_CAMCC_615) += camcc-qcs615.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
+obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
+obj-$(CONFIG_QCS_GPUCC_615) += gpucc-qcs615.o
+obj-$(CONFIG_QCS_VIDEOCC_615) += videocc-qcs615.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o
obj-$(CONFIG_QDU_GCC_1000) += gcc-qdu1000.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
+obj-$(CONFIG_SC_CAMCC_8180X) += camcc-sc8180x.o
obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_8280XP) += dispcc-sc8280xp.o
+obj-$(CONFIG_SA_CAMCC_8775P) += camcc-sa8775p.o
+obj-$(CONFIG_SA_DISPCC_8775P) += dispcc0-sa8775p.o dispcc1-sa8775p.o
obj-$(CONFIG_SA_GCC_8775P) += gcc-sa8775p.o
obj-$(CONFIG_SA_GPUCC_8775P) += gpucc-sa8775p.o
+obj-$(CONFIG_SA_VIDEOCC_8775P) += videocc-sa8775p.o
+obj-$(CONFIG_SAR_GCC_2130P) += gcc-sar2130p.o
+obj-$(CONFIG_SAR_GPUCC_2130P) += gpucc-sar2130p.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
@@ -115,6 +135,7 @@ obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
+obj-$(CONFIG_SM_CAMCC_MILOS) += camcc-milos.o
obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
@@ -124,6 +145,8 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
+obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
+obj-$(CONFIG_SM_DISPCC_MILOS) += dispcc-milos.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -136,6 +159,8 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
+obj-$(CONFIG_SM_GCC_MILOS) += gcc-milos.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -147,14 +172,19 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_GPUCC_MILOS) += gpucc-milos.o
+obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
+obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
+obj-$(CONFIG_SM_VIDEOCC_6350) += videocc-sm6350.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8450) += videocc-sm8450.o
obj-$(CONFIG_SM_VIDEOCC_8550) += videocc-sm8550.o
+obj-$(CONFIG_SM_VIDEOCC_MILOS) += videocc-milos.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f43d455ab4b8..724a642311e5 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -33,7 +33,6 @@ static const struct regmap_config a53pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static struct pll_freq_tbl *qcom_a53pll_get_freq_tbl(struct device *dev)
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
index c4a53e5db229..04b5492a3c21 100644
--- a/drivers/clk/qcom/a7-pll.c
+++ b/drivers/clk/qcom/a7-pll.c
@@ -27,7 +27,7 @@ static struct clk_alpha_pll a7pll = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "a7pll",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
@@ -50,7 +50,6 @@ static const struct regmap_config a7pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1000,
- .fast_io = true,
};
static int qcom_a7pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 76ece6c4a969..90dd1f1855c2 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, PD_FLAG_ATTACH_POWER_ON);
+ if (ret) {
+ dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
+ goto err;
+ }
return 0;
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index e8632db2c542..3a8987fe7008 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -73,20 +73,19 @@ static const struct alpha_pll_config ipq5018_pll_config = {
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
.test_ctl_hi_val = 0x00400003,
};
+/* 1.080 GHz configuration */
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
@@ -170,7 +169,6 @@ static const struct regmap_config ipq_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static int apss_ipq_pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq5424.c b/drivers/clk/qcom/apss-ipq5424.c
new file mode 100644
index 000000000000..4c67f722e009
--- /dev/null
+++ b/drivers/clk/qcom/apss-ipq5424.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+
+enum {
+ DT_XO,
+ DT_CLK_REF,
+};
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_APSS_PLL_EARLY,
+ P_L3_PLL,
+};
+
+struct apss_clk {
+ struct notifier_block cpu_clk_notifier;
+ struct clk_hw *hw;
+ struct device *dev;
+ struct clk *l3_clk;
+};
+
+static const struct alpha_pll_config apss_pll_config = {
+ .l = 0x3b,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_apss_pll = {
+ .offset = 0x0,
+ .config = &apss_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "apss_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_apss_silver_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_apss_pll.clkr.hw },
+};
+
+static const struct parent_map parents_apss_silver_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_APSS_PLL_EARLY, 5 },
+};
+
+static const struct freq_tbl ftbl_apss_clk_src[] = {
+ F(816000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1416000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1800000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ { }
+};
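+/*
+ * Editor's note: each F() entry records (rate, parent source,
+ * pre-divider, M, N); all three operating points here run the RCG
+ * straight off apss_pll with a pre-divider of 1 and no M/N fraction.
+ */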
+
+static struct clk_rcg2 apss_silver_clk_src = {
+ .cmd_rcgr = 0x0080,
+ .freq_tbl = ftbl_apss_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_apss_silver_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_clk_src",
+ .parent_data = parents_apss_silver_clk_src,
+ .num_parents = ARRAY_SIZE(parents_apss_silver_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch apss_silver_core_clk = {
+ .halt_reg = 0x008c,
+ .clkr = {
+ .enable_reg = 0x008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_silver_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config l3_pll_config = {
+ .l = 0x29,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_l3_pll = {
+ .offset = 0x10000,
+ .config = &l3_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_l3_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_l3_pll.clkr.hw },
+};
+
+static const struct parent_map parents_l3_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_L3_PLL, 5 },
+};
+
+static const struct freq_tbl ftbl_l3_clk_src[] = {
+ F(816000000, P_L3_PLL, 1, 0, 0),
+ F(984000000, P_L3_PLL, 1, 0, 0),
+ F(1272000000, P_L3_PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 l3_clk_src = {
+ .cmd_rcgr = 0x10080,
+ .freq_tbl = ftbl_l3_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_l3_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "l3_clk_src",
+ .parent_data = parents_l3_clk_src,
+ .num_parents = ARRAY_SIZE(parents_l3_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch l3_core_clk = {
+ .halt_reg = 0x1008c,
+ .clkr = {
+ .enable_reg = 0x1008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &l3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config apss_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct clk_regmap *apss_ipq5424_clks[] = {
+ [APSS_PLL_EARLY] = &ipq5424_apss_pll.clkr,
+ [APSS_SILVER_CLK_SRC] = &apss_silver_clk_src.clkr,
+ [APSS_SILVER_CORE_CLK] = &apss_silver_core_clk.clkr,
+ [L3_PLL] = &ipq5424_l3_pll.clkr,
+ [L3_CLK_SRC] = &l3_clk_src.clkr,
+ [L3_CORE_CLK] = &l3_core_clk.clkr,
+};
+
+static struct clk_alpha_pll *ipa5424_apss_plls[] = {
+ &ipq5424_l3_pll,
+ &ipq5424_apss_pll,
+};
+
+static struct qcom_cc_driver_data ipa5424_apss_driver_data = {
+ .alpha_plls = ipa5424_apss_plls,
+ .num_alpha_plls = ARRAY_SIZE(ipa5424_apss_plls),
+};
+
+#define IPQ_APPS_PLL_ID (5424 * 3) /* some unique value */
+
+static const struct qcom_icc_hws_data icc_ipq5424_cpu_l3[] = {
+ { MASTER_CPU, SLAVE_L3, L3_CORE_CLK },
+};
+
+static const struct qcom_cc_desc apss_ipq5424_desc = {
+ .config = &apss_ipq5424_regmap_config,
+ .clks = apss_ipq5424_clks,
+ .num_clks = ARRAY_SIZE(apss_ipq5424_clks),
+ .icc_hws = icc_ipq5424_cpu_l3,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_cpu_l3),
+ .icc_first_node_id = IPQ_APPS_PLL_ID,
+ .driver_data = &ipa5424_apss_driver_data,
+};
+
+static int apss_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &apss_ipq5424_desc);
+}
+
+static const struct of_device_id apss_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-apss-clk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apss_ipq5424_match_table);
+
+static struct platform_driver apss_ipq5424_driver = {
+ .probe = apss_ipq5424_probe,
+ .driver = {
+ .name = "apss-ipq5424-clk",
+ .of_match_table = apss_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(apss_ipq5424_driver);
+
+MODULE_DESCRIPTION("QCOM APSS IPQ5424 CLK Driver");
+MODULE_LICENSE("GPL");
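/*
 * Editor's addition, a minimal consumer-side sketch (not part of the
 * patch; the device handle and target rate are illustrative, and
 * <linux/clk.h> is assumed): cpufreq-style code scales the silver
 * cluster with plain clk API calls, the RCG snapping the request to
 * an entry of ftbl_apss_clk_src.
 */
static int ipq5424_scale_cpu(struct device *cpu_dev, unsigned long hz)
{
	struct clk *clk = devm_clk_get(cpu_dev, NULL);	/* CPU node clock */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_set_rate(clk, hz);
}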
diff --git a/drivers/clk/qcom/camcc-milos.c b/drivers/clk/qcom/camcc-milos.c
new file mode 100644
index 000000000000..0077c9c9249f
--- /dev/null
+++ b/drivers/clk/qcom/camcc-milos.c
@@ -0,0 +1,2161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_MAIN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_MAIN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_MAIN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco rivian_ole_vco[] = {
+ { 777000000, 1285000000, 0 },
+};
+
+/* 1200.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
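+/*
+ * Editor's check, assuming the usual 19.2 MHz TCXO and 16-bit alpha:
+ * Fout = 19.2 MHz * (0x3e + 0x8000 / 65536) = 19.2 MHz * 62.5
+ *      = 1200 MHz, matching the comment above.
+ */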
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 960.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00100000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = rivian_ole_vco,
+ .num_vco = ARRAY_SIZE(rivian_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .config = &cam_cc_pll4_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .config = &cam_cc_pll5_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .config = &cam_cc_pll6_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
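+/*
+ * The alpha PLLs above sit at 0x1000-aligned offsets and feed one or
+ * more fixed-ratio post dividers.  Each clk_div_table entry maps the
+ * raw register value to the divisor, so { 0x1, 2 } is the
+ * divide-by-2 "even" output and { 0x2, 3 } the divide-by-3 "odd" one.
+ */
+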
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL4_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+ { .hw = &cam_cc_pll4.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL5_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+ { .hw = &cam_cc_pll5.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL6_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+ { .hw = &cam_cc_pll6.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+};
+
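+/*
+ * Each cam_cc_parent_map_N table pairs a parent with the mux select
+ * value an RCG programs for it; the matching cam_cc_parent_data_N
+ * array must list the parents in the same order.  The frequency
+ * tables below use the F(rate, source, pre_div, m, n) macro from
+ * clk-rcg.h, which encodes the pre-divider as 2 * div - 1, so
+ * half-integer dividers such as 2.5 are representable.
+ */
+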
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x1a004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x2401c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x21004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x22004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1c05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cre_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cre_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x19004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x19028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x1904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x19070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x1a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x18004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x18044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x18064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x18084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(645000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ope_0_clk_src = {
+ .cmd_rcgr = 0x1b004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ope_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x25044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x1a04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x1c030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_1_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_clk_src = {
+ .cmd_rcgr = 0x1d004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_tfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x1d030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_2_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_tfe_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk_src",
+ .parent_data = cam_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x1e030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x25020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_10,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
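+/*
+ * Most RCGs above use clk_rcg2_shared_ops, which parks the clock on
+ * the XO source while it is disabled instead of leaving the last
+ * rate programmed; the fixed-rate XO and sleep RCGs use plain
+ * clk_rcg2_ops.  The clk_branch entries that follow are simple
+ * gates: halt_reg/halt_check describe how to confirm the branch
+ * really toggled after enable_reg/enable_mask are written.
+ */
+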
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1a064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x1a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x1a01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0x24040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_hf_clk = {
+ .halt_reg = 0x24010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_hf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_sf_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_nrt_axi_clk = {
+ .halt_reg = 0x2404c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2404c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2404c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_nrt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_rt_axi_clk = {
+ .halt_reg = 0x24034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_rt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x2101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x2201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x2501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x27020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x2701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x1901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x19040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x19064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x19088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x19020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x19044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x19068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x1908c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x2002c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2002c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x1803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x1805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x1809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_ahb_clk = {
+ .halt_reg = 0x1b034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_areg_clk = {
+ .halt_reg = 0x1b030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_clk = {
+ .halt_reg = 0x1b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ope_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0x25018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0x20038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_ahb_clk = {
+ .halt_reg = 0x1c078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_clk = {
+ .halt_reg = 0x1c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x1c074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_csid_clk = {
+ .halt_reg = 0x1c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_ahb_clk = {
+ .halt_reg = 0x1d058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_clk = {
+ .halt_reg = 0x1d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x1d054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_csid_clk = {
+ .halt_reg = 0x1d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_ahb_clk = {
+ .halt_reg = 0x1e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_clk = {
+ .halt_reg = 0x1e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x1e054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_csid_clk = {
+ .halt_reg = 0x1e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_top_shift_clk = {
+ .halt_reg = 0x25040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_top_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
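+/*
+ * Power domain for the camera subsystem: POLL_CFG_GDSCR polls the
+ * CFG GDSCR status bits on state changes, and RETAIN_FF_ENABLE keeps
+ * flip-flop (register) state across power collapse.
+ */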
+static struct gdsc cam_cc_camss_top_gdsc = {
+ .gdscr = 0x25004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_milos_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr,
+ [CAM_CC_CAMNOC_NRT_AXI_CLK] = &cam_cc_camnoc_nrt_axi_clk.clkr,
+ [CAM_CC_CAMNOC_RT_AXI_CLK] = &cam_cc_camnoc_rt_axi_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr,
+ [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr,
+ [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr,
+ [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+ [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr,
+ [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr,
+ [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr,
+ [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr,
+ [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr,
+ [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr,
+ [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr,
+ [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr,
+ [CAM_CC_TFE_2_AHB_CLK] = &cam_cc_tfe_2_ahb_clk.clkr,
+ [CAM_CC_TFE_2_CLK] = &cam_cc_tfe_2_clk.clkr,
+ [CAM_CC_TFE_2_CLK_SRC] = &cam_cc_tfe_2_clk_src.clkr,
+ [CAM_CC_TFE_2_CPHY_RX_CLK] = &cam_cc_tfe_2_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK] = &cam_cc_tfe_2_csid_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK_SRC] = &cam_cc_tfe_2_csid_clk_src.clkr,
+ [CAM_CC_TOP_SHIFT_CLK] = &cam_cc_top_shift_clk.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
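+/*
+ * Each entry gives the offset of the block's BCR (Block Control
+ * Register); asserting bit 0 holds the corresponding hardware block
+ * in reset.
+ */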
+static const struct qcom_reset_map cam_cc_milos_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x1a000 },
+ [CAM_CC_CAMNOC_BCR] = { 0x24000 },
+ [CAM_CC_CAMSS_TOP_BCR] = { 0x25000 },
+ [CAM_CC_CCI_0_BCR] = { 0x21000 },
+ [CAM_CC_CCI_1_BCR] = { 0x22000 },
+ [CAM_CC_CPAS_BCR] = { 0x23000 },
+ [CAM_CC_CRE_BCR] = { 0x27000 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x19000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x19024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x19048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x1906c },
+ [CAM_CC_ICP_BCR] = { 0x20000 },
+ [CAM_CC_MCLK0_BCR] = { 0x18000 },
+ [CAM_CC_MCLK1_BCR] = { 0x18020 },
+ [CAM_CC_MCLK2_BCR] = { 0x18040 },
+ [CAM_CC_MCLK3_BCR] = { 0x18060 },
+ [CAM_CC_MCLK4_BCR] = { 0x18080 },
+ [CAM_CC_OPE_0_BCR] = { 0x1b000 },
+ [CAM_CC_TFE_0_BCR] = { 0x1c000 },
+ [CAM_CC_TFE_1_BCR] = { 0x1d000 },
+ [CAM_CC_TFE_2_BCR] = { 0x1e000 },
+};
+
+static struct gdsc *cam_cc_milos_gdscs[] = {
+ [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc,
+};
+
+static struct clk_alpha_pll *cam_cc_milos_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+};
+
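+/*
+ * CBCRs that must stay enabled for the rest of the controller to be
+ * usable; they are handed to the common probe code through the
+ * driver data below and kept always-on rather than being exposed as
+ * clocks.
+ */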
+static u32 cam_cc_milos_critical_cbcrs[] = {
+ 0x25038, /* CAM_CC_GDSC_CLK */
+ 0x2505c, /* CAM_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config cam_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30728,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_milos_driver_data = {
+ .alpha_plls = cam_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_milos_plls),
+ .clk_cbcrs = cam_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc cam_cc_milos_desc = {
+ .config = &cam_cc_milos_regmap_config,
+ .clks = cam_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_milos_clocks),
+ .resets = cam_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_milos_resets),
+ .gdscs = cam_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_milos_driver_data,
+};
+
+static const struct of_device_id cam_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_milos_match_table);
+
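+/*
+ * All of the heavy lifting (regmap creation, one-time PLL
+ * configuration via the driver data, clock/reset/GDSC registration)
+ * is done by the common qcom_cc_probe() helper.
+ */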
+static int cam_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_milos_desc);
+}
+
+static struct platform_driver cam_cc_milos_driver = {
+ .probe = cam_cc_milos_probe,
+ .driver = {
+ .name = "cam_cc-milos",
+ .of_match_table = cam_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI CAM_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-qcs615.c b/drivers/clk/qcom/camcc-qcs615.c
new file mode 100644
index 000000000000..c063a3bfacd0
--- /dev/null
+++ b/drivers/clk/qcom/camcc-qcs615.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_AUX,
+ P_CAM_CC_PLL1_OUT_AUX,
+ P_CAM_CC_PLL2_OUT_AUX2,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_MAIN,
+};
+
+static const struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static const struct pll_vco spark_vco[] = {
+ { 1000000000, 2100000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 300000000, 500000000, 3 },
+ { 550000000, 1100000000, 4 },
+};
+
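+/*
+ * Each pll_vco entry is { min_freq, max_freq, val }: the output band
+ * a VCO setting supports and the value programmed into the VCO field
+ * to select it.
+ */
+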
+/* 600MHz configuration VCO - 2 */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 808MHz configuration VCO - 2 */
+static struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x2a,
+ .alpha_hi = 0x15,
+ .alpha = 0x55555555,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 960MHz configuration VCO - 0 */
+static struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = 0x3 << 8,
+ .config_ctl_val = 0x04289,
+ .test_ctl_val = 0x08000000,
+ .test_ctl_mask = 0x08000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
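+/* Fixed /2 post divider on PLL2's AUX2 output (960 MHz -> 480 MHz) */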
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_aux2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 1080MHz configuration VCO - 0 */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x38,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX2, 1 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2_out_aux2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
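+/*
+ * F(rate, parent, divider, m, n): the common F() macro stores the divider
+ * as (2 * div - 1) so half dividers such as 2.5 can be expressed; m/n
+ * program the optional MND counter of the RCG.
+ */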
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_AUX, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_AUX, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(66666667, P_CAM_CC_PLL0_OUT_AUX, 9, 0, 0),
+ F(133333333, P_CAM_CC_PLL0_OUT_AUX, 4.5, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_AUX2, 10, 1, 2),
+ F(34285714, P_CAM_CC_PLL2_OUT_AUX2, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_AUX, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xb144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
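+/*
+ * titan_top_gdsc is the top-level camera power domain; the BPS, IFE and
+ * IPE GDSCs below name it as their parent so it is enabled first.
+ */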
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_qcs615_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+ [CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX2] = &cam_cc_pll2_out_aux2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_qcs615_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_qcs615_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x6000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xb120 },
+ [CAM_CC_CCI_BCR] = { 0xb0d4 },
+ [CAM_CC_CPAS_BCR] = { 0xb118 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+ [CAM_CC_ICP_BCR] = { 0xb074 },
+ [CAM_CC_IFE_0_BCR] = { 0x9000 },
+ [CAM_CC_IFE_1_BCR] = { 0xa000 },
+ [CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+ [CAM_CC_IPE_0_BCR] = { 0x7000 },
+ [CAM_CC_JPEG_BCR] = { 0xb048 },
+ [CAM_CC_LRME_BCR] = { 0xb0f4 },
+ [CAM_CC_MCLK0_BCR] = { 0x4000 },
+ [CAM_CC_MCLK1_BCR] = { 0x4020 },
+ [CAM_CC_MCLK2_BCR] = { 0x4040 },
+ [CAM_CC_MCLK3_BCR] = { 0x4060 },
+ [CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
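+/* These PLLs are configured from their .config data during probe */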
+static struct clk_alpha_pll *cam_cc_qcs615_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+};
+
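+/* 32-bit MMIO registers at a 4-byte stride; fast_io selects spinlock locking */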
+static const struct regmap_config cam_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd004,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_qcs615_driver_data = {
+ .alpha_plls = cam_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_qcs615_plls),
+};
+
+static const struct qcom_cc_desc cam_cc_qcs615_desc = {
+ .config = &cam_cc_qcs615_regmap_config,
+ .clks = cam_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_qcs615_clocks),
+ .resets = cam_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_qcs615_resets),
+ .gdscs = cam_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_qcs615_gdscs),
+ .driver_data = &cam_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id cam_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_qcs615_match_table);
+
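+/*
+ * qcom_cc_probe() maps the controller registers and registers all of the
+ * clocks, resets and power domains described by the descriptor above.
+ */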
+static int cam_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_qcs615_desc);
+}
+
+static struct platform_driver cam_cc_qcs615_driver = {
+ .probe = cam_cc_qcs615_probe,
+ .driver = {
+ .name = "camcc-qcs615",
+ .of_match_table = cam_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
new file mode 100644
index 000000000000..50e5a131261b
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -0,0 +1,1960 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
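+/* 1200 MHz configuration (l = 0x3e, alpha = 0x8000 -> 62.5 * 19.2 MHz) */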
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
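+/* 960 MHz Rivian EVO configuration; feeds the camera sensor MCLK RCGs */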
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000247,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x1000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x2000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_6_ao[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_MAIN, 16, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x130a0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x130bc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0x130d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_3_clk_src = {
+ .cmd_rcgr = 0x130f4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x11034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x15074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x150b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x150d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x13120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x1307c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_clk_src = {
+ .cmd_rcgr = 0x10004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x15020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x1503c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x131f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x13138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x131d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6_ao,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
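+/*
+ * The branch clocks below are plain gates fed by the RCGs above.
+ * clk_branch2_ops sets enable_mask in enable_reg and, for BRANCH_HALT,
+ * polls halt_reg until the status bit reports the clock as running.
+ * BRANCH_HALT_DELAY only waits (no bit to check), BRANCH_HALT_SKIP
+ * skips the check, and BRANCH_HALT_VOTED is for voted enable registers.
+ */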
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x13188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13188,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x13190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13190,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x131b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x130b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x130d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0x130f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_3_clk = {
+ .halt_reg = 0x1310c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1310c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x131d0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x131d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x13118,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x12024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x1301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_clk = {
+ .halt_reg = 0x10024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_lite_0_clk = {
+ .halt_reg = 0x13050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_lite_1_clk = {
+ .halt_reg = 0x13068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x1508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x150b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x150d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x150f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x13168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x15090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x150b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x150d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x150f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x1309c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1309c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x13094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x13038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_fast_ahb_clk = {
+ .halt_reg = 0x10034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x1501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x15070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_0_clk = {
+ .halt_reg = 0x1304c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1304c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_0_fast_ahb_clk = {
+ .halt_reg = 0x1305c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1305c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_1_clk = {
+ .halt_reg = 0x13064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_1_fast_ahb_clk = {
+ .halt_reg = 0x13074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sm_obs_clk = {
+ .halt_reg = 0x1510c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1510c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
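+/*
+ * Power domain for the Titan (camera) subsystem. POLL_CFG_GDSCR polls
+ * the CFG register for the power state, and RETAIN_FF_ENABLE keeps
+ * register contents across a power collapse.
+ */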
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0x131bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr,
+ [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_CLK] = &cam_cc_cpas_ipe_clk.clkr,
+ [CAM_CC_CPAS_SFE_LITE_0_CLK] = &cam_cc_cpas_sfe_lite_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_LITE_1_CLK] = &cam_cc_cpas_sfe_lite_1_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_AHB_CLK] = &cam_cc_ipe_ahb_clk.clkr,
+ [CAM_CC_IPE_CLK] = &cam_cc_ipe_clk.clkr,
+ [CAM_CC_IPE_CLK_SRC] = &cam_cc_ipe_clk_src.clkr,
+ [CAM_CC_IPE_FAST_AHB_CLK] = &cam_cc_ipe_fast_ahb_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_SFE_LITE_0_CLK] = &cam_cc_sfe_lite_0_clk.clkr,
+ [CAM_CC_SFE_LITE_0_FAST_AHB_CLK] = &cam_cc_sfe_lite_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_LITE_1_CLK] = &cam_cc_sfe_lite_1_clk.clkr,
+ [CAM_CC_SFE_LITE_1_FAST_AHB_CLK] = &cam_cc_sfe_lite_1_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
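+	/* qcs8300 only; installed by cam_cc_sa8775p_probe() */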
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+};
+
+static struct gdsc *cam_cc_sa8775p_gdscs[] = {
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sa8775p_resets[] = {
+ [CAM_CC_ICP_BCR] = { 0x13078 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IPE_0_BCR] = { 0x10000 },
+ [CAM_CC_SFE_LITE_0_BCR] = { 0x13048 },
+ [CAM_CC_SFE_LITE_1_BCR] = { 0x13060 },
+};
+
+static const struct regmap_config cam_cc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16218,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
+ .config = &cam_cc_sa8775p_regmap_config,
+ .clks = cam_cc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sa8775p_clocks),
+ .resets = cam_cc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sa8775p_resets),
+ .gdscs = cam_cc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sa8775p_gdscs),
+};
+
+static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
+ { .compatible = "qcom,sa8775p-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sa8775p_match_table);
+
+static int cam_cc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
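+	/*
+	 * The CAMCC register space is only reachable while the
+	 * controller's power domain is up, so hold a runtime PM
+	 * reference across the register accesses below.
+	 */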
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
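+	/* Program the PLLs before registering the clocks parented to them */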
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+
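+	/*
+	 * QCS8300 reuses the SA8775P clock topology, but at shifted
+	 * register offsets and without the CCI3/CSIPHY3/MCLK3 and
+	 * CSI3PHYTIMER instances, so patch the offsets and drop the
+	 * absent clocks before registration.
+	 */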
+ if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sa8775p_driver = {
+ .probe = cam_cc_sa8775p_probe,
+ .driver = {
+ .name = "camcc-sa8775p",
+ .of_match_table = cam_cc_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index 10e924cd533d..5031df813b4a 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -5,8 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
index accd257632df..55545f5fdb98 100644
--- a/drivers/clk/qcom/camcc-sc7280.c
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -7,8 +7,8 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sc8180x.c b/drivers/clk/qcom/camcc-sc8180x.c
new file mode 100644
index 000000000000..388fedf1dc81
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc8180x.c
@@ -0,0 +1,2889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sc8180x-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
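+/* Permitted VCO frequency bands, in Hz, for the Regera and Trion PLLs */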
+static const struct pll_vco regera_vco[] = {
+ { 600000000, 3300000000, 0 },
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
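+/*
+ * The _out_even/_out_odd postdivs program divider fields inside the
+ * same PLL register block (same .offset); only post_div_shift differs.
+ */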
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x13,
+ .alpha = 0x8800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000807,
+ .config_ctl_hi_val = 0x00000011,
+ .config_ctl_hi1_val = 0x04300142,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000100,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = regera_vco,
+ .num_vco = ARRAY_SIZE(regera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_regera_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x4078,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x40f0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
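+/*
+ * Each parent_map entry pairs a P_* source with the mux value written
+ * to the RCG CFG register; the matching parent_data array must list
+ * the parents in the same order.
+ */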
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+};
+
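+/*
+ * Frequency table rows are F(rate_hz, source, pre_divider, m, n),
+ * terminated by an empty entry; m and n are 0 when the MND counter
+ * is unused.
+ */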
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0xc204,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_3_clk_src = {
+ .cmd_rcgr = 0xc220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0xf010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
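+/*
+ * The remaining CSID RCGs (IFE2/IFE3 and the IFE-lite blocks) reuse
+ * ftbl_cam_cc_fd_core_clk_src, as they support the same set of rates.
+ */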
+static struct clk_rcg2 cam_cc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xf03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_3_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_3_clk_src = {
+ .cmd_rcgr = 0xf07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_3_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_3_csid_clk_src = {
+ .cmd_rcgr = 0xf0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
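+/*
+ * Half-integer pre-dividers such as the 1.5 below are valid: the F()
+ * macro stores (2 * div - 1), so the RCG divider field encodes rates in
+ * steps of 0.5.
+ */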
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_clk_src = {
+ .cmd_rcgr = 0xc240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_csid_clk_src = {
+ .cmd_rcgr = 0xc25c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_clk_src = {
+ .cmd_rcgr = 0xc284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_csid_clk_src = {
+ .cmd_rcgr = 0xc2a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(375000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
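+/*
+ * The sensor MCLK RCGs use an 8-bit M/N counter (mnd_width = 8); the 1/4
+ * M/N ratio below derives the fractional 24 MHz rate from PLL2's early
+ * output.
+ */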
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x5084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x50a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x50c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x50e4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc1cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
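+/*
+ * Branch (gate) clocks: each toggles BIT(0) of its CBCR and, with
+ * BRANCH_HALT, polls the halt bit in the same register to confirm that
+ * the clock actually started or stopped.
+ */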
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0xc21c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc21c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_3_clk = {
+ .halt_reg = 0xc238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc238,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
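+/*
+ * The core AHB clock is voted on by multiple agents, hence
+ * BRANCH_HALT_VOTED: the halt bit is not polled on disable, since another
+ * voter may legitimately keep the clock running.
+ */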
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x6088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xc100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_axi_clk = {
+ .halt_reg = 0xf068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0xf028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xf064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_csid_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0xf038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_axi_clk = {
+ .halt_reg = 0xf0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_clk = {
+ .halt_reg = 0xf094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_cphy_rx_clk = {
+ .halt_reg = 0xf0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_csid_clk = {
+ .halt_reg = 0xf0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_dsp_clk = {
+ .halt_reg = 0xf0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_clk = {
+ .halt_reg = 0xc258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc258,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_cphy_rx_clk = {
+ .halt_reg = 0xc27c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc27c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_csid_clk = {
+ .halt_reg = 0xc274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_clk = {
+ .halt_reg = 0xc29c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc29c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_cphy_rx_clk = {
+ .halt_reg = 0xc2c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_csid_clk = {
+ .halt_reg = 0xc2b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
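+/* IPE1 has no RCG of its own; it runs off the IPE0 RCG at the same rate. */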
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x509c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x50bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x50dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x50fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
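+/*
+ * GDSC power domains: titan_top is the top-level camera subsystem domain;
+ * the per-block GDSCs below name it as their genpd parent so it is powered
+ * up first and torn down last.
+ */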
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_3_gdsc = {
+ .gdscr = 0xf070,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
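+/* Indices are the CAM_CC_* constants from the SC8180X camcc DT binding. */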
+static struct clk_regmap *cam_cc_sc8180x_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr,
+ [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_2_AXI_CLK] = &cam_cc_ife_2_axi_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_CPHY_RX_CLK] = &cam_cc_ife_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK] = &cam_cc_ife_2_csid_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK_SRC] = &cam_cc_ife_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_3_AXI_CLK] = &cam_cc_ife_3_axi_clk.clkr,
+ [CAM_CC_IFE_3_CLK] = &cam_cc_ife_3_clk.clkr,
+ [CAM_CC_IFE_3_CLK_SRC] = &cam_cc_ife_3_clk_src.clkr,
+ [CAM_CC_IFE_3_CPHY_RX_CLK] = &cam_cc_ife_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK] = &cam_cc_ife_3_csid_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK_SRC] = &cam_cc_ife_3_csid_clk_src.clkr,
+ [CAM_CC_IFE_3_DSP_CLK] = &cam_cc_ife_3_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CLK] = &cam_cc_ife_lite_2_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CLK_SRC] = &cam_cc_ife_lite_2_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CPHY_RX_CLK] = &cam_cc_ife_lite_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK] = &cam_cc_ife_lite_2_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK_SRC] = &cam_cc_ife_lite_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CLK] = &cam_cc_ife_lite_3_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CLK_SRC] = &cam_cc_ife_lite_3_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CPHY_RX_CLK] = &cam_cc_ife_lite_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK] = &cam_cc_ife_lite_3_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK_SRC] = &cam_cc_ife_lite_3_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sc8180x_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [IFE_3_GDSC] = &ife_3_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
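+/*
+ * Block Control Registers (BCRs): the reset controller asserts and
+ * deasserts BIT(0) at each offset listed here.
+ */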
+static const struct qcom_reset_map cam_cc_sc8180x_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x7000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xc16c },
+ [CAM_CC_CCI_BCR] = { 0xc104 },
+ [CAM_CC_CPAS_BCR] = { 0xc164 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x6000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x6024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x6048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x606c },
+ [CAM_CC_FD_BCR] = { 0xc0dc },
+ [CAM_CC_ICP_BCR] = { 0xc0b4 },
+ [CAM_CC_IFE_0_BCR] = { 0xa000 },
+ [CAM_CC_IFE_1_BCR] = { 0xb000 },
+ [CAM_CC_IFE_2_BCR] = { 0xf000 },
+ [CAM_CC_IFE_3_BCR] = { 0xf06c },
+ [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAM_CC_IFE_LITE_2_BCR] = { 0xc23c },
+ [CAM_CC_IFE_LITE_3_BCR] = { 0xc280 },
+ [CAM_CC_IPE_0_BCR] = { 0x8000 },
+ [CAM_CC_IPE_1_BCR] = { 0x9000 },
+ [CAM_CC_JPEG_BCR] = { 0xc088 },
+ [CAM_CC_LRME_BCR] = { 0xc140 },
+ [CAM_CC_MCLK0_BCR] = { 0x5000 },
+ [CAM_CC_MCLK1_BCR] = { 0x5020 },
+ [CAM_CC_MCLK2_BCR] = { 0x5040 },
+ [CAM_CC_MCLK3_BCR] = { 0x5060 },
+ [CAM_CC_MCLK4_BCR] = { 0x5080 },
+ [CAM_CC_MCLK5_BCR] = { 0x50a0 },
+ [CAM_CC_MCLK6_BCR] = { 0x50c0 },
+ [CAM_CC_MCLK7_BCR] = { 0x50e0 },
+};
+
+static const struct regmap_config cam_cc_sc8180x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf0d4,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc8180x_desc = {
+ .config = &cam_cc_sc8180x_regmap_config,
+ .clks = cam_cc_sc8180x_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc8180x_clocks),
+ .resets = cam_cc_sc8180x_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sc8180x_resets),
+ .gdscs = cam_cc_sc8180x_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc8180x_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc8180x_match_table[] = {
+ { .compatible = "qcom,sc8180x-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc8180x_match_table);
+
+static int cam_cc_sc8180x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc8180x_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_trion_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_trion_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc200); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sc8180x_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sc8180x_driver = {
+ .probe = cam_cc_sc8180x_probe,
+ .driver = {
+ .name = "camcc-sc8180x",
+ .of_match_table = cam_cc_sc8180x_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sc8180x_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SC8180X Driver");
+MODULE_LICENSE("GPL");
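/*
 * Editorial sketch, not part of the patch: once the sc8180x provider above
 * has probed, a consumer picks these clocks up through the standard clk API.
 * Device and clock-name bindings below are illustrative only.
 */
#include <linux/clk.h>

static int example_sensor_clk_setup(struct device *dev)
{
	struct clk *mclk;
	int ret;

	/* "mclk" would come from the sensor node's clock-names property */
	mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* e.g. route CAM_CC_MCLK0_CLK to the sensor at 24 MHz */
	ret = clk_set_rate(mclk, 24000000);
	if (ret)
		return ret;

	return clk_prepare_enable(mclk);
}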
diff --git a/drivers/clk/qcom/camcc-sc8280xp.c b/drivers/clk/qcom/camcc-sc8280xp.c
index 479964f91608..18f5a3eb313e 100644
--- a/drivers/clk/qcom/camcc-sc8280xp.c
+++ b/drivers/clk/qcom/camcc-sc8280xp.c
@@ -2987,7 +2987,7 @@ static const struct regmap_config camcc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc camcc_sc8280xp_desc = {
+static const struct qcom_cc_desc camcc_sc8280xp_desc = {
.config = &camcc_sc8280xp_regmap_config,
.clks = camcc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(camcc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 40022a10f8c0..cf60e8dd292a 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sm4450.c b/drivers/clk/qcom/camcc-sm4450.c
index f8503ced3d05..6170d5ad9cbf 100644
--- a/drivers/clk/qcom/camcc-sm4450.c
+++ b/drivers/clk/qcom/camcc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -1641,7 +1640,7 @@ static const struct regmap_config cam_cc_sm4450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm4450_desc = {
+static const struct qcom_cc_desc cam_cc_sm4450_desc = {
.config = &cam_cc_sm4450_regmap_config,
.clks = cam_cc_sm4450_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm4450_clocks),
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index f6634cc8663e..8aac97d29ce3 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -1694,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "bps_gdsc",
},
@@ -1703,6 +1707,9 @@ static struct gdsc bps_gdsc = {
static struct gdsc ipe_0_gdsc = {
.gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ipe_0_gdsc",
},
@@ -1712,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = {
static struct gdsc ife_0_gdsc = {
.gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_0_gdsc",
},
@@ -1720,6 +1730,9 @@ static struct gdsc ife_0_gdsc = {
static struct gdsc ife_1_gdsc = {
.gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_1_gdsc",
},
@@ -1728,6 +1741,9 @@ static struct gdsc ife_1_gdsc = {
static struct gdsc ife_2_gdsc = {
.gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_2_gdsc",
},
@@ -1736,6 +1752,9 @@ static struct gdsc ife_2_gdsc = {
static struct gdsc titan_top_gdsc = {
.gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "titan_top_gdsc",
},
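/*
 * Editorial note: the three wait values added to each sm6350 GDSC above
 * program the hardware's EN_REST, EN_FEW and CLK_DIS wait counters
 * explicitly instead of trusting whatever the bootloader left behind.
 * A hedged, self-contained sketch of the pattern (offset and name are
 * hypothetical; the field comments paraphrase the usual semantics):
 */
static struct gdsc example_gdsc = {
	.gdscr = 0x6004,		/* hypothetical GDSCR offset */
	.en_rest_wait_val = 0x2,	/* power-switch rest-enable delay */
	.en_few_wait_val = 0x2,		/* power-switch few-enable delay */
	.clk_dis_wait_val = 0xf,	/* delay before clocks gate on collapse */
	.pd = {
		.name = "example_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};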
diff --git a/drivers/clk/qcom/camcc-sm7150.c b/drivers/clk/qcom/camcc-sm7150.c
index 39033a6bb616..4a3baf5d8e85 100644
--- a/drivers/clk/qcom/camcc-sm7150.c
+++ b/drivers/clk/qcom/camcc-sm7150.c
@@ -7,7 +7,6 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c
index bb3009818ad7..62aadb27c50e 100644
--- a/drivers/clk/qcom/camcc-sm8150.c
+++ b/drivers/clk/qcom/camcc-sm8150.c
@@ -6,9 +6,9 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -2094,7 +2094,7 @@ static const struct regmap_config cam_cc_sm8150_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8150_desc = {
+static const struct qcom_cc_desc cam_cc_sm8150_desc = {
.config = &cam_cc_sm8150_regmap_config,
.clks = cam_cc_sm8150_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8150_clocks),
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 34d2f17520dc..6da89c49ba3d 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -4,10 +4,10 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,camcc-sm8250.h>
@@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.parent_data = cam_cc_parent_data_2,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.parent_data = cam_cc_parent_data_3,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
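/*
 * Editorial note on the sm8250 conversion above: clk_rcg2_shared_ops differ
 * from plain clk_rcg2_ops in that disabling the clock parks the RCG on a
 * safe (XO) source instead of leaving the last configuration live, and the
 * saved rate is restored on re-enable. Minimal hedged sketch of a
 * declaration using the shared ops (offset, parents and tables hypothetical):
 */
static struct clk_rcg2 example_clk_src = {
	.cmd_rcgr = 0x1000,		/* hypothetical CMD_RCGR offset */
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = example_parent_map,
	.freq_tbl = example_freq_tbl,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_clk_src",
		.parent_data = example_parent_data,
		.num_parents = ARRAY_SIZE(example_parent_data),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_shared_ops,	/* park on XO when disabled */
	},
};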
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index 26b78eed15ef..4dd8be8cc988 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -54,6 +54,10 @@ static const struct pll_vco rivian_evo_vco[] = {
{ 864000000, 1056000000, 0 },
};
+static const struct pll_vco rivian_ole_vco[] = {
+ { 864000000, 1075000000, 0 },
+};
+
static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO };
static const struct alpha_pll_config cam_cc_pll0_config = {
@@ -66,8 +70,23 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -86,6 +105,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll0_out_even_init = {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
.offset = 0x0,
.post_div_shift = 10,
@@ -109,6 +138,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll0_out_odd_init = {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
.offset = 0x0,
.post_div_shift = 14,
@@ -137,8 +176,23 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll1_config = {
+ .l = 0x25,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -157,6 +211,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll1_out_even_init = {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
.offset = 0x1000,
.post_div_shift = 10,
@@ -183,8 +247,19 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
.config_ctl_hi1_val = 0x00000217,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000000,
+};
+
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_evo_vco,
.num_vco = ARRAY_SIZE(rivian_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -208,8 +283,23 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll3_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -228,6 +318,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll3_out_even_init = {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
.offset = 0x3000,
.post_div_shift = 10,
@@ -256,8 +356,23 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll4_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -276,6 +391,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll4_out_even_init = {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
.offset = 0x4000,
.post_div_shift = 10,
@@ -304,8 +429,23 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll5_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -324,6 +464,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll5_out_even_init = {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
.offset = 0x5000,
.post_div_shift = 10,
@@ -352,8 +502,23 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll6_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -372,6 +537,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll6_out_even_init = {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
.offset = 0x6000,
.post_div_shift = 10,
@@ -400,8 +575,23 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll7_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -420,6 +610,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll7_out_even_init = {
+ .name = "cam_cc_pll7_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
.offset = 0x7000,
.post_div_shift = 10,
@@ -448,8 +648,23 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -468,6 +683,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll8_out_even_init = {
+ .name = "cam_cc_pll8_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
.offset = 0x8000,
.post_div_shift = 10,
@@ -1260,24 +1485,6 @@ static struct clk_rcg2 cam_cc_xo_clk_src = {
},
};
-static struct clk_branch cam_cc_gdsc_clk = {
- .halt_reg = 0x1320c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1320c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "cam_cc_gdsc_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &cam_cc_xo_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch cam_cc_bps_ahb_clk = {
.halt_reg = 0x1004c,
.halt_check = BRANCH_HALT,
@@ -2603,7 +2810,6 @@ static struct clk_regmap *cam_cc_sm8450_clocks[] = {
[CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
[CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
- [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
[CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
@@ -2697,6 +2903,22 @@ static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x13094 },
};
+static struct clk_alpha_pll *cam_cc_sm8450_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_sm8450_critical_cbcrs[] = {
+ 0x1320c, /* CAM_CC_GDSC_CLK */
+};
+
static const struct regmap_config cam_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2805,6 +3027,13 @@ static struct gdsc *cam_cc_sm8450_gdscs[] = {
[TITAN_TOP_GDSC] = &titan_top_gdsc,
};
+static struct qcom_cc_driver_data cam_cc_sm8450_driver_data = {
+ .alpha_plls = cam_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8450_plls),
+ .clk_cbcrs = cam_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8450_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.config = &cam_cc_sm8450_regmap_config,
.clks = cam_cc_sm8450_clocks,
@@ -2813,33 +3042,77 @@ static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
.gdscs = cam_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8450_driver_data,
};
static const struct of_device_id cam_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-camcc" },
+ { .compatible = "qcom,sm8475-camcc" },
{ }
};
MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
static int cam_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
-
- return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-camcc")) {
+ /* Update CAMCC PLL0 */
+ cam_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_even.clkr.hw.init = &sm8475_cam_cc_pll0_out_even_init;
+ cam_cc_pll0_out_odd.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_odd.clkr.hw.init = &sm8475_cam_cc_pll0_out_odd_init;
+
+ /* Update CAMCC PLL1 */
+ cam_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll1_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll1_out_even.clkr.hw.init = &sm8475_cam_cc_pll1_out_even_init;
+
+ /* Update CAMCC PLL2 */
+ cam_cc_pll2.vco_table = rivian_ole_vco;
+
+ /* Update CAMCC PLL3 */
+ cam_cc_pll3.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll3_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll3_out_even.clkr.hw.init = &sm8475_cam_cc_pll3_out_even_init;
+
+ /* Update CAMCC PLL4 */
+ cam_cc_pll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll4_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll4_out_even.clkr.hw.init = &sm8475_cam_cc_pll4_out_even_init;
+
+ /* Update CAMCC PLL5 */
+ cam_cc_pll5.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll5_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll5_out_even.clkr.hw.init = &sm8475_cam_cc_pll5_out_even_init;
+
+ /* Update CAMCC PLL6 */
+ cam_cc_pll6.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll6_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll6_out_even.clkr.hw.init = &sm8475_cam_cc_pll6_out_even_init;
+
+ /* Update CAMCC PLL7 */
+ cam_cc_pll7.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll7_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll7_out_even.clkr.hw.init = &sm8475_cam_cc_pll7_out_even_init;
+
+ /* Update CAMCC PLL8 */
+ cam_cc_pll8.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll8_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll8_out_even.clkr.hw.init = &sm8475_cam_cc_pll8_out_even_init;
+
+ cam_cc_pll0.config = &sm8475_cam_cc_pll0_config;
+ cam_cc_pll1.config = &sm8475_cam_cc_pll1_config;
+ cam_cc_pll2.config = &sm8475_cam_cc_pll2_config;
+ cam_cc_pll3.config = &sm8475_cam_cc_pll3_config;
+ cam_cc_pll4.config = &sm8475_cam_cc_pll4_config;
+ cam_cc_pll5.config = &sm8475_cam_cc_pll5_config;
+ cam_cc_pll6.config = &sm8475_cam_cc_pll6_config;
+ cam_cc_pll7.config = &sm8475_cam_cc_pll7_config;
+ cam_cc_pll8.config = &sm8475_cam_cc_pll8_config;
+ }
+
+ return qcom_cc_probe(pdev, &cam_cc_sm8450_desc);
}
static struct platform_driver cam_cc_sm8450_driver = {
@@ -2852,5 +3125,5 @@ static struct platform_driver cam_cc_sm8450_driver = {
module_platform_driver(cam_cc_sm8450_driver);
-MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver");
+MODULE_DESCRIPTION("QCOM CAMCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
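/*
 * Editorial note: the SM8475 reuses the SM8450 block but fabricates its
 * PLLs as Lucid OLE rather than Lucid EVO, so the probe above retargets
 * .regs, .config and the post-divider init data before registration.
 * Distilled, the pattern is (all names hypothetical):
 */
static int example_cc_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "vendor,newer-soc-cc")) {
		/* same register map, different PLL generation */
		example_pll.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
		example_pll.config = &newer_soc_example_pll_config;
		/* post-dividers need the OLE ops too, hence swapped init data */
		example_pll_out_even.clkr.hw.init = &newer_soc_example_pll_out_even_init;
	}

	return qcom_cc_probe(pdev, &example_cc_desc);
}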
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index eac850bb690a..63aed9e4c362 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8550-camcc.h>
@@ -74,6 +73,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -151,6 +151,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -201,6 +202,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -232,6 +234,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -286,6 +289,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -340,6 +344,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -394,6 +399,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -448,6 +454,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -502,6 +509,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -556,6 +564,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -610,6 +619,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -664,6 +674,7 @@ static const struct alpha_pll_config cam_cc_pll11_config = {
static struct clk_alpha_pll cam_cc_pll11 = {
.offset = 0xb000,
+ .config = &cam_cc_pll11_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -718,6 +729,7 @@ static const struct alpha_pll_config cam_cc_pll12_config = {
static struct clk_alpha_pll cam_cc_pll12 = {
.offset = 0xc000,
+ .config = &cam_cc_pll12_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3479,6 +3491,27 @@ static const struct qcom_reset_map cam_cc_sm8550_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x133dc },
};
+static struct clk_alpha_pll *cam_cc_sm8550_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+ &cam_cc_pll11,
+ &cam_cc_pll12,
+};
+
+static u32 cam_cc_sm8550_critical_cbcrs[] = {
+ 0x1419c, /* CAM_CC_GDSC_CLK */
+ 0x142cc, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3487,7 +3520,14 @@ static const struct regmap_config cam_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8550_desc = {
+static struct qcom_cc_driver_data cam_cc_sm8550_driver_data = {
+ .alpha_plls = cam_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8550_plls),
+ .clk_cbcrs = cam_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8550_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.config = &cam_cc_sm8550_regmap_config,
.clks = cam_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8550_clocks),
@@ -3495,6 +3535,8 @@ static struct qcom_cc_desc cam_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8550_resets),
.gdscs = cam_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8550_driver_data,
};
static const struct of_device_id cam_cc_sm8550_match_table[] = {
@@ -3505,46 +3547,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8550_match_table);
static int cam_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll11, regmap, &cam_cc_pll11_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll12, regmap, &cam_cc_pll12_config);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x1419c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x142cc); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8550_desc);
}
static struct platform_driver cam_cc_sm8550_driver = {
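/*
 * Editorial note: with .use_rpm set and .driver_data carrying the PLL and
 * critical-CBCR tables, the boilerplate deleted above migrates into the
 * common qcom_cc_probe() path. A rough, hedged sketch of the sequence that
 * path is expected to perform (the authoritative code lives in common.c,
 * which presumably dispatches on GET_PLL_TYPE() to pick the right
 * *_pll_configure variant; clk_alpha_pll_configure stands in here):
 */
static int sketch_qcom_cc_probe(struct platform_device *pdev,
				const struct qcom_cc_desc *desc)
{
	struct qcom_cc_driver_data *data = desc->driver_data;
	struct regmap *regmap;
	int i, ret;

	ret = devm_pm_runtime_enable(&pdev->dev);	/* when desc->use_rpm */
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	regmap = qcom_cc_map(pdev, desc);
	if (IS_ERR(regmap)) {
		pm_runtime_put(&pdev->dev);
		return PTR_ERR(regmap);
	}

	/* each PLL now carries its own .config, so one generic loop suffices */
	for (i = 0; i < data->num_alpha_plls; i++)
		clk_alpha_pll_configure(data->alpha_plls[i], regmap,
					data->alpha_plls[i]->config);

	/* keep the listed critical branch clocks always enabled */
	for (i = 0; i < data->num_clk_cbcrs; i++)
		qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);

	ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);

	pm_runtime_put(&pdev->dev);

	return ret;
}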
diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c
index a37e52a67ed4..8b388904f56f 100644
--- a/drivers/clk/qcom/camcc-sm8650.c
+++ b/drivers/clk/qcom/camcc-sm8650.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-camcc.h>
@@ -72,6 +71,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -149,6 +149,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -199,6 +200,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -230,6 +232,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -284,6 +287,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -338,6 +342,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -392,6 +397,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -446,6 +452,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -500,6 +507,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -554,6 +562,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -631,6 +640,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3509,6 +3519,27 @@ static const struct qcom_reset_map cam_cc_sm8650_resets[] = {
[CAM_CC_SFE_2_BCR] = { 0x130f4 },
};
+static struct clk_alpha_pll *cam_cc_sm8650_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+};
+
+static u32 cam_cc_sm8650_critical_cbcrs[] = {
+ 0x132ec, /* CAM_CC_GDSC_CLK */
+ 0x13308, /* CAM_CC_SLEEP_CLK */
+ 0x13314, /* CAM_CC_DRV_XO_CLK */
+ 0x13318, /* CAM_CC_DRV_AHB_CLK */
+};
+
static const struct regmap_config cam_cc_sm8650_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3517,7 +3548,14 @@ static const struct regmap_config cam_cc_sm8650_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8650_desc = {
+static struct qcom_cc_driver_data cam_cc_sm8650_driver_data = {
+ .alpha_plls = cam_cc_sm8650_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8650_plls),
+ .clk_cbcrs = cam_cc_sm8650_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8650_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.config = &cam_cc_sm8650_regmap_config,
.clks = cam_cc_sm8650_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8650_clocks),
@@ -3525,6 +3563,8 @@ static struct qcom_cc_desc cam_cc_sm8650_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8650_resets),
.gdscs = cam_cc_sm8650_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8650_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8650_driver_data,
};
static const struct of_device_id cam_cc_sm8650_match_table[] = {
@@ -3535,46 +3575,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8650_match_table);
static int cam_cc_sm8650_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8650_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13318); /* CAM_CC_DRV_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x13314); /* CAM_CC_DRV_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x132ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13308); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8650_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8650_desc);
}
static struct platform_driver cam_cc_sm8650_driver = {
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index 85e76c7712ad..cbcc1c9fcb34 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,x1e80100-camcc.h>
@@ -67,6 +66,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +144,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -194,6 +195,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -225,6 +227,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -279,6 +282,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -333,6 +337,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -387,6 +392,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -2212,6 +2218,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -2221,6 +2229,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2233,6 +2242,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2245,6 +2255,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2257,6 +2268,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2269,6 +2281,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2411,6 +2424,21 @@ static const struct qcom_reset_map cam_cc_x1e80100_resets[] = {
[CAM_CC_SFE_0_BCR] = { 0x1327c },
};
+static struct clk_alpha_pll *cam_cc_x1e80100_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll6,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_x1e80100_critical_cbcrs[] = {
+ 0x13a9c, /* CAM_CC_GDSC_CLK */
+ 0x13ab8, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2419,6 +2447,13 @@ static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_x1e80100_driver_data = {
+ .alpha_plls = cam_cc_x1e80100_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_x1e80100_plls),
+ .clk_cbcrs = cam_cc_x1e80100_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_x1e80100_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.config = &cam_cc_x1e80100_regmap_config,
.clks = cam_cc_x1e80100_clocks,
@@ -2427,6 +2462,8 @@ static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.num_resets = ARRAY_SIZE(cam_cc_x1e80100_resets),
.gdscs = cam_cc_x1e80100_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_x1e80100_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_x1e80100_driver_data,
};
static const struct of_device_id cam_cc_x1e80100_match_table[] = {
@@ -2437,40 +2474,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_x1e80100_match_table);
static int cam_cc_x1e80100_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_x1e80100_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13a9c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13ab8); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_x1e80100_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_x1e80100_desc);
}
static struct platform_driver cam_cc_x1e80100_driver = {
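/*
 * Editorial note: giving each x1e80100 camera GDSC
 * .parent = &cam_cc_titan_top_gdsc.pd makes GDSC registration link the
 * power domains, so genpd guarantees TITAN_TOP is powered before
 * BPS/IFE/IPE/SFE come up and stays powered while any child is on.
 * The linkage is roughly equivalent to the core call below (sketch only):
 */
#include <linux/pm_domain.h>

static int sketch_link_gdsc(struct generic_pm_domain *parent,
			    struct generic_pm_domain *child)
{
	/* parent powers on first; child collapses before parent */
	return pm_genpd_add_subdomain(parent, child);
}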
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f9105443d7db..6aeba40358c1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -40,7 +40,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0)
# define PLL_ALPHA_MSB BIT(15)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
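/*
 * Editorial note on the PLL_POST_DIV_MASK change: post-dividers that never
 * set .width previously evaluated GENMASK((p)->width - 1, 0) with width == 0,
 * i.e. GENMASK(-1, 0) after integer promotion, which presumably degenerates
 * into an out-of-range shift. The new form falls back to the historical
 * 4-bit field. Worked values only; the macro itself is as shown above:
 *
 *	width = 0  ->  GENMASK(3, 0) = 0xf   (fallback)
 *	width = 2  ->  GENMASK(1, 0) = 0x3
 *	width = 4  ->  GENMASK(3, 0) = 0xf
 */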
@@ -58,12 +58,15 @@
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_TEST_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U2])
+#define PLL_TEST_CTL_U3(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U3])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+#define GET_PLL_TYPE(pll) (((pll)->regs - clk_alpha_pll_regs[0]) / PLL_OFF_MAX_REGS)
+
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -74,7 +77,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
@@ -84,7 +87,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
[PLL_OFF_L_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL] = 0x10,
[PLL_OFF_USER_CTL] = 0x18,
@@ -94,7 +97,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -107,7 +110,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -116,7 +119,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x0c,
[PLL_OFF_USER_CTL_U] = 0x10,
@@ -144,7 +147,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
- [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -154,7 +157,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x1c,
[PLL_OFF_STATUS] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -197,6 +200,37 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U1] = 0x34,
[PLL_OFF_TEST_CTL_U2] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_PONGO_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U2] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ [PLL_OFF_TEST_CTL_U2] = 0x38,
+ [PLL_OFF_TEST_CTL_U3] = 0x3c,
+ },
+ [CLK_ALPHA_PLL_TYPE_TAYCAN_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ },
[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
[PLL_OFF_OPMODE] = 0x04,
[PLL_OFF_STATUS] = 0x0c,
@@ -209,7 +243,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -220,7 +254,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -241,7 +275,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x08,
[PLL_OFF_USER_CTL_U] = 0x0c,
@@ -252,7 +286,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -267,6 +301,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_TEST_CTL] = 0x0c,
+ [PLL_OFF_TEST_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
+ [PLL_OFF_STATUS] = 0x20,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
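
GET_PLL_TYPE() recovers the enum index purely from the pll's regs pointer: clk_alpha_pll_regs[] is a 2-D array of u8, so the byte distance between the pll's row and row zero, divided by the row width PLL_OFF_MAX_REGS, is exactly the type the pll was declared with. The same trick in miniature, with a made-up two-row table:

#include <assert.h>

enum { TYPE_DEFAULT, TYPE_HUAYRA, TYPE_MAX };
#define OFF_MAX_REGS 4

static const unsigned char regs_table[TYPE_MAX][OFF_MAX_REGS] = {
	[TYPE_DEFAULT] = { 0x04, 0x08 },
	[TYPE_HUAYRA]  = { 0x04, 0x10 },
};

/* Same arithmetic as GET_PLL_TYPE(): (row - first row) / row size. */
#define GET_TYPE(regs) (((regs) - regs_table[0]) / OFF_MAX_REGS)

int main(void)
{
	const unsigned char *regs = regs_table[TYPE_HUAYRA];

	assert(GET_TYPE(regs) == TYPE_HUAYRA);
	return 0;
}
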
@@ -312,6 +357,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
#define LUCID_OLE_PLL_RINGOSC_CAL_L_VAL_SHIFT 24
+/* PONGO ELU PLL specific settings and offsets */
+#define PONGO_PLL_OUT_MASK GENMASK(1, 0)
+#define PONGO_PLL_L_VAL_MASK GENMASK(11, 0)
+#define PONGO_XO_PRESENT BIT(10)
+#define PONGO_CLOCK_SELECT BIT(12)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -341,7 +392,8 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 200; count > 0; count--) {
+ /* Pongo PLLs using a 32 kHz reference can take upwards of 1500 us to lock. */
+ for (count = 1500; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
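
Raising the retry count from 200 to 1500 widens the lock-wait budget without touching the poll granularity: assuming wait_for_pll() keeps its ~1 us delay per iteration, the worst case grows from roughly 200 us to about 1.5 ms, matching the 1500 us figure quoted for the 32 kHz-referenced Pongo PLLs. A trivial back-of-the-envelope check:

#include <stdio.h>

int main(void)
{
	const unsigned int delay_us = 1;	/* assumed per-iteration udelay() */
	const unsigned int iters_old = 200, iters_new = 1500;

	/* Worst-case lock-wait budget before and after the change. */
	printf("old: %u us, new: %u us\n",
	       iters_old * delay_us, iters_new * delay_us);
	return 0;
}
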
@@ -421,6 +473,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->pre_div_mask;
mask |= config->post_div_mask;
mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+ mask |= config->alpha_mode_mask;
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
@@ -657,14 +711,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
+ return 0;
if (alpha_width > 32) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
- &high);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high))
+ return 0;
a = (u64)high << 32 | low;
} else {
a = low & GENMASK(alpha_width - 1, 0);
@@ -731,6 +790,29 @@ static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
return __clk_alpha_pll_update_latch(pll);
}
+static void clk_alpha_pll_update_configs(struct clk_alpha_pll *pll, const struct pll_vco *vco,
+ u32 l, u64 alpha, u32 alpha_width, bool alpha_en)
+{
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ alpha <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), upper_32_bits(alpha));
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), lower_32_bits(alpha));
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ if (alpha_en)
+ regmap_set_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_ALPHA_EN);
+}
+
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
@@ -748,24 +830,7 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
-
- if (alpha_width > ALPHA_BITWIDTH)
- a <<= alpha_width - ALPHA_BITWIDTH;
-
- if (alpha_width > 32)
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
-
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
-
- if (vco) {
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_VCO_MASK << PLL_VCO_SHIFT,
- vco->val << PLL_VCO_SHIFT);
- }
-
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_ALPHA_EN, PLL_ALPHA_EN);
+ clk_alpha_pll_update_configs(pll, vco, l, a, alpha_width, true);
return clk_alpha_pll_update_latch(pll, is_enabled);
}
@@ -784,22 +849,25 @@ static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_alpha_pll_hwfsm_is_enabled);
}
-static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
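
This hunk sets the pattern repeated across the rest of the patch: the clk framework's legacy .round_rate op gives way to .determine_rate, so the rounded rate is written into the request instead of returned, and the return value becomes a plain error code. The mechanical shape of the conversion, as a sketch with a stand-in rounding helper:

#include <linux/clk-provider.h>

/* Hypothetical rounding helper; identity here purely for illustration. */
static unsigned long my_round(unsigned long rate, unsigned long prate)
{
	return rate;
}

/* Old style: the rounded rate is the return value. */
static long my_round_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long *prate)
{
	return my_round(rate, *prate);
}

/* New style: the request carries the rate in and out; 0 means success. */
static int my_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	req->rate = my_round(req->rate, req->best_parent_rate);

	return 0;
}
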
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -890,8 +958,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
@@ -980,12 +1051,15 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int alpha_pll_huayra_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 l, a;
- return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+ req->rate = alpha_huayra_pll_round_rate(req->rate,
+ req->best_parent_rate, &l, &a);
+
+ return 0;
}
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
@@ -1085,8 +1159,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1104,7 +1181,7 @@ const struct clk_ops clk_alpha_pll_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
@@ -1114,7 +1191,7 @@ const struct clk_ops clk_alpha_pll_huayra_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
- .round_rate = alpha_pll_huayra_round_rate,
+ .determine_rate = alpha_pll_huayra_determine_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
@@ -1124,7 +1201,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@@ -1134,7 +1211,7 @@ const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
@@ -1144,7 +1221,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK(pll);
@@ -1168,9 +1246,8 @@ static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ }
};
-static long
-clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
@@ -1180,13 +1257,15 @@ clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
else
table = clk_alpha_div_table;
- return divider_round_rate(hw, rate, prate, table,
- pll->width, CLK_DIVIDER_POWER_OF_TWO);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ table, pll->width,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ return 0;
}
-static long
-clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
@@ -1198,9 +1277,12 @@ clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ div * req->rate);
+
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return 0;
}
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1219,13 +1301,13 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_ops = {
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_determine_rate,
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
- .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .determine_rate = clk_alpha_pll_postdiv_ro_determine_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
@@ -1360,8 +1442,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1467,7 +1552,7 @@ const struct clk_ops clk_alpha_pll_fabia_ops = {
.is_enabled = clk_alpha_pll_is_enabled,
.set_rate = alpha_pll_fabia_set_rate,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
@@ -1476,7 +1561,7 @@ const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
.disable = alpha_pll_fabia_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
@@ -1511,7 +1596,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct regmap *regmap = pll->clkr.regmap;
u32 i, div = 1, val;
- regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
+ return 0;
val >>= pll->post_div_shift;
val &= PLL_POST_DIV_MASK(pll);
@@ -1526,14 +1612,16 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return (parent_rate / div);
}
-static long
-clk_trion_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_trion_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
};
static int
@@ -1559,18 +1647,21 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
- .round_rate = clk_trion_pll_postdiv_round_rate,
+ .determine_rate = clk_trion_pll_postdiv_determine_rate,
.set_rate = clk_trion_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
-static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int clk_alpha_pll_postdiv_fabia_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
@@ -1605,7 +1696,7 @@ static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
@@ -1757,7 +1848,7 @@ const struct clk_ops clk_alpha_pll_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
@@ -1768,14 +1859,14 @@ const struct clk_ops clk_alpha_pll_lucid_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
@@ -1827,7 +1918,7 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
@@ -1903,9 +1994,8 @@ static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
}
/* Check if PLL is already enabled, return if enabled */
- ret = trion_pll_is_enabled(pll, pll->clkr.regmap);
- if (ret < 0)
- return ret;
+ if (trion_pll_is_enabled(pll, pll->clkr.regmap))
+ return 0;
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
if (ret)
@@ -2044,7 +2134,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
@@ -2054,13 +2144,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
@@ -2229,7 +2319,7 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
@@ -2318,13 +2408,8 @@ static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
}
/* Check if PLL is already enabled */
- ret = trion_pll_is_enabled(pll, regmap);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- pr_warn("%s PLL is already enabled\n", clk_hw_get_name(&pll->clkr.hw));
+ if (trion_pll_is_enabled(pll, regmap))
return 0;
- }
ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
if (ret)
@@ -2438,9 +2523,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
struct regmap *regmap = pll->clkr.regmap;
u32 l, frac;
- regmap_read(regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
l &= LUCID_EVO_PLL_L_VAL_MASK;
- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
+
+ if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
}
@@ -2456,13 +2544,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
@@ -2473,7 +2561,7 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
@@ -2484,11 +2572,149 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
.disable = alpha_pll_reset_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+static int alpha_pll_pongo_elu_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Enable PLL initially to one-time calibrate against XO. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_XO_PRESENT, PONGO_XO_PRESENT);
+
+ /* Set regmap for wait_for_pll() */
+ pll->clkr.regmap = regmap;
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret) {
+ /* Reverse calibration - disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ return ret;
+ }
+
+ /* Disable PLL after one-time calibration. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Select internally generated clock. */
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_CLOCK_SELECT,
+ PONGO_CLOCK_SELECT);
+
+ return 0;
+}
+
+static int alpha_pll_pongo_elu_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Check if PLL is already enabled */
+ if (trion_pll_is_enabled(pll, regmap))
+ return 0;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+}
+
+static void alpha_pll_pongo_elu_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_pongo_elu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ l &= PONGO_PLL_L_VAL_MASK;
+
+ return alpha_pll_calc_rate(parent_rate, l, 0, pll_alpha_width(pll));
+}
+
+const struct clk_ops clk_alpha_pll_pongo_elu_ops = {
+ .prepare = alpha_pll_pongo_elu_prepare,
+ .enable = alpha_pll_pongo_elu_enable,
+ .disable = alpha_pll_pongo_elu_disable,
+ .recalc_rate = alpha_pll_pongo_elu_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_pongo_elu_ops);
+
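
The ops split one-time work from routine enables: .prepare runs the PLL once against XO to calibrate, parks it in standby and switches to the internally generated clock, after which .enable only deasserts reset, sets RUN and waits for lock. From a consumer's point of view nothing changes; a hedged sketch with a made-up clock name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer-side sketch; the lookup name is made up. */
static int pongo_consumer_start(struct device *dev)
{
	struct clk *pll;
	int ret;

	pll = devm_clk_get(dev, "pongo_pll");
	if (IS_ERR(pll))
		return PTR_ERR(pll);

	/*
	 * clk_prepare_enable() exercises the new ops in order:
	 * .prepare -> alpha_pll_pongo_elu_prepare() (one-time XO calibration),
	 * .enable  -> alpha_pll_pongo_elu_enable()  (RESET_N, RUN, lock wait).
	 */
	ret = clk_prepare_enable(pll);
	if (ret)
		return ret;

	return 0;
}
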
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll,
+ struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), PONGO_PLL_OUT_MASK,
+ PONGO_PLL_OUT_MASK);
+
+ if (trion_pll_is_enabled(pll, regmap))
+ return;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &val))
+ return;
+ val &= PONGO_PLL_L_VAL_MASK;
+ if (val)
+ return;
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U2(pll), config->config_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val | PONGO_PLL_OUT_MASK);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U2(pll), config->test_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U3(pll), config->test_ctl_hi3_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+}
+EXPORT_SYMBOL_GPL(clk_pongo_elu_pll_configure);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
@@ -2515,27 +2741,31 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
return parent_rate * l;
}
-static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_rivian_evo_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long min_freq, max_freq;
u32 l;
u64 a;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
@@ -2543,7 +2773,7 @@ const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_rivian_evo_pll_recalc_rate,
- .round_rate = clk_rivian_evo_pll_round_rate,
+ .determine_rate = clk_rivian_evo_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
@@ -2752,7 +2982,212 @@ const struct clk_ops clk_alpha_pll_regera_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
+
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap)
+{
+ const struct clk_init_data *init = pll->clkr.hw.init;
+
+ switch (GET_PLL_TYPE(pll)) {
+ case CLK_ALPHA_PLL_TYPE_LUCID_OLE:
+ clk_lucid_ole_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_LUCID_EVO:
+ clk_lucid_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TAYCAN_ELU:
+ clk_taycan_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_RIVIAN_EVO:
+ clk_rivian_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TRION:
+ clk_trion_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_2290:
+ clk_huayra_2290_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_FABIA:
+ clk_fabia_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_AGERA:
+ clk_agera_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_PONGO_ELU:
+ clk_pongo_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_ZONDA:
+ case CLK_ALPHA_PLL_TYPE_ZONDA_OLE:
+ clk_zonda_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_STROMER:
+ case CLK_ALPHA_PLL_TYPE_STROMER_PLUS:
+ clk_stromer_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_DEFAULT:
+ case CLK_ALPHA_PLL_TYPE_DEFAULT_EVO:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_APSS:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO_EVO:
+ clk_alpha_pll_configure(pll, regmap, pll->config);
+ break;
+ default:
+ WARN(1, "%s: invalid pll type\n", init->name);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_clk_alpha_pll_configure);
+
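
Together with the new pll->config member, GET_PLL_TYPE() lets platform drivers funnel every PLL through one configure call instead of naming the type-specific routine. A sketch of a probe-time caller; all values and helper names here are placeholders, not taken from any real clock controller:

/* Sketch only: every value below is a placeholder, not a real setting. */
static const struct alpha_pll_config example_pll_config = {
	.l = 0x3e,
	.config_ctl_val = 0x20485699,
};

static struct clk_alpha_pll example_pll = {
	.offset = 0x0,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
	.config = &example_pll_config,
	/* ... vco_table / clkr.hw.init as usual ... */
};

static int example_cc_probe(struct platform_device *pdev)
{
	struct regmap *regmap = example_cc_map(pdev);	/* hypothetical */

	/* One call dispatches via GET_PLL_TYPE() to the matching
	 * type-specific routine (clk_lucid_evo_pll_configure() here). */
	qcom_clk_alpha_pll_configure(&example_pll, regmap);

	return 0;
}
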
+static int clk_alpha_pll_slew_update(struct clk_alpha_pll *pll)
+{
+ u32 val;
+ int ret;
+
+ regmap_set_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE);
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ /*
+ * Hardware programming mandates a wait of at least 570 ns before polling the
+ * LOCK detect bit. Have a delay of 1 us just to be safe.
+ */
+ udelay(1);
+
+ return wait_for_pll_enable_lock(pll);
+}
+
+static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *curr_vco, *vco;
+ unsigned long freq_hz;
+ u64 a;
+ u32 l;
+
+ freq_hz = alpha_pll_round_rate(rate, parent_rate, &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ curr_vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!curr_vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, freq_hz);
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Dynamic pll update will not support switching frequencies across
+ * vco ranges. In those cases fall back to normal alpha set rate.
+ */
+ if (curr_vco->val != vco->val)
+ return clk_alpha_pll_set_rate(hw, rate, parent_rate);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Ensure that the write above goes before slewing the PLL */
+ mb();
+
+ if (clk_hw_is_enabled(hw))
+ return clk_alpha_pll_slew_update(pll);
+
+ return 0;
+}
+
+/*
+ * Slewing PLLs should be brought up at a frequency that is in the middle of
+ * the desired VCO range. So after bringing up the PLL at the calibration
+ * frequency, set it back to the desired frequency (the one set by the
+ * previous clk_set_rate() call).
+ */
+static int clk_alpha_pll_calibrate(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *parent;
+ const struct pll_vco *vco;
+ unsigned long calibration_freq, freq_hz;
+ u64 a;
+ u32 l;
+ int rc;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("alpha pll: no valid parent found\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * As vco_sel is not allowed to change while slewing, the vco table
+ * should have only one entry, i.e. index = 0; use it to find the
+ * calibration frequency.
+ */
+ calibration_freq = (pll->vco_table[0].min_freq + pll->vco_table[0].max_freq) / 2;
+
+ freq_hz = alpha_pll_round_rate(calibration_freq, clk_hw_get_rate(parent),
+ &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != calibration_freq) {
+ pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ clk_alpha_pll_update_configs(pll, vco, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Bringup the pll at calibration frequency */
+ rc = clk_alpha_pll_enable(hw);
+ if (rc) {
+ pr_err("alpha pll calibration failed\n");
+ return rc;
+ }
+
+ /*
+ * PLL is already running at calibration frequency.
+ * So slew pll to the previously set frequency.
+ */
+ freq_hz = alpha_pll_round_rate(clk_hw_get_rate(hw),
+ clk_hw_get_rate(parent), &l, &a, ALPHA_REG_BITWIDTH);
+
+ pr_debug("pll %s: setting back to required rate %lu, freq_hz %lu\n",
+ clk_hw_get_name(hw), clk_hw_get_rate(hw), freq_hz);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, true);
+
+ return clk_alpha_pll_slew_update(pll);
+}
+
+static int clk_alpha_pll_slew_enable(struct clk_hw *hw)
+{
+ int rc;
+
+ rc = clk_alpha_pll_calibrate(hw);
+ if (rc)
+ return rc;
+
+ return clk_alpha_pll_enable(hw);
+}
+
+const struct clk_ops clk_alpha_pll_slew_ops = {
+ .enable = clk_alpha_pll_slew_enable,
+ .disable = clk_alpha_pll_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
+ .set_rate = clk_alpha_pll_slew_set_rate,
+};
+EXPORT_SYMBOL(clk_alpha_pll_slew_ops);
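
Per the comment in clk_alpha_pll_calibrate(), the calibration point is the midpoint of the single VCO entry. For a hypothetical VCO spanning 1.0-2.0 GHz that means locking first at 1.5 GHz, then slewing to the previously set rate:

#include <stdio.h>

int main(void)
{
	/* Hypothetical single-entry VCO table, as the slew ops require. */
	unsigned long min_freq = 1000000000UL;	/* 1.0 GHz */
	unsigned long max_freq = 2000000000UL;	/* 2.0 GHz */

	/* Same midpoint rule as clk_alpha_pll_calibrate(). */
	printf("calibration freq = %lu Hz\n", (min_freq + max_freq) / 2);
	return 0;
}
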
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 55eca04b23a1..0903a05b18cc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -27,11 +27,15 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
+ CLK_ALPHA_PLL_TYPE_PONGO_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T = CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_STROMER,
CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+ CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -51,6 +55,7 @@ enum {
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
PLL_OFF_TEST_CTL_U2,
+ PLL_OFF_TEST_CTL_U3,
PLL_OFF_STATE,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
@@ -77,6 +82,7 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
+ * @config: array of pll settings
* @vco_table: array of VCO settings
* @num_vco: number of VCO settings in @vco_table
* @flags: bitmask to indicate features supported by the hardware
@@ -86,6 +92,7 @@ struct clk_alpha_pll {
u32 offset;
const u8 *regs;
+ const struct alpha_pll_config *config;
const struct pll_vco *vco_table;
size_t num_vco;
#define SUPPORTS_OFFLINE_REQ BIT(0)
@@ -136,6 +143,7 @@ struct alpha_pll_config {
u32 test_ctl_hi_mask;
u32 test_ctl_hi1_val;
u32 test_ctl_hi2_val;
+ u32 test_ctl_hi3_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -184,17 +192,25 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
+#define clk_alpha_pll_taycan_eko_t_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_eko_t_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_eko_t_ops clk_alpha_pll_postdiv_lucid_evo_ops
+extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
extern const struct clk_ops clk_alpha_pll_regera_ops;
+extern const struct clk_ops clk_alpha_pll_slew_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -217,11 +233,19 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+#define clk_taycan_elu_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
+#define clk_taycan_eko_t_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap);
#endif
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 229480c5b075..0f10090d4ae6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -28,7 +28,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
{
- bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
u32 val;
regmap_read(br->clkr.regmap, br->halt_reg, &val);
@@ -44,7 +44,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
{
u32 val;
u32 mask;
- bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
mask = CBCR_NOC_FSM_STATUS;
mask |= CBCR_CLK_OFF;
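
The switch from == to & matters because halt_check is a flag field: clk-branch.h also defines BRANCH_HALT_ENABLE_VOTED as BRANCH_HALT_ENABLE | BRANCH_VOTED, and an equality test wrongly treats that voted variant as non-inverted polarity. Reduced to its essence:

#include <assert.h>

#define BIT(n)				(1U << (n))
#define BRANCH_HALT_ENABLE		1
#define BRANCH_VOTED			BIT(7)
#define BRANCH_HALT_ENABLE_VOTED	(BRANCH_HALT_ENABLE | BRANCH_VOTED)

int main(void)
{
	unsigned int halt_check = BRANCH_HALT_ENABLE_VOTED;

	/* '==' misses the voted variant; the '&' test catches both. */
	assert(!(halt_check == BRANCH_HALT_ENABLE));
	assert(halt_check & BRANCH_HALT_ENABLE);
	return 0;
}
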
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index ce4efcd995ea..0b40ed601f9a 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -212,7 +212,6 @@ static const struct regmap_config cbf_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 72689448a653..21d13c0841ed 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -411,7 +411,6 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80210,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 88845baa7f84..31f0650b48ba 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -423,7 +423,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
@@ -597,6 +597,7 @@ struct frac_entry {
};
static const struct frac_entry pixel_table[] = {
+ { 1, 1 },
{ 1, 2 },
{ 1, 3 },
{ 3, 16 },
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 8e0f3372dc7a..4fbdf4880d03 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -189,6 +189,7 @@ struct clk_rcg2_gfx3d {
container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_gp_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_fm_ops;
extern const struct clk_ops clk_rcg2_mux_closest_ops;
@@ -198,6 +199,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_floor_ops;
extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bf26c5448f00..e18cb8807d73 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -8,11 +8,13 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/gcd.h>
#include <linux/minmax.h>
#include <linux/slab.h>
@@ -32,6 +34,7 @@
#define CFG_REG 0x4
#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_DIV_LENGTH 8
#define CFG_SRC_SEL_SHIFT 8
#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT 12
@@ -148,12 +151,32 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
return update_config(rcg);
}
-/*
- * Calculate m/n:d rate
+/**
+ * convert_to_reg_val() - Convert divisor values to hardware values.
+ *
+ * @f: Frequency table with pure m/n/pre_div parameters.
+ */
+static void convert_to_reg_val(struct freq_tbl *f)
+{
+ f->pre_div *= 2;
+ f->pre_div -= 1;
+}
+
+/**
+ * calc_rate() - Calculate rate based on m/n:d values
+ *
+ * @rate: Parent rate.
+ * @m: Multiplier.
+ * @n: Divisor.
+ * @mode: Use zero to ignore m/n calculation.
+ * @hid_div: Pre divisor register value. Pre divisor value
+ * relates to hid_div as pre_div = (hid_div + 1) / 2.
+ *
+ * Return: the calculated rate, according to the formula:
*
* parent_rate m
* rate = ----------- x ---
- * hid_div n
+ * pre_div n
*/
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
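
Plugging numbers into the documented formula: a 19.2 MHz parent with hid_div = 3 encodes pre_div = (3 + 1) / 2 = 2, and m/n = 1/2 in dual-edge mode, so rate = 19.2 MHz / 2 * 1/2 = 4.8 MHz. A standalone sketch of the same arithmetic (overflow handling omitted):

#include <stdio.h>

/* Mirrors calc_rate(): rate = parent / pre_div * m / n. */
static unsigned long calc(unsigned long parent, unsigned int m,
			  unsigned int n, unsigned int mode,
			  unsigned int hid_div)
{
	unsigned long rate = parent;

	if (hid_div)			/* pre_div = (hid_div + 1) / 2 */
		rate = rate * 2 / (hid_div + 1);
	if (mode)			/* m/n stage */
		rate = rate * m / n;

	return rate;
}

int main(void)
{
	/* 19.2 MHz parent, hid_div=3 (pre_div 2), m/n = 1/2 -> 4800000 */
	printf("%lu\n", calc(19200000UL, 1, 2, 1, 3));
	return 0;
}
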
@@ -178,7 +201,7 @@ __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
- n = ~n;
+ n = ~n;
n &= mask;
n += m;
mode = cfg & CFG_MODE_MASK;
@@ -251,7 +274,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
@@ -288,7 +311,7 @@ __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
if (!p)
continue;
- parent_rate = clk_hw_get_rate(p);
+ parent_rate = clk_hw_get_rate(p);
rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
if (rate == req_rate) {
@@ -359,7 +382,7 @@ static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_mult
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
@@ -393,16 +416,110 @@ static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
-static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
- u32 *_cfg)
+/**
+ * clk_rcg2_split_div() - Split a multiplier that fits neither in n nor in pre_div.
+ *
+ * @multiplier: Multiplier to split between n and pre_div.
+ * @pre_div: Pointer to pre divisor value.
+ * @n: Pointer to n divisor value.
+ * @pre_div_max: Pre divisor maximum value.
+ */
+static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
+ u16 *n, unsigned int pre_div_max)
+{
+ *n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
+ *pre_div = pre_div_max;
+}
+
+static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
+ unsigned int mnd_max, unsigned int pre_div_max)
+{
+ int i = 2;
+ unsigned int pre_div = 1;
+ unsigned long rates_gcd, scaled_parent_rate;
+ u16 m, n = 1, n_candidate = 1, n_max;
+
+ rates_gcd = gcd(parent_rate, rate);
+ m = div64_u64(rate, rates_gcd);
+ scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
+ while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
+ /* We're exceeding the divisor's range, try a lower scale. */
+ if (m > 1) {
+ m--;
+ scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
+ } else {
+ /* Cannot lower the scale, just set max divisor values. */
+ f->n = mnd_max + m;
+ f->pre_div = pre_div_max;
+ f->m = m;
+ return;
+ }
+ }
+
+ n_max = m + mnd_max;
+
+ while (scaled_parent_rate > 1) {
+ while (scaled_parent_rate % i == 0) {
+ n_candidate *= i;
+ if (n_candidate < n_max)
+ n = n_candidate;
+ else if (pre_div * i < pre_div_max)
+ pre_div *= i;
+ else
+ clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);
+
+ scaled_parent_rate /= i;
+ }
+ i++;
+ }
+
+ f->m = m;
+ f->n = n;
+ f->pre_div = pre_div > 1 ? pre_div : 0;
+}
+
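
clk_rcg2_calc_mnd() first divides both rates by their gcd: the reduced request becomes m, and the reduced parent rate is the denominator to distribute between n and pre_div by trial factoring. Worked through for a hypothetical 4.8 MHz request from a 19.2 MHz parent: gcd = 4.8 MHz, so m = 1 and the scaled parent is 4, which factors into n = 4 with no pre-divider needed:

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* Hypothetical GP clock request: 4.8 MHz from a 19.2 MHz parent. */
	unsigned long parent = 19200000UL, rate = 4800000UL;
	unsigned long g = gcd_ul(parent, rate);

	/* First reduction step of clk_rcg2_calc_mnd(). */
	printf("m = %lu, scaled parent = %lu\n", rate / g, parent / g);
	/* Factoring the scaled parent (4 = 2 * 2) then yields n = 4, pre_div = 1. */
	return 0;
}
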
+static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct clk_hw *parent;
+ u64 parent_rate;
+
+ parent = clk_hw_get_parent(hw);
+ parent_rate = clk_get_rate(parent->clk);
+ if (!parent_rate)
+ return -EINVAL;
+
+ clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
{
- u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, src);
if (index < 0)
return index;
+ *_cfg &= ~CFG_SRC_SEL_MASK;
+ *_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ u32 cfg, mask, d_val, not2d_val, n_minus_m;
+ int ret;
+
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
@@ -431,9 +548,8 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
}
mask = BIT(rcg->hid_width) - 1;
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
if (rcg->hw_clk_ctrl)
@@ -445,6 +561,22 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
return 0;
}
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg;
@@ -465,6 +597,26 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
+static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
enum freq_policy policy)
{
@@ -518,6 +670,22 @@ static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg2_set_rate(hw, rate, CEIL);
}
+static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int ret;
+
+ clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ ret = clk_rcg2_configure_gp(rcg, f);
+
+ return ret;
+}
+
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -645,6 +813,18 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_gp_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_gp_rate,
+ .set_rate = clk_rcg2_set_gp_rate,
+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
+
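
Built on these ops, a general-purpose RCG needs no freq_tbl: determine_rate derives m/n/d from the live parent rate instead of looking a row up. A hedged sketch of a clock definition switching to the new ops; the register offset, parent map and names are illustrative, not taken from any real clock controller:

static struct clk_rcg2 gp1_clk_src = {
	.cmd_rcgr = 0x64000,			/* placeholder offset */
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,		/* hypothetical map */
	.clkr.hw.init = &(struct clk_init_data){
		.name = "gp1_clk_src",
		.parent_data = gcc_parent_data_0,	/* hypothetical */
		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
		.ops = &clk_rcg2_gp_ops,	/* no .freq_tbl needed */
	},
};
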
const struct clk_ops clk_rcg2_floor_ops = {
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
@@ -1186,15 +1366,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
return clk_rcg2_clear_force_enable(hw);
}
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate,
+ enum freq_policy policy)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
- f = qcom_find_freq(rcg->freq_tbl, rate);
- if (!f)
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
return -EINVAL;
+ }
/*
* In case clock is disabled, update the M, N and D registers, cache
@@ -1207,10 +1395,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
return clk_rcg2_shared_force_enable_clear(hw, f);
}
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
- return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
+static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
+}
+
+static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
@@ -1348,6 +1554,18 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+const struct clk_ops clk_rcg2_shared_floor_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_shared_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
+
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 63c9fca0d65d..4f5395f0ab6d 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -15,8 +15,8 @@ static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
}
-static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
@@ -26,17 +26,24 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
val >>= divider->shift;
val &= BIT(divider->width) - 1;
- return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST, val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate, NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
+
+ return 0;
}
-static long div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -70,14 +77,14 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_div_ops = {
- .round_rate = div_round_rate,
+ .determine_rate = div_determine_rate,
.set_rate = div_set_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
const struct clk_ops clk_regmap_div_ro_ops = {
- .round_rate = div_round_ro_rate,
+ .determine_rate = div_ro_determine_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9da034f8f2ff..be0145631197 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -224,10 +225,10 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -237,17 +238,14 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_clk_lock);
}
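
guard(mutex) from linux/cleanup.h attaches the unlock to scope exit, which is what lets every "goto out; unlock" path above collapse into plain returns. The pattern in isolation, as a sketch not taken from this patch:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_update(int value)
{
	guard(mutex)(&example_lock);	/* unlocked automatically at scope exit */

	if (value < 0)
		return -EINVAL;		/* early return, no goto/unlock pair */

	/* ... critical section ... */
	return 0;
}
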
static int clk_rpm_xo_prepare(struct clk_hw *hw)
@@ -324,12 +322,12 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long active_rate, sleep_rate;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- int ret = 0;
+ int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -341,30 +339,27 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
active_rate = max(this_rate, peer_rate);
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_clk_lock);
-
- return ret;
+ return 0;
}
-static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just leave the
* requested rate in place.
*/
- return rate;
+ return 0;
}
static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
@@ -388,7 +383,7 @@ static const struct clk_ops clk_rpm_xo_ops = {
static const struct clk_ops clk_rpm_fixed_ops = {
.prepare = clk_rpm_fixed_prepare,
.unprepare = clk_rpm_fixed_unprepare,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
@@ -396,7 +391,7 @@ static const struct clk_ops clk_rpm_ops = {
.prepare = clk_rpm_prepare,
.unprepare = clk_rpm_unprepare,
.set_rate = clk_rpm_set_rate,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
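The goto/unlock rewrites in clk-rpm.c (and below in clk-smd-rpm.c and clk-spmi-pmic-div.c) rely on the scope-based cleanup helpers from <linux/cleanup.h>, hence the new include: guard(mutex)(&lock) acquires the mutex and releases it automatically when the enclosing scope exits, so every early return is safe and the out: labels disappear. A minimal sketch with invented names; the guard(spinlock_irqsave) used later works the same way:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);
static unsigned int foo_users;

static int foo_get(void)
{
	guard(mutex)(&foo_lock);	/* dropped on every return path */

	if (foo_users == UINT_MAX)
		return -EBUSY;		/* no goto/unlock needed */

	foo_users++;
	return 0;
}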
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 4acde937114a..63c38cb47bc4 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
@@ -65,6 +66,8 @@ struct clk_rpmh {
struct clk_rpmh_desc {
struct clk_hw **clks;
size_t num_clks;
+	/* RPMh clocks clkaN are optional for this platform */
+ bool clka_optional;
};
static DEFINE_MUTEX(rpmh_clk_lock);
@@ -84,7 +87,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -102,7 +105,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name "_ao", \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -179,7 +182,7 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
}
c->last_sent_aggr_state = c->aggr_state;
- c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
return 0;
}
@@ -206,7 +209,7 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
c->state = c->valid_state_mask;
WARN(1, "clk: %s failed to %s\n", c->res_name,
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
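str_enable_disable() is one of the helpers from <linux/string_choices.h> (added to the includes above); it replaces the open-coded ternary and keeps the format string greppable. Its definition is roughly:

/* Paraphrased from include/linux/string_choices.h. */
static inline const char *str_enable_disable(bool v)
{
	return v ? "enable" : "disable";
}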
@@ -318,10 +321,10 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpmh_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
@@ -329,14 +332,14 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
{
struct clk_rpmh *c = to_clk_rpmh(hw);
- return c->aggr_state * c->unit;
+ return (unsigned long)c->aggr_state * c->unit;
}
static const struct clk_ops clk_rpmh_bcm_ops = {
.prepare = clk_rpmh_bcm_prepare,
.unprepare = clk_rpmh_bcm_unprepare,
.set_rate = clk_rpmh_bcm_set_rate,
- .round_rate = clk_rpmh_round_rate,
+ .determine_rate = clk_rpmh_determine_rate,
.recalc_rate = clk_rpmh_bcm_recalc_rate,
};
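The cast added in clk_rpmh_bcm_recalc_rate() above fixes a silent truncation: assuming both fields are 32-bit, as the cast implies, the multiplication is performed in 32-bit arithmetic and wraps before the result is widened to the unsigned long return type. Widening one operand first forces a full-width multiply:

/* Hypothetical values showing the wrap the cast avoids: */
u32 aggr_state = 100000, unit = 50000;
unsigned long wrong = aggr_state * unit;		/* 32-bit product, wraps past 2^32 */
unsigned long right = (unsigned long)aggr_state * unit;	/* 5000000000 on 64-bit */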
@@ -368,6 +371,8 @@ DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);
+DEFINE_CLK_RPMH_VRM(rf_clk3, _a2, "rfclka3", 2);
+
DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1);
DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1);
DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
@@ -381,14 +386,33 @@ DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2);
DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2);
DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2);
+DEFINE_CLK_RPMH_VRM(clk7, _a4, "clka7", 4);
+
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(clk3, _a, "C3A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk4, _a, "C4A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk5, _a, "C5A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk8, _a, "C8A_E0", 1);
+
DEFINE_CLK_RPMH_BCM(ce, "CE0");
DEFINE_CLK_RPMH_BCM(hwkm, "HK0");
DEFINE_CLK_RPMH_BCM(ipa, "IP0");
DEFINE_CLK_RPMH_BCM(pka, "PKA0");
DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");
+static struct clk_hw *sar2130p_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sar2130p = {
+ .clks = sar2130p_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks),
+};
+
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -524,6 +548,29 @@ static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
.num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
};
+static struct clk_hw *milos_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a4.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a4_ao.hw,
+ /*
+ * RPMH_LN_BB_CLK3(_A) and RPMH_LN_BB_CLK4(_A) are marked as optional
+ * downstream, but do not exist in cmd-db on SM7635, so skip them.
+ */
+ [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_milos = {
+ .clks = milos_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(milos_rpmh_clocks),
+};
+
static struct clk_hw *sm8250_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -633,6 +680,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.clks = sm8550_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sm8650_rpmh_clocks[] = {
@@ -664,6 +712,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
.clks = sm8650_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sc7280_rpmh_clocks[] = {
@@ -795,6 +844,62 @@ static const struct clk_rpmh_desc clk_rpmh_x1e80100 = {
.num_clks = ARRAY_SIZE(x1e80100_rpmh_clocks),
};
+static struct clk_hw *qcs615_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
+ .clks = qcs615_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(qcs615_rpmh_clocks),
+};
+
+static struct clk_hw *sm8750_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a2.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a2_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
+ .clks = sm8750_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+ .clka_optional = true,
+};
+
+static struct clk_hw *glymur_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_glymur = {
+ .clks = glymur_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(glymur_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -836,6 +941,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
+ hw_clks[i] = NULL;
+
+ if (desc->clka_optional &&
+ !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1))
+ continue;
+
dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
rpmh_clk->res_name);
return -ENODEV;
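The probe change above downgrades a missing clkaN entry in cmd-db from a fatal -ENODEV to a skip, but only on platforms that set clka_optional, and only for resources whose name starts with "clka"; sizeof("clka") - 1 is just the prefix length (4) without a magic number. The same anchored prefix test in isolation, with a hypothetical helper name:

/* Illustrative helper, not part of the driver. */
static bool is_clka_resource(const char *res_name)
{
	/* Matches "clka1" ... "clka8", but not "rfclka3". */
	return !strncmp(res_name, "clka", sizeof("clka") - 1);
}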
@@ -878,9 +989,14 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,glymur-rpmh-clk", .data = &clk_rpmh_glymur},
+ { .compatible = "qcom,milos-rpmh-clk", .data = &clk_rpmh_milos},
+ { .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
+ { .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
@@ -896,7 +1012,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
{ .compatible = "qcom,sm8650-rpmh-clk", .data = &clk_rpmh_sm8650},
- { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
+ { .compatible = "qcom,sm8750-rpmh-clk", .data = &clk_rpmh_sm8750},
{ .compatible = "qcom,x1e80100-rpmh-clk", .data = &clk_rpmh_x1e80100},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 45c5255bcd11..103db984a40b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -29,7 +30,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -46,7 +47,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -73,7 +74,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -91,7 +92,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -309,10 +310,10 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -322,17 +323,14 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_smd_clk_lock);
}
static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -345,10 +343,10 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long peer_rate = 0, peer_sleep_rate = 0;
int ret = 0;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -360,30 +358,27 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
active_rate = max(this_rate, peer_rate);
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_smd_clk_lock);
-
- return ret;
+ return 0;
}
-static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_smd_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just leave the
* requested rate in place.
*/
- return rate;
+ return 0;
}
static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
@@ -432,7 +427,7 @@ static const struct clk_ops clk_smd_rpm_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
.set_rate = clk_smd_rpm_set_rate,
- .round_rate = clk_smd_rpm_round_rate,
+ .determine_rate = clk_smd_rpm_determine_rate,
.recalc_rate = clk_smd_rpm_recalc_rate,
};
@@ -491,6 +486,7 @@ DEFINE_CLK_SMD_RPM(qup, QCOM_SMD_RPM_QUP_CLK, 0);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk1, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk2, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk3, 3, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk1, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk2, 2, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk3, 3, 19200000);
@@ -700,6 +696,60 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
+static struct clk_smd_rpm *msm8937_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8937 = {
+ .clks = msm8937_clks,
+ .num_clks = ARRAY_SIZE(msm8937_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
+static struct clk_smd_rpm *msm8940_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8940 = {
+ .clks = msm8940_clks,
+ .num_clks = ARRAY_SIZE(msm8940_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -997,6 +1047,36 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
.num_icc_clks = ARRAY_SIZE(msm8998_icc_clks),
};
+static struct clk_smd_rpm *sdm429_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_BB_CLK3] = &clk_smd_rpm_bb_clk3,
+ [RPM_SMD_BB_CLK3_A] = &clk_smd_rpm_bb_clk3_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_BB_CLK3_PIN] = &clk_smd_rpm_bb_clk3_pin,
+ [RPM_SMD_BB_CLK3_A_PIN] = &clk_smd_rpm_bb_clk3_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sdm429 = {
+ .clks = sdm429_clks,
+ .num_clks = ARRAY_SIZE(sdm429_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *sdm660_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -1216,6 +1296,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8917", .data = &rpm_clk_msm8917 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
+ { .compatible = "qcom,rpmcc-msm8937", .data = &rpm_clk_msm8937 },
+ { .compatible = "qcom,rpmcc-msm8940", .data = &rpm_clk_msm8940 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
@@ -1225,6 +1307,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcm2290", .data = &rpm_clk_qcm2290 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
+ { .compatible = "qcom,rpmcc-sdm429", .data = &rpm_clk_sdm429 },
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
{ .compatible = "qcom,rpmcc-sm6125", .data = &rpm_clk_sm6125 },
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index f394031eb0e5..3e2ac6745325 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -3,6 +3,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -111,16 +112,18 @@ static void clk_spmi_pmic_div_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&clkdiv->lock, flags);
}
-static long clk_spmi_pmic_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_spmi_pmic_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div, div_factor;
- div = DIV_ROUND_UP(*parent_rate, rate);
+ div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
div_factor = div_to_div_factor(div);
div = div_factor_to_div(div_factor);
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
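The determine_rate above rounds the request to a rate the divider can actually produce: compute the ideal divisor with DIV_ROUND_UP, snap it to a supported divider factor via the driver's div_to_div_factor()/div_factor_to_div() helpers, and report parent_rate / div back. A standalone model of that rounding, assuming power-of-two factors as the helper names suggest:

/* Sketch only; the real factor mapping lives in this driver. */
static unsigned long pmic_div_round(unsigned long parent, unsigned long want)
{
	unsigned int div = DIV_ROUND_UP(parent, want);	/* ideal divisor */

	div = 1U << order_base_2(div);			/* snap up to 2^n */
	return parent / div;
}
/* pmic_div_round(19200000, 5000000) -> div 4 -> 4800000 Hz */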
static unsigned long
@@ -140,30 +143,26 @@ static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor = div_to_div_factor(parent_rate / rate);
- unsigned long flags;
bool enabled;
int ret;
- spin_lock_irqsave(&clkdiv->lock, flags);
+ guard(spinlock_irqsave)(&clkdiv->lock);
+
enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
if (enabled) {
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
if (ret)
- goto unlock;
+ return ret;
}
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
DIV_CTL1_DIV_FACTOR_MASK, div_factor);
if (ret)
- goto unlock;
+ return ret;
if (enabled)
ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
div_factor);
-
-unlock:
- spin_unlock_irqrestore(&clkdiv->lock, flags);
-
return ret;
}
@@ -172,7 +171,7 @@ static const struct clk_ops clk_spmi_pmic_div_ops = {
.disable = clk_spmi_pmic_div_disable,
.set_rate = clk_spmi_pmic_div_set_rate,
.recalc_rate = clk_spmi_pmic_div_recalc_rate,
- .round_rate = clk_spmi_pmic_div_round_rate,
+ .determine_rate = clk_spmi_pmic_div_determine_rate,
};
struct spmi_pmic_div_clk_cc {
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 33cc1f73c69d..121591886774 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -9,10 +9,13 @@
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/interconnect-clk.h>
+#include <linux/pm_runtime.h>
#include <linux/reset-controller.h>
#include <linux/of.h>
#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
@@ -22,6 +25,7 @@ struct qcom_cc {
struct qcom_reset_controller reset;
struct clk_regmap **rclks;
size_t num_rclks;
+ struct dev_pm_domain_list *pd_list;
};
const
@@ -273,8 +277,8 @@ static int qcom_cc_icc_register(struct device *dev,
icd[i].slave_id = desc->icc_hws[i].slave_id;
hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
- if (!icd[i].clk)
- return dev_err_probe(dev, -ENOENT,
+ if (IS_ERR(icd[i].clk))
+ return dev_err_probe(dev, PTR_ERR(icd[i].clk),
"(%d) clock entry is null\n", i);
icd[i].name = clk_hw_get_name(hws);
}
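The IS_ERR() change above is a classic error-pointer fix: devm_clk_hw_get_clk() signals failure with an ERR_PTR() encoding rather than NULL, so the old !icd[i].clk test could never fire and a failed lookup would later be used as a valid pointer. The general shape of the idiom, with foo_get() standing in for any ERR_PTR()-returning getter:

struct foo *f = foo_get(dev);

if (IS_ERR(f))		/* a NULL test cannot catch ERR_PTR(-ENOENT) */
	return dev_err_probe(dev, PTR_ERR(f), "failed to get foo\n");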
@@ -283,6 +287,40 @@ static int qcom_cc_icc_register(struct device *dev,
desc->num_icc_hws, icd);
}
+static int qcom_cc_clk_pll_configure(const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ const struct clk_init_data *init;
+ struct clk_alpha_pll *pll;
+ int i;
+
+ for (i = 0; i < data->num_alpha_plls; i++) {
+ pll = data->alpha_plls[i];
+ init = pll->clkr.hw.init;
+
+ if (!pll->config || !pll->regs) {
+ pr_err("%s: missing pll config or regs\n", init->name);
+ return -EINVAL;
+ }
+
+ qcom_clk_alpha_pll_configure(pll, regmap);
+ }
+
+ return 0;
+}
+
+static void qcom_cc_clk_regs_configure(struct device *dev, const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ int i;
+
+ for (i = 0; i < data->num_clk_cbcrs; i++)
+ qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);
+
+ if (data->clk_regs_configure)
+ data->clk_regs_configure(dev, regmap);
+}
+
int qcom_cc_really_probe(struct device *dev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
@@ -299,6 +337,28 @@ int qcom_cc_really_probe(struct device *dev,
if (!cc)
return -ENOMEM;
+ ret = devm_pm_domain_attach_list(dev, NULL, &cc->pd_list);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
+
+ if (desc->use_rpm) {
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (desc->driver_data) {
+ ret = qcom_cc_clk_pll_configure(desc->driver_data, regmap);
+ if (ret)
+ goto put_rpm;
+
+ qcom_cc_clk_regs_configure(dev, desc->driver_data, regmap);
+ }
+
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -309,22 +369,35 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
- return ret;
+ goto put_rpm;
if (desc->gdscs && desc->num_gdscs) {
scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
- if (!scd)
- return -ENOMEM;
+ if (!scd) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
scd->dev = dev;
scd->scs = desc->gdscs;
scd->num = desc->num_gdscs;
+ scd->pd_list = cc->pd_list;
ret = gdsc_register(scd, &reset->rcdev, regmap);
if (ret)
- return ret;
+ goto put_rpm;
ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
scd);
if (ret)
- return ret;
+ goto put_rpm;
+ }
+
+ if (desc->driver_data &&
+ desc->driver_data->dfs_rcgs &&
+ desc->driver_data->num_dfs_rcgs) {
+ ret = qcom_cc_register_rcg_dfs(regmap,
+ desc->driver_data->dfs_rcgs,
+ desc->driver_data->num_dfs_rcgs);
+ if (ret)
+ goto put_rpm;
}
cc->rclks = rclks;
@@ -335,7 +408,7 @@ int qcom_cc_really_probe(struct device *dev,
for (i = 0; i < num_clk_hws; i++) {
ret = devm_clk_hw_register(dev, clk_hws[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
for (i = 0; i < num_clks; i++) {
@@ -344,14 +417,20 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_clk_register_regmap(dev, rclks[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
if (ret)
- return ret;
+ goto put_rpm;
+
+ ret = qcom_cc_icc_register(dev, desc);
+
+put_rpm:
+ if (desc->use_rpm)
+ pm_runtime_put(dev);
- return qcom_cc_icc_register(dev, desc);
+ return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
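The reason nearly every early return in qcom_cc_really_probe() became goto put_rpm is the new use_rpm path: once pm_runtime_resume_and_get() has taken a reference, every exit, success or failure, must drop it, so the function funnels through a single put. The skeleton of that pattern, with invented helper names:

static int foo_probe_body(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* power the block up */
	if (ret)
		return ret;			/* nothing to undo yet */

	ret = foo_configure_registers(dev);	/* may fail part-way */
	if (ret)
		goto put_rpm;

	ret = foo_register_clocks(dev);

put_rpm:
	pm_runtime_put(dev);			/* balanced on all paths */
	return ret;
}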
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7e57f8fe8ea6..953c91f7b145 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -25,6 +25,16 @@ struct qcom_icc_hws_data {
int clk_id;
};
+struct qcom_cc_driver_data {
+ struct clk_alpha_pll **alpha_plls;
+ size_t num_alpha_plls;
+ u32 *clk_cbcrs;
+ size_t num_clk_cbcrs;
+ const struct clk_rcg_dfs_data *dfs_rcgs;
+ size_t num_dfs_rcgs;
+ void (*clk_regs_configure)(struct device *dev, struct regmap *regmap);
+};
+
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
@@ -35,9 +45,11 @@ struct qcom_cc_desc {
size_t num_gdscs;
struct clk_hw **clk_hws;
size_t num_clk_hws;
- struct qcom_icc_hws_data *icc_hws;
+ const struct qcom_icc_hws_data *icc_hws;
size_t num_icc_hws;
unsigned int icc_first_node_id;
+ bool use_rpm;
+ struct qcom_cc_driver_data *driver_data;
};
/**
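The new qcom_cc_driver_data hook lets an individual clock controller hand the common probe path its PLLs to configure, branch votes to assert, and DFS RCGs to register, instead of open-coding all of that in its own probe. A hypothetical wiring, every name invented for illustration:

static struct clk_alpha_pll *foo_cc_plls[] = {
	&foo_cc_pll0,
};

static u32 foo_cc_cbcrs[] = {
	0x4004,				/* hypothetical always-on CBCR */
};

static struct qcom_cc_driver_data foo_cc_driver_data = {
	.alpha_plls = foo_cc_plls,
	.num_alpha_plls = ARRAY_SIZE(foo_cc_plls),
	.clk_cbcrs = foo_cc_cbcrs,
	.num_clk_cbcrs = ARRAY_SIZE(foo_cc_cbcrs),
};

static const struct qcom_cc_desc foo_cc_desc = {
	/* .config, .clks, .gdscs, ... as usual */
	.use_rpm = true,
	.driver_data = &foo_cc_driver_data,
};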
diff --git a/drivers/clk/qcom/dispcc-glymur.c b/drivers/clk/qcom/dispcc-glymur.c
new file mode 100644
index 000000000000..5203fa6383f6
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-glymur.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_STANDALONE_PHY_PLL0_LINK_CLK,
+ DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ DT_STANDALONE_PHY_PLL1_LINK_CLK,
+ DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_STANDALONE_PHY_PLL0_LINK_CLK,
+ P_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ P_STANDALONE_PHY_PLL1_LINK_CLK,
+ P_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+static const struct pll_vco taycan_eko_t_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &disp_cc_pll1_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_STANDALONE_PHY_PLL0_VCO_DIV_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_VCO_DIV_CLK, 5 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_LINK_CLK, 5 },
+ { P_STANDALONE_PHY_PLL0_LINK_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL0_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
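Each F() row in these tables is a frequency recipe for the RCG: output rate, parent selector, pre-divider, and M/N counter values (m = n = 0 bypasses the MND counter). The macro, from this directory's clk-rcg.h, stores the divider in the register's half-integer encoding:

/* From drivers/clk/qcom/clk-rcg.h: h is the divider, stored as
 * 2 * h - 1 so half-integer dividers (1.5, 2.5, ...) fit too. */
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

/* 19.2 MHz straight from the XO: source P_BI_TCXO, divider 1, MND off. */
F(19200000, P_BI_TCXO, 1, 0, 0),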
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(205000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(660000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(717000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_dpin_div_clk_src = {
+ .reg = 0x838c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_dpin_div_clk_src = {
+ .reg = 0x8390,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_dpin_div_clk_src = {
+ .reg = 0x8394,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_dpin_div_clk_src = {
+ .reg = 0x8398,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_dpin_clk = {
+ .halt_reg = 0x837c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x837c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_dpin_clk = {
+ .halt_reg = 0x8380,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8380,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_dpin_clk = {
+ .halt_reg = 0x8384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8384,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
+ .halt_reg = 0x8378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8378,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_dpin_clk = {
+ .halt_reg = 0x8388,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8388,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
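+/*
+ * GDSC flags: HW_CTRL hands power-collapse control to the display hardware,
+ * POLL_CFG_GDSCR polls the CFG register for the power-state ack, and
+ * RETAIN_FF_ENABLE keeps register (flip-flop) contents across a collapse.
+ */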
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
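+/* Indexed by the DISP_CC_* IDs from the Glymur dispcc DT-binding header. */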
+static struct clk_regmap *disp_cc_glymur_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK] = &disp_cc_mdss_dptx0_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK] = &disp_cc_mdss_dptx1_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK] = &disp_cc_mdss_dptx2_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK] = &disp_cc_mdss_dptx3_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_glymur_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_glymur_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct clk_alpha_pll *disp_cc_glymur_plls[] = {
+ &disp_cc_pll0,
+ &disp_cc_pll1,
+};
+
+static u32 disp_cc_glymur_critical_cbcrs[] = {
+ 0xe07c, /* DISP_CC_SLEEP_CLK */
+ 0xe05c, /* DISP_CC_XO_CLK */
+};
+
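+/* fast_io selects spinlock-based regmap locking for these MMIO-only registers. */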
+static const struct regmap_config disp_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
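+/*
+ * Bundles what the shared qcom_cc probe path configures up front: the PLLs
+ * to program with their initial settings and the critical CBCRs to keep
+ * enabled.
+ */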
+static struct qcom_cc_driver_data disp_cc_glymur_driver_data = {
+ .alpha_plls = disp_cc_glymur_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_glymur_plls),
+ .clk_cbcrs = disp_cc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_glymur_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_glymur_desc = {
+ .config = &disp_cc_glymur_regmap_config,
+ .clks = disp_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_glymur_clocks),
+ .resets = disp_cc_glymur_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_glymur_resets),
+ .gdscs = disp_cc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_glymur_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_glymur_driver_data,
+};
+
+static const struct of_device_id disp_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_glymur_match_table);
+
+static int disp_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_glymur_desc);
+}
+
+static struct platform_driver disp_cc_glymur_driver = {
+ .probe = disp_cc_glymur_probe,
+ .driver = {
+ .name = "dispcc-glymur",
+ .of_match_table = disp_cc_glymur_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_glymur_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-milos.c b/drivers/clk/qcom/dispcc-milos.c
new file mode 100644
index 000000000000..95b6dd89d9ae
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-milos.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_AHB_CLK,
+ DT_GCC_DISP_GPLL0_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_EVEN,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GCC_DISP_GPLL0_CLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
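+/*
+ * Lucid-family alpha PLLs run at roughly XO * (l + alpha / 2^16), assuming
+ * the usual 16-bit alpha width: 19.2 MHz * (0xd + 0x6492 / 65536) is
+ * ~257.142858 MHz.
+ */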
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
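+/*
+ * Each parent_map entry pairs a software parent (P_*) with the hardware mux
+ * select value; the clk_parent_data array at the same position resolves the
+ * parent to a DT clock index or an internal clk_hw.
+ */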
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+ { P_DISP_CC_PLL0_OUT_EVEN, 6 },
+};
+
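+/*
+ * Note both PLL0 selections below reference the same registered clk_hw; the
+ * main and even outputs are distinguished only by the mux select value
+ * (1 vs 6).
+ */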
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+ { .hw = &disp_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
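+/*
+ * F(rate, src, pre_div, m, n) maps a target rate to a parent and RCG
+ * pre-divider (plus M/N when the RCG has an MND counter); the /8 and /4
+ * entries below imply a 300 MHz GPLL0 display branch.
+ */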
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
+ F(75000000, P_GCC_DISP_GPLL0_CLK, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8130,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8118,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x80cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x80e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x8100,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
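+/*
+ * All escape-clock rates divide down the 19.2 MHz XO; the RCG encodes the
+ * pre-divider as 2 * div - 1, which is how the fractional 1.5 divider
+ * (19.2 MHz / 1.5 = 12.8 MHz) fits in the field.
+ */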
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(12800000, P_BI_TCXO, 1.5, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x80b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
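+/*
+ * The MDP RCG keeps a fixed /3 pre-divider; with CLK_SET_RATE_PARENT the
+ * PLL itself is rescaled per rate (e.g. 3 * 85.714286 MHz matches PLL0's
+ * 257.142858 MHz base configuration above).
+ */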
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(342000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(535000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(630000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8050,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
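+/*
+ * The byte0 divider is writable (clk_regmap_div_ops); the DP link divider
+ * below is registered read-only, so the framework only reflects its value
+ * and never programs it.
+ */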
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x80b0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x80e4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_milos_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_milos_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_milos_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_milos_plls[] = {
+ &disp_cc_pll0,
+};
+
+static u32 disp_cc_milos_critical_cbcrs[] = {
+ 0xe06c, /* DISP_CC_SLEEP_CLK */
+ 0xe04c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static void disp_cc_milos_clk_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+}
+
+static struct qcom_cc_driver_data disp_cc_milos_driver_data = {
+ .alpha_plls = disp_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_milos_plls),
+ .clk_cbcrs = disp_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_milos_critical_cbcrs),
+ .clk_regs_configure = disp_cc_milos_clk_regs_configure,
+};
+
+static const struct qcom_cc_desc disp_cc_milos_desc = {
+ .config = &disp_cc_milos_regmap_config,
+ .clks = disp_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_milos_clocks),
+ .resets = disp_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_milos_resets),
+ .gdscs = disp_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_milos_driver_data,
+};
+
+static const struct of_device_id disp_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_milos_match_table);
+
+static int disp_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_milos_desc);
+}
+
+static struct platform_driver disp_cc_milos_driver = {
+ .probe = disp_cc_milos_probe,
+ .driver = {
+ .name = "disp_cc-milos",
+ .of_match_table = disp_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI DISP_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 449ffea2295d..6d88d067337f 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -4,10 +4,11 @@
* Copyright (c) 2021, Linaro Ltd.
*/
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -40,8 +41,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-qcs615.c b/drivers/clk/qcom/dispcc-qcs615.c
new file mode 100644
index 000000000000..4a6d78466098
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-qcs615.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_DP_PHY_PLL_LINK_CLK,
+ DT_DP_PHY_PLL_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+};
+
+static const struct pll_vco disp_cc_pll_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 576MHz configuration VCO - 2 */
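+/* l = 0x1e (30), so 30 * 19.2 MHz XO = 576 MHz, inside the 500-1000 MHz VCO-2 band above. */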
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x1e,
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
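+/*
+ * The slew ops let the PLL move to a new rate dynamically, without a full
+ * shutdown/relock cycle, which suits the reclocking implied by the MDP
+ * frequency table below.
+ */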
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = disp_cc_pll_vco,
+ .num_vco = ARRAY_SIZE(disp_cc_pll_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP_PHY_PLL_LINK_CLK },
+ { .index = DT_DP_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x2158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x20f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2128,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x2090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d8,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x210c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_core_gdsc = {
+ .gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_qcs615_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] = &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static struct gdsc *disp_cc_qcs615_gdscs[] = {
+ [MDSS_CORE_GDSC] = &mdss_core_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_qcs615_plls[] = {
+ &disp_cc_pll0,
+};
+
+static u32 disp_cc_qcs615_critical_cbcrs[] = {
+ 0x6054, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_qcs615_driver_data = {
+ .alpha_plls = disp_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_qcs615_plls),
+ .clk_cbcrs = disp_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_qcs615_desc = {
+ .config = &disp_cc_qcs615_regmap_config,
+ .clks = disp_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_qcs615_clocks),
+ .gdscs = disp_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_qcs615_gdscs),
+ .driver_data = &disp_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id disp_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_qcs615_match_table);
+
+static int disp_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_qcs615_desc);
+}
+
+static struct platform_driver disp_cc_qcs615_driver = {
+ .probe = disp_cc_qcs615_probe,
+ .driver = {
+ .name = "dispcc-qcs615",
+ .of_match_table = disp_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC QCS615 Driver");
+MODULE_LICENSE("GPL");
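
The frequency tables in the QCS615 driver are built from the F() macro, e.g. F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0) in ftbl_disp_cc_mdss_ahb_clk_src. Here is a compilable sketch of how those entries are encoded and matched; struct freq_tbl, the 2*div - 1 pre-divider encoding and the round-up lookup follow drivers/clk/qcom/clk-rcg.h and qcom_find_freq() as I read them, while the table contents and main() are demo scaffolding.

/*
 * Standalone sketch of the F() frequency-table entries above; the kernel's
 * versions live in drivers/clk/qcom/clk-rcg.h and common.c.
 */
#include <stdio.h>

struct freq_tbl {
	unsigned long freq;
	unsigned char src;	/* index into the parent_map */
	unsigned char pre_div;	/* encoded as 2*div - 1, allowing .5 steps */
	unsigned short m, n;	/* optional M/N fractional divider */
};

/* Mirrors the kernel macro: a divider of 4 is stored as 2*4 - 1 = 7. */
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

#define P_BI_TCXO		0
#define P_GPLL0_OUT_MAIN	1

static const struct freq_tbl ftbl_demo[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
	F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
	{ 0 }	/* sentinel, like the trailing { } in the driver tables */
};

/* Same policy as qcom_find_freq(): first entry >= rate, else the last. */
static const struct freq_tbl *find_freq(const struct freq_tbl *f,
					unsigned long rate)
{
	if (!f || !f->freq)
		return NULL;
	for (; f->freq; f++)
		if (rate <= f->freq)
			return f;
	return f - 1;
}

int main(void)
{
	const struct freq_tbl *f = find_freq(ftbl_demo, 50000000);

	/* 50 MHz rounds up to the 75 MHz entry (src 1, encoded pre_div 7). */
	printf("freq=%lu src=%u pre_div=%u\n", f->freq, f->src, f->pre_div);
	return 0;
}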
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 4710247be530..ab1a8d419863 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index db0745954894..465dc06c8712 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -16,6 +17,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
@@ -846,6 +848,11 @@ static struct gdsc *disp_cc_sc7280_gdscs[] = {
[DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
};
+static const struct qcom_reset_map disp_cc_sc7280_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static const struct regmap_config disp_cc_sc7280_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -860,6 +867,8 @@ static const struct qcom_cc_desc disp_cc_sc7280_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sc7280_clocks),
.gdscs = disp_cc_sc7280_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sc7280_gdscs),
+ .resets = disp_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sc7280_resets),
};
static const struct of_device_id disp_cc_sc7280_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sc8280xp.c b/drivers/clk/qcom/dispcc-sc8280xp.c
index f1ca9ae0b33f..5903a759d4af 100644
--- a/drivers/clk/qcom/dispcc-sc8280xp.c
+++ b/drivers/clk/qcom/dispcc-sc8280xp.c
@@ -5,13 +5,12 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sc8280xp.h>
@@ -3114,7 +3113,7 @@ static const struct regmap_config disp_cc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
.config = &disp_cc_sc8280xp_regmap_config,
.clks = disp0_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(disp0_cc_sc8280xp_clocks),
@@ -3124,7 +3123,7 @@ static struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
.num_gdscs = ARRAY_SIZE(disp0_cc_sc8280xp_gdscs),
};
-static struct qcom_cc_desc disp1_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc disp1_cc_sc8280xp_desc = {
.config = &disp_cc_sc8280xp_regmap_config,
.clks = disp1_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(disp1_cc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index e6139e8f74dc..2f9e9665d7e9 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -4,10 +4,10 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
diff --git a/drivers/clk/qcom/dispcc-sm4450.c b/drivers/clk/qcom/dispcc-sm4450.c
index 98ba016bc57f..e8752d01c8e6 100644
--- a/drivers/clk/qcom/dispcc-sm4450.c
+++ b/drivers/clk/qcom/dispcc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -722,7 +721,7 @@ static const struct regmap_config disp_cc_sm4450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm4450_desc = {
+static const struct qcom_cc_desc disp_cc_sm4450_desc = {
.config = &disp_cc_sm4450_regmap_config,
.clks = disp_cc_sm4450_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm4450_clocks),
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 939887f82ecc..8ae25d51db94 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -5,10 +5,11 @@
* Copyright (c) 2021, Linaro Ltd.
*/
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -48,8 +49,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
index 51c7492816fb..851d38a487d3 100644
--- a/drivers/clk/qcom/dispcc-sm6125.c
+++ b/drivers/clk/qcom/dispcc-sm6125.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 50facb36701a..b0bd163a449c 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -187,13 +188,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
.cmd_rcgr = 0x1144,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
@@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/dispcc-sm6375.c b/drivers/clk/qcom/dispcc-sm6375.c
index 167dd369a794..ec9dbb1f4a7c 100644
--- a/drivers/clk/qcom/dispcc-sm6375.c
+++ b/drivers/clk/qcom/dispcc-sm6375.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm7150.c b/drivers/clk/qcom/dispcc-sm7150.c
index d32bd7df1433..bdfff246ed3f 100644
--- a/drivers/clk/qcom/dispcc-sm7150.c
+++ b/drivers/clk/qcom/dispcc-sm7150.c
@@ -8,7 +8,6 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 884bbd3fb305..8f433e1e7028 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -4,11 +4,11 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index d1d3f60789ee..9ce9fd28e55b 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -4,12 +4,11 @@
* Copyright (c) 2022, Linaro Ltd.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -85,6 +84,29 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_init_data sm8475_disp_cc_pll0_init = {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+};
+
static struct clk_alpha_pll disp_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -112,6 +134,29 @@ static const struct alpha_pll_config disp_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_init_data sm8475_disp_cc_pll1_init = {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+};
+
static struct clk_alpha_pll disp_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -1734,7 +1779,7 @@ static const struct regmap_config disp_cc_sm8450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm8450_desc = {
+static const struct qcom_cc_desc disp_cc_sm8450_desc = {
.config = &disp_cc_sm8450_regmap_config,
.clks = disp_cc_sm8450_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm8450_clocks),
@@ -1746,6 +1791,7 @@ static struct qcom_cc_desc disp_cc_sm8450_desc = {
static const struct of_device_id disp_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-dispcc" },
+ { .compatible = "qcom,sm8475-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table);
@@ -1769,8 +1815,21 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
goto err_put_rpm;
}
- clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-dispcc")) {
+ /* Update DISPCC PLL0 */
+ disp_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ disp_cc_pll0.clkr.hw.init = &sm8475_disp_cc_pll0_init;
+
+ /* Update DISPCC PLL1 */
+ disp_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ disp_cc_pll1.clkr.hw.init = &sm8475_disp_cc_pll1_init;
+
+ clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &sm8475_disp_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &sm8475_disp_cc_pll1_config);
+ } else {
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ }
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
@@ -1802,5 +1861,5 @@ static struct platform_driver disp_cc_sm8450_driver = {
module_platform_driver(disp_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI DISPCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
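
The SM8450 hunk above extends one driver to a second SoC: the match table gains the qcom,sm8475-dispcc compatible, and probe swaps the PLL register layout, ops and configuration over to Lucid OLE before touching the hardware. A minimal standalone sketch of that shape follows; all names below are invented for illustration, and the real branch is in disp_cc_sm8450_probe().

#include <stdio.h>
#include <string.h>

struct pll_cfg { unsigned int l; const char *layout; };

/* Defaults model the SM8450 path; both structs are demo stand-ins. */
static struct pll_cfg pll0 = { .l = 0xd, .layout = "lucid_evo" };
static const struct pll_cfg sm8475_pll0 = { .l = 0xd, .layout = "lucid_ole" };

static void demo_probe(const char *compatible)
{
	/* Same shape as the of_device_is_compatible() branch in the diff. */
	if (!strcmp(compatible, "qcom,sm8475-dispcc"))
		pll0 = sm8475_pll0;	/* swap layout before configuring */

	printf("%s: configure PLL0 with %s registers, l=%#x\n",
	       compatible, pll0.layout, pll0.l);
}

int main(void)
{
	demo_probe("qcom,sm8450-dispcc");
	demo_probe("qcom,sm8475-dispcc");
	return 0;
}

Sharing the driver keeps the otherwise identical clock tables in one place; only the PLL block differs between the two SoCs, so the override happens exactly once, at probe.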
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 7f9021ca0ecb..f27140c649f5 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -4,12 +4,11 @@
* Copyright (c) 2023, Linaro Ltd.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -75,7 +74,7 @@ static struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
-static const struct alpha_pll_config disp_cc_pll0_config = {
+static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0xd,
.alpha = 0x6492,
.config_ctl_val = 0x20485699,
@@ -106,7 +105,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
},
};
-static const struct alpha_pll_config disp_cc_pll1_config = {
+static struct alpha_pll_config disp_cc_pll1_config = {
.l = 0x1f,
.alpha = 0x4000,
.config_ctl_val = 0x20485699,
@@ -594,6 +593,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = {
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
@@ -1739,7 +1745,7 @@ static const struct regmap_config disp_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm8550_desc = {
+static const struct qcom_cc_desc disp_cc_sm8550_desc = {
.config = &disp_cc_sm8550_regmap_config,
.clks = disp_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm8550_clocks),
@@ -1750,6 +1756,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
};
static const struct of_device_id disp_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sar2130p-dispcc" },
{ .compatible = "qcom,sm8550-dispcc" },
{ .compatible = "qcom,sm8650-dispcc" },
{ }
@@ -1780,6 +1787,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
+ disp_cc_pll0_config.l = 0x1f;
+ disp_cc_pll0_config.alpha = 0x4000;
+ disp_cc_pll0_config.user_ctl_val = 0x1;
+ disp_cc_pll1_config.user_ctl_val = 0x1;
+ disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
}
clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
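
Both the QCS615 driver above and the SM8750 driver that follows pair each parent_map (software source id to hardware mux field value) with a clk_parent_data array in the same order. A small sketch of the lookup direction; struct parent_map mirrors drivers/clk/qcom/common.h as I understand it, the rest is demo scaffolding, and the sample values match disp_cc_parent_map_3 from the QCS615 file.

#include <stdio.h>

/* Mirrors struct parent_map from drivers/clk/qcom/common.h. */
struct parent_map {
	unsigned char src;	/* software source id (P_... enum) */
	unsigned char cfg;	/* value programmed into the mux field */
};

enum { P_BI_TCXO, P_DISP_CC_PLL0_OUT_MAIN, P_GPLL0_OUT_MAIN };

/* Same shape as disp_cc_parent_map_3 above: note the gap at cfg = 4. */
static const struct parent_map demo_map[] = {
	{ P_BI_TCXO, 0 },
	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
	{ P_GPLL0_OUT_MAIN, 4 },
};

/* Demo lookup: software id -> hardware mux value (-1 if unmapped). */
static int map_src_to_cfg(const struct parent_map *map, int n, int src)
{
	for (int i = 0; i < n; i++)
		if (map[i].src == src)
			return map[i].cfg;
	return -1;
}

int main(void)
{
	printf("GPLL0 mux value: %d\n",
	       map_src_to_cfg(demo_map, 3, P_GPLL0_OUT_MAIN)); /* prints 4 */
	return 0;
}

The gap at cfg = 4 is why the map exists at all: mux field values are sparse hardware encodings, not array indices.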
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
new file mode 100644
index 000000000000..ca09da111a50
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -0,0 +1,1961 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8750-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DISP_CC_PLL2_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco pongo_elu_vco[] = {
+ { 38400000, 38400000, 0 },
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll2_config = {
+ .l = 0x493,
+ .alpha = 0x0,
+ .config_ctl_val = 0x60000f68,
+ .config_ctl_hi_val = 0x0001c808,
+ .config_ctl_hi1_val = 0x00000000,
+ .config_ctl_hi2_val = 0x040082f4,
+ .test_ctl_val = 0x00008000,
+ .test_ctl_hi_val = 0x0080c496,
+ .test_ctl_hi1_val = 0x40100180,
+ .test_ctl_hi2_val = 0x441001bc,
+ .test_ctl_hi3_val = 0x002003d8,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00e50302,
+};
+
+static struct clk_alpha_pll disp_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = pongo_elu_vco,
+ .num_vco = ARRAY_SIZE(pongo_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_PONGO_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_SLEEP_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_pongo_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .hw = &disp_cc_pll2.clkr.hw },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_11[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_11[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(207000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ /*
+ * TODO: Downstream does not manage the clock directly, but
+ * places votes via a new hardware block called "cesta".
+ * It is not clear whether such an approach should be taken instead
+ * of manual control.
+ */
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_osc_clk_src[] = {
+ F(38400000, P_DISP_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_10,
+ .freq_tbl = ftbl_disp_cc_osc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_11,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_11,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
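+/* GDSC power domains for the MDSS display subsystem */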
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8750_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_PLL2] = &disp_cc_pll2.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8750_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8750_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf004, /* 0x10000, 0x10004 and maybe others are for TZ */
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm8750_desc = {
+ .config = &disp_cc_sm8750_regmap_config,
+ .clks = disp_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8750_clocks),
+ .resets = disp_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8750_resets),
+ .gdscs = disp_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8750_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8750_match_table);
+
+static int disp_cc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8750_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
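+	/* Program the initial PLL configuration: Taycan ELU for PLL0/PLL1, Pongo ELU for PLL2 */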
+ clk_taycan_elu_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_taycan_elu_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ clk_pongo_elu_pll_configure(&disp_cc_pll2, regmap, &disp_cc_pll2_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe07c); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc00c); /* DISP_CC_MDSS_RSCC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc008); /* DISP_CC_MDSS_RSCC_VSYNC_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8750_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8750_driver = {
+ .probe = disp_cc_sm8750_probe,
+ .driver = {
+ .name = "disp_cc-sm8750",
+ .of_match_table = disp_cc_sm8750_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm8750_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc0-sa8775p.c b/drivers/clk/qcom/dispcc0-sa8775p.c
new file mode 100644
index 000000000000..aeda9cf4bfee
--- /dev/null
+++ b/drivers/clk/qcom/dispcc0-sa8775p.c
@@ -0,0 +1,1480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
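+/* Indices into the DT "clocks" property */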
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_MDSS_0_DISP_CC_PLL0_OUT_MAIN,
+ P_MDSS_0_DISP_CC_PLL1_OUT_EVEN,
+ P_MDSS_0_DISP_CC_PLL1_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
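+/* Operating range of the Lucid EVO VCO shared by both display PLLs */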
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct alpha_pll_config mdss_0_disp_cc_pll0_config = {
+ .l = 0x3a,
+ .alpha = 0x9800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_0_disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mdss_0_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_0_disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_0_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_0_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_0_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_0_disp_cc_pll0.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_0_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_7[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_ahb_clk_src[] = {
+ F(37500000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_5,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_0_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_crypto_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8188,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_crypto_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_4,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_0_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x813c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_4,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_0_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_mdp_clk_src[] = {
+ F(375000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(650000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_6,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_0_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x808c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_7,
+ .freq_tbl = ftbl_mdss_0_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_0_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_xo_clk_src",
+ .parent_data = disp_cc_0_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8104,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x816c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel2_clk = {
+ .halt_reg = 0x8264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel3_clk = {
+ .halt_reg = 0x8268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pll_lock_monitor_clk = {
+ .halt_reg = 0xe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xa008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_sm_obs_clk = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
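+/*
+ * MDSS core power domains. With HW_CTRL the GDSC is handed over to
+ * hardware control once it has been enabled.
+ */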
+static struct gdsc mdss_0_disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_0_disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc mdss_0_disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xd000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_0_disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
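+/* Lookup table indexed by the qcom,sa8775p-dispcc binding constants */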
+static struct clk_regmap *disp_cc_0_sa8775p_clocks[] = {
+ [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_0_disp_cc_mdss_ahb1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_0_disp_cc_mdss_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_0_disp_cc_mdss_ahb_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_0_disp_cc_mdss_byte0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_0_disp_cc_mdss_byte0_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_0_disp_cc_mdss_byte1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_0_disp_cc_mdss_byte1_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx0_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] =
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx1_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] =
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_0_disp_cc_mdss_esc0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_0_disp_cc_mdss_esc0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_0_disp_cc_mdss_esc1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_0_disp_cc_mdss_esc1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_0_disp_cc_mdss_mdp1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_0_disp_cc_mdss_mdp_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_0_disp_cc_mdss_mdp_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_0_disp_cc_mdss_mdp_lut1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_0_disp_cc_mdss_mdp_lut_clk.clkr,
+ [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_0_disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_0_disp_cc_mdss_pclk0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_0_disp_cc_mdss_pclk1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_0_disp_cc_mdss_pll_lock_monitor_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_0_disp_cc_mdss_rscc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_0_disp_cc_mdss_rscc_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_0_disp_cc_mdss_vsync1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_0_disp_cc_mdss_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_0_disp_cc_mdss_vsync_clk_src.clkr,
+ [MDSS_DISP_CC_PLL0] = &mdss_0_disp_cc_pll0.clkr,
+ [MDSS_DISP_CC_PLL1] = &mdss_0_disp_cc_pll1.clkr,
+ [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_0_disp_cc_sleep_clk_src.clkr,
+ [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_0_disp_cc_sm_obs_clk.clkr,
+ [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_0_disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_0_sa8775p_gdscs[] = {
+ [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_0_disp_cc_mdss_core_gdsc,
+ [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_0_disp_cc_mdss_core_int2_gdsc,
+};
+
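+/* Block Control Registers (BCRs) for the MDSS core and the RSC controller */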
+static const struct qcom_reset_map disp_cc_0_sa8775p_resets[] = {
+ [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 },
+};
+
+static const struct regmap_config disp_cc_0_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x12414,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_0_sa8775p_desc = {
+ .config = &disp_cc_0_sa8775p_regmap_config,
+ .clks = disp_cc_0_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_0_sa8775p_clocks),
+ .resets = disp_cc_0_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_0_sa8775p_resets),
+ .gdscs = disp_cc_0_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_0_sa8775p_gdscs),
+};
+
+static const struct of_device_id disp_cc_0_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-dispcc0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_0_sa8775p_match_table);
+
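+/*
+ * Hold a runtime PM reference while the PLLs are configured and the
+ * clocks, resets and GDSCs are registered, then drop it again.
+ */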
+static int disp_cc_0_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_0_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll0, regmap, &mdss_0_disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll1, regmap, &mdss_0_disp_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_0_DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_0_DISP_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_0_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_0_sa8775p_driver = {
+ .probe = disp_cc_0_sa8775p_probe,
+ .driver = {
+ .name = "dispcc0-sa8775p",
+ .of_match_table = disp_cc_0_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_0_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC0 SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc1-sa8775p.c b/drivers/clk/qcom/dispcc1-sa8775p.c
new file mode 100644
index 000000000000..cd55d1c11902
--- /dev/null
+++ b/drivers/clk/qcom/dispcc1-sa8775p.c
@@ -0,0 +1,1480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
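+/* Indices into this clock controller's DT "clocks" property */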
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+};
+
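+/* Parent identifiers; the parent_map tables below translate them to mux selector values */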
+enum {
+ P_BI_TCXO,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_MDSS_1_DISP_CC_PLL0_OUT_MAIN,
+ P_MDSS_1_DISP_CC_PLL1_OUT_EVEN,
+ P_MDSS_1_DISP_CC_PLL1_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
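+/* 1125 MHz, assuming a 19.2 MHz TCXO (L = 0x3a, ALPHA = .59375) */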
+static const struct alpha_pll_config mdss_1_disp_cc_pll0_config = {
+ .l = 0x3a,
+ .alpha = 0x9800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_1_disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
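+/* 600 MHz, assuming a 19.2 MHz TCXO (L = 0x1f, ALPHA = .25) */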
+static const struct alpha_pll_config mdss_1_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_1_disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_1_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_1_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
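+/* Both PLL1 taps resolve to the same clk_hw; the even output is not modeled separately */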
+static const struct clk_parent_data disp_cc_1_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_1_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_1_disp_cc_pll0.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_1_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_7_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
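+/* /16 and /8 of the 600 MHz PLL1 */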
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_ahb_clk_src[] = {
+ F(37500000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_5,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_1_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
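+/*
+ * Only the XO bypass rate is listed; the DSI/DP byte, pixel and link
+ * RCGs track their PHY PLL parents through CLK_SET_RATE_PARENT.
+ */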
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_crypto_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8188,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_crypto_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_4,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_1_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x813c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_4,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_1_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
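+/* Fixed /3 divider; rate changes reprogram PLL0 (1125/1500/1725/1950 MHz) */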
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_mdp_clk_src[] = {
+ F(375000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(650000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_6,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_1_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x808c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_1_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_7,
+ .freq_tbl = ftbl_mdss_1_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_1_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_xo_clk_src",
+ .parent_data = disp_cc_1_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8104,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
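+/* Read-only dividers: the clock framework reports but never reprograms the DP link divide ratio */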
+static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x816c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel2_clk = {
+ .halt_reg = 0x8264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel3_clk = {
+ .halt_reg = 0x8268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pll_lock_monitor_clk = {
+ .halt_reg = 0xe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xa008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_sm_obs_clk = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
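+/*
+ * The two GDSCs below gate the MDSS core power domains. The *_wait_val
+ * fields program the GDSCR enable/disable handshake delays; POLL_CFG_GDSCR
+ * polls the CFG register for the power state, RETAIN_FF_ENABLE keeps
+ * register contents across power collapse, and HW_CTRL hands on/off
+ * control to hardware.
+ */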
+static struct gdsc mdss_1_disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_1_disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc mdss_1_disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xd000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_1_disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct clk_regmap *disp_cc_1_sa8775p_clocks[] = {
+ [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_1_disp_cc_mdss_ahb1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_1_disp_cc_mdss_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_1_disp_cc_mdss_ahb_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_1_disp_cc_mdss_byte0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_1_disp_cc_mdss_byte0_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_1_disp_cc_mdss_byte1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_1_disp_cc_mdss_byte1_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx0_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] =
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx1_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] =
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_1_disp_cc_mdss_esc0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_1_disp_cc_mdss_esc0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_1_disp_cc_mdss_esc1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_1_disp_cc_mdss_esc1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_1_disp_cc_mdss_mdp1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_1_disp_cc_mdss_mdp_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_1_disp_cc_mdss_mdp_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_1_disp_cc_mdss_mdp_lut1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_1_disp_cc_mdss_mdp_lut_clk.clkr,
+ [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_1_disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_1_disp_cc_mdss_pclk0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_1_disp_cc_mdss_pclk1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_1_disp_cc_mdss_pll_lock_monitor_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_1_disp_cc_mdss_rscc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_1_disp_cc_mdss_rscc_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_1_disp_cc_mdss_vsync1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_1_disp_cc_mdss_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_1_disp_cc_mdss_vsync_clk_src.clkr,
+ [MDSS_DISP_CC_PLL0] = &mdss_1_disp_cc_pll0.clkr,
+ [MDSS_DISP_CC_PLL1] = &mdss_1_disp_cc_pll1.clkr,
+ [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_1_disp_cc_sleep_clk_src.clkr,
+ [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_1_disp_cc_sm_obs_clk.clkr,
+ [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_1_disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_1_sa8775p_gdscs[] = {
+ [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_1_disp_cc_mdss_core_gdsc,
+ [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_1_disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_1_sa8775p_resets[] = {
+ [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 },
+};
+
+static const struct regmap_config disp_cc_1_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x12414,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_1_sa8775p_desc = {
+ .config = &disp_cc_1_sa8775p_regmap_config,
+ .clks = disp_cc_1_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_1_sa8775p_clocks),
+ .resets = disp_cc_1_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_1_sa8775p_resets),
+ .gdscs = disp_cc_1_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_1_sa8775p_gdscs),
+};
+
+static const struct of_device_id disp_cc_1_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-dispcc1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_1_sa8775p_match_table);
+
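+/*
+ * Probe keeps the device runtime-resumed while the two display PLLs are
+ * programmed and the always-on sleep/XO branches are force-enabled, then
+ * registers all clocks, resets and GDSCs through the common helper.
+ */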
+static int disp_cc_1_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_1_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll0, regmap, &mdss_1_disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll1, regmap, &mdss_1_disp_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_1_DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_1_DISP_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_1_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_1_sa8775p_driver = {
+ .probe = disp_cc_1_sa8775p_probe,
+ .driver = {
+ .name = "dispcc1-sa8775p",
+ .of_match_table = disp_cc_1_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_1_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC1 SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-glymur.c b/drivers/clk/qcom/gcc-glymur.c
new file mode 100644
index 000000000000..62059120f972
--- /dev/null
+++ b/drivers/clk/qcom/gcc-glymur.c
@@ -0,0 +1,8616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_PCIE_3A_PIPE_CLK,
+ DT_PCIE_3B_PIPE_CLK,
+ DT_PCIE_4_PIPE_CLK,
+ DT_PCIE_5_PIPE_CLK,
+ DT_PCIE_6_PIPE_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL14_OUT_EVEN,
+ P_GCC_GPLL14_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_PCIE_3A_PIPE_CLK,
+ P_PCIE_3B_PIPE_CLK,
+ P_PCIE_4_PIPE_CLK,
+ P_PCIE_5_PIPE_CLK,
+ P_PCIE_6_PIPE_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
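+
+/*
+ * DT_* indices follow the order of the "clocks" entries in the device
+ * tree node; P_* values are logical parent identifiers that the
+ * parent_map tables below translate into hardware mux select values.
+ */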
+
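+/*
+ * The GPLLs are voted on through the shared enable register at 0x62040;
+ * each PLL owns the bit matching its number.
+ */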
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
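+/* { register value, divider }: 0x1 selects divide-by-2 on the even output. */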
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll14 = {
+ .offset = 0xe000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll14_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll14_out_even = {
+ .offset = 0xe000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll14_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll14_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll14.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
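+/*
+ * Forward declarations: the parent_data tables below reference these
+ * pipe clock muxes, which are defined further down in this file.
+ */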
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src;
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src;
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
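+
+/*
+ * Each gcc_parent_map_N pairs index-for-index with gcc_parent_data_N:
+ * the map entry supplies the hardware mux select value for a logical
+ * parent, the data entry the matching clk_hw pointer or DT clock index.
+ */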
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL14_OUT_MAIN, 1 },
+ { P_GCC_GPLL14_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll14.clkr.hw },
+ { .hw = &gcc_gpll14_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL8_OUT_MAIN, 3 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC, 2 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .hw = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr.hw },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_24[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_25[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_26[] = {
+ { P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_27[] = {
+ { P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_28[] = {
+ { P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_29[] = {
+ { P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_30[] = {
+ { P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_31[] = {
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_32[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_33[] = {
+ { P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_37[] = {
+ { P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_37[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_38[] = {
+ { P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_38[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_39[] = {
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_39[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_40[] = {
+ { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_40[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_41[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_41[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_42[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_42[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_43[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_43[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_44[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_44[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_45[] = {
+ { P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_45[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_46[] = {
+ { P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_46[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_47[] = {
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_47[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_48[] = {
+ { P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_48[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_49[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_49[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_50[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_50[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_51[] = {
+ { P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_51[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
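+/*
+ * The PCIe PIPE clock selectors use the phy-mux helper, which parks the
+ * mux on a safe internal source while the PHY pipe clock is not running
+ * and only switches to the PHY output when the clock is enabled.
+ */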
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
+ .reg = 0xdc088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
+ .reg = 0x941b4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x881a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_5_pipe_clk_src = {
+ .reg = 0xc309c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_5_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6_pipe_clk_src = {
+ .reg = 0x8a1a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x7706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x7705c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x2b0b8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0x2d0c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xe00bc,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = {
+ .reg = 0x9a07c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_24,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = {
+ .reg = 0x9a084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_25,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3f08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_26,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0xe207c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_27,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
+ .reg = 0xe107c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_28,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x2b080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_29,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x2b134,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_30,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_31,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b120,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_33,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x2b0c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x2b0d4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x2b100,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2d08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_37,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_37,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_37),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2d154,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_38,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_38,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_38),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2d114,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_39,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_39,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_39),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2d140,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_40,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_40,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_40),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2d0e4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_42,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_42,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_42),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2d0f8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_43,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_43,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_43),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2d124,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_44,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_44,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_44),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0xe0084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_45,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_45,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_45),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0xe013c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_46,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_46,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_46),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0xe00f4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_47,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_47,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_47),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0xe0124,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_48,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_48,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_48),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0xe00c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_49,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_49,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_49),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0xe00d8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_50,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_50,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_50),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0xe0104,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_51,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_51,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_51),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
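+
+/*
+ * F(rate, parent, pre_div, m, n): rate in Hz, parent selected through the
+ * RCG's parent_map, integer pre-divider, and an optional M/N pair for
+ * fractional division (0, 0 leaves the M/N counter unused).
+ */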
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x92004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x93004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xc8168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc803c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x2e168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2e03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0xc0168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc003c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = {
+ .cmd_rcgr = 0xdc08c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xdc070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = {
+ .cmd_rcgr = 0x941b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x94088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x881a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x88078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_aux_clk_src = {
+ .cmd_rcgr = 0xc30a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc3084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_aux_clk_src = {
+ .cmd_rcgr = 0x8a1a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8a078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3a_aux_clk_src = {
+ .cmd_rcgr = 0x6c01c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3b_aux_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_4_aux_clk_src = {
+ .cmd_rcgr = 0xd3018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_5_aux_clk_src = {
+ .cmd_rcgr = 0xd2018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_6_aux_clk_src = {
+ .cmd_rcgr = 0xd4018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
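
The freq_tbl entries above and below are built with the qcom F() helper from drivers/clk/qcom/clk-rcg.h. A minimal sketch of that encoding, re-declared here purely for illustration (these are not lines added by the patch): the pre-divider h is stored as 2*h - 1, which is why the half-integer entries such as F(..., 4.5, 0, 0) later in the patch are exact, and the trailing m/n pair programs the M/N fractional counter, which is only present when the RCG's mnd_width is non-zero.

	#include <linux/types.h>

	/* Illustrative re-declaration of the qcom RCG frequency-table
	 * encoding (see drivers/clk/qcom/clk-rcg.h); not part of this patch. */
	struct freq_tbl {
		unsigned long freq;
		u8 src;		/* index into the RCG's parent_map */
		u8 pre_div;	/* stored as 2 * divider - 1 */
		u16 m;		/* M/N counter numerator */
		u16 n;		/* M/N counter denominator */
	};

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

So F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625) requests 300 MHz (GPLL0/2, assuming the usual 600 MHz GPLL0 on recent Snapdragon GCCs) * 384 / 15625 = 7.3728 MHz, with the pre-divider left at 1.
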
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s0_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s0_clk_src = {
+ .cmd_rcgr = 0xe7044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s0_clk_src_init,
+};
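
Unlike the other RCGs in this patch, which initialize clk_init_data as an anonymous compound literal, the QUP serial-engine clocks keep it in a separate named (and mutable) variable. In other qcom GCC drivers that pattern exists so the probe path can register the RCGs for DFS, which rewrites the init data at runtime. A hypothetical hookup under that assumption, using the DEFINE_RCG_DFS/qcom_cc_register_rcg_dfs helpers from drivers/clk/qcom/clk-rcg.h and common.c; whether this particular driver actually wires this up is not visible in the hunk shown here:

	/* Hypothetical DFS registration, mirroring sdm845-style GCC drivers;
	 * DEFINE_RCG_DFS(r) expands to { .rcg = &r##_src, .init = &r##_src_init },
	 * matching the naming used by this patch. */
	static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
		DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s0_clk),
		DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s1_clk),
	};

	/* in probe():
	 *	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
	 *				       ARRAY_SIZE(gcc_dfs_clocks));
	 */

If the driver instead relies purely on clk_rcg2_shared_no_init_park_ops, the separate init structs may simply be a stylistic carry-over from earlier GCC drivers.
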
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s1_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s1_clk_src = {
+ .cmd_rcgr = 0xe7170,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s2_clk_src = {
+ .cmd_rcgr = 0x287a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s3_clk_src = {
+ .cmd_rcgr = 0x288d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s6_clk_src = {
+ .cmd_rcgr = 0x2866c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x282b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x283f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x28540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb37a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb38d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb366c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0xb3014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0xb3150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0xb32b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0xb33f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0xb3540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb47a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb48d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb466c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0xb4014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0xb4150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0xb42b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0xb43f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0xb4540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0xb001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_17,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0xdf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL14_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GCC_GPLL14_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0xbc030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xbc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0x9a03c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9a054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3f04c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0xe203c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe2054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_master_clk_src = {
+ .cmd_rcgr = 0xe103c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe1054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9a088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3f090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe1080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
+ F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_master_clk_src = {
+ .cmd_rcgr = 0x2b02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2b104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_32,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_sb_if_clk_src = {
+ .cmd_rcgr = 0x2b0a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_tmu_clk_src = {
+ .cmd_rcgr = 0x2b084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0x2d02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2d128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pll_pipe_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(311000000, P_GCC_GPLL5_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src = {
+ .cmd_rcgr = 0x2d0c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_41,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pll_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pll_pipe_clk_src",
+ .parent_data = gcc_parent_data_41,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_41),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0x2d0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0x2d090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_master_clk_src = {
+ .cmd_rcgr = 0xe002c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0xe0108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_sb_if_clk_src = {
+ .cmd_rcgr = 0xe00a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_tmu_clk_src = {
+ .cmd_rcgr = 0xe0088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = {
+ .reg = 0x94070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
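
The clk_regmap_div entries that follow expose a divider bitfield (selected by reg/shift/width) sitting on an RCG output. With clk_regmap_div_ro_ops the kernel only ever reads back what firmware programmed, and for qcom's regmap divider the register value is conventionally the divider minus one. A small standalone sketch of the recalc math, assuming that (val + 1) encoding:

	/* Sketch of read-only divider rate recalculation, assuming the
	 * standard qcom clk_regmap_div encoding (regval = divider - 1);
	 * not the driver's actual code. */
	static unsigned long div_recalc(unsigned int regval, unsigned int shift,
					unsigned int width,
					unsigned long parent_rate)
	{
		unsigned int div = ((regval >> shift) & ((1u << width) - 1)) + 1;

		return parent_rate / div;	/* e.g. PCIe PIPE clock / 2 */
	}
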
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x88060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_5_pipe_div_clk_src = {
+ .reg = 0xc306c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6_pipe_div_clk_src = {
+ .reg = 0x8a060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s0_clk_src = {
+ .reg = 0xe7024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s1_clk_src = {
+ .reg = 0xe7038,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x2828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s3_clk_src = {
+ .reg = 0x282a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s6_clk_src = {
+ .reg = 0x2852c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0xb328c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s3_clk_src = {
+ .reg = 0xb32a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s6_clk_src = {
+ .reg = 0xb352c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s2_clk_src = {
+ .reg = 0xb428c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s3_clk_src = {
+ .reg = 0xb42a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s6_clk_src = {
+ .reg = 0xb452c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0xbc174,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9a06c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3f07c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe206c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_tert_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe106c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_3a_west_sf_axi_clk = {
+ .halt_reg = 0xdc0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3a_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
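
From here on the patch defines clk_branch gate clocks. The halt_check values follow the usual qcom semantics: BRANCH_HALT polls the halt bit in halt_reg after toggling the gate, BRANCH_HALT_SKIP never polls, and BRANCH_HALT_VOTED is used when enable_reg points into a shared voting register (0x62008 and friends here), where each BIT() is one client's vote rather than a direct gate; branches carrying hwcg_reg/hwcg_bit additionally allow hardware-controlled clock gating. A condensed paraphrase of the enable flow, not the real implementation in drivers/clk/qcom/clk-branch.c, with poll_halt_bit() standing in as a hypothetical helper:

	/* Condensed paraphrase of the qcom branch-clock enable path;
	 * poll_halt_bit() is a hypothetical stand-in for the readback loop. */
	static int branch_enable(struct clk_branch *br, struct regmap *map)
	{
		/* Assert our vote, or the direct enable bit. */
		regmap_update_bits(map, br->clkr.enable_reg,
				   br->clkr.enable_mask, br->clkr.enable_mask);

		if (br->halt_check == BRANCH_HALT_SKIP)
			return 0;

		/* Wait for the branch to report running; voted branches must
		 * tolerate staying halted while other voters are off. */
		return poll_halt_bit(map, br->halt_reg);
	}
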
+
+static struct clk_branch gcc_aggre_noc_pcie_3b_west_sf_axi_clk = {
+ .halt_reg = 0x941ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3b_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_4_west_sf_axi_clk = {
+ .halt_reg = 0x881d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_4_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_5_east_sf_axi_clk = {
+ .halt_reg = 0xc30d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_5_east_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_6_west_sf_axi_clk = {
+ .halt_reg = 0x8a1d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_6_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x77000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc17c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc17c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_0_axi_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_2_axi_clk = {
+ .halt_reg = 0xe0000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe0000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe0000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_ahb_clk = {
+ .halt_reg = 0x9b02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_axi_clk = {
+ .halt_reg = 0x9b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_xo_clk = {
+ .halt_reg = 0x9b044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x34038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x82004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x82004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_south_ahb_clk = {
+ .halt_reg = 0xba2ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_south_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_ahb_clk = {
+ .halt_reg = 0x9b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_xo_clk = {
+ .halt_reg = 0x9b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x92000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x92000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x93000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
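+/*
+ * Despite the _clk_src suffix, the next two entries are plain branch gates
+ * that forward GPLL0 and its even divider onward, apparently for
+ * consumption by the GPU clock controller (hence the gcc_gpu_ prefix).
+ */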
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_reg = 0x71024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_reg = 0x7102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7102c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xc8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xba4a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xba498,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba498,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xc8038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
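+/*
+ * PCIe pipe branches use BRANCH_HALT_SKIP: the pipe clock is generated by
+ * the PHY (muxed here through the USB4/PCIe pipe source), so its halt bit
+ * is not meaningful until the PHY is up and is therefore never polled.
+ */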
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xc8028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xba488,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba488,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xba484,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x2e018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0xba480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba480,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0xba470,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba470,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x2e038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0xba460,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba460,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0xba45c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0xc0018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0xba4d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0xba4c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba4c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_phy_rchng_clk = {
+ .halt_reg = 0xc0038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0xc0028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0xba4b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0xba4ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_aux_clk = {
+ .halt_reg = 0xdc04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc04c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = {
+ .halt_reg = 0xba4f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_mstr_axi_clk = {
+ .halt_reg = 0xdc038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_phy_rchng_clk = {
+ .halt_reg = 0xdc06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipe_clk = {
+ .halt_reg = 0xdc05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_axi_clk = {
+ .halt_reg = 0xdc024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = {
+ .halt_reg = 0xdc01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_aux_clk = {
+ .halt_reg = 0x94050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = {
+ .halt_reg = 0xba4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_mstr_axi_clk = {
+ .halt_reg = 0x94038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x94038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_phy_rchng_clk = {
+ .halt_reg = 0x94084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_clk = {
+ .halt_reg = 0x94060,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
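+/*
+ * The *_pipe_div2 branches tap the same PHY pipe clock through a divider
+ * source (*_pipe_div_clk_src), providing what appears to be a half-rate
+ * copy of the pipe clock.
+ */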
+static struct clk_branch gcc_pcie_3b_pipe_div2_clk = {
+ .halt_reg = 0x94074,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_axi_clk = {
+ .halt_reg = 0x94024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = {
+ .halt_reg = 0x9401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x88040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0xba4fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4fc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x88030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x88030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_phy_rchng_clk = {
+ .halt_reg = 0x88074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x88050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_div2_clk = {
+ .halt_reg = 0x88064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x88020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x8801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_aux_clk = {
+ .halt_reg = 0xc304c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_cfg_ahb_clk = {
+ .halt_reg = 0xba4f8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_mstr_axi_clk = {
+ .halt_reg = 0xc3038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xc3038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_phy_rchng_clk = {
+ .halt_reg = 0xc3080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_clk = {
+ .halt_reg = 0xc305c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_div2_clk = {
+ .halt_reg = 0xc3070,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_axi_clk = {
+ .halt_reg = 0xc3024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_q2a_axi_clk = {
+ .halt_reg = 0xc301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_aux_clk = {
+ .halt_reg = 0x8a040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_cfg_ahb_clk = {
+ .halt_reg = 0xba500,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba500,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_mstr_axi_clk = {
+ .halt_reg = 0x8a030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8a030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_phy_rchng_clk = {
+ .halt_reg = 0x8a074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_clk = {
+ .halt_reg = 0x8a050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_div2_clk = {
+ .halt_reg = 0x8a064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_axi_clk = {
+ .halt_reg = 0x8a020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_q2a_axi_clk = {
+ .halt_reg = 0x8a01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_pwrctl_clk = {
+ .halt_reg = 0xba2ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_pwrctl_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_qosgen_extref_clk = {
+ .halt_reg = 0xba2a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_qosgen_extref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_sf_center_clk = {
+ .halt_reg = 0xba2b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_sf_center_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_east_clk = {
+ .halt_reg = 0xba2b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_west_clk = {
+ .halt_reg = 0xba2c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_tsctr_clk = {
+ .halt_reg = 0xba2a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3a_aux_clk = {
+ .halt_reg = 0x6c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6c038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3b_aux_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_4_aux_clk = {
+ .halt_reg = 0xd3030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_5_aux_clk = {
+ .halt_reg = 0xd2030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_6_aux_clk = {
+ .halt_reg = 0xd4030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
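+/*
+ * AHB and XO interface clocks for the PCIe RSCC (resource state
+ * coordinator, going by the register block naming).
+ */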
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0xb8004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb8004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xb8008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_av1e_ahb_clk = {
+ .halt_reg = 0x9b048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3a_ahb_clk = {
+ .halt_reg = 0xdc018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3a_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3b_ahb_clk = {
+ .halt_reg = 0x94018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3b_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_4_ahb_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_5_ahb_clk = {
+ .halt_reg = 0xc3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_5_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_6_ahb_clk = {
+ .halt_reg = 0x8a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_6_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec1_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
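+/*
+ * QUPv3 serial-engine clocks: each wrapper (the OOB instance here, then
+ * wrap0/1/2 below) gets core and core_2x gates, M/S AHB gates, and one
+ * branch per serial engine parented to its matching RCG with
+ * CLK_SET_RATE_PARENT so rate requests propagate to the source.
+ */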
+static struct clk_branch gcc_qupv3_oob_core_2x_clk = {
+ .halt_reg = 0xc5040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_core_clk = {
+ .halt_reg = 0xc502c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_m_ahb_clk = {
+ .halt_reg = 0xe7004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe7004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe7004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s0_clk = {
+ .halt_reg = 0xe7040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s1_clk = {
+ .halt_reg = 0xe729c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s0_clk = {
+ .halt_reg = 0xe7014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s1_clk = {
+ .halt_reg = 0xe7028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s_ahb_clk = {
+ .halt_reg = 0xc5028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_tcxo_clk = {
+ .halt_reg = 0xe703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_tcxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0xc5448,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0xc5434,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s2_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s3_clk = {
+ .halt_reg = 0x288cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s6_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x28290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x282a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x283e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x2851c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x28530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0xc5198,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0xc5184,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s2_clk = {
+ .halt_reg = 0xb379c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s3_clk = {
+ .halt_reg = 0xb38cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s6_clk = {
+ .halt_reg = 0xb3798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0xb3004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0xb3140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0xb327c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0xb3290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0xb32a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0xb33e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0xb351c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0xb3530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0xc52f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0xc52dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s2_clk = {
+ .halt_reg = 0xb479c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s3_clk = {
+ .halt_reg = 0xb48cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s6_clk = {
+ .halt_reg = 0xb4798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0xb4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0xb4140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0xb427c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0xb4290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0xb42a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0xb43e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0xb451c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0xb4530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0xc542c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc542c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0xc5430,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5430,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0xc517c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc517c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0xc5180,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5180,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0xc52d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0xc52d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0xb0014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0xb0004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0xdf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0xdf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0xba504,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba504,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770dc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0xbc018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0xbc02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0xbc028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0x9a024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0x9a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0x9a034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x3f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x3f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0xe2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0xe2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0xe2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_master_clk = {
+ .halt_reg = 0xe1024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_mock_utmi_clk = {
+ .halt_reg = 0xe1038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_sleep_clk = {
+ .halt_reg = 0xe1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0x9a070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0x9a074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0x9a078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0x9a080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x3f080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x3f084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3f088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0xe2070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0xe2074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0xe2078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_aux_clk = {
+ .halt_reg = 0xe1070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_com_aux_clk = {
+ .halt_reg = 0xe1074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
+ .halt_reg = 0xe1078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
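+/*
+ * Each USB4 controller instance (0/1/2) exposes the same set of branches:
+ * cfg_ahb, dp0/dp1, master, PHY p2rr2p/pcie/rx/usb pipe, sb_if, sys, tmu
+ * and uc_hrr. The phy_pcie_pipe branches are enabled through the shared
+ * vote register at 0x62010 rather than a per-branch enable bit.
+ */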
+static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
+ .halt_reg = 0xba450,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba450,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp0_clk = {
+ .halt_reg = 0x2b070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp1_clk = {
+ .halt_reg = 0x2b124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_master_clk = {
+ .halt_reg = 0x2b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2b0f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2b04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
+ .halt_reg = 0x2b0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
+ .halt_reg = 0x2b0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
+ .halt_reg = 0x2b0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b0bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sb_if_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sys_clk = {
+ .halt_reg = 0x2b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_tmu_clk = {
+ .halt_reg = 0x2b09c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_uc_hrr_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0xba454,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba454,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba454,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp0_clk = {
+ .halt_reg = 0x2d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp1_clk = {
+ .halt_reg = 0x2d144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0x2d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2d118,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2d04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0x2d0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0x2d0fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0x2d0e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0x2d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0x2d05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0x2d0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_uc_hrr_clk = {
+ .halt_reg = 0x2d06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
+ .halt_reg = 0xba458,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba458,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba458,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp0_clk = {
+ .halt_reg = 0xe0070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp1_clk = {
+ .halt_reg = 0xe0128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_master_clk = {
+ .halt_reg = 0xe001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0xe00f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
+ .halt_reg = 0xe004c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
+ .halt_reg = 0xe00c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
+ .halt_reg = 0xe00dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
+ .halt_reg = 0xe00c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sb_if_clk = {
+ .halt_reg = 0xe0048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sys_clk = {
+ .halt_reg = 0xe005c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_tmu_clk = {
+ .halt_reg = 0xe00a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_uc_hrr_clk = {
+ .halt_reg = 0xe006c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe006c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x3201c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0c_clk = {
+ .halt_reg = 0x32030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
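+/*
+ * GDSC power domains. The en_rest/en_few/clk_dis wait values program the
+ * corresponding GDSCR timing fields; VOTABLE domains are enabled by voting
+ * alongside other vote masters rather than by direct control, and
+ * RETAIN_FF_ENABLE preserves register state across power collapse.
+ */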
+static struct gdsc gcc_pcie_0_tunnel_gdsc = {
+ .gdscr = 0xc8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_tunnel_gdsc = {
+ .gdscr = 0x2e004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_2_tunnel_gdsc = {
+ .gdscr = 0xc0004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_gdsc = {
+ .gdscr = 0xdc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_phy_gdsc = {
+ .gdscr = 0x6c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3a_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_gdsc = {
+ .gdscr = 0x94004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_phy_gdsc = {
+ .gdscr = 0x75004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3b_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_gdsc = {
+ .gdscr = 0x88004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_phy_gdsc = {
+ .gdscr = 0xd3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_4_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_gdsc = {
+ .gdscr = 0xc3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_5_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_phy_gdsc = {
+ .gdscr = 0xd2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_5_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_gdsc = {
+ .gdscr = 0x8a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_phy_gdsc = {
+ .gdscr = 0xd4004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_6_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0xbc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_mp_gdsc = {
+ .gdscr = 0x9a010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x3f01c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0xe2010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_tert_gdsc = {
+ .gdscr = 0xe1010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_tert_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss0_phy_gdsc = {
+ .gdscr = 0x5400c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .gdscr = 0x5402c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_0_gdsc = {
+ .gdscr = 0x2b008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_1_gdsc = {
+ .gdscr = 0x2d008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_2_gdsc = {
+ .gdscr = 0xe0008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_0_phy_gdsc = {
+ .gdscr = 0xdb024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_1_phy_gdsc = {
+ .gdscr = 0x2c024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_2_phy_gdsc = {
+ .gdscr = 0xbe024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
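+/*
+ * Lookup table consumed by the qcom clk framework: each entry maps a
+ * clock index from this GCC block's dt-bindings header to its clk_regmap.
+ */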
+static struct clk_regmap *gcc_glymur_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3a_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3b_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_4_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_5_east_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_6_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_TERT_AXI_CLK] = &gcc_aggre_usb3_tert_axi_clk.clkr,
+ [GCC_AGGRE_USB4_0_AXI_CLK] = &gcc_aggre_usb4_0_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_2_AXI_CLK] = &gcc_aggre_usb4_2_axi_clk.clkr,
+ [GCC_AV1E_AHB_CLK] = &gcc_av1e_ahb_clk.clkr,
+ [GCC_AV1E_AXI_CLK] = &gcc_av1e_axi_clk.clkr,
+ [GCC_AV1E_XO_CLK] = &gcc_av1e_xo_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_south_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_TERT_AXI_CLK] = &gcc_cfg_noc_usb3_tert_axi_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_AHB_CLK] = &gcc_cfg_noc_usb_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_south_ahb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AHB_CLK] = &gcc_eva_ahb_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_EVA_XO_CLK] = &gcc_eva_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL14] = &gcc_gpll14.clkr,
+ [GCC_GPLL14_OUT_EVEN] = &gcc_gpll14_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK] = &gcc_pcie_2_phy_rchng_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr,
+ [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK] = &gcc_pcie_3a_phy_rchng_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr,
+ [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr,
+ [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr,
+ [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr,
+ [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK] = &gcc_pcie_3b_phy_rchng_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr,
+ [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_DIV2_CLK] = &gcc_pcie_3b_pipe_div2_clk.clkr,
+ [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr,
+ [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK] = &gcc_pcie_4_phy_rchng_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV2_CLK] = &gcc_pcie_4_pipe_div2_clk.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK] = &gcc_pcie_5_aux_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK_SRC] = &gcc_pcie_5_aux_clk_src.clkr,
+ [GCC_PCIE_5_CFG_AHB_CLK] = &gcc_pcie_5_cfg_ahb_clk.clkr,
+ [GCC_PCIE_5_MSTR_AXI_CLK] = &gcc_pcie_5_mstr_axi_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK] = &gcc_pcie_5_phy_rchng_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK_SRC] = &gcc_pcie_5_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_CLK] = &gcc_pcie_5_pipe_clk.clkr,
+ [GCC_PCIE_5_PIPE_CLK_SRC] = &gcc_pcie_5_pipe_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_DIV2_CLK] = &gcc_pcie_5_pipe_div2_clk.clkr,
+ [GCC_PCIE_5_PIPE_DIV_CLK_SRC] = &gcc_pcie_5_pipe_div_clk_src.clkr,
+ [GCC_PCIE_5_SLV_AXI_CLK] = &gcc_pcie_5_slv_axi_clk.clkr,
+ [GCC_PCIE_5_SLV_Q2A_AXI_CLK] = &gcc_pcie_5_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK] = &gcc_pcie_6_aux_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK_SRC] = &gcc_pcie_6_aux_clk_src.clkr,
+ [GCC_PCIE_6_CFG_AHB_CLK] = &gcc_pcie_6_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6_MSTR_AXI_CLK] = &gcc_pcie_6_mstr_axi_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK] = &gcc_pcie_6_phy_rchng_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_CLK] = &gcc_pcie_6_pipe_clk.clkr,
+ [GCC_PCIE_6_PIPE_CLK_SRC] = &gcc_pcie_6_pipe_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_DIV2_CLK] = &gcc_pcie_6_pipe_div2_clk.clkr,
+ [GCC_PCIE_6_PIPE_DIV_CLK_SRC] = &gcc_pcie_6_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6_SLV_AXI_CLK] = &gcc_pcie_6_slv_axi_clk.clkr,
+ [GCC_PCIE_6_SLV_Q2A_AXI_CLK] = &gcc_pcie_6_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_NOC_PWRCTL_CLK] = &gcc_pcie_noc_pwrctl_clk.clkr,
+ [GCC_PCIE_NOC_QOSGEN_EXTREF_CLK] = &gcc_pcie_noc_qosgen_extref_clk.clkr,
+ [GCC_PCIE_NOC_SF_CENTER_CLK] = &gcc_pcie_noc_sf_center_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_EAST_CLK] = &gcc_pcie_noc_slave_sf_east_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_WEST_CLK] = &gcc_pcie_noc_slave_sf_west_clk.clkr,
+ [GCC_PCIE_NOC_TSCTR_CLK] = &gcc_pcie_noc_tsctr_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK] = &gcc_pcie_phy_3a_aux_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK_SRC] = &gcc_pcie_phy_3a_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK] = &gcc_pcie_phy_3b_aux_clk.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK_SRC] = &gcc_pcie_phy_3b_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK] = &gcc_pcie_phy_4_aux_clk.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK_SRC] = &gcc_pcie_phy_4_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK] = &gcc_pcie_phy_5_aux_clk.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK_SRC] = &gcc_pcie_phy_5_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK] = &gcc_pcie_phy_6_aux_clk.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK_SRC] = &gcc_pcie_phy_6_aux_clk_src.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_AV1E_AHB_CLK] = &gcc_qmip_av1e_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3A_AHB_CLK] = &gcc_qmip_pcie_3a_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3B_AHB_CLK] = &gcc_qmip_pcie_3b_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_4_AHB_CLK] = &gcc_qmip_pcie_4_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_5_AHB_CLK] = &gcc_qmip_pcie_5_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_6_AHB_CLK] = &gcc_qmip_pcie_6_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC1_AHB_CLK] = &gcc_qmip_video_vcodec1_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_2X_CLK] = &gcc_qupv3_oob_core_2x_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_CLK] = &gcc_qupv3_oob_core_clk.clkr,
+ [GCC_QUPV3_OOB_M_AHB_CLK] = &gcc_qupv3_oob_m_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK] = &gcc_qupv3_oob_qspi_s0_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK_SRC] = &gcc_qupv3_oob_qspi_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK] = &gcc_qupv3_oob_qspi_s1_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK_SRC] = &gcc_qupv3_oob_qspi_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S0_CLK] = &gcc_qupv3_oob_s0_clk.clkr,
+ [GCC_QUPV3_OOB_S0_CLK_SRC] = &gcc_qupv3_oob_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_S1_CLK] = &gcc_qupv3_oob_s1_clk.clkr,
+ [GCC_QUPV3_OOB_S1_CLK_SRC] = &gcc_qupv3_oob_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S_AHB_CLK] = &gcc_qupv3_oob_s_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_TCXO_CLK] = &gcc_qupv3_oob_tcxo_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK] = &gcc_qupv3_wrap0_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK] = &gcc_qupv3_wrap0_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK] = &gcc_qupv3_wrap0_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK] = &gcc_qupv3_wrap1_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK] = &gcc_qupv3_wrap1_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK] = &gcc_qupv3_wrap1_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK] = &gcc_qupv3_wrap2_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK] = &gcc_qupv3_wrap2_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK] = &gcc_qupv3_wrap2_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK] = &gcc_usb30_tert_master_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK_SRC] = &gcc_usb30_tert_master_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK] = &gcc_usb30_tert_mock_utmi_clk.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK] = &gcc_usb3_tert_phy_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK_SRC] = &gcc_usb3_tert_phy_aux_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_COM_AUX_CLK] = &gcc_usb3_tert_phy_com_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK] = &gcc_usb3_tert_phy_pipe_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb3_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB4_0_CFG_AHB_CLK] = &gcc_usb4_0_cfg_ahb_clk.clkr,
+ [GCC_USB4_0_DP0_CLK] = &gcc_usb4_0_dp0_clk.clkr,
+ [GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
+ [GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
+ [GCC_USB4_0_SYS_CLK] = &gcc_usb4_0_sys_clk.clkr,
+ [GCC_USB4_0_TMU_CLK] = &gcc_usb4_0_tmu_clk.clkr,
+ [GCC_USB4_0_TMU_CLK_SRC] = &gcc_usb4_0_tmu_clk_src.clkr,
+ [GCC_USB4_0_UC_HRR_CLK] = &gcc_usb4_0_uc_hrr_clk.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP0_CLK] = &gcc_usb4_1_dp0_clk.clkr,
+ [GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_1_UC_HRR_CLK] = &gcc_usb4_1_uc_hrr_clk.clkr,
+ [GCC_USB4_2_CFG_AHB_CLK] = &gcc_usb4_2_cfg_ahb_clk.clkr,
+ [GCC_USB4_2_DP0_CLK] = &gcc_usb4_2_dp0_clk.clkr,
+ [GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
+ [GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
+ [GCC_USB4_2_SYS_CLK] = &gcc_usb4_2_sys_clk.clkr,
+ [GCC_USB4_2_TMU_CLK] = &gcc_usb4_2_tmu_clk.clkr,
+ [GCC_USB4_2_TMU_CLK_SRC] = &gcc_usb4_2_tmu_clk_src.clkr,
+ [GCC_USB4_2_UC_HRR_CLK] = &gcc_usb4_2_uc_hrr_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI0C_CLK] = &gcc_video_axi0c_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_glymur_gdscs[] = {
+ [GCC_PCIE_0_TUNNEL_GDSC] = &gcc_pcie_0_tunnel_gdsc,
+ [GCC_PCIE_1_TUNNEL_GDSC] = &gcc_pcie_1_tunnel_gdsc,
+ [GCC_PCIE_2_TUNNEL_GDSC] = &gcc_pcie_2_tunnel_gdsc,
+ [GCC_PCIE_3A_GDSC] = &gcc_pcie_3a_gdsc,
+ [GCC_PCIE_3A_PHY_GDSC] = &gcc_pcie_3a_phy_gdsc,
+ [GCC_PCIE_3B_GDSC] = &gcc_pcie_3b_gdsc,
+ [GCC_PCIE_3B_PHY_GDSC] = &gcc_pcie_3b_phy_gdsc,
+ [GCC_PCIE_4_GDSC] = &gcc_pcie_4_gdsc,
+ [GCC_PCIE_4_PHY_GDSC] = &gcc_pcie_4_phy_gdsc,
+ [GCC_PCIE_5_GDSC] = &gcc_pcie_5_gdsc,
+ [GCC_PCIE_5_PHY_GDSC] = &gcc_pcie_5_phy_gdsc,
+ [GCC_PCIE_6_GDSC] = &gcc_pcie_6_gdsc,
+ [GCC_PCIE_6_PHY_GDSC] = &gcc_pcie_6_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_MP_GDSC] = &gcc_usb30_mp_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [GCC_USB30_TERT_GDSC] = &gcc_usb30_tert_gdsc,
+ [GCC_USB3_MP_SS0_PHY_GDSC] = &gcc_usb3_mp_ss0_phy_gdsc,
+ [GCC_USB3_MP_SS1_PHY_GDSC] = &gcc_usb3_mp_ss1_phy_gdsc,
+ [GCC_USB4_0_GDSC] = &gcc_usb4_0_gdsc,
+ [GCC_USB4_1_GDSC] = &gcc_usb4_1_gdsc,
+ [GCC_USB4_2_GDSC] = &gcc_usb4_2_gdsc,
+ [GCC_USB_0_PHY_GDSC] = &gcc_usb_0_phy_gdsc,
+ [GCC_USB_1_PHY_GDSC] = &gcc_usb_1_phy_gdsc,
+ [GCC_USB_2_PHY_GDSC] = &gcc_usb_2_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_glymur_resets[] = {
+ [GCC_AV1E_BCR] = { 0x9b028 },
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9b000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbc2d0 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbc2dc },
+ [GCC_PCIE_0_PHY_BCR] = { 0xbc2d8 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbc2e0 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xc8000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x7f018 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x7f024 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x7f020 },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x7f028 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x2e000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x281d0 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x281dc },
+ [GCC_PCIE_2_PHY_BCR] = { 0x281d8 },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x281e0 },
+ [GCC_PCIE_2_TUNNEL_BCR] = { 0xc0000 },
+ [GCC_PCIE_3A_BCR] = { 0xdc000 },
+ [GCC_PCIE_3A_LINK_DOWN_BCR] = { 0x7b0a0 },
+ [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0x7b0ac },
+ [GCC_PCIE_3A_PHY_BCR] = { 0x6c000 },
+ [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0x7b0b0 },
+ [GCC_PCIE_3B_BCR] = { 0x94000 },
+ [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0x7a0c0 },
+ [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0x7a0cc },
+ [GCC_PCIE_3B_PHY_BCR] = { 0x75000 },
+ [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0x7a0c8 },
+ [GCC_PCIE_4_BCR] = { 0x88000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x980c0 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x980cc },
+ [GCC_PCIE_4_PHY_BCR] = { 0xd3000 },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x980d0 },
+ [GCC_PCIE_5_BCR] = { 0xc3000 },
+ [GCC_PCIE_5_LINK_DOWN_BCR] = { 0x850c0 },
+ [GCC_PCIE_5_NOCSR_COM_PHY_BCR] = { 0x850cc },
+ [GCC_PCIE_5_PHY_BCR] = { 0xd2000 },
+ [GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR] = { 0x850d0 },
+ [GCC_PCIE_6_BCR] = { 0x8a000 },
+ [GCC_PCIE_6_LINK_DOWN_BCR] = { 0x3a0b0 },
+ [GCC_PCIE_6_NOCSR_COM_PHY_BCR] = { 0x3a0bc },
+ [GCC_PCIE_6_PHY_BCR] = { 0xd4000 },
+ [GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR] = { 0x3a0c0 },
+ [GCC_PCIE_NOC_BCR] = { 0xba294 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xb8000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0xb3000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0xb4000 },
+ [GCC_QUPV3_WRAPPER_OOB_BCR] = { 0xe7000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0xca000 },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0xe6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xad024 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xae000 },
+ [GCC_QUSB2PHY_TERT_BCR] = { 0xc9000 },
+ [GCC_QUSB2PHY_USB20_HS_BCR] = { 0xe9000 },
+ [GCC_SDCC2_BCR] = { 0xb0000 },
+ [GCC_SDCC4_BCR] = { 0xdf000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x281e4 },
+ [GCC_UFS_PHY_BCR] = { 0x77004 },
+ [GCC_USB20_PRIM_BCR] = { 0xbc000 },
+ [GCC_USB30_MP_BCR] = { 0x9a00c },
+ [GCC_USB30_PRIM_BCR] = { 0x3f018 },
+ [GCC_USB30_SEC_BCR] = { 0xe200c },
+ [GCC_USB30_TERT_BCR] = { 0xe100c },
+ [GCC_USB3_MP_SS0_PHY_BCR] = { 0x54008 },
+ [GCC_USB3_MP_SS1_PHY_BCR] = { 0x54028 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0xdb000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x2c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0xbe000 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x54000 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x54020 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0xdb004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x2c004 },
+ [GCC_USB3PHY_PHY_TERT_BCR] = { 0xbe004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x54004 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54024 },
+ [GCC_USB4_0_BCR] = { 0x2b004 },
+ [GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0xdb010 },
+ [GCC_USB4_1_BCR] = { 0x2d004 },
+ [GCC_USB4_2_BCR] = { 0xe0004 },
+ [GCC_USB_0_PHY_BCR] = { 0xdb020 },
+ [GCC_USB_1_PHY_BCR] = { 0x2c020 },
+ [GCC_USB_2_PHY_BCR] = { 0xbe020 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x3201c, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32044, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
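+/*
+ * QUP serial-engine RCGs with hardware Dynamic Frequency Switching; their
+ * per-level rates are read back from the DFS registers at registration.
+ */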
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
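+/*
+ * Always-on branch clocks (CBCRs) that are force-enabled at probe and
+ * not otherwise managed by the kernel.
+ */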
+static u32 gcc_glymur_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26040, /* GCC_CAMERA_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32058, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f8ff0,
+ .fast_io = true,
+};
+
+static void clk_glymur_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* FORCE_MEM_CORE_ON for the UFS PHY ICE core clock */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+}
+
+static struct qcom_cc_driver_data gcc_glymur_driver_data = {
+ .clk_cbcrs = gcc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_glymur_critical_cbcrs),
+ .dfs_rcgs = gcc_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_dfs_clocks),
+ .clk_regs_configure = clk_glymur_regs_configure,
+};
+
+static const struct qcom_cc_desc gcc_glymur_desc = {
+ .config = &gcc_glymur_regmap_config,
+ .clks = gcc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(gcc_glymur_clocks),
+ .resets = gcc_glymur_resets,
+ .num_resets = ARRAY_SIZE(gcc_glymur_resets),
+ .gdscs = gcc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_glymur_gdscs),
+ .driver_data = &gcc_glymur_driver_data,
+};
+
+static const struct of_device_id gcc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_glymur_match_table);
+
+static int gcc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_glymur_desc);
+}
+
+static struct platform_driver gcc_glymur_driver = {
+ .probe = gcc_glymur_probe,
+ .driver = {
+ .name = "gcc-glymur",
+ .of_match_table = gcc_glymur_match_table,
+ },
+};
+
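+/*
+ * Register at subsys_initcall time so the clocks are available before
+ * dependent peripheral drivers probe.
+ */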
+static int __init gcc_glymur_init(void)
+{
+ return platform_driver_register(&gcc_glymur_driver);
+}
+subsys_initcall(gcc_glymur_init);
+
+static void __exit gcc_glymur_exit(void)
+{
+ platform_driver_unregister(&gcc_glymur_driver);
+}
+module_exit(gcc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI GCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index d38628b52268..5ac44cfb53ce 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -125,21 +125,23 @@ static const struct clk_fepll_vco gcc_fepll_vco = {
* It looks up the frequency table and returns the next higher frequency
* supported in hardware.
*/
-static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate)
+static int clk_cpu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fepll *pll = to_clk_fepll(hw);
struct clk_hw *p_hw;
const struct freq_tbl *f;
- f = qcom_find_freq(pll->freq_tbl, rate);
+ f = qcom_find_freq(pll->freq_tbl, req->rate);
if (!f)
return -EINVAL;
p_hw = clk_hw_get_parent_by_index(hw, f->src);
- *p_rate = clk_hw_get_rate(p_hw);
+ req->best_parent_rate = clk_hw_get_rate(p_hw);
+
+ req->rate = f->freq;
- return f->freq;
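+ /* .determine_rate reports the selected rate via req and returns 0 */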
+ return 0;
};
/*
@@ -205,7 +207,7 @@ clk_cpu_div_recalc_rate(struct clk_hw *hw,
};
static const struct clk_ops clk_regmap_cpu_div_ops = {
- .round_rate = clk_cpu_div_round_rate,
+ .determine_rate = clk_cpu_div_determine_rate,
.set_rate = clk_cpu_div_set_rate,
.recalc_rate = clk_cpu_div_recalc_rate,
};
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
index 70f5dcb96700..dcda2be8c1a5 100644
--- a/drivers/clk/qcom/gcc-ipq5018.c
+++ b/drivers/clk/qcom/gcc-ipq5018.c
@@ -1371,7 +1371,7 @@ static struct clk_branch gcc_xo_clk = {
&gcc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
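+ /* CLK_IS_CRITICAL keeps the XO branch running even with no consumers */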
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3660,7 +3660,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
[GCC_WCSS_AXI_S_ARES] = { 0x59008, 6 },
[GCC_WCSS_Q6_BCR] = { 0x18004, 0 },
[GCC_WCSSAON_RESET] = { 0x59010, 0},
- [GCC_GEPHY_MISC_ARES] = { 0x56004, 0 },
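+ /* the GEPHY_MISC reset spans bits 3:0, asserted together via .bitmask */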
+ [GCC_GEPHY_MISC_ARES] = { 0x56004, .bitmask = GENMASK(3, 0) },
};
static const struct of_device_id gcc_ipq5018_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index 9536b2b7d07c..9246e97d785a 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -2185,150 +2185,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
},
};
-static struct clk_branch gcc_q6_ahb_clk = {
- .halt_reg = 0x25014,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25014,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_ahb_s_clk = {
- .halt_reg = 0x25018,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_s_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_axim_clk = {
- .halt_reg = 0x2500c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2500c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axim_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_q6_axim_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_axis_clk = {
- .halt_reg = 0x25010,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axis_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_tsctr_1to2_clk = {
- .halt_reg = 0x25020,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_tsctr_1to2_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_atbm_clk = {
- .halt_reg = 0x2501c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2501c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_atbm_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_pclkdbg_clk = {
- .halt_reg = 0x25024,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_pclkdbg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_trig_clk = {
- .halt_reg = 0x250a0,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x250a0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_trig_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_at_clk = {
.halt_reg = 0x2d038,
.halt_check = BRANCH_HALT_VOTED,
@@ -2756,24 +2612,6 @@ static struct clk_branch gcc_sys_noc_at_clk = {
},
};
-static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
- .halt_reg = 0x2e030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_sys_noc_wcss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_uniphy0_ahb_clk = {
.halt_reg = 0x16010,
.halt_check = BRANCH_HALT,
@@ -2989,204 +2827,6 @@ static struct clk_branch gcc_usb0_sleep_clk = {
},
};
-static struct clk_branch gcc_wcss_axim_clk = {
- .halt_reg = 0x2505c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2505c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_axim_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_axis_clk = {
- .halt_reg = 0x25060,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25060,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_axis_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_bdg_clk = {
- .halt_reg = 0x25048,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25048,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
- .halt_reg = 0x25038,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_bdg_clk = {
- .halt_reg = 0x2504c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2504c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
- .halt_reg = 0x2503c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2503c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_bdg_clk = {
- .halt_reg = 0x25050,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25050,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
- .halt_reg = 0x25040,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_ecahb_clk = {
- .halt_reg = 0x25058,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_ecahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_mst_async_bdg_clk = {
- .halt_reg = 0x2e0b0,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e0b0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_mst_async_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_slv_async_bdg_clk = {
- .halt_reg = 0x2e0b4,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e0b4,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_slv_async_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_xo_clk = {
.halt_reg = 0x34018,
.halt_check = BRANCH_HALT,
@@ -3362,15 +3002,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr,
[GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
- [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
- [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
- [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
[GCC_Q6_AXIM_CLK_SRC] = &gcc_q6_axim_clk_src.clkr,
- [GCC_Q6_AXIS_CLK] = &gcc_q6_axis_clk.clkr,
- [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
- [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
- [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
- [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr,
[GCC_QDSS_CFG_AHB_CLK] = &gcc_qdss_cfg_ahb_clk.clkr,
@@ -3400,7 +3032,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
[GCC_SNOC_USB_CLK] = &gcc_snoc_usb_clk.clkr,
[GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
- [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
[GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr,
[GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
[GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
@@ -3421,17 +3052,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
[GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
[GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr,
- [GCC_WCSS_AXIM_CLK] = &gcc_wcss_axim_clk.clkr,
- [GCC_WCSS_AXIS_CLK] = &gcc_wcss_axis_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_BDG_CLK] = &gcc_wcss_dbg_ifc_apb_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_BDG_CLK] = &gcc_wcss_dbg_ifc_atb_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_BDG_CLK] = &gcc_wcss_dbg_ifc_nts_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
- [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
- [GCC_WCSS_MST_ASYNC_BDG_CLK] = &gcc_wcss_mst_async_bdg_clk.clkr,
- [GCC_WCSS_SLV_ASYNC_BDG_CLK] = &gcc_wcss_slv_async_bdg_clk.clkr,
[GCC_XO_CLK] = &gcc_xo_clk.clkr,
[GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
[GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
@@ -3622,7 +3242,7 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = {
#define IPQ_APPS_ID 5332 /* some unique value */
-static struct qcom_icc_hws_data icc_ipq5332_hws[] = {
+static const struct qcom_icc_hws_data icc_ipq5332_hws[] = {
{ MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK },
{ MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK },
{ MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK },
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
new file mode 100644
index 000000000000..3d42f3d85c7a
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -0,0 +1,3316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "reset.h"
+
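+/* Board clock inputs, indexed by their position in the DT 'clocks' property */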
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_PCIE30_PHY0_PIPE_CLK,
+ DT_PCIE30_PHY1_PIPE_CLK,
+ DT_PCIE30_PHY2_PIPE_CLK,
+ DT_PCIE30_PHY3_PIPE_CLK,
+ DT_USB_PCIE_WRAPPER_PIPE_CLK,
+};
+
+enum {
+ P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC,
+ P_GPLL0_OUT_AUX,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL2_OUT_AUX,
+ P_GPLL2_OUT_MAIN,
+ P_GPLL4_OUT_AUX,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+ P_XO,
+ P_USB3PHY_0_PIPE,
+};
+
+static const struct clk_parent_data gcc_parent_data_xo = { .index = DT_XO };
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_div2",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll2",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x21000,
+ .post_div_table = post_div_table_gpll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll2_out_main),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ /*
+ * There are no in-kernel consumers of this GPLL yet (they will
+ * be added soon), so the clock framework would disable this
+ * source. However, some clocks initialized by the boot loader
+ * use it, so it must be kept on. Add the CLK_IGNORE_UNUSED
+ * flag so the clock is not disabled; once an in-kernel
+ * consumer is added, the flag can be dropped.
+ */
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_xo[] = {
+ { P_XO, 0 },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_XO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_AUX, 1 },
+ { P_GPLL0_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2_out_main.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_adss_pwm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_nss_ts_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_xo_clk_src = {
+ .cmd_rcgr = 0x34004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_nss_ts_clk_src = {
+ .cmd_rcgr = 0x17088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie0_axi_m_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie0_axi_m_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie0_axi_s_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_axi_m_clk_src = {
+ .cmd_rcgr = 0x29018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_axi_s_clk_src = {
+ .cmd_rcgr = 0x29020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie2_axi_m_clk_src[] = {
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie2_axi_m_clk_src = {
+ .cmd_rcgr = 0x2a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie2_axi_s_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_axi_m_clk_src = {
+ .cmd_rcgr = 0x2b018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_axi_s_clk_src = {
+ .cmd_rcgr = 0x2b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_clk_src[] = {
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_clk_src = {
+ .cmd_rcgr = 0x28004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_i2c0_clk_src[] = {
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2.5, 0, 0),
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c0_clk_src = {
+ .cmd_rcgr = 0x2018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c1_clk_src = {
+ .cmd_rcgr = 0x3018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_spi0_clk_src[] = {
+ F(960000, P_XO, 10, 2, 5),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2, 4, 5),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 10, 2, 5),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_spi0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_spi1_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = {
+ F(3686400, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 144, 15625),
+ F(7372800, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 288, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 576, 15625),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_uart0_clk_src = {
+ .cmd_rcgr = 0x202c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_uart1_clk_src = {
+ .cmd_rcgr = 0x302c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 12, 125),
+ F(400000, P_XO, 12, 1, 5),
+ F(24000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
+ F(48000000, P_GPLL2_OUT_MAIN, 12, 0, 0),
+ F(96000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(192000000, P_GPLL2_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x33004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
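+ /* Floor ops round down so the SD/eMMC controller is never overclocked */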
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(300000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x33018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_uniphy_sys_clk_src = {
+ .cmd_rcgr = 0x17090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy_sys_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb0_aux_clk_src = {
+ .cmd_rcgr = 0x2c018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_master_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_mock_utmi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL4_OUT_AUX, 10, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2c02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb1_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_wcss_ahb_clk_src = {
+ .cmd_rcgr = 0x25030,
+ .freq_tbl = ftbl_gcc_wcss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_ahb_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_at_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_at_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .freq_tbl = ftbl_gcc_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_tsctr_clk_src[] = {
+ F(600000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x2d01c,
+ .freq_tbl = ftbl_gcc_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
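+/*
+ * The div2/div4 clocks below have no registers of their own; they are
+ * plain fixed-factor scalings of gcc_qdss_tsctr_clk_src, so they use
+ * clk_fixed_factor instead of a clk_regmap.
+ */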
+static struct clk_fixed_factor gcc_qdss_tsctr_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_sync_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_system_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2e004,
+ .freq_tbl = ftbl_gcc_system_noc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_system_noc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x31004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcnoc_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_lpass_sway_clk_src[] = {
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_lpass_sway_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_lpass_axim_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_axim_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eud_at_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sleep_clk_src = {
+ .cmd_rcgr = 0x3400c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sleep_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qpic_io_macro_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qpic_io_macro_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qpic_clk_src = {
+ .cmd_rcgr = 0x32020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie0_rchng_clk_src = {
+ .cmd_rcgr = 0x28028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_rchng_clk_src = {
+ .cmd_rcgr = 0x29028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie2_rchng_clk_src = {
+ .cmd_rcgr = 0x2a028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_rchng_clk_src = {
+ .cmd_rcgr = 0x2b028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
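+/*
+ * These *_div_clk_src dividers use the read-only regmap divider ops:
+ * the rate is recalculated from whatever value the boot firmware left
+ * in the field, but the kernel never reprograms it.
+ */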
+static struct clk_regmap_div gcc_qupv3_i2c0_div_clk_src = {
+ .reg = 0x2020,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_i2c1_div_clk_src = {
+ .reg = 0x3020,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb0_mock_utmi_div_clk_src = {
+ .reg = 0x2c040,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb1_mock_utmi_div_clk_src = {
+ .reg = 0x3c018,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb1_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
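+/*
+ * Branch (gate) clocks: each toggles a single enable bit, and with
+ * BRANCH_HALT the halt_reg is polled until the hardware confirms the
+ * clock is running. CLK_SET_RATE_PARENT forwards rate requests up to
+ * the RCG feeding the branch.
+ */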
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_adss_pwm_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie0_1lane_s_clk = {
+ .halt_reg = 0x31088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie0_1lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie1_1lane_s_clk = {
+ .halt_reg = 0x3108c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie1_1lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie2_2lane_s_clk = {
+ .halt_reg = 0x31090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie2_2lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie3_2lane_s_clk = {
+ .halt_reg = 0x31094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie3_2lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_usb_clk = {
+ .halt_reg = 0x310a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x310a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_usb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+ .halt_reg = 0x17040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mdio_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ts_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_nss_ts_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscc_clk = {
+ .halt_reg = 0x17034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscfg_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_atb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_nsscc_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_pcnoc_1_clk = {
+ .halt_reg = 0x17080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_pcnoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_qosgen_ref_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_div4_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_1_clk = {
+ .halt_reg = 0x1707c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_timeout_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_div4_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_xo_dcd_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_xo_dcd_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+ .halt_reg = 0x28030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+ .halt_reg = 0x28070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+ .halt_reg = 0x28038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie0_1lane_m_clk = {
+ .halt_reg = 0x2e07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie0_1lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+ .halt_reg = 0x28048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+ .halt_reg = 0x28040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
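+/*
+ * The pipe clock source is a regmap PHY mux, which parks the clock on
+ * a safe internal source whenever the PCIe PHY (referenced from DT via
+ * the indexed parent) is powered down. The pipe branch below uses
+ * BRANCH_HALT_DELAY since its status bit cannot be trusted until the
+ * PHY link is up, so enabling just imposes a delay instead of polling.
+ */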
+static struct clk_regmap_phy_mux gcc_pcie0_pipe_clk_src = {
+ .reg = 0x28064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_pipe_clk = {
+ .halt_reg = 0x28068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x28068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie0_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_ahb_clk = {
+ .halt_reg = 0x29030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_aux_clk = {
+ .halt_reg = 0x29074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_m_clk = {
+ .halt_reg = 0x29038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie1_1lane_m_clk = {
+ .halt_reg = 0x2e084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie1_1lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_bridge_clk = {
+ .halt_reg = 0x29048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_clk = {
+ .halt_reg = 0x29040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie1_pipe_clk_src = {
+ .reg = 0x29064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_pipe_clk = {
+ .halt_reg = 0x29068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x29068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie1_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_ahb_clk = {
+ .halt_reg = 0x2a030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_aux_clk = {
+ .halt_reg = 0x2a078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_m_clk = {
+ .halt_reg = 0x2a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie2_2lane_m_clk = {
+ .halt_reg = 0x2e080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie2_2lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_bridge_clk = {
+ .halt_reg = 0x2a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_clk = {
+ .halt_reg = 0x2a040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie2_pipe_clk_src = {
+ .reg = 0x2a064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie2_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY2_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_pipe_clk = {
+ .halt_reg = 0x2a068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2a068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie2_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_ahb_clk = {
+ .halt_reg = 0x2b030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_aux_clk = {
+ .halt_reg = 0x2b07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_m_clk = {
+ .halt_reg = 0x2b038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie3_2lane_m_clk = {
+ .halt_reg = 0x2e090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie3_2lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_bridge_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_clk = {
+ .halt_reg = 0x2b040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie3_pipe_clk_src = {
+ .reg = 0x2b064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie3_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY3_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_pipe_clk = {
+ .halt_reg = 0x2b068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2b068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
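+/*
+ * The next three branches are voted: their enable bits sit in the
+ * shared vote register at 0xb004, so the clock only stops once every
+ * voter has cleared its bit, and BRANCH_HALT_VOTED skips the halt poll
+ * on disable for that reason.
+ */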
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_ahb_mst_clk = {
+ .halt_reg = 0x1014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_ahb_mst_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_ahb_slv_clk = {
+ .halt_reg = 0x102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_ahb_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c1_clk = {
+ .halt_reg = 0x3024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_spi0_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_spi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_spi1_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_spi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_uart0_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_uart0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_uart1_clk = {
+ .halt_reg = 0x3040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_uart1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x3303c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3303c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x3302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x33034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+ .halt_reg = 0x1704c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1704c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+ .halt_reg = 0x17048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+ .halt_reg = 0x1705c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_ahb_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_sys_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+ .halt_reg = 0x2c04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+ .halt_reg = 0x2c044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+ .halt_reg = 0x2c050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_mock_utmi_clk = {
+ .halt_reg = 0x3c024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb1_mock_utmi_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+ .halt_reg = 0x2c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = {
+ .halt_reg = 0x3c01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_master_clk = {
+ .halt_reg = 0x3c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb0_pipe_clk_src = {
+ .reg = 0x2c074,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_USB_PCIE_WRAPPER_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+ .halt_reg = 0x2c054,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2c054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_usb0_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+ .halt_reg = 0x2c058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_sleep_clk = {
+ .halt_reg = 0x3c020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+ .halt_reg = 0x3a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+ .halt_reg = 0x3a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_sway_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_lpass_cfg_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_lpass_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_core_axim_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_lpass_clk = {
+ .halt_reg = 0x31020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x31020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_lpass_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_eud_at_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_eud_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eud_at_div_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qpic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_io_macro_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qpic_io_macro_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x2d058,
+ .clkr = {
+ .enable_reg = 0x2d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x2d034,
+ .clkr = {
+ .enable_reg = 0x2d034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
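+/*
+ * The rchng branches gate via bit 1 of the same register as their
+ * RCG's cmd_rcgr (compare the gcc_pcie*_rchng_clk_src definitions
+ * above) rather than bit 0 of a dedicated CBCR.
+ */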
+static struct clk_branch gcc_pcie0_rchng_clk = {
+ .halt_reg = 0x28028,
+ .clkr = {
+ .enable_reg = 0x28028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie0_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_rchng_clk = {
+ .halt_reg = 0x29028,
+ .clkr = {
+ .enable_reg = 0x29028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie1_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_rchng_clk = {
+ .halt_reg = 0x2a028,
+ .clkr = {
+ .enable_reg = 0x2a028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie2_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_rchng_clk = {
+ .halt_reg = 0x2b028,
+ .clkr = {
+ .enable_reg = 0x2b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
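+/*
+ * Index table mapping the clock IDs from the IPQ5424 GCC dt-bindings
+ * header to their clk_regmap instances; consumer phandle arguments are
+ * resolved through this array by the common qcom clock code.
+ */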
+static struct clk_regmap *gcc_ipq5424_clocks[] = {
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+ [GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr,
+ [GCC_CNOC_PCIE0_1LANE_S_CLK] = &gcc_cnoc_pcie0_1lane_s_clk.clkr,
+ [GCC_CNOC_PCIE1_1LANE_S_CLK] = &gcc_cnoc_pcie1_1lane_s_clk.clkr,
+ [GCC_CNOC_PCIE2_2LANE_S_CLK] = &gcc_cnoc_pcie2_2lane_s_clk.clkr,
+ [GCC_CNOC_PCIE3_2LANE_S_CLK] = &gcc_cnoc_pcie3_2lane_s_clk.clkr,
+ [GCC_CNOC_USB_CLK] = &gcc_cnoc_usb_clk.clkr,
+ [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+ [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
+ [GCC_NSS_TS_CLK_SRC] = &gcc_nss_ts_clk_src.clkr,
+ [GCC_NSSCC_CLK] = &gcc_nsscc_clk.clkr,
+ [GCC_NSSCFG_CLK] = &gcc_nsscfg_clk.clkr,
+ [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
+ [GCC_NSSNOC_NSSCC_CLK] = &gcc_nssnoc_nsscc_clk.clkr,
+ [GCC_NSSNOC_PCNOC_1_CLK] = &gcc_nssnoc_pcnoc_1_clk.clkr,
+ [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+ [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
+ [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+ [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+ [GCC_NSSNOC_XO_DCD_CLK] = &gcc_nssnoc_xo_dcd_clk.clkr,
+ [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+ [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+ [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+ [GCC_PCIE0_AXI_M_CLK_SRC] = &gcc_pcie0_axi_m_clk_src.clkr,
+ [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK_SRC] = &gcc_pcie0_axi_s_clk_src.clkr,
+ [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
+ [GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr,
+ [GCC_PCIE0_PIPE_CLK_SRC] = &gcc_pcie0_pipe_clk_src.clkr,
+ [GCC_PCIE0_RCHNG_CLK_SRC] = &gcc_pcie0_rchng_clk_src.clkr,
+ [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+ [GCC_PCIE1_AHB_CLK] = &gcc_pcie1_ahb_clk.clkr,
+ [GCC_PCIE1_AUX_CLK] = &gcc_pcie1_aux_clk.clkr,
+ [GCC_PCIE1_AXI_M_CLK] = &gcc_pcie1_axi_m_clk.clkr,
+ [GCC_PCIE1_AXI_M_CLK_SRC] = &gcc_pcie1_axi_m_clk_src.clkr,
+ [GCC_PCIE1_AXI_S_BRIDGE_CLK] = &gcc_pcie1_axi_s_bridge_clk.clkr,
+ [GCC_PCIE1_AXI_S_CLK] = &gcc_pcie1_axi_s_clk.clkr,
+ [GCC_PCIE1_AXI_S_CLK_SRC] = &gcc_pcie1_axi_s_clk_src.clkr,
+ [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
+ [GCC_ANOC_PCIE1_1LANE_M_CLK] = &gcc_anoc_pcie1_1lane_m_clk.clkr,
+ [GCC_PCIE1_PIPE_CLK_SRC] = &gcc_pcie1_pipe_clk_src.clkr,
+ [GCC_PCIE1_RCHNG_CLK_SRC] = &gcc_pcie1_rchng_clk_src.clkr,
+ [GCC_PCIE1_RCHNG_CLK] = &gcc_pcie1_rchng_clk.clkr,
+ [GCC_PCIE2_AHB_CLK] = &gcc_pcie2_ahb_clk.clkr,
+ [GCC_PCIE2_AUX_CLK] = &gcc_pcie2_aux_clk.clkr,
+ [GCC_PCIE2_AXI_M_CLK] = &gcc_pcie2_axi_m_clk.clkr,
+ [GCC_PCIE2_AXI_M_CLK_SRC] = &gcc_pcie2_axi_m_clk_src.clkr,
+ [GCC_PCIE2_AXI_S_BRIDGE_CLK] = &gcc_pcie2_axi_s_bridge_clk.clkr,
+ [GCC_PCIE2_AXI_S_CLK] = &gcc_pcie2_axi_s_clk.clkr,
+ [GCC_PCIE2_AXI_S_CLK_SRC] = &gcc_pcie2_axi_s_clk_src.clkr,
+ [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
+ [GCC_ANOC_PCIE2_2LANE_M_CLK] = &gcc_anoc_pcie2_2lane_m_clk.clkr,
+ [GCC_PCIE2_PIPE_CLK_SRC] = &gcc_pcie2_pipe_clk_src.clkr,
+ [GCC_PCIE2_RCHNG_CLK_SRC] = &gcc_pcie2_rchng_clk_src.clkr,
+ [GCC_PCIE2_RCHNG_CLK] = &gcc_pcie2_rchng_clk.clkr,
+ [GCC_PCIE3_AHB_CLK] = &gcc_pcie3_ahb_clk.clkr,
+ [GCC_PCIE3_AUX_CLK] = &gcc_pcie3_aux_clk.clkr,
+ [GCC_PCIE3_AXI_M_CLK] = &gcc_pcie3_axi_m_clk.clkr,
+ [GCC_PCIE3_AXI_M_CLK_SRC] = &gcc_pcie3_axi_m_clk_src.clkr,
+ [GCC_PCIE3_AXI_S_BRIDGE_CLK] = &gcc_pcie3_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3_AXI_S_CLK] = &gcc_pcie3_axi_s_clk.clkr,
+ [GCC_PCIE3_AXI_S_CLK_SRC] = &gcc_pcie3_axi_s_clk_src.clkr,
+ [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
+ [GCC_ANOC_PCIE3_2LANE_M_CLK] = &gcc_anoc_pcie3_2lane_m_clk.clkr,
+ [GCC_PCIE3_PIPE_CLK_SRC] = &gcc_pcie3_pipe_clk_src.clkr,
+ [GCC_PCIE3_RCHNG_CLK_SRC] = &gcc_pcie3_rchng_clk_src.clkr,
+ [GCC_PCIE3_RCHNG_CLK] = &gcc_pcie3_rchng_clk.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QUPV3_AHB_MST_CLK] = &gcc_qupv3_ahb_mst_clk.clkr,
+ [GCC_QUPV3_AHB_SLV_CLK] = &gcc_qupv3_ahb_slv_clk.clkr,
+ [GCC_QUPV3_I2C0_CLK] = &gcc_qupv3_i2c0_clk.clkr,
+ [GCC_QUPV3_I2C0_CLK_SRC] = &gcc_qupv3_i2c0_clk_src.clkr,
+ [GCC_QUPV3_I2C0_DIV_CLK_SRC] = &gcc_qupv3_i2c0_div_clk_src.clkr,
+ [GCC_QUPV3_I2C1_CLK] = &gcc_qupv3_i2c1_clk.clkr,
+ [GCC_QUPV3_I2C1_CLK_SRC] = &gcc_qupv3_i2c1_clk_src.clkr,
+ [GCC_QUPV3_I2C1_DIV_CLK_SRC] = &gcc_qupv3_i2c1_div_clk_src.clkr,
+ [GCC_QUPV3_SPI0_CLK] = &gcc_qupv3_spi0_clk.clkr,
+ [GCC_QUPV3_SPI0_CLK_SRC] = &gcc_qupv3_spi0_clk_src.clkr,
+ [GCC_QUPV3_SPI1_CLK] = &gcc_qupv3_spi1_clk.clkr,
+ [GCC_QUPV3_SPI1_CLK_SRC] = &gcc_qupv3_spi1_clk_src.clkr,
+ [GCC_QUPV3_UART0_CLK] = &gcc_qupv3_uart0_clk.clkr,
+ [GCC_QUPV3_UART0_CLK_SRC] = &gcc_qupv3_uart0_clk_src.clkr,
+ [GCC_QUPV3_UART1_CLK] = &gcc_qupv3_uart1_clk.clkr,
+ [GCC_QUPV3_UART1_CLK_SRC] = &gcc_qupv3_uart1_clk_src.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+ [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+ [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+ [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+ [GCC_UNIPHY2_AHB_CLK] = &gcc_uniphy2_ahb_clk.clkr,
+ [GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr,
+ [GCC_UNIPHY_SYS_CLK_SRC] = &gcc_uniphy_sys_clk_src.clkr,
+ [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+ [GCC_USB0_AUX_CLK_SRC] = &gcc_usb0_aux_clk_src.clkr,
+ [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+ [GCC_USB0_MASTER_CLK_SRC] = &gcc_usb0_master_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK_SRC] = &gcc_usb0_mock_utmi_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb0_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_EUD_AT_CLK] = &gcc_usb0_eud_at_clk.clkr,
+ [GCC_USB0_PIPE_CLK_SRC] = &gcc_usb0_pipe_clk_src.clkr,
+ [GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr,
+ [GCC_USB1_MOCK_UTMI_CLK_SRC] = &gcc_usb1_mock_utmi_clk_src.clkr,
+ [GCC_USB1_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb1_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+ [GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr,
+ [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+ [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+ [GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr,
+ [GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr,
+ [GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr,
+ [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+ [GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr,
+ [GCC_CNOC_LPASS_CFG_CLK] = &gcc_cnoc_lpass_cfg_clk.clkr,
+ [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+ [GCC_SNOC_LPASS_CLK] = &gcc_snoc_lpass_clk.clkr,
+ [GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr,
+ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
+ [GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr,
+ [GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr,
+ [GCC_LPASS_SWAY_CLK_SRC] = &gcc_lpass_sway_clk_src.clkr,
+ [GCC_LPASS_AXIM_CLK_SRC] = &gcc_lpass_axim_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
+ [GCC_QPIC_IO_MACRO_CLK_SRC] = &gcc_qpic_io_macro_clk_src.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_QPIC_CLK_SRC] = &gcc_qpic_clk_src.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
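+/*
+ * Reset map entries are { register offset, bit }: the *_BCR entries
+ * assert a full block reset via bit 0 of the BCR, while the *_ARES
+ * entries poke the async-reset bit (bit 2) of an existing branch
+ * control register.
+ */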
+static const struct qcom_reset_map gcc_ipq5424_resets[] = {
+ [GCC_QUPV3_BCR] = { 0x01000, 0 },
+ [GCC_QUPV3_I2C0_BCR] = { 0x02000, 0 },
+ [GCC_QUPV3_UART0_BCR] = { 0x02020, 0 },
+ [GCC_QUPV3_I2C1_BCR] = { 0x03000, 0 },
+ [GCC_QUPV3_UART1_BCR] = { 0x03028, 0 },
+ [GCC_QUPV3_SPI0_BCR] = { 0x04000, 0 },
+ [GCC_QUPV3_SPI1_BCR] = { 0x05000, 0 },
+ [GCC_IMEM_BCR] = { 0x0e000, 0 },
+ [GCC_TME_BCR] = { 0x100000, 0 },
+ [GCC_DDRSS_BCR] = { 0x11000, 0 },
+ [GCC_PRNG_BCR] = { 0x13020, 0 },
+ [GCC_BOOT_ROM_BCR] = { 0x13028, 0 },
+ [GCC_NSS_BCR] = { 0x17000, 0 },
+ [GCC_MDIO_BCR] = { 0x1703c, 0 },
+ [GCC_UNIPHY0_BCR] = { 0x17044, 0 },
+ [GCC_UNIPHY1_BCR] = { 0x17054, 0 },
+ [GCC_UNIPHY2_BCR] = { 0x17064, 0 },
+ [GCC_WCSS_BCR] = { 0x18004, 0 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+ [GCC_TME_SEC_BUS_BCR] = { 0xa1030, 0 },
+ [GCC_ADSS_BCR] = { 0x1c000, 0 },
+ [GCC_LPASS_BCR] = { 0x27000, 0 },
+ [GCC_PCIE0_BCR] = { 0x28000, 0 },
+ [GCC_PCIE0_LINK_DOWN_BCR] = { 0x28054, 0 },
+ [GCC_PCIE0PHY_PHY_BCR] = { 0x2805c, 0 },
+ [GCC_PCIE0_PHY_BCR] = { 0x28060, 0 },
+ [GCC_PCIE1_BCR] = { 0x29000, 0 },
+ [GCC_PCIE1_LINK_DOWN_BCR] = { 0x29054, 0 },
+ [GCC_PCIE1PHY_PHY_BCR] = { 0x2905c, 0 },
+ [GCC_PCIE1_PHY_BCR] = { 0x29060, 0 },
+ [GCC_PCIE2_BCR] = { 0x2a000, 0 },
+ [GCC_PCIE2_LINK_DOWN_BCR] = { 0x2a054, 0 },
+ [GCC_PCIE2PHY_PHY_BCR] = { 0x2a05c, 0 },
+ [GCC_PCIE2_PHY_BCR] = { 0x2a060, 0 },
+ [GCC_PCIE3_BCR] = { 0x2b000, 0 },
+ [GCC_PCIE3_LINK_DOWN_BCR] = { 0x2b054, 0 },
+ [GCC_PCIE3PHY_PHY_BCR] = { 0x2b05c, 0 },
+ [GCC_PCIE3_PHY_BCR] = { 0x2b060, 0 },
+ [GCC_USB_BCR] = { 0x2c000, 0 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x2c068, 0 },
+ [GCC_USB0_PHY_BCR] = { 0x2c06c, 0 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x2c070, 0 },
+ [GCC_QDSS_BCR] = { 0x2d000, 0 },
+ [GCC_SNOC_BCR] = { 0x2e000, 0 },
+ [GCC_ANOC_BCR] = { 0x2e074, 0 },
+ [GCC_PCNOC_BCR] = { 0x31000, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x31030, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x31038, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x31040, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x31048, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x31050, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x31058, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x31060, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x31068, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x31070, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x31078, 0 },
+ [GCC_QPIC_BCR] = { 0x32000, 0 },
+ [GCC_SDCC_BCR] = { 0x33000, 0 },
+ [GCC_DCC_BCR] = { 0x35000, 0 },
+ [GCC_SPDM_BCR] = { 0x36000, 0 },
+ [GCC_MPM_BCR] = { 0x37000, 0 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x38000, 0 },
+ [GCC_RBCPR_BCR] = { 0x39000, 0 },
+ [GCC_CMN_BLK_BCR] = { 0x3a000, 0 },
+ [GCC_TCSR_BCR] = { 0x3d000, 0 },
+ [GCC_TLMM_BCR] = { 0x3e000, 0 },
+ [GCC_QUPV3_AHB_MST_ARES] = { 0x01014, 2 },
+ [GCC_QUPV3_CORE_ARES] = { 0x01018, 2 },
+ [GCC_QUPV3_2X_CORE_ARES] = { 0x01020, 2 },
+ [GCC_QUPV3_SLEEP_ARES] = { 0x01028, 2 },
+ [GCC_QUPV3_AHB_SLV_ARES] = { 0x0102c, 2 },
+ [GCC_QUPV3_I2C0_ARES] = { 0x02024, 2 },
+ [GCC_QUPV3_UART0_ARES] = { 0x02040, 2 },
+ [GCC_QUPV3_I2C1_ARES] = { 0x03024, 2 },
+ [GCC_QUPV3_UART1_ARES] = { 0x03040, 2 },
+ [GCC_QUPV3_SPI0_ARES] = { 0x04020, 2 },
+ [GCC_QUPV3_SPI1_ARES] = { 0x05020, 2 },
+ [GCC_DEBUG_ARES] = { 0x06068, 2 },
+ [GCC_GP1_ARES] = { 0x08018, 2 },
+ [GCC_GP2_ARES] = { 0x09018, 2 },
+ [GCC_GP3_ARES] = { 0x0a018, 2 },
+ [GCC_IMEM_AXI_ARES] = { 0x0e004, 2 },
+ [GCC_IMEM_CFG_AHB_ARES] = { 0x0e00c, 2 },
+ [GCC_TME_ARES] = { 0x100b4, 2 },
+ [GCC_TME_TS_ARES] = { 0x100c0, 2 },
+ [GCC_TME_SLOW_ARES] = { 0x100d0, 2 },
+ [GCC_TME_RTC_TOGGLE_ARES] = { 0x100d8, 2 },
+ [GCC_TIC_ARES] = { 0x12004, 2 },
+ [GCC_PRNG_AHB_ARES] = { 0x13024, 2 },
+ [GCC_BOOT_ROM_AHB_ARES] = { 0x1302c, 2 },
+ [GCC_NSSNOC_ATB_ARES] = { 0x17014, 2 },
+ [GCC_NSS_TS_ARES] = { 0x17018, 2 },
+ [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x1701c, 2 },
+ [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x17020, 2 },
+ [GCC_NSSNOC_MEMNOC_ARES] = { 0x17024, 2 },
+ [GCC_NSSNOC_SNOC_ARES] = { 0x17028, 2 },
+ [GCC_NSSCFG_ARES] = { 0x1702c, 2 },
+ [GCC_NSSNOC_NSSCC_ARES] = { 0x17030, 2 },
+ [GCC_NSSCC_ARES] = { 0x17034, 2 },
+ [GCC_MDIO_AHB_ARES] = { 0x17040, 2 },
+ [GCC_UNIPHY0_SYS_ARES] = { 0x17048, 2 },
+ [GCC_UNIPHY0_AHB_ARES] = { 0x1704c, 2 },
+ [GCC_UNIPHY1_SYS_ARES] = { 0x17058, 2 },
+ [GCC_UNIPHY1_AHB_ARES] = { 0x1705c, 2 },
+ [GCC_UNIPHY2_SYS_ARES] = { 0x17068, 2 },
+ [GCC_UNIPHY2_AHB_ARES] = { 0x1706c, 2 },
+ [GCC_NSSNOC_XO_DCD_ARES] = { 0x17074, 2 },
+ [GCC_NSSNOC_SNOC_1_ARES] = { 0x1707c, 2 },
+ [GCC_NSSNOC_PCNOC_1_ARES] = { 0x17080, 2 },
+ [GCC_NSSNOC_MEMNOC_1_ARES] = { 0x17084, 2 },
+ [GCC_DDRSS_ATB_ARES] = { 0x19004, 2 },
+ [GCC_DDRSS_AHB_ARES] = { 0x19008, 2 },
+ [GCC_GEMNOC_AHB_ARES] = { 0x1900c, 2 },
+ [GCC_GEMNOC_Q6_AXI_ARES] = { 0x19010, 2 },
+ [GCC_GEMNOC_NSSNOC_ARES] = { 0x19014, 2 },
+ [GCC_GEMNOC_SNOC_ARES] = { 0x19018, 2 },
+ [GCC_GEMNOC_APSS_ARES] = { 0x1901c, 2 },
+ [GCC_GEMNOC_QOSGEN_EXTREF_ARES] = { 0x19024, 2 },
+ [GCC_GEMNOC_TS_ARES] = { 0x19028, 2 },
+ [GCC_DDRSS_SMS_SLOW_ARES] = { 0x1902c, 2 },
+ [GCC_GEMNOC_CNOC_ARES] = { 0x19038, 2 },
+ [GCC_GEMNOC_XO_DBG_ARES] = { 0x19040, 2 },
+ [GCC_GEMNOC_ANOC_ARES] = { 0x19048, 2 },
+ [GCC_DDRSS_LLCC_ATB_ARES] = { 0x1904c, 2 },
+ [GCC_LLCC_TPDM_CFG_ARES] = { 0x19050, 2 },
+ [GCC_TME_BUS_ARES] = { 0x1a014, 2 },
+ [GCC_SEC_CTRL_ACC_ARES] = { 0x1a018, 2 },
+ [GCC_SEC_CTRL_ARES] = { 0x1a020, 2 },
+ [GCC_SEC_CTRL_SENSE_ARES] = { 0x1a028, 2 },
+ [GCC_SEC_CTRL_AHB_ARES] = { 0x1a038, 2 },
+ [GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES] = { 0x1a03c, 2 },
+ [GCC_ADSS_PWM_ARES] = { 0x1c00c, 2 },
+ [GCC_TME_ATB_ARES] = { 0x1e030, 2 },
+ [GCC_TME_DBGAPB_ARES] = { 0x1e034, 2 },
+ [GCC_TME_DEBUG_ARES] = { 0x1e038, 2 },
+ [GCC_TME_AT_ARES] = { 0x1e03c, 2 },
+ [GCC_TME_APB_ARES] = { 0x1e040, 2 },
+ [GCC_TME_DMI_DBG_HS_ARES] = { 0x1e044, 2 },
+ [GCC_APSS_AHB_ARES] = { 0x24014, 2 },
+ [GCC_APSS_AXI_ARES] = { 0x24018, 2 },
+ [GCC_CPUSS_TRIG_ARES] = { 0x2401c, 2 },
+ [GCC_APSS_DBG_ARES] = { 0x2402c, 2 },
+ [GCC_APSS_TS_ARES] = { 0x24030, 2 },
+ [GCC_APSS_ATB_ARES] = { 0x24034, 2 },
+ [GCC_Q6_AXIM_ARES] = { 0x2500c, 2 },
+ [GCC_Q6_AXIS_ARES] = { 0x25010, 2 },
+ [GCC_Q6_AHB_ARES] = { 0x25014, 2 },
+ [GCC_Q6_AHB_S_ARES] = { 0x25018, 2 },
+ [GCC_Q6SS_ATBM_ARES] = { 0x2501c, 2 },
+ [GCC_Q6_TSCTR_1TO2_ARES] = { 0x25020, 2 },
+ [GCC_Q6SS_PCLKDBG_ARES] = { 0x25024, 2 },
+ [GCC_Q6SS_TRIG_ARES] = { 0x25028, 2 },
+ [GCC_Q6SS_BOOT_CBCR_ARES] = { 0x2502c, 2 },
+ [GCC_WCSS_DBG_IFC_APB_ARES] = { 0x25038, 2 },
+ [GCC_WCSS_DBG_IFC_ATB_ARES] = { 0x2503c, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_ARES] = { 0x25040, 2 },
+ [GCC_WCSS_DBG_IFC_DAPBUS_ARES] = { 0x25044, 2 },
+ [GCC_WCSS_DBG_IFC_APB_BDG_ARES] = { 0x25048, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_BDG_ARES] = { 0x25050, 2 },
+ [GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES] = { 0x25054, 2 },
+ [GCC_WCSS_ECAHB_ARES] = { 0x25058, 2 },
+ [GCC_WCSS_ACMT_ARES] = { 0x2505c, 2 },
+ [GCC_WCSS_AHB_S_ARES] = { 0x25060, 2 },
+ [GCC_WCSS_AXI_M_ARES] = { 0x25064, 2 },
+ [GCC_PCNOC_WAPSS_ARES] = { 0x25080, 2 },
+ [GCC_SNOC_WAPSS_ARES] = { 0x25090, 2 },
+ [GCC_LPASS_SWAY_ARES] = { 0x27014, 2 },
+ [GCC_LPASS_CORE_AXIM_ARES] = { 0x27018, 2 },
+ [GCC_PCIE0_AHB_ARES] = { 0x28030, 2 },
+ [GCC_PCIE0_AXI_M_ARES] = { 0x28038, 2 },
+ [GCC_PCIE0_AXI_S_ARES] = { 0x28040, 2 },
+ [GCC_PCIE0_AXI_S_BRIDGE_ARES] = { 0x28048, 2 },
+ [GCC_PCIE0_PIPE_ARES] = { 0x28068, 2 },
+ [GCC_PCIE0_AUX_ARES] = { 0x28070, 2 },
+ [GCC_PCIE1_AHB_ARES] = { 0x29030, 2 },
+ [GCC_PCIE1_AXI_M_ARES] = { 0x29038, 2 },
+ [GCC_PCIE1_AXI_S_ARES] = { 0x29040, 2 },
+ [GCC_PCIE1_AXI_S_BRIDGE_ARES] = { 0x29048, 2 },
+ [GCC_PCIE1_PIPE_ARES] = { 0x29068, 2 },
+ [GCC_PCIE1_AUX_ARES] = { 0x29074, 2 },
+ [GCC_PCIE2_AHB_ARES] = { 0x2a030, 2 },
+ [GCC_PCIE2_AXI_M_ARES] = { 0x2a038, 2 },
+ [GCC_PCIE2_AXI_S_ARES] = { 0x2a040, 2 },
+ [GCC_PCIE2_AXI_S_BRIDGE_ARES] = { 0x2a048, 2 },
+ [GCC_PCIE2_PIPE_ARES] = { 0x2a068, 2 },
+ [GCC_PCIE2_AUX_ARES] = { 0x2a078, 2 },
+ [GCC_PCIE3_AHB_ARES] = { 0x2b030, 2 },
+ [GCC_PCIE3_AXI_M_ARES] = { 0x2b038, 2 },
+ [GCC_PCIE3_AXI_S_ARES] = { 0x2b040, 2 },
+ [GCC_PCIE3_AXI_S_BRIDGE_ARES] = { 0x2b048, 2 },
+ [GCC_PCIE3_PIPE_ARES] = { 0x2b068, 2 },
+ [GCC_PCIE3_AUX_ARES] = { 0x2b07c, 2 },
+ [GCC_USB0_MASTER_ARES] = { 0x2c044, 2 },
+ [GCC_USB0_AUX_ARES] = { 0x2c04c, 2 },
+ [GCC_USB0_MOCK_UTMI_ARES] = { 0x2c050, 2 },
+ [GCC_USB0_PIPE_ARES] = { 0x2c054, 2 },
+ [GCC_USB0_SLEEP_ARES] = { 0x2c058, 2 },
+ [GCC_USB0_PHY_CFG_AHB_ARES] = { 0x2c05c, 2 },
+ [GCC_QDSS_AT_ARES] = { 0x2d034, 2 },
+ [GCC_QDSS_STM_ARES] = { 0x2d03c, 2 },
+ [GCC_QDSS_TRACECLKIN_ARES] = { 0x2d040, 2 },
+ [GCC_QDSS_TSCTR_DIV2_ARES] = { 0x2d044, 2 },
+ [GCC_QDSS_TSCTR_DIV3_ARES] = { 0x2d048, 2 },
+ [GCC_QDSS_TSCTR_DIV4_ARES] = { 0x2d04c, 2 },
+ [GCC_QDSS_TSCTR_DIV8_ARES] = { 0x2d050, 2 },
+ [GCC_QDSS_TSCTR_DIV16_ARES] = { 0x2d054, 2 },
+ [GCC_QDSS_DAP_ARES] = { 0x2d058, 2 },
+ [GCC_QDSS_APB2JTAG_ARES] = { 0x2d05c, 2 },
+ [GCC_QDSS_ETR_USB_ARES] = { 0x2d060, 2 },
+ [GCC_QDSS_DAP_AHB_ARES] = { 0x2d064, 2 },
+ [GCC_QDSS_CFG_AHB_ARES] = { 0x2d068, 2 },
+ [GCC_QDSS_EUD_AT_ARES] = { 0x2d06c, 2 },
+ [GCC_QDSS_TS_ARES] = { 0x2d078, 2 },
+ [GCC_QDSS_USB_ARES] = { 0x2d07c, 2 },
+ [GCC_SYS_NOC_AXI_ARES] = { 0x2e01c, 2 },
+ [GCC_SNOC_QOSGEN_EXTREF_ARES] = { 0x2e020, 2 },
+ [GCC_CNOC_LPASS_CFG_ARES] = { 0x2e028, 2 },
+ [GCC_SYS_NOC_AT_ARES] = { 0x2e038, 2 },
+ [GCC_SNOC_PCNOC_AHB_ARES] = { 0x2e03c, 2 },
+ [GCC_SNOC_TME_ARES] = { 0x2e05c, 2 },
+ [GCC_SNOC_XO_DCD_ARES] = { 0x2e060, 2 },
+ [GCC_SNOC_TS_ARES] = { 0x2e068, 2 },
+ [GCC_ANOC0_AXI_ARES] = { 0x2e078, 2 },
+ [GCC_ANOC_PCIE0_1LANE_M_ARES] = { 0x2e07c, 2 },
+ [GCC_ANOC_PCIE2_2LANE_M_ARES] = { 0x2e080, 2 },
+ [GCC_ANOC_PCIE1_1LANE_M_ARES] = { 0x2e084, 2 },
+ [GCC_ANOC_PCIE3_2LANE_M_ARES] = { 0x2e090, 2 },
+ [GCC_ANOC_PCNOC_AHB_ARES] = { 0x2e094, 2 },
+ [GCC_ANOC_QOSGEN_EXTREF_ARES] = { 0x2e098, 2 },
+ [GCC_ANOC_XO_DCD_ARES] = { 0x2e09c, 2 },
+ [GCC_SNOC_XO_DBG_ARES] = { 0x2e0a0, 2 },
+ [GCC_AGGRNOC_ATB_ARES] = { 0x2e0ac, 2 },
+ [GCC_AGGRNOC_TS_ARES] = { 0x2e0b0, 2 },
+ [GCC_USB0_EUD_AT_ARES] = { 0x30004, 2 },
+ [GCC_PCNOC_TIC_ARES] = { 0x31014, 2 },
+ [GCC_PCNOC_AHB_ARES] = { 0x31018, 2 },
+ [GCC_PCNOC_XO_DBG_ARES] = { 0x3101c, 2 },
+ [GCC_SNOC_LPASS_ARES] = { 0x31020, 2 },
+ [GCC_PCNOC_AT_ARES] = { 0x31024, 2 },
+ [GCC_PCNOC_XO_DCD_ARES] = { 0x31028, 2 },
+ [GCC_PCNOC_TS_ARES] = { 0x3102c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES] = { 0x31034, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES] = { 0x3103c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES] = { 0x31044, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES] = { 0x3104c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES] = { 0x31054, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES] = { 0x3105c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES] = { 0x31064, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES] = { 0x3106c, 2 },
+ [GCC_Q6_AXIM_RESET] = { 0x2506c, 0 },
+ [GCC_Q6_AXIS_RESET] = { 0x2506c, 1 },
+ [GCC_Q6_AHB_S_RESET] = { 0x2506c, 2 },
+ [GCC_Q6_AHB_RESET] = { 0x2506c, 3 },
+ [GCC_Q6SS_DBG_RESET] = { 0x2506c, 4 },
+ [GCC_WCSS_ECAHB_RESET] = { 0x25070, 0 },
+ [GCC_WCSS_DBG_BDG_RESET] = { 0x25070, 1 },
+ [GCC_WCSS_DBG_RESET] = { 0x25070, 2 },
+ [GCC_WCSS_AXI_M_RESET] = { 0x25070, 3 },
+ [GCC_WCSS_AHB_S_RESET] = { 0x25070, 4 },
+ [GCC_WCSS_ACMT_RESET] = { 0x25070, 5 },
+ [GCC_WCSSAON_RESET] = { 0x25074, 0 },
+ [GCC_PCIE0_PIPE_RESET] = { 0x28058, 0 },
+ [GCC_PCIE0_CORE_STICKY_RESET] = { 0x28058, 1 },
+ [GCC_PCIE0_AXI_S_STICKY_RESET] = { 0x28058, 2 },
+ [GCC_PCIE0_AXI_S_RESET] = { 0x28058, 3 },
+ [GCC_PCIE0_AXI_M_STICKY_RESET] = { 0x28058, 4 },
+ [GCC_PCIE0_AXI_M_RESET] = { 0x28058, 5 },
+ [GCC_PCIE0_AUX_RESET] = { 0x28058, 6 },
+ [GCC_PCIE0_AHB_RESET] = { 0x28058, 7 },
+ [GCC_PCIE1_PIPE_RESET] = { 0x29058, 0 },
+ [GCC_PCIE1_CORE_STICKY_RESET] = { 0x29058, 1 },
+ [GCC_PCIE1_AXI_S_STICKY_RESET] = { 0x29058, 2 },
+ [GCC_PCIE1_AXI_S_RESET] = { 0x29058, 3 },
+ [GCC_PCIE1_AXI_M_STICKY_RESET] = { 0x29058, 4 },
+ [GCC_PCIE1_AXI_M_RESET] = { 0x29058, 5 },
+ [GCC_PCIE1_AUX_RESET] = { 0x29058, 6 },
+ [GCC_PCIE1_AHB_RESET] = { 0x29058, 7 },
+ [GCC_PCIE2_PIPE_RESET] = { 0x2a058, 0 },
+ [GCC_PCIE2_CORE_STICKY_RESET] = { 0x2a058, 1 },
+ [GCC_PCIE2_AXI_S_STICKY_RESET] = { 0x2a058, 2 },
+ [GCC_PCIE2_AXI_S_RESET] = { 0x2a058, 3 },
+ [GCC_PCIE2_AXI_M_STICKY_RESET] = { 0x2a058, 4 },
+ [GCC_PCIE2_AXI_M_RESET] = { 0x2a058, 5 },
+ [GCC_PCIE2_AUX_RESET] = { 0x2a058, 6 },
+ [GCC_PCIE2_AHB_RESET] = { 0x2a058, 7 },
+ [GCC_PCIE3_PIPE_RESET] = { 0x2b058, 0 },
+ [GCC_PCIE3_CORE_STICKY_RESET] = { 0x2b058, 1 },
+ [GCC_PCIE3_AXI_S_STICKY_RESET] = { 0x2b058, 2 },
+ [GCC_PCIE3_AXI_S_RESET] = { 0x2b058, 3 },
+ [GCC_PCIE3_AXI_M_STICKY_RESET] = { 0x2b058, 4 },
+ [GCC_PCIE3_AXI_M_RESET] = { 0x2b058, 5 },
+ [GCC_PCIE3_AUX_RESET] = { 0x2b058, 6 },
+ [GCC_PCIE3_AHB_RESET] = { 0x2b058, 7 },
+ [GCC_NSS_PARTIAL_RESET] = { 0x17078, 0 },
+ [GCC_UNIPHY0_XPCS_ARES] = { 0x17050, 2 },
+ [GCC_UNIPHY1_XPCS_ARES] = { 0x17060, 2 },
+ [GCC_UNIPHY2_XPCS_ARES] = { 0x17070, 2 },
+ [GCC_USB1_BCR] = { 0x3c000, 0 },
+ [GCC_QUSB2_1_PHY_BCR] = { 0x3c030, 0 },
+};
+
+#define IPQ_APPS_ID 5424 /* some unique value */
+
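+/* Map interconnect master/slave pairs to the GCC bus clocks backing them (clk-based icc provider). */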
+static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
+ { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE0, SLAVE_CNOC_PCIE0, GCC_CNOC_PCIE0_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE1, SLAVE_CNOC_PCIE1, GCC_CNOC_PCIE1_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE2, SLAVE_CNOC_PCIE2, GCC_CNOC_PCIE2_2LANE_S_CLK },
+ { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+};
+
+static const struct of_device_id gcc_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq5424_match_table);
+
+static const struct regmap_config gcc_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3f024,
+ .fast_io = true,
+};
+
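+/* Always-derived helper clocks exposed as bare clk_hw entries, registered alongside the regmap clocks. */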
+static struct clk_hw *gcc_ipq5424_hws[] = {
+ &gpll0_div2.hw,
+ &gcc_xo_div4_clk_src.hw,
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ &gcc_qdss_dap_sync_clk_src.hw,
+ &gcc_eud_at_div_clk_src.hw,
+};
+
+static const struct qcom_cc_desc gcc_ipq5424_desc = {
+ .config = &gcc_ipq5424_regmap_config,
+ .clks = gcc_ipq5424_clocks,
+ .num_clks = ARRAY_SIZE(gcc_ipq5424_clocks),
+ .resets = gcc_ipq5424_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq5424_resets),
+ .clk_hws = gcc_ipq5424_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
+ .icc_hws = icc_ipq5424_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
+};
+
+static int gcc_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_ipq5424_desc);
+}
+
+static struct platform_driver gcc_ipq5424_driver = {
+ .probe = gcc_ipq5424_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq5424",
+ .of_match_table = gcc_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init gcc_ipq5424_init(void)
+{
+ return platform_driver_register(&gcc_ipq5424_driver);
+}
+core_initcall(gcc_ipq5424_init);
+
+static void __exit gcc_ipq5424_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq5424_driver);
+}
+module_exit(gcc_ipq5424_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ5424 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index ab0f7fc665a9..d4fc491a18b2 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -511,15 +511,23 @@ static struct clk_rcg2 apss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_RX, 5, 0, 0),
- F(78125000, P_UNIPHY1_RX, 4, 0, 0),
- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_RX, 1, 0, 0),
- F(156250000, P_UNIPHY1_RX, 2, 0, 0),
- F(312500000, P_UNIPHY1_RX, 1, 0, 0),
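+/*
+ * 25 MHz and 125 MHz are reachable from more than one UNIPHY parent, so
+ * list the alternatives as freq_conf entries for clk_rcg2_fm_ops to pick from.
+ */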
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_RX, 12.5, 0, 0),
+ C(P_UNIPHY0_RX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_RX, 2.5, 0, 0),
+ C(P_UNIPHY0_RX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
};
@@ -547,26 +555,34 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_rx_clk_src = {
.cmd_rcgr = 0x68060,
- .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_rx_clk_src",
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
-static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_TX, 5, 0, 0),
- F(78125000, P_UNIPHY1_TX, 4, 0, 0),
- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_TX, 1, 0, 0),
- F(156250000, P_UNIPHY1_TX, 2, 0, 0),
- F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_TX, 12.5, 0, 0),
+ C(P_UNIPHY0_TX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_TX, 2.5, 0, 0),
+ C(P_UNIPHY0_TX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
};
@@ -594,14 +610,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_tx_clk_src = {
.cmd_rcgr = 0x68068,
- .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_tx_clk_src",
.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
@@ -4194,10 +4210,9 @@ static const struct alpha_pll_config ubi32_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+/* 1200 MHz configuration */
static const struct alpha_pll_config nss_crypto_pll_config = {
.l = 0x32,
- .alpha = 0x0,
- .alpha_hi = 0x0,
.config_ctl_val = 0x4001055b,
.main_output_mask = BIT(0),
.pre_div_val = 0x0,
@@ -4206,7 +4221,6 @@ static const struct alpha_pll_config nss_crypto_pll_config = {
.post_div_mask = GENMASK(11, 8),
.vco_mask = GENMASK(21, 20),
.vco_val = 0x0,
- .alpha_en_mask = BIT(24),
};
static struct clk_hw *gcc_ipq6018_hws[] = {
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 7258ba5c0900..1329ea28d703 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
FMS(19200000, P_XO, 1, 0, 0),
FM(25000000, ftbl_nss_port6_tx_clk_src_25),
- FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
FM(125000000, ftbl_nss_port6_tx_clk_src_125),
- FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
- FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
+ FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 645109f75b46..6dc86e686de4 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -108,6 +108,20 @@ static struct clk_alpha_pll_postdiv gpll0 = {
},
};
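+/* Aux tap off gpll0_main; the read-only postdiv ops report the divider without reprogramming it. */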
+static struct clk_alpha_pll_postdiv gpll0_out_aux = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_out_aux",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
static struct clk_alpha_pll gpll4_main = {
.offset = 0x22000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
@@ -2645,24 +2659,6 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
},
};
-static struct clk_branch gcc_q6ss_boot_clk = {
- .halt_reg = 0x25080,
- .halt_check = BRANCH_HALT_SKIP,
- .clkr = {
- .enable_reg = 0x25080,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_boot_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &system_noc_bfdcd_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_nssnoc_snoc_clk = {
.halt_reg = 0x17028,
.clkr = {
@@ -2733,91 +2729,6 @@ static struct clk_rcg2 wcss_ahb_clk_src = {
},
};
-static struct clk_branch gcc_q6_ahb_clk = {
- .halt_reg = 0x25014,
- .clkr = {
- .enable_reg = 0x25014,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_ahb_s_clk = {
- .halt_reg = 0x25018,
- .clkr = {
- .enable_reg = 0x25018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_s_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_ecahb_clk = {
- .halt_reg = 0x25058,
- .clkr = {
- .enable_reg = 0x25058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_ecahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_acmt_clk = {
- .halt_reg = 0x2505c,
- .clkr = {
- .enable_reg = 0x2505c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_acmt_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
- .halt_reg = 0x2e030,
- .clkr = {
- .enable_reg = 0x2e030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_sys_noc_wcss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_wcss_axi_m_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
F(133333333, P_GPLL0, 6, 0, 0),
@@ -2838,23 +2749,6 @@ static struct clk_rcg2 wcss_axi_m_clk_src = {
},
};
-static struct clk_branch gcc_anoc_wcss_axi_m_clk = {
- .halt_reg = 0x2e0a8,
- .clkr = {
- .enable_reg = 0x2e0a8,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_anoc_wcss_axi_m_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_axi_m_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
F(240000000, P_GPLL4, 5, 0, 0),
{ }
@@ -2873,40 +2767,6 @@ static struct clk_rcg2 qdss_at_clk_src = {
},
};
-static struct clk_branch gcc_q6ss_atbm_clk = {
- .halt_reg = 0x2501c,
- .clkr = {
- .enable_reg = 0x2501c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_atbm_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_at_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
- .halt_reg = 0x2503c,
- .clkr = {
- .enable_reg = 0x2503c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_at_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_nssnoc_atb_clk = {
.halt_reg = 0x17014,
.clkr = {
@@ -3143,40 +3003,6 @@ static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
},
};
-static struct clk_branch gcc_q6_tsctr_1to2_clk = {
- .halt_reg = 0x25020,
- .clkr = {
- .enable_reg = 0x25020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_tsctr_1to2_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_tsctr_div2_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
- .halt_reg = 0x25040,
- .clkr = {
- .enable_reg = 0x25040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_tsctr_div2_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_tsctr_div2_clk = {
.halt_reg = 0x2d044,
.clkr = {
@@ -3351,74 +3177,6 @@ static struct clk_branch gcc_qdss_tsctr_div16_clk = {
},
};
-static struct clk_branch gcc_q6ss_pclkdbg_clk = {
- .halt_reg = 0x25024,
- .clkr = {
- .enable_reg = 0x25024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_pclkdbg_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_trig_clk = {
- .halt_reg = 0x25068,
- .clkr = {
- .enable_reg = 0x25068,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_trig_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
- .halt_reg = 0x25038,
- .clkr = {
- .enable_reg = 0x25038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = {
- .halt_reg = 0x25044,
- .clkr = {
- .enable_reg = 0x25044,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_dapbus_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_dap_clk = {
.halt_reg = 0x2d058,
.clkr = {
@@ -3540,58 +3298,6 @@ static struct clk_rcg2 q6_axi_clk_src = {
},
};
-static struct clk_branch gcc_q6_axim_clk = {
- .halt_reg = 0x2500c,
- .clkr = {
- .enable_reg = 0x2500c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axim_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_q6_tbu_clk = {
- .halt_reg = 0x12050,
- .halt_check = BRANCH_HALT_DELAY,
- .clkr = {
- .enable_reg = 0xb00c,
- .enable_mask = BIT(6),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_q6_tbu_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_mem_noc_q6_axi_clk = {
- .halt_reg = 0x19010,
- .clkr = {
- .enable_reg = 0x19010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_mem_noc_q6_axi_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_q6_axim2_clk_src[] = {
F(342857143, P_GPLL4, 3.5, 0, 0),
{ }
@@ -4141,16 +3847,8 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
[GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
[WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
- [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
- [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
- [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
- [GCC_WCSS_ACMT_CLK] = &gcc_wcss_acmt_clk.clkr,
- [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
[WCSS_AXI_M_CLK_SRC] = &wcss_axi_m_clk_src.clkr,
- [GCC_ANOC_WCSS_AXI_M_CLK] = &gcc_anoc_wcss_axi_m_clk.clkr,
[QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
- [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
[GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
@@ -4163,27 +3861,18 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
[GCC_QDSS_TRACECLKIN_CLK] = &gcc_qdss_traceclkin_clk.clkr,
[QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
- [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
[GCC_QDSS_TSCTR_DIV2_CLK] = &gcc_qdss_tsctr_div2_clk.clkr,
[GCC_QDSS_TS_CLK] = &gcc_qdss_ts_clk.clkr,
[GCC_QDSS_TSCTR_DIV4_CLK] = &gcc_qdss_tsctr_div4_clk.clkr,
[GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
[GCC_QDSS_TSCTR_DIV8_CLK] = &gcc_qdss_tsctr_div8_clk.clkr,
[GCC_QDSS_TSCTR_DIV16_CLK] = &gcc_qdss_tsctr_div16_clk.clkr,
- [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
- [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
- [GCC_WCSS_DBG_IFC_DAPBUS_CLK] = &gcc_wcss_dbg_ifc_dapbus_clk.clkr,
[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
[GCC_QDSS_APB2JTAG_CLK] = &gcc_qdss_apb2jtag_clk.clkr,
[GCC_QDSS_TSCTR_DIV3_CLK] = &gcc_qdss_tsctr_div3_clk.clkr,
[QPIC_IO_MACRO_CLK_SRC] = &qpic_io_macro_clk_src.clkr,
[GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
[Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
- [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
- [GCC_WCSS_Q6_TBU_CLK] = &gcc_wcss_q6_tbu_clk.clkr,
- [GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr,
[Q6_AXIM2_CLK_SRC] = &q6_axim2_clk_src.clkr,
[NSSNOC_MEMNOC_BFDCD_CLK_SRC] = &nssnoc_memnoc_bfdcd_clk_src.clkr,
[GCC_NSSNOC_MEMNOC_CLK] = &gcc_nssnoc_memnoc_clk.clkr,
@@ -4207,7 +3896,6 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
[GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr,
[GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
- [GCC_Q6SS_BOOT_CLK] = &gcc_q6ss_boot_clk.clkr,
[UNIPHY_SYS_CLK_SRC] = &uniphy_sys_clk_src.clkr,
[NSS_TS_CLK_SRC] = &nss_ts_clk_src.clkr,
[GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr,
@@ -4222,6 +3910,7 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
[GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
[GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
+ [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
};
static const struct qcom_reset_map gcc_ipq9574_resets[] = {
@@ -4384,7 +4073,7 @@ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
#define IPQ_APPS_ID 9574 /* some unique value */
-static struct qcom_icc_hws_data icc_ipq9574_hws[] = {
+static const struct qcom_icc_hws_data icc_ipq9574_hws[] = {
{ MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
{ MASTER_SNOC_PCIE0, SLAVE_SNOC_PCIE0, GCC_SNOC_PCIE0_1LANE_S_CLK },
{ MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
index 6e6068b168e6..07f1b78d737a 100644
--- a/drivers/clk/qcom/gcc-mdm9607.c
+++ b/drivers/clk/qcom/gcc-mdm9607.c
@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
};
static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
- .cmd_rcgr = 0x6044,
+ .cmd_rcgr = 0x7044,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
diff --git a/drivers/clk/qcom/gcc-milos.c b/drivers/clk/qcom/gcc-milos.c
new file mode 100644
index 000000000000..c9d61b05bafa
--- /dev/null
+++ b/drivers/clk/qcom/gcc-milos.c
@@ -0,0 +1,3225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+ DT_UFS_PHY_RX_SYMBOL_0,
+ DT_UFS_PHY_RX_SYMBOL_1,
+ DT_UFS_PHY_TX_SYMBOL_0,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL0_OUT_ODD,
+ P_GCC_GPLL2_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL6_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
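+/*
+ * Each parent_map pairs a P_* source with its mux select value; the matching
+ * clk_parent_data array resolves the same positions to DT clocks or clk_hws.
+ */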
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL7_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_PCIE_0_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_PCIE_1_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL2_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
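+/*
+ * PIPE/symbol clocks are sourced from the PHYs; these muxes let GCC fall
+ * back to TCXO while the corresponding PHY is powered down.
+ */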
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9006c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
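+/* clk_rcg2_shared_ops parks the RCG on XO while the clock is disabled. */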
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x90070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x90054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x18768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x18290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(128000000, P_GCC_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x183c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x18500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x18638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x1e768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x1e290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x1e500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x1e638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
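+/*
+ * Frequency-table rows are F(rate, parent, pre_div, m, n); the qcom F()
+ * macro encodes the pre-divider as 2 * pre_div - 1, which is also how
+ * half-integer dividers (e.g. 4.5 in the USB30 master table) fit in an
+ * integer field. m/n = 0/0 bypasses the M/N counter. For example,
+ * F(144000, P_BI_TCXO, 16, 3, 25) is 19.2 MHz / 16 * 3/25 = 144 kHz.
+ */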
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
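+/*
+ * clk_rcg2_shared_ops parks the RCG on a safe (XO-fed) configuration
+ * while the clock is disabled, for RCGs that firmware or other masters
+ * may also depend on.
+ */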
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xa3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
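+/* mnd_width = 0: no M/N counter, integer pre-divider only. */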
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0xa3038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
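+/*
+ * Read-only post-dividers: clk_regmap_div_ro_ops only reports the
+ * divider value programmed by boot firmware/hardware, and
+ * CLK_SET_RATE_PARENT forwards any rate request to the parent.
+ */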
+static struct clk_regmap_div gcc_pcie_0_pipe_div2_clk_src = {
+ .reg = 0x6b094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div2_clk_src = {
+ .reg = 0x90090,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x18280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1e280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
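+/*
+ * Branch (gate) clocks. halt_check selects how enablement is verified:
+ * BRANCH_HALT polls the CBCR halt bit; BRANCH_HALT_VOTED marks enables
+ * that are shared with other voters (here via 0x52000/0x52008/0x52010/
+ * 0x52018 or a votable CBCR), so the halt bit is only trusted when
+ * enabling; BRANCH_HALT_SKIP never polls; BRANCH_HALT_DELAY has no
+ * status bit and just waits.
+ */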
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1005c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
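+/*
+ * The *_hw_ctl_clk variants share a CBCR with their base clock; bit 1
+ * is the HW_CTL field, which appears to delegate gating of that clock
+ * to the UFS controller hardware.
+ */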
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7115c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7115c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7115c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1006c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1006c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_div2_clk = {
+ .halt_reg = 0x6b098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x90038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x90034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x90034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x90028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x90028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x90050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x90044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_div2_clk = {
+ .halt_reg = 0x90094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9001c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9001c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x11004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_ref_clk = {
+ .halt_reg = 0x18764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x18284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x183bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x184f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1862c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23168,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23158,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x1e764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1e284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x1e3bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1e62c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xa3004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xa3008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0xa302c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa302c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_atb_clk = {
+ .halt_reg = 0x39088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x39088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x39068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
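+/*
+ * GDSCs (globally distributed switch controllers) implement the power
+ * domains for the blocks above. VOTABLE domains collapse through the
+ * shared collapse_ctrl vote register, POLL_CFG_GDSCR polls the CFG
+ * register for the resulting power state, and RETAIN_FF_ENABLE keeps
+ * register state intact across a collapse.
+ */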
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(1),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x90004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(3),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0xa2000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(4),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x5000c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
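+/* Indexed by the GCC_* constants from the Milos GCC DT binding header. */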
+static struct clk_regmap *gcc_milos_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL2] = &gcc_gpll2.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL6] = &gcc_gpll6.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK] = &gcc_pcie_0_pipe_div2_clk.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK_SRC] = &gcc_pcie_0_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK] = &gcc_pcie_1_pipe_div2_clk.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK_SRC] = &gcc_pcie_1_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK] = &gcc_qupv3_wrap0_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_ATB_CLK] = &gcc_usb30_prim_atb_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_milos_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x90000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e024 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC1_BCR] = { 0xa3000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_milos_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+};
+
+static struct gdsc *gcc_milos_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [UFS_MEM_PHY_GDSC] = &ufs_mem_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static u32 gcc_milos_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26018, /* GCC_CAMERA_HF_XO_CLK */
+ 0x2601c, /* GCC_CAMERA_SF_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x27018, /* GCC_DISP_XO_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32024, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gcc_milos_driver_data = {
+ .clk_cbcrs = gcc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_milos_critical_cbcrs),
+ .dfs_rcgs = gcc_milos_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_milos_dfs_clocks),
+};
+
+static const struct qcom_cc_desc gcc_milos_desc = {
+ .config = &gcc_milos_regmap_config,
+ .clks = gcc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gcc_milos_clocks),
+ .resets = gcc_milos_resets,
+ .num_resets = ARRAY_SIZE(gcc_milos_resets),
+ .gdscs = gcc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gcc_milos_driver_data,
+};
+
+static const struct of_device_id gcc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_milos_match_table);
+
+static int gcc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_milos_desc);
+}
+
+static struct platform_driver gcc_milos_driver = {
+ .probe = gcc_milos_probe,
+ .driver = {
+ .name = "gcc-milos",
+ .of_match_table = gcc_milos_match_table,
+ },
+};
+
+static int __init gcc_milos_init(void)
+{
+ return platform_driver_register(&gcc_milos_driver);
+}
+subsys_initcall(gcc_milos_init);
+
+static void __exit gcc_milos_exit(void)
+{
+ platform_driver_unregister(&gcc_milos_driver);
+}
+module_exit(gcc_milos_exit);
+
+MODULE_DESCRIPTION("QTI GCC Milos Driver");
+MODULE_LICENSE("GPL");
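The Milos driver above is a thin wrapper around the common qcom_cc_probe() path: clocks, resets, GDSCs, the always-on CBCRs and the DFS-capable RCGs are all static data handed over through gcc_milos_desc. For context, a leaf driver consumes these clocks through the generic clk API; a minimal sketch, in which the probe helper and the "core" con-id are illustrative assumptions rather than anything defined by this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: its DT node would carry
 * clocks = <&gcc GCC_SDCC2_APPS_CLK>, clock-names = "core";
 * for this lookup to resolve. */
static int example_enable_sdcc2(struct device *dev)
{
	struct clk *core;
	int ret;

	core = devm_clk_get(dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	ret = clk_set_rate(core, 100000000);	/* snaps to the RCG's freq table */
	if (ret)
		return ret;

	return clk_prepare_enable(core);
}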
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
index 3e2a2ae2ee6e..0a1aa623cd49 100644
--- a/drivers/clk/qcom/gcc-msm8917.c
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -37,6 +37,8 @@ enum {
DT_SLEEP_CLK,
DT_DSI0PLL,
DT_DSI0PLL_BYTE,
+ DT_DSI1PLL,
+ DT_DSI1PLL_BYTE,
};
enum {
@@ -48,6 +50,8 @@ enum {
P_GPLL6,
P_DSI0PLL,
P_DSI0PLL_BYTE,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
};
static struct clk_alpha_pll gpll0_sleep_clk_src = {
@@ -102,7 +106,11 @@ static const struct pll_vco gpll3_p_vco[] = {
{ 700000000, 1400000000, 0 },
};
-static const struct alpha_pll_config gpll3_early_config = {
+static const struct pll_vco gpll3_p_vco_msm8937[] = {
+ { 525000000, 1066000000, 0 },
+};
+
+static struct alpha_pll_config gpll3_early_config = {
.l = 63,
.config_ctl_val = 0x4001055b,
.early_output_mask = 0,
@@ -273,6 +281,19 @@ static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.cmd_rcgr = 0x03000,
.hid_width = 5,
@@ -351,6 +372,19 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
@@ -362,6 +396,20 @@ static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x03014,
.hid_width = 5,
@@ -446,6 +494,20 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 72, 15625),
F(7372800, P_GPLL0, 1, 144, 15625),
@@ -525,11 +587,19 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
static const struct parent_map gcc_byte0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 3 },
+};
+
+static const struct parent_map gcc_byte1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
};
static const struct clk_parent_data gcc_byte_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL_BYTE },
+ { .index = DT_DSI1PLL_BYTE },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -545,6 +615,19 @@ static struct clk_rcg2 byte0_clk_src = {
}
};
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_byte1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -642,6 +725,17 @@ static const struct freq_tbl ftbl_cpp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_cpp_clk_src_msm8937[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cpp_clk_src = {
.cmd_rcgr = 0x58018,
.hid_width = 5,
@@ -655,6 +749,13 @@ static struct clk_rcg2 cpp_clk_src = {
}
};
+static struct clk_init_data vcodec0_clk_src_init_msm8937 = {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(50000000, P_GPLL0, 16, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -730,6 +831,13 @@ static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_csi_phytimer_clk_src_msm8937[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
static struct clk_rcg2 csi0phytimer_clk_src = {
.cmd_rcgr = 0x4e000,
.hid_width = 5,
@@ -774,6 +882,19 @@ static struct clk_rcg2 esc0_clk_src = {
}
};
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct parent_map gcc_gfx3d_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -817,6 +938,25 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_gfx3d_clk_src_msm8937[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL3, 1, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(375000000, P_GPLL3, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(450000000, P_GPLL3, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gfx3d_clk_src = {
.cmd_rcgr = 0x59000,
.hid_width = 5,
@@ -973,21 +1113,29 @@ static struct clk_rcg2 mdp_clk_src = {
}
};
-static const struct parent_map gcc_pclk_map[] = {
+static const struct parent_map gcc_pclk0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL, 1 },
+ { P_DSI1PLL, 3 },
+};
+
+static const struct parent_map gcc_pclk1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
};
static const struct clk_parent_data gcc_pclk_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL },
+ { .index = DT_DSI1PLL },
};
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.hid_width = 5,
.mnd_width = 8,
- .parent_map = gcc_pclk_map,
+ .parent_map = gcc_pclk0_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pclk0_clk_src",
.parent_data = gcc_pclk_data,
@@ -997,6 +1145,20 @@ static struct clk_rcg2 pclk0_clk_src = {
}
};
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(64000000, P_GPLL0, 12.5, 0, 0),
{ }
@@ -1108,6 +1270,14 @@ static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_usb_hs_system_clk_src_msm8937[] = {
+ F(57142857, P_GPLL0, 14, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 usb_hs_system_clk_src = {
.cmd_rcgr = 0x41010,
.hid_width = 5,
@@ -1132,6 +1302,15 @@ static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vcodec0_clk_src_msm8937[] = {
+ F(166150000, P_GPLL6, 6.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vcodec0_clk_src = {
.cmd_rcgr = 0x4c000,
.hid_width = 5,
@@ -1160,6 +1339,23 @@ static const struct freq_tbl ftbl_vfe_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vfe_clk_src_msm8937[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(432000000, P_GPLL6, 2.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vfe0_clk_src = {
.cmd_rcgr = 0x58000,
.hid_width = 5,
@@ -1269,6 +1465,24 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
}
};
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.halt_reg = 0x03010,
.halt_check = BRANCH_HALT,
@@ -1377,6 +1591,42 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.halt_reg = 0x0300c,
.halt_check = BRANCH_HALT,
@@ -1485,6 +1735,24 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.halt_reg = 0x0203c,
.halt_check = BRANCH_HALT,
@@ -2521,6 +2789,24 @@ static struct clk_branch gcc_mdss_byte0_clk = {
}
};
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_esc0_clk = {
.halt_reg = 0x4d098,
.halt_check = BRANCH_HALT,
@@ -2539,6 +2825,24 @@ static struct clk_branch gcc_mdss_esc0_clk = {
}
};
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_mdp_clk = {
.halt_reg = 0x4d088,
.halt_check = BRANCH_HALT,
@@ -2575,6 +2879,24 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
}
};
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_vsync_clk = {
.halt_reg = 0x4d090,
.halt_check = BRANCH_HALT,
@@ -2632,6 +2954,24 @@ static struct clk_branch gcc_oxili_ahb_clk = {
}
};
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x5904c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5904c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_gfx3d_clk = {
.halt_reg = 0x59020,
.halt_check = BRANCH_HALT,
@@ -2650,6 +2990,19 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
}
};
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pdm2_clk = {
.halt_reg = 0x4400c,
.halt_check = BRANCH_HALT,
@@ -3027,6 +3380,28 @@ static struct gdsc oxili_gx_gdsc = {
.flags = CLAMP_IO,
};
+static struct gdsc oxili_gx_gdsc_msm8937 = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x59044,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc cpp_gdsc = {
.gdscr = 0x58078,
.cxcs = (unsigned int []){ 0x5803c, 0x58064 },
@@ -3207,6 +3582,188 @@ static struct clk_regmap *gcc_msm8917_clocks[] = {
[GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
};
+static struct clk_regmap *gcc_msm8937_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [MSM8937_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MSM8937_ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [MSM8937_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [MSM8937_GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [MSM8937_GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [MSM8937_GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [MSM8937_GCC_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [MSM8937_GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+};
+
static const struct qcom_reset_map gcc_msm8917_resets[] = {
[GCC_CAMSS_MICRO_BCR] = { 0x56008 },
[GCC_MSS_BCR] = { 0x71000 },
@@ -3234,6 +3791,18 @@ static struct gdsc *gcc_msm8917_gdscs[] = {
[VFE1_GDSC] = &vfe1_gdsc,
};
+static struct gdsc *gcc_msm8937_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc_msm8937,
+ [MSM8937_OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
static const struct qcom_cc_desc gcc_msm8917_desc = {
.config = &gcc_msm8917_regmap_config,
.clks = gcc_msm8917_clocks,
@@ -3254,6 +3823,41 @@ static const struct qcom_cc_desc gcc_qm215_desc = {
.num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
};
+static const struct qcom_cc_desc gcc_msm8937_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8937_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8937_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8937_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8937_gdscs),
+};
+
+static void msm8937_clock_override(void)
+{
+ /* GPLL3 750MHz configuration */
+ gpll3_early_config.l = 47;
+ gpll3_early.vco_table = gpll3_p_vco_msm8937;
+ gpll3_early.num_vco = ARRAY_SIZE(gpll3_p_vco_msm8937);
+
+	/*
+	 * Point vcodec0 at the MSM8937-specific parent map and init data.
+	 */
+ vcodec0_clk_src.parent_map = gcc_cpp_map;
+ vcodec0_clk_src.clkr.hw.init = &vcodec0_clk_src_init_msm8937;
+
+	/*
+	 * Point the clocks below at the MSM8937-specific frequency tables.
+	 */
+ vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_msm8937;
+ vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_msm8937;
+ csi0phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ csi1phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ usb_hs_system_clk_src.freq_tbl = ftbl_usb_hs_system_clk_src_msm8937;
+}
+
static int gcc_msm8917_probe(struct platform_device *pdev)
{
struct regmap *regmap;
@@ -3261,8 +3865,12 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
gcc_desc = of_device_get_match_data(&pdev->dev);
- if (gcc_desc == &gcc_qm215_desc)
+ if (gcc_desc == &gcc_qm215_desc) {
gfx3d_clk_src.parent_map = gcc_gfx3d_map_qm215;
+ } else if (gcc_desc == &gcc_msm8937_desc) {
+ msm8937_clock_override();
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_msm8937;
+ }
regmap = qcom_cc_map(pdev, gcc_desc);
if (IS_ERR(regmap))
@@ -3276,6 +3884,7 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
static const struct of_device_id gcc_msm8917_match_table[] = {
{ .compatible = "qcom,gcc-msm8917", .data = &gcc_msm8917_desc },
{ .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
+ { .compatible = "qcom,gcc-msm8937", .data = &gcc_msm8937_desc },
{},
};
MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table);
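A quick sanity check on the MSM8937-specific tables above, assuming the 800 MHz GPLL0 and 1080 MHz GPLL6 rates usual for this SoC family (F() encodes rate, source, pre-divider, M, N):

/* F(177777778, P_GPLL0, 4.5, 0, 0):  800 MHz / 4.5 = 177.777778 MHz */
/* F(308571428, P_GPLL6, 3.5, 0, 0): 1080 MHz / 3.5 = 308.571428 MHz */
/* F(432000000, P_GPLL6, 2.5, 0, 0): 1080 MHz / 2.5 = 432.000000 MHz */

The same arithmetic pins the cpp table's 200 MHz entry to a pre-divider of 4 (800 MHz / 4), since a divider of 5 would merely duplicate the 160 MHz row.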
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 7431c9a65044..45193b3d714b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
{ P_GPLL1_AUX, 2 },
- { P_GPLL6, 2 },
+ { P_GPLL6, 3 },
{ P_SLEEP_CLK, 6 },
};
@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
- F(24000000, P_GPLL0, 1, 1, 45),
+ F(24000000, P_GPLL6, 1, 1, 45),
F(66670000, P_GPLL0, 12, 0, 0),
{ }
};
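Both msm8939 fixes above are mechanical once the PLL rates are in view: mapping GPLL6 to mux index 2 collided with P_GPLL1_AUX, and with GPLL0 at 800 MHz the 24 MHz MCLK entry was unreachable, while GPLL6 at 1080 MHz lands on it exactly:

/* old: F(24000000, P_GPLL0, 1, 1, 45):  800 MHz * 1/45 = 17.778 MHz (not 24) */
/* new: F(24000000, P_GPLL6, 1, 1, 45): 1080 MHz * 1/45 = 24.000 MHz */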
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 855a61966f3e..8f29ecc74c50 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.halt_reg = 0x4c02c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x4c02c,
.enable_mask = BIT(0),
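BRANCH_HALT_SKIP tells the branch code not to poll the halt bit when enabling or disabling the clock, which is the right call for gcc_venus0_core0_vcodec0_clk: its status bit presumably cannot be trusted while the venus core GDSC power-cycles the core. For reference, the halt_check values come from drivers/clk/qcom/clk-branch.h (abridged, paraphrased):

#define BRANCH_HALT		0	/* pol: 1 = halt */
#define BRANCH_HALT_ENABLE	1	/* pol: 0 = halt */
#define BRANCH_HALT_DELAY	2	/* No bit to check; just delay */
#define BRANCH_HALT_SKIP	3	/* Don't check halt bit */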
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 9ddce11db6df..c2e4fa5d63ad 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -7,7 +7,6 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index b32e66714951..92ad35cfb75e 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -7,7 +7,6 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 9a6703365e61..6684cab63ae1 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -2720,6 +2720,7 @@ static struct gdsc gcc_vcodec0_gdsc = {
.pd = {
.name = "gcc_vcodec0",
},
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index c3cfd572e7c1..efc75a3814ab 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -131,6 +131,7 @@ static struct clk_alpha_pll gpll1_out_main = {
/* 930MHz configuration */
static const struct alpha_pll_config gpll3_config = {
.l = 48,
+ .alpha_hi = 0x70,
.alpha = 0x0,
.alpha_en_mask = BIT(24),
.post_div_mask = 0xf << 8,
@@ -2753,7 +2754,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
[GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
[GCC_WCSS_Q6_AHB_CLK] = &gcc_wdsp_q6ss_ahbs_clk.clkr,
- [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
+ [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
};
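The added .alpha_hi completes the 930 MHz figure the comment promises. With the usual 19.2 MHz XO and the 40-bit alpha format used by CLK_ALPHA_PLL_TYPE_DEFAULT (alpha_hi holds bits [39:32]), the fractional part of the multiplier had been silently dropped by .alpha = 0x0 alone, leaving the PLL at 48 x 19.2 = 921.6 MHz:

/* 930 MHz / 19.2 MHz = 48.4375    ->  .l = 48, fraction = 0.4375       */
/* 0.4375 * 2^40 = 0x70'0000'0000  ->  .alpha_hi = 0x70, .alpha = 0x0   */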
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
new file mode 100644
index 000000000000..9695446bc2a3
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -0,0 +1,3034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2_DIV,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL3_OUT_MAIN_DIV,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL0, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll0_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL3, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll3_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll3_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x13000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x1b000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x1b000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN_DIV, 4 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3_out_aux2_div.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_AUX2_DIV, 5, 0, 0),
+ F(133250000, P_GPLL3_OUT_MAIN_DIV, 4, 0, 0),
+ F(266500000, P_GPLL3_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2_DIV, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2_DIV, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2_DIV, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2_DIV, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2_DIV, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2_DIV, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2_DIV, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2_DIV, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
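+/*
+ * The SDCC RCGs use clk_rcg2_floor_ops so that rate requests round down to
+ * the nearest supported frequency: the card clock must never come out
+ * faster than the rate the MMC core asked for.
+ */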
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(202000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7707c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_sec_master_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_sec_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_sec_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb2_sec_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb2_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_vsensor_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
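+/*
+ * Branch (gate) clocks. halt_reg/halt_check describe how the status bit is
+ * polled after toggling the gate: BRANCH_HALT polls the branch's own CBCR,
+ * BRANCH_HALT_VOTED skips the check on disable since other voters may keep
+ * the clock running, and BRANCH_HALT_SKIP/BRANCH_HALT_DELAY skip or defer
+ * the check where no reliable status bit exists. Branches flagged with
+ * CLK_SET_RATE_PARENT forward clk_set_rate() requests to their RCG parent.
+ */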
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_sec_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_east_clk = {
+ .halt_reg = 0x6a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_west_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
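+/*
+ * Branches such as the one below are enabled through the shared
+ * branch-enable voting registers (0x52004/0x5200c) instead of their own
+ * CBCR: enable_mask selects this master's vote bit, while halt_reg still
+ * points at the branch's status register.
+ */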
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_sec_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
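+/*
+ * The PCIe pipe clock is sourced from the PHY, so its halt bit is not
+ * meaningful until the PHY is up; BRANCH_HALT_SKIP avoids polling it.
+ */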
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x8c030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx3_usb2_clkref_clk = {
+ .halt_reg = 0x8c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx3_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_prim_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_pipe_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
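+/*
+ * Non-regmap clk_hw providers (the GPLL aux2 post-dividers defined earlier
+ * in the file) are exported through a separate hws table.
+ */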
+static struct clk_hw *gcc_qcs615_hws[] = {
+ [GPLL0_OUT_AUX2_DIV] = &gpll0_out_aux2_div.hw,
+ [GPLL3_OUT_AUX2_DIV] = &gpll3_out_aux2_div.hw,
+};
+
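+/*
+ * GDSCs (globally distributed switch controllers) implement the power
+ * domains for the larger blocks; the *_wait_val fields tune the
+ * enable/disable handshake timing.
+ */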
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb20_sec_gdsc = {
+ .gdscr = 0xa6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb20_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
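+/*
+ * The HLOS1-vote GDSCs gate the SMMU TBUs. VOTABLE means enabling them
+ * casts this master's vote for the power state rather than owning the
+ * switch outright.
+ */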
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
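+/*
+ * Lookup tables indexed by the constants from the dt-bindings header,
+ * handed to the common qcom_cc probe helpers for registration.
+ */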
+static struct clk_regmap *gcc_qcs615_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_SEC_AXI_CLK] = &gcc_aggre_usb2_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY_EAST_CLK] = &gcc_ahb2phy_east_clk.clkr,
+ [GCC_AHB2PHY_WEST_CLK] = &gcc_ahb2phy_west_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB2_SEC_AXI_CLK] = &gcc_cfg_noc_usb2_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_RX3_USB2_CLKREF_CLK] = &gcc_rx3_usb2_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_SEC_MASTER_CLK] = &gcc_usb20_sec_master_clk.clkr,
+ [GCC_USB20_SEC_MASTER_CLK_SRC] = &gcc_usb20_sec_master_clk_src.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK] = &gcc_usb20_sec_mock_utmi_clk.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb20_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB20_SEC_SLEEP_CLK] = &gcc_usb20_sec_sleep_clk.clkr,
+ [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr,
+ [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK] = &gcc_usb2_sec_phy_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK_SRC] = &gcc_usb2_sec_phy_aux_clk_src.clkr,
+ [GCC_USB2_SEC_PHY_COM_AUX_CLK] = &gcc_usb2_sec_phy_com_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_PIPE_CLK] = &gcc_usb2_sec_phy_pipe_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+};
+
+static struct gdsc *gcc_qcs615_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB20_SEC_GDSC] = &usb20_sec_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_qcs615_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xd000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xd004 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB20_SEC_BCR] = { 0xa6000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_SDCC1_BCR] = { 0x12000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
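+
+/*
+ * A note on the table above, assuming the common qcom_cc_register_rcg_dfs()
+ * behaviour: for each listed RCG whose DFS mode was enabled by firmware,
+ * registration swaps the clock ops to the DFS variants at probe and
+ * derives the frequency table from the hardware performance-level
+ * registers instead of the static freq_tbl.
+ */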
+
+static const struct regmap_config gcc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa609c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs615_desc = {
+ .config = &gcc_qcs615_regmap_config,
+ .clk_hws = gcc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs615_hws),
+ .clks = gcc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs615_clocks),
+ .resets = gcc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs615_resets),
+ .gdscs = gcc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs615_gdscs),
+};
+
+static const struct of_device_id gcc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs615_match_table);
+
+static int gcc_qcs615_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ /*
+ * Disable the GPLL0 active input to MM blocks and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x0b084, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x9b000, BIT(0), BIT(0));
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xb008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb044); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb048); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb040); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x480040); /* GCC_CPUSS_GNOC_CLK */
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs615_desc, regmap);
+}
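+
+/*
+ * A minimal sketch of what the qcom_branch_set_clk_en() calls above
+ * amount to, assuming the drivers/clk/qcom/common.c helper: each call is
+ * effectively
+ *
+ *	regmap_update_bits(regmap, cbcr_reg, BIT(0), BIT(0));
+ *
+ * i.e. it latches the CLK_ENABLE bit of the named branch CBCR so the
+ * clock keeps running even with no Linux consumer.
+ */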
+
+static struct platform_driver gcc_qcs615_driver = {
+ .probe = gcc_qcs615_probe,
+ .driver = {
+ .name = "gcc-qcs615",
+ .of_match_table = gcc_qcs615_match_table,
+ },
+};
+
+static int __init gcc_qcs615_init(void)
+{
+ return platform_driver_register(&gcc_qcs615_driver);
+}
+subsys_initcall(gcc_qcs615_init);
+
+static void __exit gcc_qcs615_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs615_driver);
+}
+module_exit(gcc_qcs615_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS615 Driver");
+MODULE_LICENSE("GPL");
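
A consumer-side aside before the next file: peripheral drivers on these SoCs pick up the clocks registered above through the standard clk API, with the dt-bindings indices resolving the phandles in the consumer's "clocks" property. A minimal sketch, assuming a hypothetical consumer node that lists an "iface" entry in clock-names:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *iface;

		/* "iface" is a hypothetical clock-names entry */
		iface = devm_clk_get_enabled(&pdev->dev, "iface");
		if (IS_ERR(iface))
			return PTR_ERR(iface);

		return 0;
	}
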
diff --git a/drivers/clk/qcom/gcc-qcs8300.c b/drivers/clk/qcom/gcc-qcs8300.c
new file mode 100644
index 000000000000..80831c7dea3b
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs8300.c
@@ -0,0 +1,3640 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs8300-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_PCIE_1_PIPE_CLK,
+ DT_PCIE_PHY_AUX_CLK,
+ DT_RXC0_REF_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+};
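+
+/*
+ * The DT_* values above are positional: a clk_parent_data entry with
+ * .index = DT_FOO resolves to the matching phandle in the gcc node's
+ * "clocks" property, in exactly this order.
+ */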
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_PCIE_PHY_AUX_CLK,
+ P_RXC0_REF_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
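+
+/*
+ * All five PLLs above share the voteable enable register at 0x4b028,
+ * with the enable bit number matching the PLL index (GPLL0 is BIT(0),
+ * GPLL9 is BIT(9)). The fixed lucid_evo ops only vote the PLL on or off
+ * and leave the firmware-programmed rate alone.
+ */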
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC0_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC0_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = {
+ .reg = 0xa9074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0xa906c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
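+
+/*
+ * clk_regmap_phy_mux models a PCIe pipe clock source as a plain
+ * enable/disable: enabling selects the PHY's pipe output, disabling
+ * parks the mux on the safe reference source, so the downstream pipe
+ * branch can be gated while the PHY is still down.
+ */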
+
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x77074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x7706c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x83060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x830d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_16,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x83050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_17,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x1b068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = {
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
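+
+/*
+ * The fractional pre-dividers here are exact, not rounded: the F()
+ * macro in clk-rcg.h stores (2 * h - 1), so a divider of 3.5 encodes as
+ * the field value 6, and 230.4 MHz implies GPLL4 running at 806.4 MHz
+ * (consistent with the 403.2 MHz divide-by-2 entries later in this file).
+ */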
+
+static struct clk_rcg2 gcc_emac0_ptp_clk_src = {
+ .cmd_rcgr = 0xb6060,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = {
+ F(5000000, P_GCC_GPLL0_OUT_EVEN, 10, 1, 6),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_rgmii_clk_src = {
+ .cmd_rcgr = 0xb6048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x70004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x71004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x62004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0x1f004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xa9078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa9054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x77078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x77054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x23154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
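+
+/*
+ * The init data for these QUP RCGs is left non-const on purpose;
+ * presumably, as with the qcs615 DFS table earlier in this series, DFS
+ * registration rewrites .ops at probe when firmware has enabled dynamic
+ * frequency switching for a serial engine.
+ */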
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x23288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x233bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x234f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x23624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x23758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2388c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x239c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x24154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x24288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x243bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x244f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x24624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x24758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2488c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x249c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap3_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap3_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap3_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap3_s0_clk_src = {
+ .cmd_rcgr = 0xc4158,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_wrap3_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap3_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ F(384000000, P_GCC_GPLL9_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
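+
+/*
+ * clk_rcg2_floor_ops round a requested rate down to the nearest table
+ * entry, never up; appropriate for SD/eMMC, where exceeding the rate
+ * the card negotiated would overclock the interface.
+ */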
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x2002c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x8302c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(201600000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x83074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x830a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8308c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0x1c028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1c040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1b028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1b040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1b06c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_0_pipe_div_clk_src = {
+ .reg = 0xa9070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div_clk_src = {
+ .reg = 0x77070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap3_s0_div_clk_src = {
+ .reg = 0xc4288,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1c058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1b058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
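+
+/*
+ * The *_div_clk_src clocks above all use clk_regmap_div_ro_ops: Linux
+ * only reads back the divider programmed by boot firmware and reports
+ * the resulting rate; it never writes these registers.
+ */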
+
+static struct clk_branch gcc_aggre_noc_qupv3_axi_clk = {
+ .halt_reg = 0x8e200,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8e200,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_qupv3_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x830d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy0_clk = {
+ .halt_reg = 0x76004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy2_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy3_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy3_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x44004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_xo_clk = {
+ .halt_reg = 0x32024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7d164,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d164,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x33010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_edp_ref_clkref_en = {
+ .halt_reg = 0x97448,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97448,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_edp_ref_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_axi_clk = {
+ .halt_reg = 0xb6018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_phy_aux_clk = {
+ .halt_reg = 0xb6024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_ptp_clk = {
+ .halt_reg = 0xb6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rgmii_clk = {
+ .halt_reg = 0xb6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_slv_ahb_clk = {
+ .halt_reg = 0xb6020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x70000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x70000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x71000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x62000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0x1e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0x1f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_center_pipeline_clk = {
+ .halt_reg = 0x7d160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d160,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_center_pipeline_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x7d01c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = {
+ .halt_reg = 0x7d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xa9038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xa902c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa902c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xa9024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_aux_clk = {
+ .halt_reg = 0xa9030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xa9050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xa9040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipediv2_clk = {
+ .halt_reg = 0xa9048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xa901c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xa9018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7702c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x77050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipediv2_clk = {
+ .halt_reg = 0x77048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_clkref_en = {
+ .halt_reg = 0x9746c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9746c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_cfg_clk = {
+ .halt_reg = 0xb2034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_throttle_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_rot_ahb_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x34008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3400c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcpu_ahb_clk = {
+ .halt_reg = 0x34010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2314c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x23280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x233b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x234e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x2361c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x23750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x23884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x239b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x24018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x24280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x243b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x244e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2461c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x24750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x24884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x249b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_2x_clk = {
+ .halt_reg = 0xc4018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_clk = {
+ .halt_reg = 0xc400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_qspi_clk = {
+ .halt_reg = 0xc4284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_qspi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_s0_clk = {
+ .halt_reg = 0xc4150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_m_ahb_clk = {
+ .halt_reg = 0xc4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_s_ahb_clk = {
+ .halt_reg = 0xc4008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x20044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sgmi_clkref_en = {
+ .halt_reg = 0x97034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sgmi_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x83020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x83018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x8306c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8306c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8306c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x830a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x83028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x830c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x830c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x83024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x83064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x1c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x1c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x1c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1b020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x1b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1b060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1b064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1b064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_clkref_en = {
+ .halt_reg = 0x97468,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97468,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x34014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x3401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3401c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
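+/*
+ * GDSCs are the power domains managed by this clock controller. .gdscr is
+ * the control register and the *_wait_val fields tune the power sequencing
+ * delays. VOTABLE domains collapse through collapse_ctrl/collapse_mask
+ * votes, RETAIN_FF_ENABLE keeps flip-flop state across a collapse, and
+ * POLL_CFG_GDSCR polls the status bit in the CFG register rather than the
+ * control register.
+ */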
+static struct gdsc gcc_emac0_gdsc = {
+ .gdscr = 0xb6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_emac0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0xa9004,
+ .collapse_ctrl = 0x4b104,
+ .collapse_mask = BIT(0),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_pcie_1_gdsc = {
+ .gdscr = 0x77004,
+ .collapse_ctrl = 0x4b104,
+ .collapse_mask = BIT(1),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x83004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0x1c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x1b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
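+/*
+ * Lookup table tying the GCC_* indices from this controller's dt-bindings
+ * header to the regmap clocks defined above. Consumers reference the same
+ * indices from devicetree, roughly:
+ *
+ *     clocks = <&gcc GCC_SDCC1_APPS_CLK>;
+ */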
+static struct clk_regmap *gcc_qcs8300_clocks[] = {
+ [GCC_AGGRE_NOC_QUPV3_AXI_CLK] = &gcc_aggre_noc_qupv3_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr,
+ [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr,
+ [GCC_AHB2PHY3_CLK] = &gcc_ahb2phy3_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EDP_REF_CLKREF_EN] = &gcc_edp_ref_clkref_en.clkr,
+ [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK] = &gcc_emac0_phy_aux_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK_SRC] = &gcc_emac0_phy_aux_clk_src.clkr,
+ [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr,
+ [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr,
+ [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr,
+ [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr,
+ [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK] = &gcc_gpu_memnoc_gfx_center_pipeline_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK] = &gcc_pcie_0_phy_aux_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK_SRC] = &gcc_pcie_0_phy_aux_clk_src.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV_CLK_SRC] = &gcc_pcie_0_pipe_div_clk_src.clkr,
+ [GCC_PCIE_0_PIPEDIV2_CLK] = &gcc_pcie_0_pipediv2_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV_CLK_SRC] = &gcc_pcie_1_pipe_div_clk_src.clkr,
+ [GCC_PCIE_1_PIPEDIV2_CLK] = &gcc_pcie_1_pipediv2_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr,
+ [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCPU_AHB_CLK] = &gcc_qmip_video_vcpu_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_CORE_2X_CLK] = &gcc_qupv3_wrap3_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP3_CORE_CLK] = &gcc_qupv3_wrap3_core_clk.clkr,
+ [GCC_QUPV3_WRAP3_QSPI_CLK] = &gcc_qupv3_wrap3_qspi_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK] = &gcc_qupv3_wrap3_s0_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK_SRC] = &gcc_qupv3_wrap3_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC] = &gcc_qupv3_wrap3_s0_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_M_AHB_CLK] = &gcc_qupv3_wrap_3_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_S_AHB_CLK] = &gcc_qupv3_wrap_3_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SGMI_CLKREF_EN] = &gcc_sgmi_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB_CLKREF_EN] = &gcc_usb_clkref_en.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_qcs8300_gdscs[] = {
+ [GCC_EMAC0_GDSC] = &gcc_emac0_gdsc,
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+};
+
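+/*
+ * Block control resets (BCRs). Each entry gives the register offset of a
+ * BCR; where a second value is present (the VIDEO_AXI*_CLK_ARES entries) it
+ * selects the reset bit within the register instead of the default bit 0.
+ * An illustrative consumer reference:
+ *
+ *     resets = <&gcc GCC_USB30_PRIM_BCR>;
+ */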
+static const struct qcom_reset_map gcc_qcs8300_resets[] = {
+ [GCC_EMAC0_BCR] = { 0xb6000 },
+ [GCC_PCIE_0_BCR] = { 0xa9000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbf000 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbf008 },
+ [GCC_PCIE_0_PHY_BCR] = { 0xa9144 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbf00c },
+ [GCC_PCIE_1_BCR] = { 0x77000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0xae084 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0xae090 },
+ [GCC_PCIE_1_PHY_BCR] = { 0xae08c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0xae094 },
+ [GCC_SDCC1_BCR] = { 0x20000 },
+ [GCC_UFS_PHY_BCR] = { 0x83000 },
+ [GCC_USB20_PRIM_BCR] = { 0x1c000 },
+ [GCC_USB2_PHY_PRIM_BCR] = { 0x5c01c },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x5c020 },
+ [GCC_USB30_PRIM_BCR] = { 0x1b000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x5c008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x5c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0x5c024 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x5c00c },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5c010 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x5c004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5c014 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x5c018 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x76000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x34014, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x3401c, 2 },
+ [GCC_VIDEO_BCR] = { 0x34000 },
+};
+
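+/*
+ * The QUP serial-engine RCGs support hardware DFS (dynamic frequency
+ * switching). qcom_cc_register_rcg_dfs(), called from probe, checks each
+ * of these RCGs and, where DFS is enabled, switches it to a frequency
+ * table read back from the DFS performance-level registers.
+ */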
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap3_s0_clk_src),
+};
+
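+/*
+ * One flat 32-bit regmap covers the GCC register space; fast_io selects
+ * spinlock-based locking so accesses are safe from atomic context.
+ */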
+static const struct regmap_config gcc_qcs8300_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x472cffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs8300_desc = {
+ .config = &gcc_qcs8300_regmap_config,
+ .clks = gcc_qcs8300_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs8300_clocks),
+ .resets = gcc_qcs8300_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs8300_resets),
+ .gdscs = gcc_qcs8300_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs8300_gdscs),
+};
+
+static const struct of_device_id gcc_qcs8300_match_table[] = {
+ { .compatible = "qcom,qcs8300-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs8300_match_table);
+
+static int gcc_qcs8300_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs8300_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32020); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x7d004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34024); /* GCC_VIDEO_XO_CLK */
+
+ /* FORCE_MEM_CORE_ON for the UFS PHY ICE core clock */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs8300_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs8300_driver = {
+ .probe = gcc_qcs8300_probe,
+ .driver = {
+ .name = "gcc-qcs8300",
+ .of_match_table = gcc_qcs8300_match_table,
+ },
+};
+
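+/*
+ * Registered at subsys_initcall time rather than module_init so the
+ * clocks are available by the time early consumer drivers probe.
+ */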
+static int __init gcc_qcs8300_init(void)
+{
+ return platform_driver_register(&gcc_qcs8300_driver);
+}
+subsys_initcall(gcc_qcs8300_init);
+
+static void __exit gcc_qcs8300_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs8300_driver);
+}
+module_exit(gcc_qcs8300_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS8300 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sar2130p.c b/drivers/clk/qcom/gcc-sar2130p.c
new file mode 100644
index 000000000000..475e2cda3618
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sar2130p.c
@@ -0,0 +1,2366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2023, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sar2130p-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_EVEN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_EVEN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
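+
+/*
+ * Post-divider table entries pair a register field value with the divider
+ * it selects: { 0x1, 2 } programs the value 1 to divide by two, so
+ * gcc_gpll0_out_even runs at half the gcc_gpll0 rate.
+ */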
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll9_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll9_out_even = {
+ .offset = 0x9000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll9_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll9_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
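+
+/*
+ * Each parent_map array translates the P_* identifiers into hardware mux
+ * select values; the matching clk_parent_data array lists the same parents
+ * in the same order for the clock framework, either as a DT clock index or
+ * as a direct clk_hw pointer.
+ */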
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_EVEN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9_out_even.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_EVEN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x7b070,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_0_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
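+
+/*
+ * clk_regmap_phy_mux_ops model a PCIe pipe clock as a two-position mux
+ * that is switched to the PHY output on enable and parked back on the
+ * safe XO source on disable, which is why only the single devicetree
+ * pipe parent is listed here.
+ */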
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9d06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x4906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ddrss_spad_clk_src[] = {
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ F(426400000, P_GCC_GPLL1_OUT_MAIN, 2.5, 0, 0),
+ F(500000000, P_GCC_GPLL7_OUT_MAIN, 1, 0, 0),
+ { }
+};
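+
+/*
+ * Frequency tables use the F(freq, src, pre_div, m, n) helper from
+ * clk-rcg.h, which stores the pre-divider as 2 * pre_div - 1 so that
+ * half-integer dividers can be written literally: the 426.4 MHz row
+ * above is GPLL1 divided by 2.5, encoded as a raw field value of 4.
+ * A non-zero m/n pair further scales the rate by m/n through the RCG's
+ * MND counter.
+ */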
+
+static struct clk_rcg2 gcc_ddrss_spad_clk_src = {
+ .cmd_rcgr = 0x70004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ddrss_spad_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_spad_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
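+
+/*
+ * In the clk_rcg2 instances, hid_width is the width of the half-integer
+ * divider field and mnd_width the width of the M/N/D counter fields; an
+ * mnd_width of 0 means the RCG has no MND block and can only use the
+ * integer and half-integer rows of its frequency table.
+ */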
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x74004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x75004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x76004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x7b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x7b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x9d070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x43010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
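+
+/*
+ * The QUP serial-engine RCGs keep their clk_init_data in separate named,
+ * writable structs rather than anonymous const literals: DEFINE_RCG_DFS()
+ * further down references both the RCG and its init data so that
+ * qcom_cc_register_rcg_dfs() can switch a clock over to the DFS ops at
+ * probe time when firmware has enabled dynamic frequency switching for
+ * that serial engine.
+ */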
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x28288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x283c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x284f8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x28630,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x2e018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x2e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x2e288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x2e3c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x2e4f8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x2e630,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL9_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL9_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x26018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x2603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x4902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x49044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x49070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x4905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
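+
+/*
+ * clk_regmap_div_ro_ops make this post-divider read-only: the rate is
+ * recalculated from whatever the register already holds and is never
+ * reprogrammed, with CLK_SET_RATE_PARENT pushing rate requests up to the
+ * mock UTMI RCG instead.
+ */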
+
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x7b094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
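+
+/*
+ * The branch clocks from here on follow a common pattern: halt_reg is
+ * where the status bit is polled, while enable_reg/enable_mask point
+ * either at the branch's own CBCR or, for remotely voted branches, at
+ * the shared vote registers (0x62000/0x62008/0x62010 in this driver).
+ * BRANCH_HALT_VOTED accounts for other voters possibly keeping the
+ * clock running, BRANCH_HALT_SKIP polls nothing, and BRANCH_HALT_DELAY
+ * simply waits a fixed time instead of polling.
+ */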
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x4908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x20034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x49088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x81154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x81154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_clk = {
+ .halt_reg = 0x9d098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_spad_clk = {
+ .halt_reg = 0x70000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x70000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x70000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_spad_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ddrss_spad_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x37008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x37008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x74000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x74000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x75000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x76000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x9b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x9b018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_iris_ss_hf_axi1_clk = {
+ .halt_reg = 0x42030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_iris_ss_hf_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_iris_ss_spd_axi1_clk = {
+ .halt_reg = 0x70020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x70020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x70020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_iris_ss_spd_axi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ddrss_spad_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x7b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x7b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x7b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x7b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x7b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x7b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x7b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x9d034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x9d028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x9d050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x9d044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x43004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x43004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x43008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x7b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x42014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x42008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_lsr_ahb_clk = {
+ .halt_reg = 0x4204c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4204c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_lsr_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x42010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x4200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x33034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x33024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x283b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x284ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x28624,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x3317c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x3316c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x2e144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x2e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x2e3b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2e4ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x2e624,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x26030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x49018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x49028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x49024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x49060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x49064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x49068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x49068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x42024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x8d204,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x8d054,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x8d05c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x8d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x7b004,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x7c000,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(3),
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x9d004,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(1),
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(4),
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x49004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x60018,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
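+
+/*
+ * GDSC notes: VOTABLE domains are switched through a shared vote
+ * register rather than owned outright (collapse_ctrl/collapse_mask give
+ * the vote bit for the PCIe domains), and RETAIN_FF_ENABLE asks the
+ * hardware to retain register contents across power collapse.
+ */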
+
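+/*
+ * The array indices below are the GCC_* consumer IDs from the SAR2130P
+ * GCC dt-bindings header, so devicetree consumers refer to these clocks
+ * by the same constants.
+ */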
+static struct clk_regmap *gcc_sar2130p_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
+ [GCC_DDRSS_SPAD_CLK] = &gcc_ddrss_spad_clk.clkr,
+ [GCC_DDRSS_SPAD_CLK_SRC] = &gcc_ddrss_spad_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPLL9_OUT_EVEN] = &gcc_gpll9_out_even.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_IRIS_SS_HF_AXI1_CLK] = &gcc_iris_ss_hf_axi1_clk.clkr,
+ [GCC_IRIS_SS_SPD_AXI1_CLK] = &gcc_iris_ss_spd_axi1_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_LSR_AHB_CLK] = &gcc_qmip_video_lsr_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_sar2130p_resets[] = {
+ [GCC_DISPLAY_BCR] = { 0x37000 },
+ [GCC_GPU_BCR] = { 0x9b000 },
+ [GCC_PCIE_0_BCR] = { 0x7b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x7c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 },
+ [GCC_PCIE_1_BCR] = { 0x9d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x9e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e024 },
+ [GCC_PCIE_PHY_BCR] = { 0x7f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PDM_BCR] = { 0x43000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x2e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 },
+ [GCC_SDCC1_BCR] = { 0x26000 },
+ [GCC_USB30_PRIM_BCR] = { 0x49000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x6000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x42018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x42024, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_BCR] = { 0x42000 },
+ [GCC_IRIS_SS_HF_AXI_CLK_ARES] = { .reg = 0x42030, .bit = 2 },
+ [GCC_IRIS_SS_SPD_AXI_CLK_ARES] = { .reg = 0x70020, .bit = 2 },
+ [GCC_DDRSS_SPAD_CLK_ARES] = { .reg = 0x70000, .bit = 2 },
+};
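+
+/*
+ * Reset map entries above default to asserting bit 0 of the named BCR;
+ * the *_CLK_ARES entries instead toggle the CLK_ARES bit (bit 2) inside
+ * a branch's own CBCR, with .udelay extending how long the reset is
+ * held asserted where the video and DDRSS paths need it.
+ */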
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static struct gdsc *gcc_sar2130p_gdscs[] = {
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static const struct regmap_config gcc_sar2130p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f1030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sar2130p_desc = {
+ .config = &gcc_sar2130p_regmap_config,
+ .clks = gcc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sar2130p_clocks),
+ .resets = gcc_sar2130p_resets,
+ .num_resets = ARRAY_SIZE(gcc_sar2130p_resets),
+ .gdscs = gcc_sar2130p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sar2130p_gdscs),
+};
+
+static const struct of_device_id gcc_sar2130p_match_table[] = {
+ { .compatible = "qcom,sar2130p-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sar2130p_match_table);
+
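+/*
+ * Probe maps the regmap first so the QUP RCGs can be switched to DFS
+ * ops where firmware enables it, then force-enables a handful of AHB/XO
+ * branches that must never be gated, and finally hands everything to
+ * qcom_cc_really_probe(), which registers the clocks, resets and GDSCs
+ * declared above.
+ */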
+static int gcc_sar2130p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sar2130p_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+	/* Keep some clocks always on */
+ qcom_branch_set_clk_en(regmap, 0x37004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42028); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9b004); /* GCC_GPU_CFG_AHB_CLK */
+
+	/* Clear GDSC_SLEEP_ENA_VOTE to stop votes from being auto-removed in sleep. */
+ regmap_write(regmap, 0x62204, 0x0);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sar2130p_desc, regmap);
+}
+
+static struct platform_driver gcc_sar2130p_driver = {
+ .probe = gcc_sar2130p_probe,
+ .driver = {
+ .name = "gcc-sar2130p",
+ .of_match_table = gcc_sar2130p_match_table,
+ },
+};
+
+static int __init gcc_sar2130p_init(void)
+{
+ return platform_driver_register(&gcc_sar2130p_driver);
+}
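+/*
+ * subsys_initcall registers this driver early in boot so the clock
+ * controller is available before most of its consumers probe.
+ */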
+subsys_initcall(gcc_sar2130p_init);
+
+static void __exit gcc_sar2130p_exit(void)
+{
+ platform_driver_unregister(&gcc_sar2130p_driver);
+}
+module_exit(gcc_sar2130p_exit);
+
+MODULE_DESCRIPTION("QTI GCC SAR2130P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index f27d0003f427..b683795475e3 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -6775,10 +6775,6 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE | RETAIN_FF_ENABLE,
};
-/*
- * The Qualcomm PCIe driver does not yet implement suspend so to keep the
- * PCIe power domains always-on for now.
- */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index df79298a1a25..20253a06a583 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2247,6 +2247,45 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
},
};
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_gdsc = {
.gdscr = 0x75004,
.gds_hw_ctrl = 0x0,
@@ -2277,6 +2316,33 @@ static struct gdsc pcie_0_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos2_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7e04c,
+ .pd = {
+ .name = "hlos2_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
static struct clk_hw *gcc_sdm660_hws[] = {
&xo.hw,
&gpll0_early_div.hw,
@@ -2409,17 +2475,25 @@ static struct clk_regmap *gcc_sdm660_clocks[] = {
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK] = &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK] = &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static struct gdsc *gcc_sdm660_gdscs[] = {
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
+ [HLOS1_VOTE_TURING_ADSP_GDSC] = &hlos1_vote_turing_adsp_gdsc,
+ [HLOS2_VOTE_TURING_ADSP_GDSC] = &hlos2_vote_turing_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
};
static const struct qcom_reset_map gcc_sdm660_resets[] = {
[GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
[GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC1_BCR] = { 0x16000 },
[GCC_UFS_BCR] = { 0x75000 },
[GCC_USB3_DP_PHY_BCR] = { 0x50028 },
[GCC_USB3_PHY_BCR] = { 0x50020 },
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index dc3aa7014c3e..6d0f9cede5cf 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -284,11 +284,6 @@ static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -302,7 +297,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -316,7 +311,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -330,7 +325,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -454,7 +449,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -470,7 +465,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -486,7 +481,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -502,7 +497,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -518,7 +513,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -534,7 +529,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -550,7 +545,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -566,7 +561,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -582,7 +577,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -598,7 +593,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -614,7 +609,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -630,7 +625,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -646,7 +641,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -662,7 +657,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -678,7 +673,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -694,7 +689,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a811fad2aa27..a4d6dff9d0f7 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .hw = &gpll0_out_odd.clkr.hw },
};
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.cmd_rcgr = 0x3a0b0,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.cmd_rcgr = 0x1a034,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -2314,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
static struct gdsc usb30_prim_gdsc = {
.gdscr = 0x1a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "usb30_prim_gdsc",
},
@@ -2322,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x3a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ufs_phy_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index cefceb780889..a93d1f412a7b 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -1245,7 +1245,7 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
* delay and also mark as crtitical for camss boot
*/
static struct clk_branch gcc_camera_ahb_clk = {
@@ -1398,7 +1398,7 @@ static struct clk_branch gcc_ddrss_gpu_axi_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
* delay and also mark as crtitical for disp boot
*/
static struct clk_branch gcc_disp_ahb_clk = {
@@ -3339,7 +3339,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
* delay and also mark as crtitical for video boot
*/
static struct clk_branch gcc_video_ahb_clk = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index c445c271678a..65d7d52bce03 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -26,6 +26,8 @@ enum {
P_BI_TCXO,
P_GCC_GPLL0_OUT_EVEN,
P_GCC_GPLL0_OUT_MAIN,
+ P_SM8475_GCC_GPLL2_OUT_EVEN,
+ P_SM8475_GCC_GPLL3_OUT_EVEN,
P_GCC_GPLL4_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
P_PCIE_1_PHY_AUX_CLK,
@@ -36,6 +38,15 @@ enum {
P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
};
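+/*
+ * SM8475 reuses the SM8450 GCC layout, but its PLLs are Lucid OLE rather
+ * than Lucid EVO; the probe function swaps in these init data and register
+ * layouts when the "qcom,sm8475-gcc" compatible matches.
+ */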
+static struct clk_init_data sm8475_gcc_gpll0_init = {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -53,6 +64,15 @@ static struct clk_alpha_pll gcc_gpll0 = {
},
};
+static struct clk_init_data sm8475_gcc_gpll0_out_even_init = {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
{ 0x1, 2 },
{ }
@@ -75,6 +95,49 @@ static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
},
};
+static struct clk_alpha_pll sm8475_gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll sm8475_gcc_gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_init_data sm8475_gcc_gpll4_init = {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll4 = {
.offset = 0x4000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -92,6 +155,15 @@ static struct clk_alpha_pll gcc_gpll4 = {
},
};
+static struct clk_init_data sm8475_gcc_gpll9_init = {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll9 = {
.offset = 0x9000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -153,6 +225,22 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
};
+static const struct parent_map sm8475_gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SM8475_GCC_GPLL2_OUT_EVEN, 2 },
+ { P_SM8475_GCC_GPLL3_OUT_EVEN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data sm8475_gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &sm8475_gcc_gpll2.clkr.hw },
+ { .hw = &sm8475_gcc_gpll3.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
static const struct parent_map gcc_parent_map_5[] = {
{ P_PCIE_1_PHY_AUX_CLK, 0 },
{ P_BI_TCXO, 2 },
@@ -915,6 +1003,16 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
.clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
};
+static const struct freq_tbl sm8475_ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37000000, P_GCC_GPLL9_OUT_MAIN, 16, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(148000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -963,6 +1061,25 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
},
};
+static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_init_data sm8475_gcc_ufs_phy_axi_clk_src_init = {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
@@ -987,6 +1104,24 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
},
};
+static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_init_data sm8475_gcc_ufs_phy_ice_core_clk_src_init = {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
@@ -1032,6 +1167,14 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
},
};
+static struct clk_init_data sm8475_gcc_ufs_phy_unipro_core_clk_src_init = {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.cmd_rcgr = 0x8708c,
.mnd_width = 0,
@@ -3166,6 +3309,8 @@ static struct clk_regmap *gcc_sm8450_clocks[] = {
[GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
[GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [SM8475_GCC_GPLL2] = NULL,
+ [SM8475_GCC_GPLL3] = NULL,
};
static const struct qcom_reset_map gcc_sm8450_resets[] = {
@@ -3259,6 +3404,7 @@ static const struct qcom_cc_desc gcc_sm8450_desc = {
static const struct of_device_id gcc_sm8450_match_table[] = {
{ .compatible = "qcom,gcc-sm8450" },
+ { .compatible = "qcom,sm8475-gcc" },
{ }
};
MODULE_DEVICE_TABLE(of, gcc_sm8450_match_table);
@@ -3277,6 +3423,39 @@ static int gcc_sm8450_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gcc")) {
+ /* Update GCC PLL0 */
+ gcc_gpll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll0.clkr.hw.init = &sm8475_gcc_gpll0_init;
+ gcc_gpll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll0_out_even.clkr.hw.init = &sm8475_gcc_gpll0_out_even_init;
+
+ /* Update GCC PLL4 */
+ gcc_gpll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll4.clkr.hw.init = &sm8475_gcc_gpll4_init;
+
+ /* Update GCC PLL9 */
+ gcc_gpll9.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll9.clkr.hw.init = &sm8475_gcc_gpll9_init;
+
+ gcc_sdcc2_apps_clk_src.freq_tbl = sm8475_ftbl_gcc_sdcc2_apps_clk_src;
+
+ gcc_ufs_phy_axi_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_axi_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_axi_clk_src;
+ gcc_ufs_phy_axi_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_axi_clk_src_init;
+
+ gcc_ufs_phy_ice_core_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_ice_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src;
+ gcc_ufs_phy_ice_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_ice_core_clk_src_init;
+
+ gcc_ufs_phy_unipro_core_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_unipro_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src;
+ gcc_ufs_phy_unipro_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_unipro_core_clk_src_init;
+
+ gcc_sm8450_desc.clks[SM8475_GCC_GPLL2] = &sm8475_gcc_gpll2.clkr;
+ gcc_sm8450_desc.clks[SM8475_GCC_GPLL3] = &sm8475_gcc_gpll3.clkr;
+ }
+
/* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
@@ -3312,5 +3491,5 @@ static void __exit gcc_sm8450_exit(void)
}
module_exit(gcc_sm8450_exit);
-MODULE_DESCRIPTION("QTI GCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI GCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 5abaeddd6afc..862a9bf73bcb 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fd9d6544bdd5..24f98062b9dd 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = {
.pd = {
.name = "usb3_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52150, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
new file mode 100644
index 000000000000..8092dd6b37b5
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -0,0 +1,3275 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_elu_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
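+/*
+ * Each parent_map pairs a P_* clock source with its hardware mux select
+ * value; the clk_parent_data array that follows lists the same parents,
+ * in the same order, for the clock framework.
+ */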
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
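+/*
+ * The PCIe pipe clock uses the PHY mux helper, which falls back to the
+ * safe reference source while the PHY is powered down so the mux never
+ * selects a dead clock.
+ */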
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b080,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770ec,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b084,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
+ .cmd_rcgr = 0x17008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
+ .cmd_rcgr = 0x17040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
+ .cmd_rcgr = 0x1705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
+ .cmd_rcgr = 0x17078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
+ .cmd_rcgr = 0x17094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
+ .cmd_rcgr = 0x170b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
+ .cmd_rcgr = 0x170cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
+ .cmd_rcgr = 0x170e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
+ .cmd_rcgr = 0x17104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Check this frequency table. */
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x188c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x182a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x183dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18518,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18654,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18790,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src[] = {
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
+ .cmd_rcgr = 0x1e9f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e28c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e504,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e640,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(128000000, P_GCC_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e77c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e8b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1401c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
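+/*
+ * Half-integer pre-dividers such as 4.5 are valid here: the F() macro,
+ * as conventionally defined in qcom clock drivers, encodes the HID
+ * field as 2 * div - 1, so 4.5 is programmed as 8.
+ */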
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x39030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
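+/* Fixed, read-only post-dividers fed by the RCGs above. */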
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x39060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
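+/*
+ * Branch (gate) clocks; halt_check selects how, or whether, the halt
+ * status bit is polled after an enable/disable.
+ */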
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71150,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71150,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9f008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9f018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_core_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s0_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s1_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s2_clk = {
+ .halt_reg = 0x1703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s3_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s4_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s5_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s6_clk = {
+ .halt_reg = 0x170ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s7_clk = {
+ .halt_reg = 0x170c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s8_clk = {
+ .halt_reg = 0x170e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s9_clk = {
+ .halt_reg = 0x17100,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x2315c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x188bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x18290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x183cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18508,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18644,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18780,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x232a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_2_clk = {
+ .halt_reg = 0x1e9ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_3_clk = {
+ .halt_reg = 0x1e9f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e630,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e76c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e8a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_2_ahb_clk = {
+ .halt_reg = 0x1e9e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_3_ahb_clk = {
+ .halt_reg = 0x1e9e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x23298,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23298,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2329c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2329c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7707c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7707c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770d8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3902c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3902c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3906c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3906c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
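+/*
+ * GDSCs (globally distributed switch controllers) gate power to the
+ * PCIe, UFS and USB blocks.
+ */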
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(2),
+ .pd = {
+ .name = "gcc_pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_phy_gdsc = {
+ .gdscr = 0x50018,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sm8750_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_I2C_CORE_CLK] = &gcc_qupv3_i2c_core_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK] = &gcc_qupv3_i2c_s0_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK_SRC] = &gcc_qupv3_i2c_s0_clk_src.clkr,
+ [GCC_QUPV3_I2C_S1_CLK] = &gcc_qupv3_i2c_s1_clk.clkr,
+ [GCC_QUPV3_I2C_S1_CLK_SRC] = &gcc_qupv3_i2c_s1_clk_src.clkr,
+ [GCC_QUPV3_I2C_S2_CLK] = &gcc_qupv3_i2c_s2_clk.clkr,
+ [GCC_QUPV3_I2C_S2_CLK_SRC] = &gcc_qupv3_i2c_s2_clk_src.clkr,
+ [GCC_QUPV3_I2C_S3_CLK] = &gcc_qupv3_i2c_s3_clk.clkr,
+ [GCC_QUPV3_I2C_S3_CLK_SRC] = &gcc_qupv3_i2c_s3_clk_src.clkr,
+ [GCC_QUPV3_I2C_S4_CLK] = &gcc_qupv3_i2c_s4_clk.clkr,
+ [GCC_QUPV3_I2C_S4_CLK_SRC] = &gcc_qupv3_i2c_s4_clk_src.clkr,
+ [GCC_QUPV3_I2C_S5_CLK] = &gcc_qupv3_i2c_s5_clk.clkr,
+ [GCC_QUPV3_I2C_S5_CLK_SRC] = &gcc_qupv3_i2c_s5_clk_src.clkr,
+ [GCC_QUPV3_I2C_S6_CLK] = &gcc_qupv3_i2c_s6_clk.clkr,
+ [GCC_QUPV3_I2C_S6_CLK_SRC] = &gcc_qupv3_i2c_s6_clk_src.clkr,
+ [GCC_QUPV3_I2C_S7_CLK] = &gcc_qupv3_i2c_s7_clk.clkr,
+ [GCC_QUPV3_I2C_S7_CLK_SRC] = &gcc_qupv3_i2c_s7_clk_src.clkr,
+ [GCC_QUPV3_I2C_S8_CLK] = &gcc_qupv3_i2c_s8_clk.clkr,
+ [GCC_QUPV3_I2C_S8_CLK_SRC] = &gcc_qupv3_i2c_s8_clk_src.clkr,
+ [GCC_QUPV3_I2C_S9_CLK] = &gcc_qupv3_i2c_s9_clk.clkr,
+ [GCC_QUPV3_I2C_S9_CLK_SRC] = &gcc_qupv3_i2c_s9_clk_src.clkr,
+ [GCC_QUPV3_I2C_S_AHB_CLK] = &gcc_qupv3_i2c_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC] = &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_2_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_2_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_3_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8750_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_0_PHY_GDSC] = &gcc_pcie_0_phy_gdsc,
+ [GCC_UFS_MEM_PHY_GDSC] = &gcc_ufs_mem_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB3_PHY_GDSC] = &gcc_usb3_phy_gdsc,
+};
+
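+/*
+ * BCR offsets; entries with a second field assert a single ARES bit
+ * rather than the whole block reset.
+ */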
+static const struct qcom_reset_map gcc_sm8750_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9f000 },
+ [GCC_EVA_AXI0_CLK_ARES] = { 0x9f008, 2 },
+ [GCC_EVA_AXI0C_CLK_ARES] = { 0x9f018, 2 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_I2C_BCR] = { 0x17000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32028, 2 },
+};
+
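+/*
+ * These QUP serial-engine RCGs support Dynamic Frequency Switching and
+ * are handed to the DFS framework in probe.
+ */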
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8750_desc = {
+ .config = &gcc_sm8750_regmap_config,
+ .clks = gcc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8750_clocks),
+ .resets = gcc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8750_resets),
+ .gdscs = gcc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8750_gdscs),
+};
+
+static const struct of_device_id gcc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8750_match_table);
+
+static int gcc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8750_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep clocks always enabled:
+ * gcc_cam_bist_mclk_ahb_clk
+ * gcc_camera_ahb_clk
+ * gcc_camera_xo_clk
+ * gcc_disp_ahb_clk
+ * gcc_eva_ahb_clk
+ * gcc_eva_xo_clk
+ * gcc_gpu_cfg_ahb_clk
+ * gcc_video_ahb_clk
+ * gcc_video_xo_clk
+ * gcc_pcie_rscc_cfg_ahb_clk
+ * gcc_pcie_rscc_xo_clk
+ */
+ qcom_branch_set_clk_en(regmap, 0xa0004);
+ qcom_branch_set_clk_en(regmap, 0x26004);
+ qcom_branch_set_clk_en(regmap, 0x26034);
+ qcom_branch_set_clk_en(regmap, 0x27004);
+ qcom_branch_set_clk_en(regmap, 0x9f004);
+ qcom_branch_set_clk_en(regmap, 0x9f01c);
+ qcom_branch_set_clk_en(regmap, 0x71004);
+ qcom_branch_set_clk_en(regmap, 0x32004);
+ qcom_branch_set_clk_en(regmap, 0x32038);
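+ /* Bits 20 and 21 in the 0x52010 vote register enable the PCIe RSCC clocks listed above */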
+ regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
+ regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
+
+ /* FORCE_MEM_CORE_ON for the UFS PHY ICE core and UFS PHY AXI clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8750_driver = {
+ .probe = gcc_sm8750_probe,
+ .driver = {
+ .name = "gcc-sm8750",
+ .of_match_table = gcc_sm8750_match_table,
+ },
+};
+
+static int __init gcc_sm8750_init(void)
+{
+ return platform_driver_register(&gcc_sm8750_driver);
+}
+subsys_initcall(gcc_sm8750_init);
+
+static void __exit gcc_sm8750_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8750_driver);
+}
+module_exit(gcc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 0f578771071f..301fc9fc32d8 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = {
},
};
-static struct clk_branch gcc_disp_xo_clk = {
- .halt_reg = 0x27018,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x27018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_disp_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = {
},
};
-static struct clk_branch gcc_gpu_cfg_ahb_clk = {
- .halt_reg = 0x71004,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x71004,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x71004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_gpu_cfg_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
@@ -3123,7 +3095,7 @@ static struct clk_branch gcc_pcie_3_pipe_clk = {
static struct clk_branch gcc_pcie_3_pipediv2_clk = {
.halt_reg = 0x58060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52020,
.enable_mask = BIT(5),
@@ -3248,7 +3220,7 @@ static struct clk_branch gcc_pcie_4_pipe_clk = {
static struct clk_branch gcc_pcie_4_pipediv2_clk = {
.halt_reg = 0x6b054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(27),
@@ -3373,7 +3345,7 @@ static struct clk_branch gcc_pcie_5_pipe_clk = {
static struct clk_branch gcc_pcie_5_pipediv2_clk = {
.halt_reg = 0x2f054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(19),
@@ -3511,7 +3483,7 @@ static struct clk_branch gcc_pcie_6a_pipe_clk = {
static struct clk_branch gcc_pcie_6a_pipediv2_clk = {
.halt_reg = 0x31060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(28),
@@ -3649,7 +3621,7 @@ static struct clk_branch gcc_pcie_6b_pipe_clk = {
static struct clk_branch gcc_pcie_6b_pipediv2_clk = {
.halt_reg = 0x8d060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(28),
@@ -6083,7 +6055,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
.pd = {
.name = "gcc_usb20_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -6155,7 +6127,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
.pd = {
.name = "gcc_usb3_mp_ss1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
- [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_GPLL7] = &gcc_gpll7.clkr,
[GCC_GPLL8] = &gcc_gpll8.clkr,
[GCC_GPLL9] = &gcc_gpll9.clkr,
- [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
[GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
@@ -6704,6 +6674,8 @@ static const struct qcom_reset_map gcc_x1e80100_resets[] = {
[GCC_USB_1_PHY_BCR] = { 0x2a020 },
[GCC_USB_2_PHY_BCR] = { 0xa3020 },
[GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x32018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x32024, .bit = 2, .udelay = 1000 },
};
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
@@ -6783,6 +6755,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
+ /* FORCE_MEM_CORE_ON for the UFS PHY ICE core and UFS PHY AXI clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
}
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index fa5fe4c2a2ee..7deabf8400cf 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -46,7 +46,7 @@
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
-#define STATUS_POLL_TIMEOUT_US 1500
+#define STATUS_POLL_TIMEOUT_US 2000
#define TIMEOUT_US 500
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
@@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
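+ /* Program the retain-FF bits while the GDSC is still under SW control */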
+ if (sc->flags & RETAIN_FF_ENABLE)
+ gdsc_retain_ff_on(sc);
+
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
@@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
udelay(1);
}
- if (sc->flags & RETAIN_FF_ENABLE)
- gdsc_retain_ff_on(sc);
-
return 0;
}
@@ -457,13 +457,6 @@ static int gdsc_init(struct gdsc *sc)
goto err_disable_supply;
}
- /* Turn on HW trigger mode if supported */
- if (sc->flags & HW_CTRL) {
- ret = gdsc_hwctrl(sc, true);
- if (ret < 0)
- goto err_disable_supply;
- }
-
/*
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
@@ -471,6 +464,14 @@ static int gdsc_init(struct gdsc *sc)
*/
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
+
+ /* Turn on HW trigger mode if supported */
+ if (sc->flags & HW_CTRL) {
+ ret = gdsc_hwctrl(sc, true);
+ if (ret < 0)
+ goto err_disable_supply;
+ }
+
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
gdsc_enable(&sc->pd);
@@ -506,6 +507,55 @@ err_disable_supply:
return ret;
}
+static int gdsc_add_subdomain_list(struct dev_pm_domain_list *pd_list,
+ struct generic_pm_domain *subdomain)
+{
+ int i, ret;
+
+ for (i = 0; i < pd_list->num_pds; i++) {
+ struct device *dev = pd_list->pd_devs[i];
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+
+ ret = pm_genpd_add_subdomain(genpd, subdomain);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gdsc_remove_subdomain_list(struct dev_pm_domain_list *pd_list,
+ struct generic_pm_domain *subdomain)
+{
+ int i;
+
+ for (i = 0; i < pd_list->num_pds; i++) {
+ struct device *dev = pd_list->pd_devs[i];
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+
+ pm_genpd_remove_subdomain(genpd, subdomain);
+ }
+}
+
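+/* Remove the subdomain links for the first @num entries in @desc->scs */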
+static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
+{
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ int i;
+
+ /* Remove subdomains */
+ for (i = num - 1; i >= 0; i--) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ else if (desc->pd_list)
+ gdsc_remove_subdomain_list(desc->pd_list, &scs[i]->pd);
+ }
+}
+
int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
@@ -555,30 +605,30 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i])
continue;
if (scs[i]->parent)
- pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
- pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ else if (desc->pd_list)
+ ret = gdsc_add_subdomain_list(desc->pd_list, &scs[i]->pd);
+
+ if (ret)
+ goto err_pm_subdomain_remove;
}
return of_genpd_add_provider_onecell(dev->of_node, data);
+
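+ /* Unwind any subdomains that were added before the failure */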
+err_pm_subdomain_remove:
+ gdsc_pm_subdomain_remove(desc, i);
+
+ return ret;
}
void gdsc_unregister(struct gdsc_desc *desc)
{
- int i;
struct device *dev = desc->dev;
- struct gdsc **scs = desc->scs;
size_t num = desc->num;
- /* Remove subdomains */
- for (i = 0; i < num; i++) {
- if (!scs[i])
- continue;
- if (scs[i]->parent)
- pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
- else if (!IS_ERR_OR_NULL(dev->pm_domain))
- pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
- }
+ gdsc_pm_subdomain_remove(desc, num);
of_genpd_del_provider(dev->of_node);
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 1e2779b823d1..dd843e86c05b 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -80,6 +80,7 @@ struct gdsc_desc {
struct device *dev;
struct gdsc **scs;
size_t num;
+ struct dev_pm_domain_list *pd_list;
};
#ifdef CONFIG_QCOM_GDSC
diff --git a/drivers/clk/qcom/gpucc-milos.c b/drivers/clk/qcom/gpucc-milos.c
new file mode 100644
index 000000000000..4ee09879156e
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-milos.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of the clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpu_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 2 },
+ { P_GPU_CC_PLL0_OUT_ODD, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll0_out_even.clkr.hw },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(650000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(687500000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x910c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x910c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x90ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x90d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x90e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_dpm_clk = {
+ .halt_reg = 0x9110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_dpm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x9070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_acd_ahb_ff_clk = {
+ .halt_reg = 0x9068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_acd_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x9060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_rcg_ahb_ff_clk = {
+ .halt_reg = 0x906c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_rcg_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x90e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x90f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9080,
+ .gds_hw_ctrl = 0x9094,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct clk_regmap *gpu_cc_milos_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DPM_CLK] = &gpu_cc_dpm_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_ACD_AHB_FF_CLK] = &gpu_cc_gx_acd_ahb_ff_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_RCG_AHB_FF_CLK] = &gpu_cc_gx_rcg_ahb_ff_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_DIV_CLK_SRC] = &gpu_cc_hub_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
+};
+
+static struct gdsc *gpu_cc_milos_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_milos_resets[] = {
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x907c },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x905c },
+ [GPU_CC_RBCPR_BCR] = { 0x91e0 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_milos_plls[] = {
+ &gpu_cc_pll0,
+};
+
+static u32 gpu_cc_milos_critical_cbcrs[] = {
+ 0x93a4, /* GPU_CC_CB_CLK */
+ 0x9008, /* GPU_CC_CXO_AON_CLK */
+ 0x9010, /* GPU_CC_DEMET_CLK */
+ 0x9064, /* GPU_CC_GX_AHB_FF_CLK */
+ 0x93a8, /* GPU_CC_RSCC_HUB_AON_CLK */
+ 0x9004, /* GPU_CC_RSCC_XO_AON_CLK */
+ 0x90cc, /* GPU_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config gpu_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x95e8,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gpu_cc_milos_driver_data = {
+ .alpha_plls = gpu_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_milos_plls),
+ .clk_cbcrs = gpu_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc gpu_cc_milos_desc = {
+ .config = &gpu_cc_milos_regmap_config,
+ .clks = gpu_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_milos_clocks),
+ .resets = gpu_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_milos_resets),
+ .gdscs = gpu_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gpu_cc_milos_driver_data,
+};
+
+static const struct of_device_id gpu_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_milos_match_table);
+
+static int gpu_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_milos_desc);
+}
+
+static struct platform_driver gpu_cc_milos_driver = {
+ .probe = gpu_cc_milos_probe,
+ .driver = {
+ .name = "gpu_cc-milos",
+ .of_match_table = gpu_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index 9efeab2691ba..7fce70503141 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -7,11 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
diff --git a/drivers/clk/qcom/gpucc-qcs615.c b/drivers/clk/qcom/gpucc-qcs615.c
new file mode 100644
index 000000000000..ec6739c08425
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-qcs615.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_2X_CLK,
+ P_CRC_DIV_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_CRC_DIV_PLL1_OUT_AUX2,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco gpu_cc_pll0_vco[] = {
+ { 1000000000, 2100000000, 0 },
+};
+
+static const struct pll_vco gpu_cc_pll1_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 1020MHz configuration VCO - 0 */
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x35,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x20,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = gpu_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+/* 930MHz configuration VCO - 2 */
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x30,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x70,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .config = &gpu_cc_pll1_config,
+ .vco_table = gpu_cc_pll1_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll1_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ }
+};
+
+/* Clock Ramp Controller (CRC): fixed divide-by-2 of gpu_cc_pll0 */
+static struct clk_fixed_factor crc_div_pll0 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+/* Clock Ramp Controller (CRC): fixed divide-by-2 of gpu_cc_pll1 */
+static struct clk_fixed_factor crc_div_pll1 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_2X_CLK, 1 },
+ { P_CRC_DIV_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX, 3 },
+ { P_CRC_DIV_PLL1_OUT_AUX2, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &crc_div_pll0.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .hw = &crc_div_pll1.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(290000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(350000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(435000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(500000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(550000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(650000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(700000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(745000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(845000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(895000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x10a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_hw *gpu_cc_qcs615_hws[] = {
+ [CRC_DIV_PLL0] = &crc_div_pll0.hw,
+ [CRC_DIV_PLL1] = &crc_div_pll1.hw,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_qcs615_clocks[] = {
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_qcs615_gdscs[] = {
+ [CX_GDSC] = &cx_gdsc,
+ [GX_GDSC] = &gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_qcs615_resets[] = {
+ [GPU_CC_CX_BCR] = { 0x1068 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPU_CC_GMU_BCR] = { 0x111c },
+ [GPU_CC_GX_BCR] = { 0x1008 },
+ [GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_qcs615_plls[] = {
+ &gpu_cc_pll0,
+ &gpu_cc_pll1,
+};
+
+static u32 gpu_cc_qcs615_critical_cbcrs[] = {
+ 0x1078, /* GPU_CC_AHB_CLK */
+};
+
+static const struct regmap_config gpu_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7008,
+ .fast_io = true,
+};
+
+static void clk_qcs615_regs_crc_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, 0xff0, 0xff0);
+
+ /*
+ * After POR, the Clock Ramp Controller (CRC) is in bypass mode.
+ * Software must do the following to enable the CRC for the GFX3D
+ * clock and divide the input clock by 2.
+ */
+ regmap_update_bits(regmap, 0x1028, 0x00015011, 0x00015011);
+ regmap_update_bits(regmap, 0x1024, 0x00800000, 0x00800000);
+}
+
+static struct qcom_cc_driver_data gpu_cc_qcs615_driver_data = {
+ .alpha_plls = gpu_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_qcs615_plls),
+ .clk_cbcrs = gpu_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_qcs615_critical_cbcrs),
+ .clk_regs_configure = clk_qcs615_regs_crc_configure,
+};
+
+static const struct qcom_cc_desc gpu_cc_qcs615_desc = {
+ .config = &gpu_cc_qcs615_regmap_config,
+ .clks = gpu_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_qcs615_clocks),
+ .clk_hws = gpu_cc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gpu_cc_qcs615_hws),
+ .resets = gpu_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_qcs615_resets),
+ .gdscs = gpu_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_qcs615_gdscs),
+ .driver_data = &gpu_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id gpu_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_qcs615_match_table);
+
+static int gpu_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_qcs615_desc);
+}
+
+static struct platform_driver gpu_cc_qcs615_driver = {
+ .probe = gpu_cc_qcs615_probe,
+ .driver = {
+ .name = "gpucc-qcs615",
+ .of_match_table = gpu_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index f8a8ac343d70..25dcc5912f99 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
+#include <dt-bindings/clock/qcom,qcs8300-gpucc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -317,6 +317,24 @@ static struct clk_branch gpu_cc_crc_ahb_clk = {
},
};
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x95e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x95e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gpu_cc_cx_ff_clk = {
.halt_reg = 0x914c,
.halt_check = BRANCH_HALT,
@@ -347,7 +365,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
@@ -396,7 +414,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
&gpu_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -420,6 +438,24 @@ static struct clk_branch gpu_cc_demet_clk = {
},
};
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x95e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x95e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
.halt_reg = 0x7000,
.halt_check = BRANCH_HALT_VOTED,
@@ -463,7 +499,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = {
&gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
@@ -499,6 +535,7 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
[GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = NULL,
[GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
@@ -508,6 +545,7 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
[GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
[GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = NULL,
[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
[GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
[GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
@@ -583,6 +621,7 @@ static const struct qcom_cc_desc gpu_cc_sa8775p_desc = {
};
static const struct of_device_id gpu_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-gpucc" },
{ .compatible = "qcom,sa8775p-gpucc" },
{ }
};
@@ -596,6 +635,14 @@ static int gpu_cc_sa8775p_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
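+ /* QCS8300 uses a different GPU PLL0 configuration and adds the ACCU shift clocks */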
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-gpucc")) {
+ gpu_cc_pll0_config.l = 0x31;
+ gpu_cc_pll0_config.alpha = 0xe555;
+
+ gpu_cc_sa8775p_clocks[GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr;
+ gpu_cc_sa8775p_clocks[GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr;
+ }
+
clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
diff --git a/drivers/clk/qcom/gpucc-sar2130p.c b/drivers/clk/qcom/gpucc-sar2130p.c
new file mode 100644
index 000000000000..c2903179ac85
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sar2130p.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sar2130p-gpucc.h>
+#include <dt-bindings/reset/qcom,sar2130p-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 470MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x18,
+ .alpha = 0x7aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
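+/* GX power-on is managed by the GMU; gdsc_gx_do_nothing_enable leaves the enable sequence to it */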
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPUCC_GPU_CC_GX_BCR,
+ GPUCC_GPU_CC_ACD_BCR,
+ GPUCC_GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET,
+};
+
+static struct clk_regmap *gpu_cc_sar2130p_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sar2130p_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+};
+
+static struct gdsc *gpu_cc_sar2130p_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sar2130p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sar2130p_desc = {
+ .config = &gpu_cc_sar2130p_regmap_config,
+ .clks = gpu_cc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sar2130p_clocks),
+ .resets = gpu_cc_sar2130p_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sar2130p_resets),
+ .gdscs = gpu_cc_sar2130p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sar2130p_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sar2130p_match_table[] = {
+ { .compatible = "qcom,sar2130p-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sar2130p_match_table);
+
+static int gpu_cc_sar2130p_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sar2130p_desc);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Couldn't map GPU_CC\n");
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ return qcom_cc_really_probe(dev, &gpu_cc_sar2130p_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sar2130p_driver = {
+ .probe = gpu_cc_sar2130p_probe,
+ .driver = {
+ .name = "gpu_cc-sar2130p",
+ .of_match_table = gpu_cc_sar2130p_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sar2130p_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC SAR2130P Driver");
+MODULE_LICENSE("GPL");
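For context on how these GPU_CC outputs get consumed: a minimal sketch of a kernel consumer acquiring and gating one of the clocks registered above. The "gmu" con-id is a placeholder assumption for illustration; real consumers name clocks per their own DT bindings.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Minimal consumer sketch: look up a GPU_CC clock by con-id and gate it on. */
static int example_enable_gpucc_clk(struct device *dev)
{
	struct clk *clk;

	/* "gmu" is a hypothetical con-id used only for illustration */
	clk = devm_clk_get(dev, "gmu");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* pair with clk_disable_unprepare() on the teardown path */
	return clk_prepare_enable(clk);
}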
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index 08f3983d016f..97287488e05a 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -41,7 +42,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
index bd699a624517..f81289fa719d 100644
--- a/drivers/clk/qcom/gpucc-sc7280.c
+++ b/drivers/clk/qcom/gpucc-sc7280.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
index c96be61e3f47..2645612f1cac 100644
--- a/drivers/clk/qcom/gpucc-sc8280xp.c
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -415,7 +416,7 @@ static const struct regmap_config gpu_cc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
.config = &gpu_cc_sc8280xp_regmap_config,
.clks = gpu_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(gpu_cc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 3ae1b80e38d9..28db307b6717 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -6,15 +6,14 @@
*/
#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
+
#include <dt-bindings/clock/qcom,gpucc-sdm660.h>
#include "clk-alpha-pll.h"
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index ef26690cf504..0d63b110a1fb 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm4450.c b/drivers/clk/qcom/gpucc-sm4450.c
index a14d0bb031ac..34c7ba0c7d55 100644
--- a/drivers/clk/qcom/gpucc-sm4450.c
+++ b/drivers/clk/qcom/gpucc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 1e12ad8948db..efbee1518dd3 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -66,7 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll0",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
@@ -110,7 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
@@ -412,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -422,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
.pd = {
.name = "gpu_gx_gdsc",
.power_on = gdsc_gx_do_nothing_enable,
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index d711464a71b6..5701031c17f3 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -52,7 +53,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index 113b486a6d2f..eee3208640cd 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -55,7 +56,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
index f3b6bdc24485..4025dab0a1ca 100644
--- a/drivers/clk/qcom/gpucc-sm8350.c
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm8450.c b/drivers/clk/qcom/gpucc-sm8450.c
index b3c5d6923cd2..059df72deaa1 100644
--- a/drivers/clk/qcom/gpucc-sm8450.c
+++ b/drivers/clk/qcom/gpucc-sm8450.c
@@ -40,7 +40,7 @@ static const struct pll_vco lucid_evo_vco[] = {
{ 249600000, 2000000000, 0 },
};
-static struct alpha_pll_config gpu_cc_pll0_config = {
+static const struct alpha_pll_config gpu_cc_pll0_config = {
.l = 0x1d,
.alpha = 0xb000,
.config_ctl_val = 0x20485699,
@@ -50,6 +50,20 @@ static struct alpha_pll_config gpu_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0xb000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll gpu_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -67,7 +81,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
},
};
-static struct alpha_pll_config gpu_cc_pll1_config = {
+static const struct alpha_pll_config gpu_cc_pll1_config = {
.l = 0x34,
.alpha = 0x1555,
.config_ctl_val = 0x20485699,
@@ -77,6 +91,20 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_gpu_cc_pll1_config = {
+ .l = 0x34,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll gpu_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -736,6 +764,7 @@ static const struct qcom_cc_desc gpu_cc_sm8450_desc = {
static const struct of_device_id gpu_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-gpucc" },
+ { .compatible = "qcom,sm8475-gpucc" },
{ }
};
MODULE_DEVICE_TABLE(of, gpu_cc_sm8450_match_table);
@@ -748,8 +777,19 @@ static int gpu_cc_sm8450_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
- clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gpucc")) {
+ /* Update GPUCC PLL0 */
+ gpu_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ /* Update GPUCC PLL1 */
+ gpu_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &sm8475_gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &sm8475_gpu_cc_pll1_config);
+ } else {
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+ }
return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8450_desc, regmap);
}
@@ -763,5 +803,5 @@ static struct platform_driver gpu_cc_sm8450_driver = {
};
module_platform_driver(gpu_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI GPU_CC SM8450 Driver");
+MODULE_DESCRIPTION("QTI GPU_CC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
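The SM8475 support above keys off of_device_is_compatible() inside probe. As a hedged sketch, the same split could be expressed with per-compatible match data, a pattern other qcom clk drivers use; the wrapper struct and its field names below are hypothetical.

#include <linux/platform_device.h>
#include <linux/property.h>

#include "clk-alpha-pll.h"

/* Hypothetical per-SoC bundle selecting PLL register layout and config */
struct example_gpucc_match_data {
	const u8 *pll_regs;			/* e.g. Lucid EVO vs. OLE layout */
	const struct alpha_pll_config *pll0_cfg;
	const struct alpha_pll_config *pll1_cfg;
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_gpucc_match_data *data;

	/* returns the .data member of the matched of_device_id entry */
	data = device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* hand data->pll_regs and data->pll*_cfg to the PLL configure calls */
	return 0;
}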
diff --git a/drivers/clk/qcom/gpucc-x1p42100.c b/drivers/clk/qcom/gpucc-x1p42100.c
new file mode 100644
index 000000000000..4031d3ff560a
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-x1p42100.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 560.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0x2aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x9480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9480,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x947c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x947c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gpu_cc_x1p42100_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_x1p42100_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_x1p42100_resets[] = {
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_x1p42100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_x1p42100_desc = {
+ .config = &gpu_cc_x1p42100_regmap_config,
+ .clks = gpu_cc_x1p42100_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_x1p42100_clocks),
+ .resets = gpu_cc_x1p42100_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_x1p42100_resets),
+ .gdscs = gpu_cc_x1p42100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_x1p42100_gdscs),
+};
+
+static const struct of_device_id gpu_cc_x1p42100_match_table[] = {
+ { .compatible = "qcom,x1p42100-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_x1p42100_match_table);
+
+static int gpu_cc_x1p42100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_x1p42100_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_x1p42100_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver gpu_cc_x1p42100_driver = {
+ .probe = gpu_cc_x1p42100_probe,
+ .driver = {
+ .name = "gpucc-x1p42100",
+ .of_match_table = gpu_cc_x1p42100_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_x1p42100_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC X1P42100 Driver");
+MODULE_LICENSE("GPL");
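The x1p42100 probe above brackets all register access between pm_runtime_resume_and_get() and pm_runtime_put(). A minimal sketch of that bracketing on its own, assuming every register touch happens inside the window:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_bracketed_probe(struct platform_device *pdev)
{
	int ret;

	/* devm variant: runtime PM is disabled automatically on unbind */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* power the block up before touching any of its registers */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	/* ... map regmap, configure PLLs, register clocks ... */

	/* drop the usage count once register access is done */
	pm_runtime_put(&pdev->dev);
	return 0;
}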
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index b0b0cb074b4a..385964196185 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -99,7 +99,6 @@ static const struct regmap_config hfpll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x30,
- .fast_io = true,
};
static int qcom_hfpll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
new file mode 100644
index 000000000000..dafbf5732048
--- /dev/null
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * The CMN PLL block takes its reference clock from the on-board Wi-Fi
+ * block and supplies fixed-rate output clocks to the networking
+ * hardware blocks and to GCC. The networking blocks include the PPE
+ * (packet process engine), the externally connected PHY or switch
+ * devices, and the PCS.
+ *
+ * On the IPQ9574 SoC, the CMN PLL outputs three 50 MHz clocks and one
+ * 25 MHz clock to the Ethernet PHY (or switch), and one 353 MHz clock
+ * to the PPE. The remaining fixed-rate output clocks are supplied to
+ * GCC (24 MHz as XO and 32 kHz as sleep clock) and to the PCS at
+ * 31.25 MHz.
+ *
+ * On the IPQ5424 SoC, the CMN PLL outputs one clock to the PPE at
+ * 375 MHz and one clock to the NSS (network subsystem) at 300 MHz; the
+ * other output clocks are the same as on IPQ9574.
+ *
+ * +---------+
+ * | GCC |
+ * +--+---+--+
+ * AHB CLK| |SYS CLK
+ * V V
+ * +-------+---+------+
+ * | +-------------> eth0-50mhz
+ * REF CLK | IPQ9574 |
+ * -------->+ +-------------> eth1-50mhz
+ * | CMN PLL block |
+ * | +-------------> eth2-50mhz
+ * | |
+ * +----+----+----+---+-------------> eth-25mhz
+ * | | |
+ * V V V
+ * GCC PCS NSS/PPE
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5018-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5424-cmn-pll.h>
+
+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
+
+#define CMN_PLL_LOCKED 0x64
+#define CMN_PLL_CLKS_LOCKED BIT(8)
+
+#define CMN_PLL_POWER_ON_AND_RESET 0x780
+#define CMN_ANA_EN_SW_RSTN BIT(6)
+
+#define CMN_PLL_REFCLK_CONFIG 0x784
+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
+
+#define CMN_PLL_CTRL 0x78c
+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
+
+#define CMN_PLL_DIVIDER_CTRL 0x794
+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
+
+/**
+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
+ * @id: Clock specifier to be supplied
+ * @name: Clock name to be registered
+ * @rate: Clock rate
+ */
+struct cmn_pll_fixed_output_clk {
+ unsigned int id;
+ const char *name;
+ unsigned long rate;
+};
+
+/**
+ * struct clk_cmn_pll - CMN PLL hardware specific data
+ * @regmap: hardware regmap.
+ * @hw: handle between common and hardware-specific interfaces
+ */
+struct clk_cmn_pll {
+ struct regmap *regmap;
+ struct clk_hw hw;
+};
+
+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
+ .id = _id, \
+ .name = _name, \
+ .rate = _rate, \
+}
+
+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
+
+static const struct regmap_config ipq_cmn_pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fc,
+};
+
+static const struct cmn_pll_fixed_output_clk ipq5018_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5018_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5018_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5018_ETH_50MHZ_CLK, "eth-50mhz", 50000000UL),
+ { /* Sentinel */ }
+};
+
+static const struct cmn_pll_fixed_output_clk ipq5424_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5424_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(IPQ5424_NSS_300MHZ_CLK, "nss-300mhz", 300000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PPE_375MHZ_CLK, "ppe-375mhz", 375000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
+};
+
+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
+};
+
+/*
+ * The CMN PLL has a single parent clock, which may run at one of
+ * several supported rates; each supported parent clock rate maps to
+ * a specific reference index value in the hardware.
+ */
+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
+{
+ int index = -EINVAL;
+
+ switch (parent_rate) {
+ case 25000000:
+ index = 3;
+ break;
+ case 31250000:
+ index = 4;
+ break;
+ case 40000000:
+ index = 6;
+ break;
+ case 48000000:
+ case 96000000:
+ /*
+		 * Parent clock rates of 48 MHz and 96 MHz share the same
+		 * reference clock index; 96 MHz additionally requires the
+		 * source clock divider to be programmed to 2.
+ */
+ index = 7;
+ break;
+ case 50000000:
+ index = 8;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ u32 val, factor;
+
+ /*
+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
+ * by HW according to the parent clock rate.
+ */
+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
+
+ return parent_rate * 2 * factor;
+}
+
+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret;
+
+ /* Validate the rate of the single parent clock. */
+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This function initializes the CMN PLL to enable the fixed-rate
+ * output clocks. It is expected to be called only once.
+ */
+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ int ret, index;
+ u32 val;
+
+ /*
+	 * Configure the reference input clock selection as per the given
+	 * parent clock. The output clock rates are always fixed.
+ */
+ index = ipq_cmn_pll_find_freq_index(parent_rate);
+ if (index < 0)
+ return index;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_INDEX,
+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
+ if (ret)
+ return ret;
+
+ /*
+	 * Update the source clock rate selection and program the source
+	 * clock divider to 2 when the parent clock rate is 96 MHz.
+ */
+ if (parent_rate == 96000000) {
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
+ CMN_PLL_REFCLK_SRC_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PLL locked detect. */
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
+ CMN_PLL_CTRL_LOCK_DETECT_EN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset the CMN PLL block to ensure the updated configurations
+ * take effect.
+ */
+ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1200);
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ /* Stability check of CMN PLL output clocks. */
+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
+ (val & CMN_PLL_CLKS_LOCKED),
+ 100, 100 * USEC_PER_MSEC);
+}
+
+static const struct clk_ops clk_cmn_pll_ops = {
+ .recalc_rate = clk_cmn_pll_recalc_rate,
+ .determine_rate = clk_cmn_pll_determine_rate,
+ .set_rate = clk_cmn_pll_set_rate,
+};
+
+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
+{
+ struct clk_parent_data pdata = { .index = 0 };
+ struct device *dev = &pdev->dev;
+ struct clk_init_data init = {};
+ struct clk_cmn_pll *cmn_pll;
+ struct regmap *regmap;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
+ if (!cmn_pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "cmn_pll";
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_cmn_pll_ops;
+
+ cmn_pll->hw.init = &init;
+ cmn_pll->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &cmn_pll->hw;
+}
+
+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
+{
+ const struct cmn_pll_fixed_output_clk *p, *fixed_clk;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *cmn_pll_hw;
+ unsigned int num_clks;
+ struct clk_hw *hw;
+ int ret, i;
+
+ fixed_clk = device_get_match_data(dev);
+ if (!fixed_clk)
+ return -EINVAL;
+
+ num_clks = 0;
+ for (p = fixed_clk; p->name; p++)
+ num_clks++;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ /*
+ * Register the CMN PLL clock, which is the parent clock of
+ * the fixed rate output clocks.
+ */
+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
+ if (IS_ERR(cmn_pll_hw))
+ return PTR_ERR(cmn_pll_hw);
+
+ /* Register the fixed rate output clocks. */
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
+ cmn_pll_hw, 0,
+ fixed_clk[i].rate);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_fixed_clk;
+ }
+
+ hw_data->hws[fixed_clk[i].id] = hw;
+ }
+
+ /*
+	 * Provide the CMN PLL clock. The CMN PLL rate is configured to
+	 * 12 GHz via the DT property assigned-clock-rates-u64.
+ */
+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
+ hw_data->num = num_clks + 1;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+ if (ret)
+ goto unregister_fixed_clk;
+
+ platform_set_drvdata(pdev, hw_data);
+
+ return 0;
+
+unregister_fixed_clk:
+ while (i > 0)
+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
+
+ return ret;
+}
+
+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ /*
+	 * To access the CMN PLL registers, the GCC AHB and SYS clocks
+	 * of the CMN PLL block must be enabled.
+ */
+ ret = pm_clk_add(dev, "ahb");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
+
+ ret = pm_clk_add(dev, "sys");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ /* Register CMN PLL clock and fixed rate output clocks. */
+ ret = ipq_cmn_pll_register_clks(pdev);
+ pm_runtime_put(dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register CMN PLL clocks\n");
+
+ return 0;
+}
+
+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+ * The clock with index CMN_PLL_CLK is unregistered by
+ * device management.
+ */
+ for (i = 0; i < hw_data->num; i++) {
+ if (i != CMN_PLL_CLK)
+ clk_hw_unregister(hw_data->hws[i]);
+ }
+}
+
+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
+ { .compatible = "qcom,ipq5018-cmn-pll", .data = &ipq5018_output_clks },
+ { .compatible = "qcom,ipq5424-cmn-pll", .data = &ipq5424_output_clks },
+ { .compatible = "qcom,ipq9574-cmn-pll", .data = &ipq9574_output_clks },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
+
+static struct platform_driver ipq_cmn_pll_clk_driver = {
+ .probe = ipq_cmn_pll_clk_probe,
+ .remove = ipq_cmn_pll_clk_remove,
+ .driver = {
+ .name = "ipq_cmn_pll",
+ .of_match_table = ipq_cmn_pll_clk_ids,
+ .pm = &ipq_cmn_pll_pm_ops,
+ },
+};
+module_platform_driver(ipq_cmn_pll_clk_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
+MODULE_LICENSE("GPL");
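clk_cmn_pll_recalc_rate() above recovers the PLL rate as parent_rate * 2 * factor. As a worked check (the factor value 125 is an assumption chosen to reproduce the 12 GHz figure from the provider comment): 48 MHz * 2 * 125 = 12 GHz. A standalone sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long parent_rate = 48000000ULL;	/* 48 MHz reference */
	unsigned long long factor = 125ULL;		/* assumed divider factor */

	/* mirrors clk_cmn_pll_recalc_rate(): prints 12000000000 */
	printf("%llu\n", parent_rate * 2 * factor);
	return 0;
}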
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index e7cfa8d22044..97bfb21a5e5e 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index ae325f4e1047..f29d6dd1f3ac 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 45e726477086..7e2172969289 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -708,19 +709,29 @@ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
};
static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
- [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
+static const struct regmap_config lpass_audio_cc_sc7280_reset_regmap_config = {
+ .name = "lpassaudio_cc_reset",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0xc8,
+};
+
static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
- .config = &lpass_audio_cc_sc7280_regmap_config,
+ .config = &lpass_audio_cc_sc7280_reset_regmap_config,
.resets = lpass_audio_cc_sc7280_resets,
.num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
};
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
- { .compatible = "qcom,sc7280-lpassaudiocc" },
+ { .compatible = "qcom,qcm6490-lpassaudiocc", .data = &lpass_audio_cc_reset_sc7280_desc },
+ { .compatible = "qcom,sc7280-lpassaudiocc", .data = &lpass_audio_cc_sc7280_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
@@ -752,13 +763,17 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
+ desc = device_get_match_data(&pdev->dev);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcm6490-lpassaudiocc"))
+ return qcom_cc_probe_by_index(pdev, 1, desc);
+
ret = lpass_audio_setup_runtime_pm(pdev);
if (ret)
return ret;
lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc";
lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000;
- desc = &lpass_audio_cc_sc7280_desc;
regmap = qcom_cc_map(pdev, desc);
if (IS_ERR(regmap)) {
@@ -772,7 +787,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
regmap_write(regmap, 0x4, 0x3b);
regmap_write(regmap, 0x8, 0xff05);
- ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
+ ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
goto exit;
@@ -784,7 +799,6 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -853,7 +867,6 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/clk/qcom/lpasscc-sc8280xp.c b/drivers/clk/qcom/lpasscc-sc8280xp.c
index 9fd9498d7dc8..ff839788c40e 100644
--- a/drivers/clk/qcom/lpasscc-sc8280xp.c
+++ b/drivers/clk/qcom/lpasscc-sc8280xp.c
@@ -18,9 +18,9 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sc8280xp_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
- [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
+ [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
};
static const struct regmap_config lpass_audiocc_sc8280xp_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c
index 7040da952728..5c1ea75f9ba8 100644
--- a/drivers/clk/qcom/lpasscc-sdm845.c
+++ b/drivers/clk/qcom/lpasscc-sdm845.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,lpass-sdm845.h>
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
new file mode 100644
index 000000000000..ac6d219233b4
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, 2023 Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-lpasscc.h>
+
+#include "common.h"
+#include "reset.h"
+
+static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-audio-csr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpass_audiocc_sm6115_reset_desc = {
+ .config = &lpass_audiocc_sm6115_regmap_config,
+ .resets = lpass_audiocc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpass_audiocc_sm6115_resets),
+};
+
+static const struct qcom_reset_map lpasscc_sm6115_resets[] = {
+ [LPASS_SWR_TX_CONFIG_CGCR] = { .reg = 0x100, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpasscc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-tcsr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpasscc_sm6115_reset_desc = {
+ .config = &lpasscc_sm6115_regmap_config,
+ .resets = lpasscc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpasscc_sm6115_resets),
+};
+
+static const struct of_device_id lpasscc_sm6115_match_table[] = {
+ {
+ .compatible = "qcom,sm6115-lpassaudiocc",
+ .data = &lpass_audiocc_sm6115_reset_desc,
+ }, {
+ .compatible = "qcom,sm6115-lpasscc",
+ .data = &lpasscc_sm6115_reset_desc,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpasscc_sm6115_match_table);
+
+static int lpasscc_sm6115_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc = of_device_get_match_data(&pdev->dev);
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static struct platform_driver lpasscc_sm6115_driver = {
+ .probe = lpasscc_sm6115_probe,
+ .driver = {
+ .name = "lpasscc-sm6115",
+ .of_match_table = lpasscc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(lpasscc_sm6115_driver);
+
+MODULE_DESCRIPTION("QTI LPASSCC SM6115 Driver");
+MODULE_LICENSE("GPL");
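These SM6115 controllers expose only reset lines, no clocks. A minimal sketch of a consumer driving one of them through the reset framework; the "swr_rx" id is a placeholder assumption, and real ids come from the consumer's DT binding.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_pulse_swr_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	/* "swr_rx" is a hypothetical reset id used only for illustration */
	rst = devm_reset_control_get_exclusive(dev, "swr_rx");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	fsleep(500);	/* settle time, mirroring the .udelay in the map above */

	return reset_control_deassert(rst);
}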
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 726c6378752f..5174bd3dcdc5 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -9,7 +9,6 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
@@ -43,7 +42,7 @@ static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
};
static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_CAL_L_VAL] = 0x8,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -413,7 +412,6 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
ret = qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7180_desc, regmap);
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -434,7 +432,6 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 0, desc);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return ret;
diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
index b0888cd2460b..56882c202376 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7280.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index cc03722596a4..2d334977d783 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -6,9 +6,9 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-apq8084.h>
#include <dt-bindings/reset/qcom,mmcc-apq8084.h>
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 3f41249c5ae4..cd3c9f8455e5 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -8,13 +8,11 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/reset/qcom,mmcc-msm8960.h>
@@ -37,6 +35,7 @@ enum {
P_DSI2_PLL_DSICLK,
P_DSI1_PLL_BYTECLK,
P_DSI2_PLL_BYTECLK,
+ P_LVDS_PLL,
};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
@@ -143,6 +142,20 @@ static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
{ .fw_name = "dsi1pll", .name = "dsi1pll" },
};
+static const struct parent_map mmcc_pxo_dsi2_dsi1_lvds_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_LVDS_PLL, 2 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1_lvds[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "lvdspll", .name = "mpd4_lvds_pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
{ P_PXO, 0 },
{ P_DSI1_PLL_BYTECLK, 1 },
@@ -2439,26 +2452,42 @@ static struct clk_rcg dsi2_pixel_src = {
},
.s = {
.src_sel_shift = 0,
- .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ .parent_map = mmcc_pxo_dsi2_dsi1_lvds_map,
},
.clkr = {
.enable_reg = 0x0094,
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_data = mmcc_pxo_dsi2_dsi1,
- .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
+ .parent_data = mmcc_pxo_dsi2_dsi1_lvds,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1_lvds),
.ops = &clk_rcg_pixel_ops,
},
},
};
+static struct clk_branch dsi2_pixel_lvds_src = {
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_pixel_lvds_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch dsi2_pixel_clk = {
.halt_reg = 0x01d0,
.halt_bit = 19,
.clkr = {
.enable_reg = 0x0094,
- .enable_mask = BIT(0),
+ .enable_mask = 0,
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
.parent_hws = (const struct clk_hw*[]){
@@ -2471,6 +2500,24 @@ static struct clk_branch dsi2_pixel_clk = {
},
};
+static struct clk_branch lvds_clk = {
+ .halt_reg = 0x024c,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0264,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_lvds_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_lvds_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gfx2d0_ahb_clk = {
.hwcg_reg = 0x0038,
.hwcg_bit = 28,
@@ -2799,6 +2846,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
[CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
[PLL2] = &pll2.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_msm8960_resets[] = {
@@ -2983,6 +3032,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[VCAP_CLK] = &vcap_clk.clkr,
[VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
[PLL15] = &pll15.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_apq8064_resets[] = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 169e85f60550..12bbc49c87af 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -7,11 +7,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
#include <dt-bindings/reset/qcom,mmcc-msm8974.h>
diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
index f70d080bf51c..7c0b959a4aa2 100644
--- a/drivers/clk/qcom/mmcc-msm8994.c
+++ b/drivers/clk/qcom/mmcc-msm8994.c
@@ -7,12 +7,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/qcom,mmcc-msm8994.h>
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index a742f848e4ee..7d67c6f73fe1 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -7,12 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 5738445a8656..e2f198213b21 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -7,11 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 98ba5b4518fb..b723c536dfb6 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -9,14 +9,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
-
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
@@ -78,7 +74,7 @@ static struct clk_alpha_pll mmpll0 = {
},
};
-static struct clk_alpha_pll mmpll6 = {
+static struct clk_alpha_pll mmpll6 = {
.offset = 0xf0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
@@ -2544,7 +2540,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = {
.halt_reg = 0x1048,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x1048,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
new file mode 100644
index 000000000000..c8b11b04a7c2
--- /dev/null
+++ b/drivers/clk/qcom/nsscc-ipq9574.c
@@ -0,0 +1,3110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
+#include <dt-bindings/interconnect/qcom,ipq9574.h>
+#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_XO,
+ DT_BIAS_PLL_CC_CLK,
+ DT_BIAS_PLL_UBI_NC_CLK,
+ DT_GCC_GPLL0_OUT_AUX,
+ DT_UNIPHY0_NSS_RX_CLK,
+ DT_UNIPHY0_NSS_TX_CLK,
+ DT_UNIPHY1_NSS_RX_CLK,
+ DT_UNIPHY1_NSS_TX_CLK,
+ DT_UNIPHY2_NSS_RX_CLK,
+ DT_UNIPHY2_NSS_TX_CLK,
+};
+
+enum {
+ P_XO,
+ P_BIAS_PLL_CC_CLK,
+ P_BIAS_PLL_UBI_NC_CLK,
+ P_GCC_GPLL0_OUT_AUX,
+ P_UBI32_PLL_OUT_MAIN,
+ P_UNIPHY0_NSS_RX_CLK,
+ P_UNIPHY0_NSS_TX_CLK,
+ P_UNIPHY1_NSS_RX_CLK,
+ P_UNIPHY1_NSS_TX_CLK,
+ P_UNIPHY2_NSS_RX_CLK,
+ P_UNIPHY2_NSS_TX_CLK,
+};
+
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x6666,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+ .alpha_en_mask = BIT(24),
+ .test_ctl_val = 0x1c0000c0,
+ .test_ctl_hi_val = 0x4000,
+};
+
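+/*
+ * The UBI32 Huayra PLL is modelled as two clocks: ubi32_pll_main is the
+ * PLL itself (SUPPORTS_DYNAMIC_UPDATE lets its rate change without a
+ * disable/re-enable cycle) and ubi32_pll is its post divider.  The
+ * divider uses read-only ops, so Linux reports but never reprograms
+ * the value presumably left there by the boot firmware.
+ */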
+static struct clk_alpha_pll ubi32_pll_main = {
+ .offset = 0x28000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ubi32_pll_main",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+ .offset = 0x28000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ubi32_pll",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ubi32_pll_main.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
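+/*
+ * Each parent_map entry pairs a P_* source with the selector value
+ * programmed into the RCG mux; the matching clk_parent_data array
+ * resolves that source to an actual clock, either by its position in
+ * the DT "clocks" list (.index) or by a clock registered by this
+ * driver (.hw).
+ */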
+static const struct parent_map nss_cc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY0_NSS_RX_CLK, 2 },
+ { P_UNIPHY0_NSS_TX_CLK, 3 },
+ { P_UNIPHY1_NSS_RX_CLK, 4 },
+ { P_UNIPHY1_NSS_TX_CLK, 5 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_0[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+ { .index = DT_UNIPHY1_NSS_RX_CLK },
+ { .index = DT_UNIPHY1_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_UBI_NC_CLK, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_1[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_UBI_NC_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_UBI32_PLL_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_2[] = {
+ { .index = DT_XO },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+};
+
+static const struct parent_map nss_cc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_3[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+};
+
+static const struct parent_map nss_cc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY0_NSS_RX_CLK, 2 },
+ { P_UNIPHY0_NSS_TX_CLK, 3 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_4[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY2_NSS_RX_CLK, 2 },
+ { P_UNIPHY2_NSS_TX_CLK, 3 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_5[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY2_NSS_RX_CLK },
+ { .index = DT_UNIPHY2_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_6[] = {
+ { .index = DT_XO },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_UBI32_PLL_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_7[] = {
+ { .index = DT_XO },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(353000000, P_BIAS_PLL_UBI_NC_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_ce_clk_src = {
+ .cmd_rcgr = 0x28404,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_cfg_clk_src = {
+ .cmd_rcgr = 0x28104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_clc_clk_src[] = {
+ F(533333333, P_GCC_GPLL0_OUT_AUX, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_clc_clk_src = {
+ .cmd_rcgr = 0x28604,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_6,
+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_clc_clk_src",
+ .parent_data = nss_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_crypto_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(300000000, P_BIAS_PLL_CC_CLK, 4, 0, 0),
+ F(600000000, P_BIAS_PLL_CC_CLK, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_crypto_clk_src = {
+ .cmd_rcgr = 0x16008,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_crypto_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_haq_clk_src = {
+ .cmd_rcgr = 0x28304,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_imem_clk_src = {
+ .cmd_rcgr = 0xe008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_int_cfg_clk_src[] = {
+ F(200000000, P_GCC_GPLL0_OUT_AUX, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_int_cfg_clk_src = {
+ .cmd_rcgr = 0x287b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_int_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_int_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
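+/*
+ * Some port frequencies can be generated from more than one
+ * source/divider combination, depending on the mode the UNIPHY is in.
+ * C() describes a single configuration, FM() groups the alternatives
+ * for one frequency and FMS() is a frequency with only one
+ * configuration; clk_rcg2_fm_ops then picks whichever configuration
+ * the current parent rate can satisfy.
+ */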
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
+ FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
+ FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_312p5[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
+ FM(312500000, ftbl_nss_cc_port5_rx_clk_src_312p5),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_312p5[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
+ FM(312500000, ftbl_nss_cc_port5_tx_clk_src_312p5),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port6_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port6_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port6_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port6_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
+ .cmd_rcgr = 0x28110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
+ .cmd_rcgr = 0x2811c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
+ .cmd_rcgr = 0x28128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
+ .cmd_rcgr = 0x28134,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
+ .cmd_rcgr = 0x28140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
+ .cmd_rcgr = 0x2814c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port4_rx_clk_src = {
+ .cmd_rcgr = 0x28158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port4_tx_clk_src = {
+ .cmd_rcgr = 0x28164,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port5_rx_clk_src = {
+ .cmd_rcgr = 0x28170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_multi_tbl = ftbl_nss_cc_port5_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port5_tx_clk_src = {
+ .cmd_rcgr = 0x2817c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_multi_tbl = ftbl_nss_cc_port5_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port6_rx_clk_src = {
+ .cmd_rcgr = 0x28188,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_5,
+ .freq_multi_tbl = ftbl_nss_cc_port6_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_clk_src",
+ .parent_data = nss_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port6_tx_clk_src = {
+ .cmd_rcgr = 0x28194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_5,
+ .freq_multi_tbl = ftbl_nss_cc_port6_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_clk_src",
+ .parent_data = nss_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ppe_clk_src = {
+ .cmd_rcgr = 0x28204,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_ubi0_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(187200000, P_UBI32_PLL_OUT_MAIN, 8, 0, 0),
+ F(748800000, P_UBI32_PLL_OUT_MAIN, 2, 0, 0),
+ F(1497600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+ F(1689600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
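+/*
+ * The 1497.6 MHz and 1689.6 MHz entries both use divider 1: with
+ * CLK_SET_RATE_PARENT the request propagates through ubi32_pll to
+ * ubi32_pll_main, so the PLL itself is retuned to reach the target.
+ */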
+static struct clk_rcg2 nss_cc_ubi0_clk_src = {
+ .cmd_rcgr = 0x28704,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi0_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi1_clk_src = {
+ .cmd_rcgr = 0x2870c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi1_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi2_clk_src = {
+ .cmd_rcgr = 0x28714,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi2_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi3_clk_src = {
+ .cmd_rcgr = 0x2871c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi3_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi_axi_clk_src = {
+ .cmd_rcgr = 0x28724,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_7,
+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi_axi_clk_src",
+ .parent_data = nss_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi_nc_axi_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2872c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi_nc_axi_bfdcd_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
+ .reg = 0x28118,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
+ .reg = 0x28124,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
+ .reg = 0x28130,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
+ .reg = 0x2813c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
+ .reg = 0x28148,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
+ .reg = 0x28154,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port4_rx_div_clk_src = {
+ .reg = 0x28160,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port4_tx_div_clk_src = {
+ .reg = 0x2816c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port5_rx_div_clk_src = {
+ .reg = 0x28178,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port5_tx_div_clk_src = {
+ .reg = 0x28184,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port6_rx_div_clk_src = {
+ .reg = 0x28190,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port6_tx_div_clk_src = {
+ .reg = 0x2819c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
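+/*
+ * The UBI core dividers below (and the xgmac*_ptp_ref dividers that
+ * follow) use read-only ops: the divider value, presumably set by the
+ * boot firmware, is only reported, never reprogrammed.
+ */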
+static struct clk_regmap_div nss_cc_ubi0_div_clk_src = {
+ .reg = 0x287a4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi1_div_clk_src = {
+ .reg = 0x287a8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi2_div_clk_src = {
+ .reg = 0x287ac,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi2_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi3_div_clk_src = {
+ .reg = 0x287b0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi3_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
+ .reg = 0x28214,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
+ .reg = 0x28218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
+ .reg = 0x2821c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac3_ptp_ref_div_clk_src = {
+ .reg = 0x28220,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac3_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac4_ptp_ref_div_clk_src = {
+ .reg = 0x28224,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac4_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac5_ptp_ref_div_clk_src = {
+ .reg = 0x28228,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac5_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch nss_cc_ce_apb_clk = {
+ .halt_reg = 0x2840c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2840c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_apb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ce_axi_clk = {
+ .halt_reg = 0x28410,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28410,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_clc_axi_clk = {
+ .halt_reg = 0x2860c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2860c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_clc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_clc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_crypto_ppe_clk = {
+ .halt_reg = 0x28240,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28240,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_ppe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_haq_ahb_clk = {
+ .halt_reg = 0x2830c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2830c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_haq_axi_clk = {
+ .halt_reg = 0x28310,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28310,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_imem_ahb_clk = {
+ .halt_reg = 0xe018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_imem_qsb_clk = {
+ .halt_reg = 0xe010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_qsb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_imem_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nss_csr_clk = {
+ .halt_reg = 0x281d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nss_csr_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
+ .halt_reg = 0x28414,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28414,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ce_apb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
+ .halt_reg = 0x28418,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28418,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ce_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_clc_axi_clk = {
+ .halt_reg = 0x28610,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28610,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_clc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_clc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_crypto_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_ahb_clk = {
+ .halt_reg = 0x28314,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28314,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_haq_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_axi_clk = {
+ .halt_reg = 0x28318,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28318,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_haq_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_ahb_clk = {
+ .halt_reg = 0xe01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_imem_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_qsb_clk = {
+ .halt_reg = 0xe014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_imem_qsb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_imem_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
+ .halt_reg = 0x281d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_nss_csr_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
+ .halt_reg = 0x28248,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28248,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ppe_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
+ .halt_reg = 0x28244,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28244,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ppe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_ahb0_clk = {
+ .halt_reg = 0x28788,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28788,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_ahb0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_axi0_clk = {
+ .halt_reg = 0x287a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x287a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_int0_ahb_clk = {
+ .halt_reg = 0x2878c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2878c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_int0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_1_clk = {
+ .halt_reg = 0x287bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x287bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_clk = {
+ .halt_reg = 0x28764,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28764,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_mac_clk = {
+ .halt_reg = 0x2824c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2824c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_rx_clk = {
+ .halt_reg = 0x281a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_tx_clk = {
+ .halt_reg = 0x281a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_mac_clk = {
+ .halt_reg = 0x28250,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28250,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_rx_clk = {
+ .halt_reg = 0x281a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_tx_clk = {
+ .halt_reg = 0x281ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_mac_clk = {
+ .halt_reg = 0x28254,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28254,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_rx_clk = {
+ .halt_reg = 0x281b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_tx_clk = {
+ .halt_reg = 0x281b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_mac_clk = {
+ .halt_reg = 0x28258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28258,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_rx_clk = {
+ .halt_reg = 0x281b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_tx_clk = {
+ .halt_reg = 0x281bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_mac_clk = {
+ .halt_reg = 0x2825c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2825c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_rx_clk = {
+ .halt_reg = 0x281c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_tx_clk = {
+ .halt_reg = 0x281c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_mac_clk = {
+ .halt_reg = 0x28260,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28260,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_rx_clk = {
+ .halt_reg = 0x281c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_tx_clk = {
+ .halt_reg = 0x281cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
+ .halt_reg = 0x2823c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2823c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_edma_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_clk = {
+ .halt_reg = 0x28238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28238,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_edma_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2827c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_btq_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
+ .halt_reg = 0x28234,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28234,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_clk = {
+ .halt_reg = 0x28230,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28230,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
+ .halt_reg = 0x2822c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2822c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_ipe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb0_clk = {
+ .halt_reg = 0x28768,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28768,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb1_clk = {
+ .halt_reg = 0x28770,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28770,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb2_clk = {
+ .halt_reg = 0x28778,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28778,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb3_clk = {
+ .halt_reg = 0x28780,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28780,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi0_clk = {
+ .halt_reg = 0x28790,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28790,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi1_clk = {
+ .halt_reg = 0x28794,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28794,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi2_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28798,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi3_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2879c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core0_clk = {
+ .halt_reg = 0x28734,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28734,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core1_clk = {
+ .halt_reg = 0x28738,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28738,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core2_clk = {
+ .halt_reg = 0x2873c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2873c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core3_clk = {
+ .halt_reg = 0x28740,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28740,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr0_ahb_clk = {
+ .halt_reg = 0x2876c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2876c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr1_ahb_clk = {
+ .halt_reg = 0x28774,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28774,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr2_ahb_clk = {
+ .halt_reg = 0x2877c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2877c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr2_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr3_ahb_clk = {
+ .halt_reg = 0x28784,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28784,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr3_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi0_clk = {
+ .halt_reg = 0x28744,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28744,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi1_clk = {
+ .halt_reg = 0x2874c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2874c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi2_clk = {
+ .halt_reg = 0x28754,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28754,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi3_clk = {
+ .halt_reg = 0x2875c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2875c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm0_clk = {
+ .halt_reg = 0x28748,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28748,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm1_clk = {
+ .halt_reg = 0x28750,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28750,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm2_clk = {
+ .halt_reg = 0x28758,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28758,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm3_clk = {
+ .halt_reg = 0x28760,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28760,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
+ .halt_reg = 0x28904,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28904,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port1_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
+ .halt_reg = 0x28908,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28908,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port1_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
+ .halt_reg = 0x2890c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2890c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port2_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
+ .halt_reg = 0x28910,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28910,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port2_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
+ .halt_reg = 0x28914,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28914,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port3_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
+ .halt_reg = 0x28918,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28918,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port3_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port4_rx_clk = {
+ .halt_reg = 0x2891c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2891c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port4_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port4_tx_clk = {
+ .halt_reg = 0x28920,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28920,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port4_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port5_rx_clk = {
+ .halt_reg = 0x28924,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28924,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port5_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port5_tx_clk = {
+ .halt_reg = 0x28928,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28928,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port5_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port6_rx_clk = {
+ .halt_reg = 0x2892c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2892c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port6_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port6_tx_clk = {
+ .halt_reg = 0x28930,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28930,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port6_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
+ .halt_reg = 0x28264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
+ .halt_reg = 0x28268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
+ .halt_reg = 0x2826c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2826c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac3_ptp_ref_clk = {
+ .halt_reg = 0x28270,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28270,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac3_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac4_ptp_ref_clk = {
+ .halt_reg = 0x28274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac4_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac5_ptp_ref_clk = {
+ .halt_reg = 0x28278,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28278,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac5_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *nss_cc_ipq9574_clocks[] = {
+ [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
+ [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
+ [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
+ [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
+ [NSS_CC_CLC_AXI_CLK] = &nss_cc_clc_axi_clk.clkr,
+ [NSS_CC_CLC_CLK_SRC] = &nss_cc_clc_clk_src.clkr,
+ [NSS_CC_CRYPTO_CLK] = &nss_cc_crypto_clk.clkr,
+ [NSS_CC_CRYPTO_CLK_SRC] = &nss_cc_crypto_clk_src.clkr,
+ [NSS_CC_CRYPTO_PPE_CLK] = &nss_cc_crypto_ppe_clk.clkr,
+ [NSS_CC_HAQ_AHB_CLK] = &nss_cc_haq_ahb_clk.clkr,
+ [NSS_CC_HAQ_AXI_CLK] = &nss_cc_haq_axi_clk.clkr,
+ [NSS_CC_HAQ_CLK_SRC] = &nss_cc_haq_clk_src.clkr,
+ [NSS_CC_IMEM_AHB_CLK] = &nss_cc_imem_ahb_clk.clkr,
+ [NSS_CC_IMEM_CLK_SRC] = &nss_cc_imem_clk_src.clkr,
+ [NSS_CC_IMEM_QSB_CLK] = &nss_cc_imem_qsb_clk.clkr,
+ [NSS_CC_INT_CFG_CLK_SRC] = &nss_cc_int_cfg_clk_src.clkr,
+ [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
+ [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
+ [NSS_CC_NSSNOC_CLC_AXI_CLK] = &nss_cc_nssnoc_clc_axi_clk.clkr,
+ [NSS_CC_NSSNOC_CRYPTO_CLK] = &nss_cc_nssnoc_crypto_clk.clkr,
+ [NSS_CC_NSSNOC_HAQ_AHB_CLK] = &nss_cc_nssnoc_haq_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_HAQ_AXI_CLK] = &nss_cc_nssnoc_haq_axi_clk.clkr,
+ [NSS_CC_NSSNOC_IMEM_AHB_CLK] = &nss_cc_nssnoc_imem_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_IMEM_QSB_CLK] = &nss_cc_nssnoc_imem_qsb_clk.clkr,
+ [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_AHB0_CLK] = &nss_cc_nssnoc_ubi32_ahb0_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_AXI0_CLK] = &nss_cc_nssnoc_ubi32_axi0_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK] =
+ &nss_cc_nssnoc_ubi32_int0_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK] =
+ &nss_cc_nssnoc_ubi32_nc_axi0_1_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK] =
+ &nss_cc_nssnoc_ubi32_nc_axi0_clk.clkr,
+ [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
+ [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
+ [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
+ [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
+ [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
+ [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
+ [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
+ [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
+ [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
+ [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
+ [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
+ [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
+ [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
+ [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
+ [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
+ [NSS_CC_PORT4_MAC_CLK] = &nss_cc_port4_mac_clk.clkr,
+ [NSS_CC_PORT4_RX_CLK] = &nss_cc_port4_rx_clk.clkr,
+ [NSS_CC_PORT4_RX_CLK_SRC] = &nss_cc_port4_rx_clk_src.clkr,
+ [NSS_CC_PORT4_RX_DIV_CLK_SRC] = &nss_cc_port4_rx_div_clk_src.clkr,
+ [NSS_CC_PORT4_TX_CLK] = &nss_cc_port4_tx_clk.clkr,
+ [NSS_CC_PORT4_TX_CLK_SRC] = &nss_cc_port4_tx_clk_src.clkr,
+ [NSS_CC_PORT4_TX_DIV_CLK_SRC] = &nss_cc_port4_tx_div_clk_src.clkr,
+ [NSS_CC_PORT5_MAC_CLK] = &nss_cc_port5_mac_clk.clkr,
+ [NSS_CC_PORT5_RX_CLK] = &nss_cc_port5_rx_clk.clkr,
+ [NSS_CC_PORT5_RX_CLK_SRC] = &nss_cc_port5_rx_clk_src.clkr,
+ [NSS_CC_PORT5_RX_DIV_CLK_SRC] = &nss_cc_port5_rx_div_clk_src.clkr,
+ [NSS_CC_PORT5_TX_CLK] = &nss_cc_port5_tx_clk.clkr,
+ [NSS_CC_PORT5_TX_CLK_SRC] = &nss_cc_port5_tx_clk_src.clkr,
+ [NSS_CC_PORT5_TX_DIV_CLK_SRC] = &nss_cc_port5_tx_div_clk_src.clkr,
+ [NSS_CC_PORT6_MAC_CLK] = &nss_cc_port6_mac_clk.clkr,
+ [NSS_CC_PORT6_RX_CLK] = &nss_cc_port6_rx_clk.clkr,
+ [NSS_CC_PORT6_RX_CLK_SRC] = &nss_cc_port6_rx_clk_src.clkr,
+ [NSS_CC_PORT6_RX_DIV_CLK_SRC] = &nss_cc_port6_rx_div_clk_src.clkr,
+ [NSS_CC_PORT6_TX_CLK] = &nss_cc_port6_tx_clk.clkr,
+ [NSS_CC_PORT6_TX_CLK_SRC] = &nss_cc_port6_tx_clk_src.clkr,
+ [NSS_CC_PORT6_TX_DIV_CLK_SRC] = &nss_cc_port6_tx_div_clk_src.clkr,
+ [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
+ [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
+ [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
+ [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
+ [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
+ [NSS_CC_UBI0_CLK_SRC] = &nss_cc_ubi0_clk_src.clkr,
+ [NSS_CC_UBI0_DIV_CLK_SRC] = &nss_cc_ubi0_div_clk_src.clkr,
+ [NSS_CC_UBI1_CLK_SRC] = &nss_cc_ubi1_clk_src.clkr,
+ [NSS_CC_UBI1_DIV_CLK_SRC] = &nss_cc_ubi1_div_clk_src.clkr,
+ [NSS_CC_UBI2_CLK_SRC] = &nss_cc_ubi2_clk_src.clkr,
+ [NSS_CC_UBI2_DIV_CLK_SRC] = &nss_cc_ubi2_div_clk_src.clkr,
+ [NSS_CC_UBI32_AHB0_CLK] = &nss_cc_ubi32_ahb0_clk.clkr,
+ [NSS_CC_UBI32_AHB1_CLK] = &nss_cc_ubi32_ahb1_clk.clkr,
+ [NSS_CC_UBI32_AHB2_CLK] = &nss_cc_ubi32_ahb2_clk.clkr,
+ [NSS_CC_UBI32_AHB3_CLK] = &nss_cc_ubi32_ahb3_clk.clkr,
+ [NSS_CC_UBI32_AXI0_CLK] = &nss_cc_ubi32_axi0_clk.clkr,
+ [NSS_CC_UBI32_AXI1_CLK] = &nss_cc_ubi32_axi1_clk.clkr,
+ [NSS_CC_UBI32_AXI2_CLK] = &nss_cc_ubi32_axi2_clk.clkr,
+ [NSS_CC_UBI32_AXI3_CLK] = &nss_cc_ubi32_axi3_clk.clkr,
+ [NSS_CC_UBI32_CORE0_CLK] = &nss_cc_ubi32_core0_clk.clkr,
+ [NSS_CC_UBI32_CORE1_CLK] = &nss_cc_ubi32_core1_clk.clkr,
+ [NSS_CC_UBI32_CORE2_CLK] = &nss_cc_ubi32_core2_clk.clkr,
+ [NSS_CC_UBI32_CORE3_CLK] = &nss_cc_ubi32_core3_clk.clkr,
+ [NSS_CC_UBI32_INTR0_AHB_CLK] = &nss_cc_ubi32_intr0_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR1_AHB_CLK] = &nss_cc_ubi32_intr1_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR2_AHB_CLK] = &nss_cc_ubi32_intr2_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR3_AHB_CLK] = &nss_cc_ubi32_intr3_ahb_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI0_CLK] = &nss_cc_ubi32_nc_axi0_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI1_CLK] = &nss_cc_ubi32_nc_axi1_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI2_CLK] = &nss_cc_ubi32_nc_axi2_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI3_CLK] = &nss_cc_ubi32_nc_axi3_clk.clkr,
+ [NSS_CC_UBI32_UTCM0_CLK] = &nss_cc_ubi32_utcm0_clk.clkr,
+ [NSS_CC_UBI32_UTCM1_CLK] = &nss_cc_ubi32_utcm1_clk.clkr,
+ [NSS_CC_UBI32_UTCM2_CLK] = &nss_cc_ubi32_utcm2_clk.clkr,
+ [NSS_CC_UBI32_UTCM3_CLK] = &nss_cc_ubi32_utcm3_clk.clkr,
+ [NSS_CC_UBI3_CLK_SRC] = &nss_cc_ubi3_clk_src.clkr,
+ [NSS_CC_UBI3_DIV_CLK_SRC] = &nss_cc_ubi3_div_clk_src.clkr,
+ [NSS_CC_UBI_AXI_CLK_SRC] = &nss_cc_ubi_axi_clk_src.clkr,
+ [NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC] =
+ &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr,
+ [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT4_RX_CLK] = &nss_cc_uniphy_port4_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT4_TX_CLK] = &nss_cc_uniphy_port4_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT5_RX_CLK] = &nss_cc_uniphy_port5_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT5_TX_CLK] = &nss_cc_uniphy_port5_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT6_RX_CLK] = &nss_cc_uniphy_port6_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT6_TX_CLK] = &nss_cc_uniphy_port6_tx_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC3_PTP_REF_CLK] = &nss_cc_xgmac3_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC4_PTP_REF_CLK] = &nss_cc_xgmac4_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC5_PTP_REF_CLK] = &nss_cc_xgmac5_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr,
+ [UBI32_PLL] = &ubi32_pll.clkr,
+ [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+};
+
+static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
+ [NSS_CC_CE_BCR] = { 0x28400, 0 },
+ [NSS_CC_CLC_BCR] = { 0x28600, 0 },
+ [NSS_CC_EIP197_BCR] = { 0x16004, 0 },
+ [NSS_CC_HAQ_BCR] = { 0x28300, 0 },
+ [NSS_CC_IMEM_BCR] = { 0xe004, 0 },
+ [NSS_CC_MAC_BCR] = { 0x28100, 0 },
+ [NSS_CC_PPE_BCR] = { 0x28200, 0 },
+ [NSS_CC_UBI_BCR] = { 0x28700, 0 },
+ [NSS_CC_UNIPHY_BCR] = { 0x28900, 0 },
+ [UBI3_CLKRST_CLAMP_ENABLE] = { 0x28a04, 9 },
+ [UBI3_CORE_CLAMP_ENABLE] = { 0x28a04, 8 },
+ [UBI2_CLKRST_CLAMP_ENABLE] = { 0x28a04, 7 },
+ [UBI2_CORE_CLAMP_ENABLE] = { 0x28a04, 6 },
+ [UBI1_CLKRST_CLAMP_ENABLE] = { 0x28a04, 5 },
+ [UBI1_CORE_CLAMP_ENABLE] = { 0x28a04, 4 },
+ [UBI0_CLKRST_CLAMP_ENABLE] = { 0x28a04, 3 },
+ [UBI0_CORE_CLAMP_ENABLE] = { 0x28a04, 2 },
+ [NSSNOC_NSS_CSR_ARES] = { 0x28a04, 1 },
+ [NSS_CSR_ARES] = { 0x28a04, 0 },
+ [PPE_BTQ_ARES] = { 0x28a08, 20 },
+ [PPE_IPE_ARES] = { 0x28a08, 19 },
+ [PPE_ARES] = { 0x28a08, 18 },
+ [PPE_CFG_ARES] = { 0x28a08, 17 },
+ [PPE_EDMA_ARES] = { 0x28a08, 16 },
+ [PPE_EDMA_CFG_ARES] = { 0x28a08, 15 },
+ [CRY_PPE_ARES] = { 0x28a08, 14 },
+ [NSSNOC_PPE_ARES] = { 0x28a08, 13 },
+ [NSSNOC_PPE_CFG_ARES] = { 0x28a08, 12 },
+ [PORT1_MAC_ARES] = { 0x28a08, 11 },
+ [PORT2_MAC_ARES] = { 0x28a08, 10 },
+ [PORT3_MAC_ARES] = { 0x28a08, 9 },
+ [PORT4_MAC_ARES] = { 0x28a08, 8 },
+ [PORT5_MAC_ARES] = { 0x28a08, 7 },
+ [PORT6_MAC_ARES] = { 0x28a08, 6 },
+ [XGMAC0_PTP_REF_ARES] = { 0x28a08, 5 },
+ [XGMAC1_PTP_REF_ARES] = { 0x28a08, 4 },
+ [XGMAC2_PTP_REF_ARES] = { 0x28a08, 3 },
+ [XGMAC3_PTP_REF_ARES] = { 0x28a08, 2 },
+ [XGMAC4_PTP_REF_ARES] = { 0x28a08, 1 },
+ [XGMAC5_PTP_REF_ARES] = { 0x28a08, 0 },
+ [HAQ_AHB_ARES] = { 0x28a0c, 3 },
+ [HAQ_AXI_ARES] = { 0x28a0c, 2 },
+ [NSSNOC_HAQ_AHB_ARES] = { 0x28a0c, 1 },
+ [NSSNOC_HAQ_AXI_ARES] = { 0x28a0c, 0 },
+ [CE_APB_ARES] = { 0x28a10, 3 },
+ [CE_AXI_ARES] = { 0x28a10, 2 },
+ [NSSNOC_CE_APB_ARES] = { 0x28a10, 1 },
+ [NSSNOC_CE_AXI_ARES] = { 0x28a10, 0 },
+ [CRYPTO_ARES] = { 0x28a14, 1 },
+ [NSSNOC_CRYPTO_ARES] = { 0x28a14, 0 },
+ [NSSNOC_NC_AXI0_1_ARES] = { 0x28a1c, 28 },
+ [UBI0_CORE_ARES] = { 0x28a1c, 27 },
+ [UBI1_CORE_ARES] = { 0x28a1c, 26 },
+ [UBI2_CORE_ARES] = { 0x28a1c, 25 },
+ [UBI3_CORE_ARES] = { 0x28a1c, 24 },
+ [NC_AXI0_ARES] = { 0x28a1c, 23 },
+ [UTCM0_ARES] = { 0x28a1c, 22 },
+ [NC_AXI1_ARES] = { 0x28a1c, 21 },
+ [UTCM1_ARES] = { 0x28a1c, 20 },
+ [NC_AXI2_ARES] = { 0x28a1c, 19 },
+ [UTCM2_ARES] = { 0x28a1c, 18 },
+ [NC_AXI3_ARES] = { 0x28a1c, 17 },
+ [UTCM3_ARES] = { 0x28a1c, 16 },
+ [NSSNOC_NC_AXI0_ARES] = { 0x28a1c, 15 },
+ [AHB0_ARES] = { 0x28a1c, 14 },
+ [INTR0_AHB_ARES] = { 0x28a1c, 13 },
+ [AHB1_ARES] = { 0x28a1c, 12 },
+ [INTR1_AHB_ARES] = { 0x28a1c, 11 },
+ [AHB2_ARES] = { 0x28a1c, 10 },
+ [INTR2_AHB_ARES] = { 0x28a1c, 9 },
+ [AHB3_ARES] = { 0x28a1c, 8 },
+ [INTR3_AHB_ARES] = { 0x28a1c, 7 },
+ [NSSNOC_AHB0_ARES] = { 0x28a1c, 6 },
+ [NSSNOC_INT0_AHB_ARES] = { 0x28a1c, 5 },
+ [AXI0_ARES] = { 0x28a1c, 4 },
+ [AXI1_ARES] = { 0x28a1c, 3 },
+ [AXI2_ARES] = { 0x28a1c, 2 },
+ [AXI3_ARES] = { 0x28a1c, 1 },
+ [NSSNOC_AXI0_ARES] = { 0x28a1c, 0 },
+ [IMEM_QSB_ARES] = { 0x28a20, 3 },
+ [NSSNOC_IMEM_QSB_ARES] = { 0x28a20, 2 },
+ [IMEM_AHB_ARES] = { 0x28a20, 1 },
+ [NSSNOC_IMEM_AHB_ARES] = { 0x28a20, 0 },
+ [UNIPHY_PORT1_RX_ARES] = { 0x28a24, 23 },
+ [UNIPHY_PORT1_TX_ARES] = { 0x28a24, 22 },
+ [UNIPHY_PORT2_RX_ARES] = { 0x28a24, 21 },
+ [UNIPHY_PORT2_TX_ARES] = { 0x28a24, 20 },
+ [UNIPHY_PORT3_RX_ARES] = { 0x28a24, 19 },
+ [UNIPHY_PORT3_TX_ARES] = { 0x28a24, 18 },
+ [UNIPHY_PORT4_RX_ARES] = { 0x28a24, 17 },
+ [UNIPHY_PORT4_TX_ARES] = { 0x28a24, 16 },
+ [UNIPHY_PORT5_RX_ARES] = { 0x28a24, 15 },
+ [UNIPHY_PORT5_TX_ARES] = { 0x28a24, 14 },
+ [UNIPHY_PORT6_RX_ARES] = { 0x28a24, 13 },
+ [UNIPHY_PORT6_TX_ARES] = { 0x28a24, 12 },
+ [PORT1_RX_ARES] = { 0x28a24, 11 },
+ [PORT1_TX_ARES] = { 0x28a24, 10 },
+ [PORT2_RX_ARES] = { 0x28a24, 9 },
+ [PORT2_TX_ARES] = { 0x28a24, 8 },
+ [PORT3_RX_ARES] = { 0x28a24, 7 },
+ [PORT3_TX_ARES] = { 0x28a24, 6 },
+ [PORT4_RX_ARES] = { 0x28a24, 5 },
+ [PORT4_TX_ARES] = { 0x28a24, 4 },
+ [PORT5_RX_ARES] = { 0x28a24, 3 },
+ [PORT5_TX_ARES] = { 0x28a24, 2 },
+ [PORT6_RX_ARES] = { 0x28a24, 1 },
+ [PORT6_TX_ARES] = { 0x28a24, 0 },
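+	/* Combined resets: .bitmask asserts several reset bits in one register write */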
+ [PPE_FULL_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(20, 17) },
+ [UNIPHY0_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(23, 14) },
+ [UNIPHY1_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
+ [UNIPHY2_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
+ [UNIPHY_PORT1_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(23, 22) },
+ [UNIPHY_PORT2_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(21, 20) },
+ [UNIPHY_PORT3_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(19, 18) },
+ [UNIPHY_PORT4_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(17, 16) },
+ [UNIPHY_PORT5_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
+ [UNIPHY_PORT6_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
+ [NSSPORT1_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(11, 10) },
+ [NSSPORT2_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(9, 8) },
+ [NSSPORT3_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(7, 6) },
+ [NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
+ [NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
+ [NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
+};
+
+static const struct regmap_config nss_cc_ipq9574_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x28a34,
+ .fast_io = true,
+};
+
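+/* Interconnect paths whose bandwidth votes are backed by an NSSCC branch clock (icc-clk framework) */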
+static struct qcom_icc_hws_data icc_ipq9574_nss_hws[] = {
+ { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
+ { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
+ { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
+ { MASTER_NSSNOC_IMEM_QSB, SLAVE_NSSNOC_IMEM_QSB, NSS_CC_NSSNOC_IMEM_QSB_CLK },
+ { MASTER_NSSNOC_IMEM_AHB, SLAVE_NSSNOC_IMEM_AHB, NSS_CC_NSSNOC_IMEM_AHB_CLK },
+};
+
+#define IPQ_NSSCC_ID (9574 * 2) /* provider-unique base for interconnect node IDs */
+
+static const struct qcom_cc_desc nss_cc_ipq9574_desc = {
+ .config = &nss_cc_ipq9574_regmap_config,
+ .clks = nss_cc_ipq9574_clocks,
+ .num_clks = ARRAY_SIZE(nss_cc_ipq9574_clocks),
+ .resets = nss_cc_ipq9574_resets,
+ .num_resets = ARRAY_SIZE(nss_cc_ipq9574_resets),
+ .icc_hws = icc_ipq9574_nss_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq9574_nss_hws),
+ .icc_first_node_id = IPQ_NSSCC_ID,
+};
+
+static const struct dev_pm_ops nss_cc_ipq9574_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id nss_cc_ipq9574_match_table[] = {
+ { .compatible = "qcom,ipq9574-nsscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nss_cc_ipq9574_match_table);
+
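+/*
+ * The controller's register space is fed by a "bus" clock managed through
+ * pm_clk, so the device must be runtime-resumed before the regmap is touched.
+ */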
+static int nss_cc_ipq9574_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to create PM clock\n");
+
+ ret = pm_clk_add(&pdev->dev, "bus");
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to add bus clock\n");
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to resume\n");
+
+ regmap = qcom_cc_map(pdev, &nss_cc_ipq9574_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+				     "Failed to map clock controller registers\n");
+ }
+
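+	/* Program the UBI32 PLL configuration before registering the clock tree */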
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+ ret = qcom_cc_really_probe(&pdev->dev, &nss_cc_ipq9574_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver nss_cc_ipq9574_driver = {
+ .probe = nss_cc_ipq9574_probe,
+ .driver = {
+ .name = "qcom,nsscc-ipq9574",
+ .of_match_table = nss_cc_ipq9574_match_table,
+ .pm = &nss_cc_ipq9574_pm_ops,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(nss_cc_ipq9574_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. NSSCC IPQ9574 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/tcsrcc-glymur.c b/drivers/clk/qcom/tcsrcc-glymur.c
new file mode 100644
index 000000000000..c1f8b6d10b7f
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-glymur.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-tcsr.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
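+/*
+ * Every clock below is a simple clkref enable bit sourced from the TCXO
+ * pad; BRANCH_HALT_DELAY waits a fixed delay instead of polling a halt bit.
+ */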
+static struct clk_branch tcsr_edp_clkref_en = {
+ .halt_reg = 0x1c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2_clkref_en = {
+ .halt_reg = 0x8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_3_clkref_en = {
+ .halt_reg = 0x10,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_4_clkref_en = {
+ .halt_reg = 0x14,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x14,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_1_clkref_en = {
+ .halt_reg = 0x28,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x28,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_2_clkref_en = {
+ .halt_reg = 0x2c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_3_clkref_en = {
+ .halt_reg = 0x30,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x30,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_4_clkref_en = {
+ .halt_reg = 0x44,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x44,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_0_clkref_en = {
+ .halt_reg = 0x20,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_1_clkref_en = {
+ .halt_reg = 0x24,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x24,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_1_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_2_clkref_en = {
+ .halt_reg = 0x18,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x18,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_glymur_clocks[] = {
+ [TCSR_EDP_CLKREF_EN] = &tcsr_edp_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_PCIE_2_CLKREF_EN] = &tcsr_pcie_2_clkref_en.clkr,
+ [TCSR_PCIE_3_CLKREF_EN] = &tcsr_pcie_3_clkref_en.clkr,
+ [TCSR_PCIE_4_CLKREF_EN] = &tcsr_pcie_4_clkref_en.clkr,
+ [TCSR_USB2_1_CLKREF_EN] = &tcsr_usb2_1_clkref_en.clkr,
+ [TCSR_USB2_2_CLKREF_EN] = &tcsr_usb2_2_clkref_en.clkr,
+ [TCSR_USB2_3_CLKREF_EN] = &tcsr_usb2_3_clkref_en.clkr,
+ [TCSR_USB2_4_CLKREF_EN] = &tcsr_usb2_4_clkref_en.clkr,
+ [TCSR_USB3_0_CLKREF_EN] = &tcsr_usb3_0_clkref_en.clkr,
+ [TCSR_USB3_1_CLKREF_EN] = &tcsr_usb3_1_clkref_en.clkr,
+ [TCSR_USB4_1_CLKREF_EN] = &tcsr_usb4_1_clkref_en.clkr,
+ [TCSR_USB4_2_CLKREF_EN] = &tcsr_usb4_2_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_glymur_desc = {
+ .config = &tcsr_cc_glymur_regmap_config,
+ .clks = tcsr_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_glymur_clocks),
+};
+
+static const struct of_device_id tcsr_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_glymur_match_table);
+
+static int tcsr_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_glymur_desc);
+}
+
+static struct platform_driver tcsr_cc_glymur_driver = {
+ .probe = tcsr_cc_glymur_probe,
+ .driver = {
+ .name = "tcsrcc-glymur",
+ .of_match_table = tcsr_cc_glymur_match_table,
+ },
+};
+
+static int __init tcsr_cc_glymur_init(void)
+{
+ return platform_driver_register(&tcsr_cc_glymur_driver);
+}
+subsys_initcall(tcsr_cc_glymur_init);
+
+static void __exit tcsr_cc_glymur_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_glymur_driver);
+}
+module_exit(tcsr_cc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
index e5e8f2e82b94..41d73f92a000 100644
--- a/drivers/clk/qcom/tcsrcc-sm8550.c
+++ b/drivers/clk/qcom/tcsrcc-sm8550.c
@@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = {
},
};
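+/* SAR2130P variant: only the clkrefs listed below are registered */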
+static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
@@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = {
+ .config = &tcsr_cc_sm8550_regmap_config,
+ .clks = tcsr_cc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks),
+};
+
static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
.config = &tcsr_cc_sm8550_regmap_config,
.clks = tcsr_cc_sm8550_clocks,
@@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
};
static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
- { .compatible = "qcom,sm8550-tcsr" },
+ { .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc },
+ { .compatible = "qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc },
{ }
};
MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);
@@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
+ regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev));
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/clk/qcom/tcsrcc-sm8650.c b/drivers/clk/qcom/tcsrcc-sm8650.c
index 11c7d6df48c7..3685dcde9a4b 100644
--- a/drivers/clk/qcom/tcsrcc-sm8650.c
+++ b/drivers/clk/qcom/tcsrcc-sm8650.c
@@ -148,6 +148,7 @@ static const struct qcom_cc_desc tcsr_cc_sm8650_desc = {
};
static const struct of_device_id tcsr_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,milos-tcsr" },
{ .compatible = "qcom,sm8650-tcsr" },
{ }
};
@@ -155,6 +156,13 @@ MODULE_DEVICE_TABLE(of, tcsr_cc_sm8650_match_table);
static int tcsr_cc_sm8650_probe(struct platform_device *pdev)
{
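+	/* Milos reuses this layout but relocates the UFS clkref and omits the USB2/USB3 clkrefs */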
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,milos-tcsr")) {
+ tcsr_ufs_clkref_en.halt_reg = 0x31118;
+ tcsr_ufs_clkref_en.clkr.enable_reg = 0x31118;
+ tcsr_cc_sm8650_clocks[TCSR_USB2_CLKREF_EN] = NULL;
+ tcsr_cc_sm8650_clocks[TCSR_USB3_CLKREF_EN] = NULL;
+ }
+
return qcom_cc_probe(pdev, &tcsr_cc_sm8650_desc);
}
diff --git a/drivers/clk/qcom/tcsrcc-sm8750.c b/drivers/clk/qcom/tcsrcc-sm8750.c
new file mode 100644
index 000000000000..242e320986ef
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8750.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
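+/* Unlike the other clkrefs in this block, no parent is modelled here */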
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x1000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x2000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x3000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8750_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8750_desc = {
+ .config = &tcsr_cc_sm8750_regmap_config,
+ .clks = tcsr_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8750_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8750_match_table);
+
+static int tcsr_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_sm8750_desc);
+}
+
+static struct platform_driver tcsr_cc_sm8750_driver = {
+ .probe = tcsr_cc_sm8750_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8750",
+ .of_match_table = tcsr_cc_sm8750_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8750_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8750_driver);
+}
+subsys_initcall(tcsr_cc_sm8750_init);
+
+static void __exit tcsr_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8750_driver);
+}
+module_exit(tcsr_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI TCSR_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
index ff61769a0807..a367e1f55622 100644
--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
+++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/videocc-milos.c b/drivers/clk/qcom/videocc-milos.c
new file mode 100644
index 000000000000..acc9df295d4f
--- /dev/null
+++ b/drivers/clk/qcom/videocc-milos.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Indices must match the clock order in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 604.8 MHz configuration */
+static const struct alpha_pll_config video_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .config = &video_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(604800000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1656000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x8128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
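+/* The XO RCG reuses the 19.2 MHz TCXO entry from the AHB frequency table */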
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x810c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x8144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x8148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8148,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
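+/* MVS0 is a child power domain of MVS0C; genpd keeps the parent on while MVS0 is active */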
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x80a4,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_milos_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *video_cc_milos_gdscs[] = {
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_milos_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80f0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x80a0 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+};
+
+static struct clk_alpha_pll *video_cc_milos_plls[] = {
+ &video_cc_pll0,
+};
+
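+/* Always-on branch registers force-enabled by the common qcom_cc probe code */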
+static u32 video_cc_milos_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f50,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_milos_driver_data = {
+ .alpha_plls = video_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_milos_plls),
+ .clk_cbcrs = video_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_milos_desc = {
+ .config = &video_cc_milos_regmap_config,
+ .clks = video_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_milos_clocks),
+ .resets = video_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(video_cc_milos_resets),
+ .gdscs = video_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_milos_driver_data,
+};
+
+static const struct of_device_id video_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_milos_match_table);
+
+static int video_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_milos_desc);
+}
+
+static struct platform_driver video_cc_milos_driver = {
+ .probe = video_cc_milos_probe,
+ .driver = {
+ .name = "video_cc-milos",
+ .of_match_table = video_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(video_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-qcs615.c b/drivers/clk/qcom/videocc-qcs615.c
new file mode 100644
index 000000000000..1b41fa44c17e
--- /dev/null
+++ b/drivers/clk/qcom/videocc-qcs615.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_AUX,
+ P_VIDEO_PLL0_OUT_AUX2,
+ P_VIDEO_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco video_cc_pll0_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 600 MHz configuration, VCO index 2 */
+static struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .config = &video_pll0_config,
+ .vco_table = video_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(video_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+ { P_VIDEO_PLL0_OUT_AUX, 2 },
+ { P_VIDEO_PLL0_OUT_AUX2, 3 },
+};
+
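+/*
+ * Only the main output of video_pll0 is registered, so the MAIN, AUX and
+ * AUX2 mux selections below all resolve to the same clk_hw.
+ */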
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xaf8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
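+/*
+ * F(rate, parent, pre_div, m, n): the F() macro stores 2 * pre_div - 1,
+ * so half-integer pre-dividers such as 2.5 and 4.5 are representable.
+ */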
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(133333333, P_VIDEO_PLL0_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(410000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_venus_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0xb18,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb18,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+ .halt_reg = 0x8f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x9b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+ .halt_reg = 0x8d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
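+/*
+ * HW_CTRL_TRIGGER exposes the GDSC's hardware-control mode through the
+ * genpd set_hwmode callbacks, letting the consumer switch between software
+ * and hardware control at runtime rather than fixing it as HW_CTRL does.
+ */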
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *video_cc_qcs615_clocks[] = {
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+ [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+ [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+ [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_qcs615_gdscs[] = {
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_qcs615_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x8b0 },
+ [VIDEO_CC_VCODEC0_BCR] = { 0x870 },
+ [VIDEO_CC_VENUS_BCR] = { 0x810 },
+};
+
+static struct clk_alpha_pll *video_cc_qcs615_plls[] = {
+ &video_pll0,
+};
+
+static u32 video_cc_qcs615_critical_cbcrs[] = {
+ 0xab8, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb94,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_qcs615_driver_data = {
+ .alpha_plls = video_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_qcs615_plls),
+ .clk_cbcrs = video_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_qcs615_desc = {
+ .config = &video_cc_qcs615_regmap_config,
+ .clks = video_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_qcs615_clocks),
+ .resets = video_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(video_cc_qcs615_resets),
+ .gdscs = video_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_qcs615_gdscs),
+ .driver_data = &video_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id video_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_qcs615_match_table);
+
+static int video_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_qcs615_desc);
+}
+
+static struct platform_driver video_cc_qcs615_driver = {
+ .probe = video_cc_qcs615_probe,
+ .driver = {
+ .name = "videocc-qcs615",
+ .of_match_table = video_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(video_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI VIDEOCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sa8775p.c b/drivers/clk/qcom/videocc-sa8775p.c
new file mode 100644
index 000000000000..2476201dcd20
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sa8775p.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x39,
+ .alpha = 0x3000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_pll1_config = {
+ .l = 0x39,
+ .alpha = 0x3000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll video_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0_ao[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll1.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_3[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_3[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0_ao,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1680000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1800000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x812c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_3,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x8110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0_ao,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
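+/*
+ * The MVS post-dividers are exposed read-only (clk_regmap_div_ro_ops):
+ * the kernel only reads them back, with register value N meaning a
+ * divide-by-(N + 1).
+ */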
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80b8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x806c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
+ .reg = 0x80dc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0x8094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_sm_div_clk_src = {
+ .reg = 0x8108,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sm_div_clk_src",
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_clk = {
+ .halt_reg = 0x80d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_pll_lock_monitor_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sm_obs_clk = {
+ .halt_reg = 0x810c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x810c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sm_obs_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_sm_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x809c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER,
+};
+
+static struct gdsc video_cc_mvs1c_gdsc = {
+ .gdscr = 0x8074,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc video_cc_mvs1_gdsc = {
+ .gdscr = 0x80c0,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs1c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_sa8775p_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_PLL_LOCK_MONITOR_CLK] = &video_cc_pll_lock_monitor_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_SM_DIV_CLK_SRC] = &video_cc_sm_div_clk_src.clkr,
+ [VIDEO_CC_SM_OBS_CLK] = &video_cc_sm_obs_clk.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL1] = &video_pll1.clkr,
+};
+
+static struct gdsc *video_cc_sa8775p_gdscs[] = {
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS1_GDSC] = &video_cc_mvs1_gdsc,
+ [VIDEO_CC_MVS1C_GDSC] = &video_cc_mvs1c_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sa8775p_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80e8 },
+ [VIDEO_CC_MVS0_BCR] = { 0x8098 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+ [VIDEO_CC_MVS1_BCR] = { 0x80bc },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0x808c, 2 },
+ [VIDEO_CC_MVS1C_BCR] = { 0x8070 },
+};
+
+static const struct regmap_config video_cc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sa8775p_desc = {
+ .config = &video_cc_sa8775p_regmap_config,
+ .clks = video_cc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sa8775p_clocks),
+ .resets = video_cc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sa8775p_resets),
+ .gdscs = video_cc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sa8775p_gdscs),
+};
+
+static const struct of_device_id video_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-videocc" },
+ { .compatible = "qcom,sa8775p-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sa8775p_match_table);
+
+static int video_cc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&video_pll0, regmap, &video_pll0_config);
+ clk_lucid_evo_pll_configure(&video_pll1, regmap, &video_pll1_config);
+
+ /*
+ * Set the mvs0c clock divider to div-3 (register value 2) so that the
+ * mvs0 and mvs0c clocks run at the same frequency on QCS8300.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-videocc"))
+ regmap_write(regmap, video_cc_mvs0c_div2_div_clk_src.reg, 2);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x80ec); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8144); /* VIDEO_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8128); /* VIDEO_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver video_cc_sa8775p_driver = {
+ .probe = video_cc_sa8775p_probe,
+ .driver = {
+ .name = "videocc-sa8775p",
+ .of_match_table = video_cc_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(video_cc_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI VIDEOCC SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
index d7f845480396..dd2441d6aa83 100644
--- a/drivers/clk/qcom/videocc-sc7180.c
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -166,7 +166,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index f77a07779477..6dedc80a8b3e 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -260,7 +260,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x930 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -271,7 +271,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0x950 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm6350.c b/drivers/clk/qcom/videocc-sm6350.c
new file mode 100644
index 000000000000..34bdc5aa865a
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm6350.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6350-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_EVEN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 125000000, 1000000000, 1 },
+};
+
+/* 600 MHz */
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000002,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00004005,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_video_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv video_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_video_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_video_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_CHIP_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+ F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_iris_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x701c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_axi_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_ctl_axi_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0x7034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvsc_gdsc = {
+ .gdscr = 0x2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvsc_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0x3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER,
+};
+
+static struct gdsc *video_cc_sm6350_gdscs[] = {
+ [MVSC_GDSC] = &mvsc_gdsc,
+ [MVS0_GDSC] = &mvs0_gdsc,
+};
+
+static struct clk_regmap *video_cc_sm6350_clocks[] = {
+ [VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+ [VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+ [VIDEO_CC_MVS0_AXI_CLK] = &video_cc_mvs0_axi_clk.clkr,
+ [VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+ [VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+ [VIDEO_CC_MVSC_CTL_AXI_CLK] = &video_cc_mvsc_ctl_axi_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL0_OUT_EVEN] = &video_pll0_out_even.clkr,
+};
+
+static const struct regmap_config video_cc_sm6350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sm6350_desc = {
+ .config = &video_cc_sm6350_regmap_config,
+ .clks = video_cc_sm6350_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm6350_clocks),
+ .gdscs = video_cc_sm6350_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm6350_gdscs),
+};
+
+static const struct of_device_id video_cc_sm6350_match_table[] = {
+ { .compatible = "qcom,sm6350-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm6350_match_table);
+
+static int video_cc_sm6350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm6350_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x7018); /* VIDEO_CC_XO_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sm6350_desc, regmap);
+}
+
+static struct platform_driver video_cc_sm6350_driver = {
+ .probe = video_cc_sm6350_probe,
+ .driver = {
+ .name = "video_cc-sm6350",
+ .of_match_table = video_cc_sm6350_match_table,
+ },
+};
+
+module_platform_driver(video_cc_sm6350_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC SM6350 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm7150.c b/drivers/clk/qcom/videocc-sm7150.c
index 14ef7f561753..b6912560ef9b 100644
--- a/drivers/clk/qcom/videocc-sm7150.c
+++ b/drivers/clk/qcom/videocc-sm7150.c
@@ -271,7 +271,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x9ec },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -282,7 +282,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0xa0c },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index daab3237eec1..3024f6fc89c8 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -179,7 +179,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -188,7 +188,7 @@ static struct gdsc vcodec1_gdsc = {
.pd = {
.name = "vcodec1_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
static struct clk_regmap *video_cc_sm8150_clocks[] = {
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
index 5bd6fe3e1298..057a9474894a 100644
--- a/drivers/clk/qcom/videocc-sm8350.c
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -452,7 +452,7 @@ static struct gdsc mvs0_gdsc = {
.pd = {
.name = "mvs0_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -461,7 +461,7 @@ static struct gdsc mvs1_gdsc = {
.pd = {
.name = "mvs1_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -510,7 +510,7 @@ static const struct regmap_config video_cc_sm8350_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8350_desc = {
+static const struct qcom_cc_desc video_cc_sm8350_desc = {
.config = &video_cc_sm8350_regmap_config,
.clks = video_cc_sm8350_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8350_clocks),
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index ed9163d64244..dc168ce199cc 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8450-videocc.h>
@@ -46,8 +45,24 @@ static const struct alpha_pll_config video_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_video_cc_pll0_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x1e,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -74,8 +89,24 @@ static const struct alpha_pll_config video_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_video_cc_pll1_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x2b,
+ .alpha = 0xc000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -317,7 +348,7 @@ static struct gdsc video_cc_mvs0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs0c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct gdsc video_cc_mvs1c_gdsc = {
@@ -342,7 +373,7 @@ static struct gdsc video_cc_mvs1_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs1c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct clk_regmap *video_cc_sm8450_clocks[] = {
@@ -377,6 +408,17 @@ static const struct qcom_reset_map video_cc_sm8450_resets[] = {
[VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x808c, .bit = 2, .udelay = 1000 },
};
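+/*
+ * PLLs to configure and always-on CBCRs to enable, consumed by the common
+ * qcom_cc_probe() path via qcom_cc_driver_data; this replaces the PLL setup
+ * and qcom_branch_set_clk_en() calls that were open-coded in probe below.
+ */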
+static struct clk_alpha_pll *video_cc_sm8450_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8450_critical_cbcrs[] = {
+ 0x80e4, /* VIDEO_CC_AHB_CLK */
+ 0x8114, /* VIDEO_CC_XO_CLK */
+ 0x8130, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -385,7 +427,14 @@ static const struct regmap_config video_cc_sm8450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8450_desc = {
+static struct qcom_cc_driver_data video_cc_sm8450_driver_data = {
+ .alpha_plls = video_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8450_plls),
+ .clk_cbcrs = video_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8450_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_sm8450_desc = {
.config = &video_cc_sm8450_regmap_config,
.clks = video_cc_sm8450_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8450_clocks),
@@ -393,46 +442,31 @@ static struct qcom_cc_desc video_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8450_resets),
.gdscs = video_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8450_driver_data,
};
static const struct of_device_id video_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-videocc" },
+ { .compatible = "qcom,sm8475-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table);
static int video_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-videocc")) {
+ /* Update VideoCC PLL0 */
+ video_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
+ /* Update VideoCC PLL1 */
+ video_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
- regmap = qcom_cc_map(pdev, &video_cc_sm8450_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
+ video_cc_pll0.config = &sm8475_video_cc_pll0_config;
+ video_cc_pll1.config = &sm8475_video_cc_pll1_config;
}
- clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x8130); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8114); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8450_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8450_desc);
}
static struct platform_driver video_cc_sm8450_driver = {
@@ -445,5 +479,5 @@ static struct platform_driver video_cc_sm8450_driver = {
module_platform_driver(video_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI VIDEOCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI VIDEOCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index 7c25a50cfa97..32a6505abe26 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-videocc.h>
@@ -51,6 +50,7 @@ static struct alpha_pll_config video_cc_pll0_config = {
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -82,6 +82,7 @@ static struct alpha_pll_config video_cc_pll1_config = {
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +145,16 @@ static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_x1e80100[] = {
+ F(576000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1443000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs0_clk_src = {
.cmd_rcgr = 0x8000,
.mnd_width = 0,
@@ -176,6 +187,15 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_x1e80100[] = {
+ F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs1_clk_src = {
.cmd_rcgr = 0x8018,
.mnd_width = 0,
@@ -511,6 +531,23 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = {
[VIDEO_CC_XO_CLK_ARES] = { .reg = 0x8124, .bit = 2, .udelay = 100 },
};
+static struct clk_alpha_pll *video_cc_sm8550_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8550_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+};
+
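+/* The SM8650 table differs from SM8550 only in the sleep-clock CBCR offset. */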
+static u32 video_cc_sm8650_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8150, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -519,7 +556,14 @@ static const struct regmap_config video_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8550_desc = {
+static struct qcom_cc_driver_data video_cc_sm8550_driver_data = {
+ .alpha_plls = video_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8550_plls),
+ .clk_cbcrs = video_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8550_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_sm8550_desc = {
.config = &video_cc_sm8550_regmap_config,
.clks = video_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8550_clocks),
@@ -527,37 +571,30 @@ static struct qcom_cc_desc video_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8550_resets),
.gdscs = video_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8550_driver_data,
};
static const struct of_device_id video_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-videocc" },
{ .compatible = "qcom,sm8650-videocc" },
+ { .compatible = "qcom,x1e80100-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table);
static int video_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
- u32 sleep_clk_offset = 0x8140;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &video_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,x1e80100-videocc")) {
+ video_cc_pll0_config.l = 0x1e;
+ video_cc_pll0_config.alpha = 0x0000;
+ video_cc_pll1_config.l = 0x2b;
+ video_cc_pll1_config.alpha = 0xc000;
+ video_cc_mvs0_clk_src.freq_tbl = ftbl_video_cc_mvs0_clk_src_x1e80100;
+ video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_x1e80100;
}
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-videocc")) {
- sleep_clk_offset = 0x8150;
video_cc_pll0_config.l = 0x1e;
video_cc_pll0_config.alpha = 0xa000;
video_cc_pll1_config.l = 0x2b;
@@ -569,21 +606,13 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
video_cc_sm8550_clocks[VIDEO_CC_MVS1_SHIFT_CLK] = &video_cc_mvs1_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_MVS1C_SHIFT_CLK] = &video_cc_mvs1c_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr;
- }
-
- clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, sleep_clk_offset); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
+ video_cc_sm8550_driver_data.clk_cbcrs = video_cc_sm8650_critical_cbcrs;
+ video_cc_sm8550_driver_data.num_clk_cbcrs =
+ ARRAY_SIZE(video_cc_sm8650_critical_cbcrs);
+ }
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8550_desc);
}
static struct platform_driver video_cc_sm8550_driver = {
diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
index 50a443bf79ec..19d433034884 100644
--- a/drivers/clk/ralink/clk-mtmips.c
+++ b/drivers/clk/ralink/clk-mtmips.c
@@ -207,6 +207,7 @@ static struct mtmips_clk mt7620_pherip_clks[] = {
{ CLK_PERIPH("10000b00.spi", "bus") },
{ CLK_PERIPH("10000b40.spi", "bus") },
{ CLK_PERIPH("10000c00.uartlite", "periph") },
+ { CLK_PERIPH("10130000.mmc", "sdhc") },
{ CLK_PERIPH("10180000.wmac", "xtal") }
};
@@ -220,6 +221,7 @@ static struct mtmips_clk mt76x8_pherip_clks[] = {
{ CLK_PERIPH("10000c00.uart0", "periph") },
{ CLK_PERIPH("10000d00.uart1", "periph") },
{ CLK_PERIPH("10000e00.uart2", "periph") },
+ { CLK_PERIPH("10130000.mmc", "sdhc") },
{ CLK_PERIPH("10300000.wmac", "xtal") }
};
@@ -263,16 +265,21 @@ err_clk_unreg:
.rate = _rate \
}
-static struct mtmips_clk_fixed rt305x_fixed_clocks[] = {
- CLK_FIXED("xtal", NULL, 40000000)
+static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
+ CLK_FIXED("periph", "xtal", 40000000)
};
static struct mtmips_clk_fixed rt3352_fixed_clocks[] = {
CLK_FIXED("periph", "xtal", 40000000)
};
+static struct mtmips_clk_fixed mt7620_fixed_clocks[] = {
+ CLK_FIXED("bbppll", "xtal", 480000000)
+};
+
static struct mtmips_clk_fixed mt76x8_fixed_clocks[] = {
- CLK_FIXED("pcmi2s", "xtal", 480000000),
+ CLK_FIXED("bbppll", "xtal", 480000000),
+ CLK_FIXED("pcmi2s", "bbppll", 480000000),
CLK_FIXED("periph", "xtal", 40000000)
};
@@ -327,6 +334,15 @@ static struct mtmips_clk_factor rt305x_factor_clocks[] = {
CLK_FACTOR("bus", "cpu", 1, 3)
};
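+/* The "sdhc" clock is the 480 MHz "bbppll" divided by 10, i.e. 48 MHz. */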
+static struct mtmips_clk_factor mt7620_factor_clocks[] = {
+ CLK_FACTOR("sdhc", "bbppll", 1, 10)
+};
+
+static struct mtmips_clk_factor mt76x8_factor_clocks[] = {
+ CLK_FACTOR("bus", "cpu", 1, 3),
+ CLK_FACTOR("sdhc", "bbppll", 1, 10)
+};
+
static int mtmips_register_factor_clocks(struct clk_hw_onecell_data *clk_data,
struct mtmips_clk_priv *priv)
{
@@ -366,6 +382,12 @@ static inline struct mtmips_clk *to_mtmips_clk(struct clk_hw *hw)
return container_of(hw, struct mtmips_clk, hw);
}
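+/*
+ * RT2880, RT305x and RT3883 always run from a fixed 40 MHz crystal, which
+ * was previously modelled as a fixed clock via rt305x_fixed_clocks.
+ */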
+static unsigned long rt2880_xtal_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 40000000;
+}
+
static unsigned long rt5350_xtal_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -677,10 +699,12 @@ static unsigned long mt76x8_cpu_recalc_rate(struct clk_hw *hw,
}
static struct mtmips_clk rt2880_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt2880_cpu_recalc_rate) }
};
static struct mtmips_clk rt305x_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt305x_cpu_recalc_rate) }
};
@@ -690,6 +714,7 @@ static struct mtmips_clk rt3352_clks_base[] = {
};
static struct mtmips_clk rt3883_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt3883_cpu_recalc_rate) },
{ CLK_BASE("bus", "cpu", rt3883_bus_recalc_rate) }
};
@@ -746,8 +771,8 @@ err_clk_unreg:
static const struct mtmips_clk_data rt2880_clk_data = {
.clk_base = rt2880_clks_base,
.num_clk_base = ARRAY_SIZE(rt2880_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = NULL,
+ .num_clk_fixed = 0,
.clk_factor = rt2880_factor_clocks,
.num_clk_factor = ARRAY_SIZE(rt2880_factor_clocks),
.clk_periph = rt2880_pherip_clks,
@@ -757,8 +782,8 @@ static const struct mtmips_clk_data rt2880_clk_data = {
static const struct mtmips_clk_data rt305x_clk_data = {
.clk_base = rt305x_clks_base,
.num_clk_base = ARRAY_SIZE(rt305x_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = NULL,
+ .num_clk_fixed = 0,
.clk_factor = rt305x_factor_clocks,
.num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks),
.clk_periph = rt305x_pherip_clks,
@@ -779,8 +804,8 @@ static const struct mtmips_clk_data rt3352_clk_data = {
static const struct mtmips_clk_data rt3883_clk_data = {
.clk_base = rt3883_clks_base,
.num_clk_base = ARRAY_SIZE(rt3883_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = rt3883_fixed_clocks,
+ .num_clk_fixed = ARRAY_SIZE(rt3883_fixed_clocks),
.clk_factor = NULL,
.num_clk_factor = 0,
.clk_periph = rt5350_pherip_clks,
@@ -801,10 +826,10 @@ static const struct mtmips_clk_data rt5350_clk_data = {
static const struct mtmips_clk_data mt7620_clk_data = {
.clk_base = mt7620_clks_base,
.num_clk_base = ARRAY_SIZE(mt7620_clks_base),
- .clk_fixed = NULL,
- .num_clk_fixed = 0,
- .clk_factor = NULL,
- .num_clk_factor = 0,
+ .clk_fixed = mt7620_fixed_clocks,
+ .num_clk_fixed = ARRAY_SIZE(mt7620_fixed_clocks),
+ .clk_factor = mt7620_factor_clocks,
+ .num_clk_factor = ARRAY_SIZE(mt7620_factor_clocks),
.clk_periph = mt7620_pherip_clks,
.num_clk_periph = ARRAY_SIZE(mt7620_pherip_clks),
};
@@ -814,8 +839,8 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
.num_clk_base = ARRAY_SIZE(mt76x8_clks_base),
.clk_fixed = mt76x8_fixed_clocks,
.num_clk_fixed = ARRAY_SIZE(mt76x8_fixed_clocks),
- .clk_factor = rt305x_factor_clocks,
- .num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks),
+ .clk_factor = mt76x8_factor_clocks,
+ .num_clk_factor = ARRAY_SIZE(mt76x8_factor_clocks),
.clk_periph = mt76x8_pherip_clks,
.num_clk_periph = ARRAY_SIZE(mt76x8_pherip_clks),
};
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 76791a1c50ac..6a5a04664990 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,7 +40,11 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G047 if ARCH_R9A09G047
+ select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
+ select CLK_R9A09G077 if ARCH_R9A09G077
+ select CLK_R9A09G087 if ARCH_R9A09G087
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -194,10 +198,26 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G047
+ bool "RZ/G3E clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
+config CLK_R9A09G056
+ bool "RZ/V2N clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G077
+ bool "RZ/T2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
+config CLK_R9A09G087
+ bool "RZ/N2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -234,7 +254,12 @@ config CLK_RZG2L
select RESET_CONTROLLER
config CLK_RZV2H
- bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ bool "RZ/{G3E,V2H(P)} family clock support" if COMPILE_TEST
+ select RESET_CONTROLLER
+
+config CLK_RENESAS_VBATTB
+ tristate "Renesas VBATTB clock controller"
+ depends on ARCH_RZG2L || COMPILE_TEST
select RESET_CONTROLLER
# Generic
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 23d2e26051c8..d28eb276a153 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,7 +37,11 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
+obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
+obj-$(CONFIG_CLK_R9A09G077) += r9a09g077-cpg.o
+obj-$(CONFIG_CLK_R9A09G087) += r9a09g077-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
@@ -53,3 +57,4 @@ obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSTP) += clk-mstp.o
obj-$(CONFIG_CLK_RENESAS_DIV6) += clk-div6.o
+obj-$(CONFIG_CLK_RENESAS_VBATTB) += clk-vbattb.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
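
The clk-mstp.c hunk above splits genpd registration in two: the early CLK_OF_DECLARE path only caches the device node and domain, and a postcore initcall later publishes the provider via of_genpd_add_provider_simple(). A minimal user-space C sketch of this record-now/register-later pattern; the names (add_clk_domain, init_provider) are illustrative stand-ins, not kernel APIs:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the device node and genpd pointers cached by the patch. */
struct node { const char *name; };
struct domain { const char *name; };

static struct node *pending_np;      /* analogous to cpg_mstp_pd_np */
static struct domain *pending_genpd; /* analogous to cpg_mstp_pd_genpd */

/* Early phase: only remember what to register; no provider exists yet. */
static void add_clk_domain(struct node *np, struct domain *pd)
{
	pending_np = np;
	pending_genpd = pd;
}

/* Late phase: actually publish the provider, then drop the reference. */
static int init_provider(void)
{
	if (!pending_np)
		return -1; /* mirrors the -ENODEV bail-out */
	printf("registering genpd provider %s for node %s\n",
	       pending_genpd->name, pending_np->name);
	pending_np = NULL; /* mirrors of_node_put() */
	return 0;
}

int main(void)
{
	struct node np = { "cpg_clocks" };
	struct domain pd = { "cpg_mstp" };

	add_clk_domain(&np, &pd); /* runs at CLK_OF_DECLARE time upstream */
	return init_provider();   /* runs at postcore_initcall time upstream */
}
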
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 4b1815147f77..f331d8bc9daf 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -64,7 +64,6 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
unsigned int mult = 1;
unsigned int div = 1;
-
if (!strcmp(name, "main")) {
u32 ckscr = readl(base + CPG_CKSCR);
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 797556259370..6ea173f22251 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -67,7 +67,6 @@ r8a7778_cpg_register_clock(struct device_node *np, const char *name)
return ERR_PTR(-EINVAL);
}
-
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
struct clk_onecell_data *data;
diff --git a/drivers/clk/renesas/clk-vbattb.c b/drivers/clk/renesas/clk-vbattb.c
new file mode 100644
index 000000000000..ff9d1ead455c
--- /dev/null
+++ b/drivers/clk/renesas/clk-vbattb.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VBATTB clock driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/clock/renesas,r9a08g045-vbattb.h>
+
+#define VBATTB_BKSCCR 0x1c
+#define VBATTB_BKSCCR_SOSEL 6
+#define VBATTB_SOSCCR2 0x24
+#define VBATTB_SOSCCR2_SOSTP2 0
+#define VBATTB_XOSCCR 0x30
+#define VBATTB_XOSCCR_OUTEN 16
+#define VBATTB_XOSCCR_XSEL GENMASK(1, 0)
+#define VBATTB_XOSCCR_XSEL_4_PF 0x0
+#define VBATTB_XOSCCR_XSEL_7_PF 0x1
+#define VBATTB_XOSCCR_XSEL_9_PF 0x2
+#define VBATTB_XOSCCR_XSEL_12_5_PF 0x3
+
+/**
+ * struct vbattb_clk - VBATTB clock data structure
+ * @base: base address
+ * @lock: lock
+ */
+struct vbattb_clk {
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+static int vbattb_clk_validate_load_capacitance(u32 *reg_lc, u32 of_lc)
+{
+ switch (of_lc) {
+ case 4000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_4_PF;
+ break;
+ case 7000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_7_PF;
+ break;
+ case 9000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_9_PF;
+ break;
+ case 12500:
+ *reg_lc = VBATTB_XOSCCR_XSEL_12_5_PF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vbattb_clk_action(void *data)
+{
+ struct device *dev = data;
+ struct reset_control *rstc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ dev_err(dev, "Failed to de-assert reset!");
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_err(dev, "Failed to runtime suspend!");
+
+ of_clk_del_provider(dev->of_node);
+}
+
+static int vbattb_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_parent_data parent_data = {};
+ struct clk_hw_onecell_data *clk_data;
+ const struct clk_hw *parent_hws[2];
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct vbattb_clk *vbclk;
+ u32 of_lc, reg_lc;
+ struct clk_hw *hw;
+ /* 4 clocks are exported: VBATTB_XC, VBATTB_XBYP, VBATTB_MUX, VBATTB_VBATTCLK. */
+ u8 num_clks = 4;
+ int ret;
+
+	/* Default to 4 pF; the property is not needed when an external clock device is connected. */
+ of_lc = 4000;
+ of_property_read_u32(np, "quartz-load-femtofarads", &of_lc);
+
+ ret = vbattb_clk_validate_load_capacitance(&reg_lc, of_lc);
+ if (ret)
+ return ret;
+
+ vbclk = devm_kzalloc(dev, sizeof(*vbclk), GFP_KERNEL);
+ if (!vbclk)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+ clk_data->num = num_clks;
+
+ vbclk->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbclk->base))
+ return PTR_ERR(vbclk->base);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ rstc = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, rstc);
+ ret = devm_add_action_or_reset(dev, vbattb_clk_action, dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&vbclk->lock);
+
+ parent_data.fw_name = "rtx";
+ hw = devm_clk_hw_register_gate_parent_data(dev, "xc", &parent_data, 0,
+ vbclk->base + VBATTB_SOSCCR2,
+ VBATTB_SOSCCR2_SOSTP2,
+ CLK_GATE_SET_TO_DISABLE, &vbclk->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_XC] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_fwname(dev, np, "xbyp", "rtx", 0, 1, 1);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_XBYP] = hw;
+
+ parent_hws[0] = clk_data->hws[VBATTB_XC];
+ parent_hws[1] = clk_data->hws[VBATTB_XBYP];
+ hw = devm_clk_hw_register_mux_parent_hws(dev, "mux", parent_hws, 2, 0,
+ vbclk->base + VBATTB_BKSCCR,
+ VBATTB_BKSCCR_SOSEL,
+ 1, 0, &vbclk->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_MUX] = hw;
+
+ /* Set load capacitance before registering the VBATTCLK clock. */
+ scoped_guard(spinlock, &vbclk->lock) {
+ u32 val = readl_relaxed(vbclk->base + VBATTB_XOSCCR);
+
+ val &= ~VBATTB_XOSCCR_XSEL;
+ val |= reg_lc;
+ writel_relaxed(val, vbclk->base + VBATTB_XOSCCR);
+ }
+
+ /* This feeds the RTC counter clock and it needs to stay on. */
+ hw = devm_clk_hw_register_gate_parent_hw(dev, "vbattclk", hw, CLK_IS_CRITICAL,
+ vbclk->base + VBATTB_XOSCCR,
+ VBATTB_XOSCCR_OUTEN, 0,
+ &vbclk->lock);
+
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_VBATTCLK] = hw;
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
+static const struct of_device_id vbattb_clk_match[] = {
+ { .compatible = "renesas,r9a08g045-vbattb" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vbattb_clk_match);
+
+static struct platform_driver vbattb_clk_driver = {
+ .driver = {
+ .name = "renesas-vbattb-clk",
+ .of_match_table = vbattb_clk_match,
+ },
+ .probe = vbattb_clk_probe,
+};
+module_platform_driver(vbattb_clk_driver);
+
+MODULE_DESCRIPTION("Renesas VBATTB Clock Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index a85227c248f3..a8ed87c11ba1 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -159,22 +159,24 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
static struct clk * __init rza2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
- struct clk *parent;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
unsigned int mult = 1;
unsigned int div = 1;
+ struct clk *parent;
parent = clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
- switch (core->id) {
- case CLK_MAIN:
+ switch (core->type) {
+ case CLK_TYPE_RZA_MAIN:
+ r7s9210_update_clk_table(parent, base);
break;
- case CLK_PLL:
+ case CLK_TYPE_RZA_PLL:
if (cpg_mode)
mult = 44; /* Divider 1 is 1/2 */
else
@@ -185,9 +187,6 @@ static struct clk * __init rza2_cpg_clk_register(struct device *dev,
return ERR_PTR(-EINVAL);
}
- if (core->id == CLK_MAIN)
- r7s9210_update_clk_table(parent, base);
-
return clk_register_fixed_factor(NULL, core->name,
__clk_get_name(parent), 0, mult, div);
}
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index 3cec0f501b94..e2bda2c10730 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -219,10 +219,11 @@ static int __init r8a77970_cpg_mssr_init(struct device *dev)
static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int shift;
@@ -236,8 +237,7 @@ static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
shift = 4;
break;
default:
- return rcar_gen3_cpg_clk_register(dev, core, info, clks, base,
- notifiers);
+ return rcar_gen3_cpg_clk_register(dev, core, info, pub);
}
parent = clks[core->parent];
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 4c8e4c69c1bf..1be7b9592aa6 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -138,6 +138,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp1", 17, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp2", 18, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp3", 19, R8A779A0_CLK_S1D1),
DEF_MOD("avb0", 211, R8A779A0_CLK_S3D2),
DEF_MOD("avb1", 212, R8A779A0_CLK_S3D2),
DEF_MOD("avb2", 213, R8A779A0_CLK_S3D2),
@@ -238,6 +242,10 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vspx1", 1029, R8A779A0_CLK_S1D1),
DEF_MOD("vspx2", 1030, R8A779A0_CLK_S1D1),
DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx0", 1100, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx1", 1101, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx2", 1102, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx3", 1103, R8A779A0_CLK_S1D1),
};
static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
@@ -266,7 +274,6 @@ static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
{ 2, 128, 1, 192, 1, 32, },
};
-
static int __init r8a779a0_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 55c8dd032fc3..015b9773cc55 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -163,6 +163,8 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779G0_CLK_S0D2_VIO),
+ DEF_MOD("isp1", 17, R8A779G0_CLK_S0D2_VIO),
DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC),
@@ -238,6 +240,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("vspx1", 1029, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx1", 1101, R8A779G0_CLK_S0D1_VIO),
DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index e20c048bfa9b..ffea06d77d5e 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -37,7 +37,6 @@ enum clk_ids {
CLK_PLL5,
CLK_PLL6,
CLK_PLL1_DIV2,
- CLK_PLL2_DIV2,
CLK_PLL3_DIV2,
CLK_PLL4_DIV2,
CLK_PLL4_DIV5,
@@ -78,7 +77,6 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
- DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1),
DEF_FIXED(".pll4_div5", CLK_PLL4_DIV5, CLK_PLL4, 5, 1),
@@ -101,10 +99,10 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
- DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 0),
- DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 8),
- DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 32),
- DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 40),
+ DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 0),
+ DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 8),
+ DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 32),
+ DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 40),
DEF_FIXED("s0d2", R8A779H0_CLK_S0D2, CLK_S0, 2, 1),
DEF_FIXED("s0d3", R8A779H0_CLK_S0D3, CLK_S0, 3, 1),
DEF_FIXED("s0d4", R8A779H0_CLK_S0D4, CLK_S0, 4, 1),
@@ -173,12 +171,16 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779H0_CLK_S0D2_VIO),
DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
+ DEF_MOD("dis0", 411, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink0", 415, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd0", 508, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif1", 515, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif2", 516, R8A779H0_CLK_SASYNCPERD1),
@@ -227,6 +229,7 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("vin15", 811, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin16", 812, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin17", 813, R8A779H0_CLK_S0D4_VIO),
+ DEF_MOD("vspd0", 830, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
DEF_MOD("cmt0", 910, R8A779H0_CLK_R),
DEF_MOD("cmt1", 911, R8A779H0_CLK_R),
@@ -236,6 +239,8 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("pfc1", 916, R8A779H0_CLK_CP),
DEF_MOD("pfc2", 917, R8A779H0_CLK_CP),
DEF_MOD("tsc2:tsc1", 919, R8A779H0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779H0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779H0_CLK_S0D1_VIO),
DEF_MOD("ssiu", 2926, R8A779H0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779H0_CLK_S0D6_PER),
};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index c1348e2d450c..dcda19318b2a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -20,15 +20,24 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
#define R9A06G032_SYSCTRL_USB 0x00
-#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
+#define R9A06G032_SYSCTRL_USB_H2MODE BIT(1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+#define R9A06G032_SYSCTRL_RSTEN 0x120
+#define R9A06G032_SYSCTRL_RSTEN_MRESET_EN BIT(0)
+#define R9A06G032_SYSCTRL_RSTCTRL 0x198
+/* These work for both reset registers */
+#define R9A06G032_SYSCTRL_SWRST BIT(6)
+#define R9A06G032_SYSCTRL_WDA7RST_1 BIT(2)
+#define R9A06G032_SYSCTRL_WDA7RST_0 BIT(1)
+
/**
* struct regbit - describe one bit in a register
* @reg: offset of register relative to base address,
@@ -1270,6 +1279,12 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static int r9a06g032_restart_handler(struct sys_off_data *data)
+{
+ writel(R9A06G032_SYSCTRL_SWRST, sysctrl_priv->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ return NOTIFY_DONE;
+}
+
static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
{
struct device_node *usbf_np;
@@ -1324,6 +1339,18 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
r9a06g032_init_h2mode(clocks);
+ /* Clear potentially pending resets */
+ writel(R9A06G032_SYSCTRL_WDA7RST_0 | R9A06G032_SYSCTRL_WDA7RST_1,
+ clocks->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ /* Allow software reset */
+ writel(R9A06G032_SYSCTRL_SWRST | R9A06G032_SYSCTRL_RSTEN_MRESET_EN,
+ clocks->reg + R9A06G032_SYSCTRL_RSTEN);
+
+ error = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ r9a06g032_restart_handler, NULL);
+ if (error)
+ dev_warn(dev, "couldn't register restart handler (%d)\n", error);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
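
The r9a06g032 change arms the software reset in two steps: a write to RSTEN at probe time gates which sources may reset the SoC, and the restart handler later fires the same SWRST bit via RSTCTRL. A standalone sketch of that enable-then-trigger sequence over two simulated registers (the register contents are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1U << (n))
#define SYSCTRL_SWRST           BIT(6)
#define SYSCTRL_RSTEN_MRESET_EN BIT(0)

/* Simulated SYSCTRL registers; upstream these sit behind sysctrl_priv->reg. */
static uint32_t rsten;   /* 0x120: which reset sources are enabled */
static uint32_t rstctrl; /* 0x198: writing an enabled bit triggers it */

static void arm_software_reset(void)
{
	/* Probe time: allow SWRST (and keep MRESET) as valid reset sources. */
	rsten = SYSCTRL_SWRST | SYSCTRL_RSTEN_MRESET_EN;
}

static void restart_handler(void)
{
	/* Restart time: the trigger only takes effect if SWRST was enabled. */
	if (rsten & SYSCTRL_SWRST)
		rstctrl = SYSCTRL_SWRST;
}

int main(void)
{
	arm_software_reset();
	restart_handler();
	printf("RSTCTRL = 0x%02x (0x40 means the SoC would reset)\n", rstctrl);
	return 0;
}
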
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index c3c2b0c43983..33e9a1223c72 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -89,7 +89,9 @@ static const struct clk_div_table dtable_1_32[] = {
/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+#ifdef CONFIG_ARM64
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
+#endif
static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const u32 mtable_sdhi[] = { 1, 2, 3 };
@@ -137,7 +139,12 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+#ifdef CONFIG_ARM64
DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
+#endif
DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
@@ -157,143 +164,143 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
- 0x514, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
#ifdef CONFIG_RISCV
DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
- 0x534, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
- 0x534, 1),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
- 0x534, 2),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G043_MTU_X_MCK_MTU3, R9A07G043_CLK_P0,
- 0x538, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
- 0x548, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
- 0x548, 1),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
- 0x554, 3),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
- 0x554, 7),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
#ifdef CONFIG_ARM64
- DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
- DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
- 0x564, 1),
- DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
- 0x564, 2),
- DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
- 0x564, 3),
+ DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
- 0x570, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 1),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
- 0x570, 2),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 3),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
- 0x570, 4),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 5),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
- 0x570, 6),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 7),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
- 0x578, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
- 0x578, 1),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
- 0x578, 2),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
- 0x578, 3),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
- 0x580, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
- 0x580, 1),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
- 0x580, 2),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
- 0x580, 3),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 1),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 2),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 3),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 4),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
- 0x588, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
- 0x588, 1),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
- 0x590, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
- 0x590, 1),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
- 0x590, 2),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
- 0x594, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
- 0x598, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
#ifdef CONFIG_RISCV
DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
- 0x608, 0),
+ 0x608, 0, MSTOP(BUS_REG1, BIT(7))),
#endif
};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index f6df3f7a31b5..0dd264877b9a 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -94,6 +94,41 @@ static const struct clk_div_table dtable_1_32[] = {
{0, 0},
};
+#ifdef CONFIG_CLK_R9A07G054
+static const struct clk_div_table dtable_4_32[] = {
+ {3, 4},
+ {4, 5},
+ {5, 6},
+ {6, 7},
+ {7, 8},
+ {8, 9},
+ {9, 10},
+ {10, 11},
+ {11, 12},
+ {12, 13},
+ {13, 14},
+ {14, 15},
+ {15, 16},
+ {16, 17},
+ {17, 18},
+ {18, 19},
+ {19, 20},
+ {20, 21},
+ {21, 22},
+ {22, 23},
+ {23, 24},
+ {24, 25},
+ {25, 26},
+ {26, 27},
+ {27, 28},
+ {28, 29},
+ {29, 30},
+ {30, 31},
+ {31, 32},
+ {0, 0},
+};
+#endif
+
static const struct clk_div_table dtable_16_128[] = {
{0, 16},
{1, 32},
@@ -114,7 +149,7 @@ static const u32 mtable_sdhi[] = { 1, 2, 3 };
static const struct {
struct cpg_core_clk common[56];
#ifdef CONFIG_CLK_R9A07G054
- struct cpg_core_clk drp[0];
+ struct cpg_core_clk drp[3];
#endif
} core_clks __initconst = {
.common = {
@@ -192,6 +227,9 @@ static const struct {
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
+ DEF_FIXED("DRP_M", R9A07G054_CLK_DRP_M, CLK_PLL3, 1, 5),
+ DEF_FIXED("DRP_D", R9A07G054_CLK_DRP_D, CLK_PLL3, 1, 2),
+ DEF_DIV("DRP_A", R9A07G054_CLK_DRP_A, CLK_PLL3, DIVPL3E, dtable_4_32),
},
#endif
};
@@ -199,171 +237,181 @@ static const struct {
static const struct {
struct rzg2l_mod_clk common[79];
#ifdef CONFIG_CLK_R9A07G054
- struct rzg2l_mod_clk drp[0];
+ struct rzg2l_mod_clk drp[5];
#endif
} mod_clks = {
.common = {
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
- 0x514, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
- 0x534, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
- 0x534, 1),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
- 0x534, 2),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G044_MTU_X_MCK_MTU3, R9A07G044_CLK_P0,
- 0x538, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
- 0x540, 0),
+ 0x540, 0, MSTOP(BUS_MCPU1, BIT(4))),
DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
- 0x544, 0),
+ 0x544, 0, MSTOP(BUS_MCPU1, BIT(5))),
DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
- 0x544, 1),
+ 0x544, 1, MSTOP(BUS_MCPU1, BIT(6))),
DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
- 0x544, 2),
+ 0x544, 2, MSTOP(BUS_MCPU1, BIT(7))),
DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
- 0x544, 3),
+ 0x544, 3, MSTOP(BUS_MCPU1, BIT(8))),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
- 0x548, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
- 0x548, 1),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
- 0x548, 2),
+ 0x548, 2, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
- 0x548, 3),
+ 0x548, 3, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
- 0x554, 3),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
- 0x554, 7),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
- 0x558, 0),
+ 0x558, 0, MSTOP(BUS_REG1, BIT(4))),
DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
- 0x558, 1),
+ 0x558, 1, 0),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
- 0x558, 2),
- DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
- DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
- 0x564, 1),
- DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
- 0x564, 2),
- DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
- 0x564, 3),
+ 0x558, 2, 0),
+ DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
- 0x568, 0),
+ 0x568, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
- 0x568, 1),
+ 0x568, 1, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
- 0x568, 2),
+ 0x568, 2, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
- 0x568, 3),
+ 0x568, 3, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
- 0x568, 4),
+ 0x568, 4, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
- 0x568, 5),
+ 0x568, 5, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
- 0x570, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 1),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
- 0x570, 2),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 3),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
- 0x570, 4),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 5),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
- 0x570, 6),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 7),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
- 0x578, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
- 0x578, 1),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
- 0x578, 2),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
- 0x578, 3),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
- 0x580, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
- 0x580, 1),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
- 0x580, 2),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
- 0x580, 3),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 1),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 2),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 3),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 4),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
- 0x588, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
- 0x588, 1),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
- 0x590, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
- 0x590, 1),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
- 0x590, 2),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
- 0x594, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
- 0x598, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
+ DEF_MOD("stpai_initclk", R9A07G054_STPAI_INITCLK, R9A07G044_OSCCLK,
+ 0x5e8, 0, 0),
+ DEF_MOD("stpai_aclk", R9A07G054_STPAI_ACLK, R9A07G044_CLK_P1,
+ 0x5e8, 1, 0),
+ DEF_MOD("stpai_mclk", R9A07G054_STPAI_MCLK, R9A07G054_CLK_DRP_M,
+ 0x5e8, 2, 0),
+ DEF_MOD("stpai_dclkin", R9A07G054_STPAI_DCLKIN, R9A07G054_CLK_DRP_D,
+ 0x5e8, 3, 0),
+ DEF_MOD("stpai_aclk_drp", R9A07G054_STPAI_ACLK_DRP, R9A07G054_CLK_DRP_A,
+ 0x5e8, 4, 0),
},
#endif
};
@@ -430,6 +478,9 @@ static const struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G044_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A07G044_TSU_PRESETN, 0x8ac, 0),
+#ifdef CONFIG_CLK_R9A07G054
+ DEF_RST(R9A07G054_STPAI_ARESETN, 0x8e8, 0),
+#endif
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 1ce40fb51f13..79e7b19c7882 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/pm_domain.h>
#include <dt-bindings/clock/r9a08g045-cpg.h>
@@ -50,7 +51,7 @@
#define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
/* PLL 1/4/6 configuration registers macro. */
-#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12)
+#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting))
#define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \
@@ -133,7 +134,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
/* Internal Core Clocks */
DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
- DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)),
+ DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100),
+ 1100000000UL),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3),
@@ -181,50 +183,126 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS,
dtable_1_32, 0, 0, 0, NULL),
DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2),
+ DEF_FIXED("P5", R9A08G045_CLK_P5, CLK_PLL2_DIV2, 1, 4),
DEF_FIXED("ZT", R9A08G045_CLK_ZT, CLK_PLL3_DIV2_8, 1, 1),
DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2),
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3),
DEF_FIXED("HP", R9A08G045_CLK_HP, CLK_PLL6, 1, 2),
+ DEF_FIXED("TSU", R9A08G045_CLK_TSU, CLK_PLL2_DIV2, 1, 8),
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
- DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0),
- DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
- DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
- DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
- DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1),
- DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
- DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
- DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
- DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1),
- DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2),
- DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3),
- DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4),
- DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5),
- DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6),
- DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7),
- DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8),
- DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
- DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
- DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
- DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
- DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
- DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
- DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3),
- DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
- DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
- DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
- DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1),
- DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1),
- DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9),
- DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0),
- DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1),
- DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
- DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
- DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
- DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
- DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
+ DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0,
+ MSTOP(BUS_ACPU, BIT(3))),
+ DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0,
+ MSTOP(BUS_REG1, BIT(2))),
+ DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1,
+ MSTOP(BUS_REG1, BIT(3))),
+ DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0,
+ MSTOP(BUS_PERI_COM, BIT(5))),
+ DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1,
+ MSTOP(BUS_PERI_COM, BIT(7))),
+ DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2,
+ MSTOP(BUS_PERI_COM, BIT(6))),
+ DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3,
+ MSTOP(BUS_PERI_COM, BIT(4))),
+ DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8, 0),
+ DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9, 0),
+ DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0,
+ MSTOP(BUS_MCPU2, BIT(10))),
+ DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1,
+ MSTOP(BUS_MCPU2, BIT(11))),
+ DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2,
+ MSTOP(BUS_MCPU2, BIT(12))),
+ DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3,
+ MSTOP(BUS_MCPU2, BIT(13))),
+ DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0,
+ MSTOP(BUS_MCPU2, BIT(1))),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1,
+ MSTOP(BUS_MCPU2, BIT(2))),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2,
+ MSTOP(BUS_MCPU2, BIT(3))),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3,
+ MSTOP(BUS_MCPU2, BIT(4))),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4,
+ MSTOP(BUS_MCPU2, BIT(5))),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5,
+ MSTOP(BUS_MCPU3, BIT(4))),
+ DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0,
+ MSTOP(BUS_PERI_CPU, BIT(6))),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0,
+ MSTOP(BUS_MCPU2, BIT(15))),
+ DEF_MOD("pci_aclk", R9A08G045_PCI_ACLK, R9A08G045_CLK_M0, 0x608, 0,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("pci_clkl1pm", R9A08G045_PCI_CLKL1PM, R9A08G045_CLK_ZT, 0x608, 1,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("i3c_pclk", R9A08G045_I3C_PCLK, R9A08G045_CLK_TSU, 0x610, 0,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("i3c_tclk", R9A08G045_I3C_TCLK, R9A08G045_CLK_P5, 0x610, 1,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0,
+ MSTOP(BUS_MCPU3, GENMASK(8, 7))),
};
static const struct rzg2l_reset r9a08g045_resets[] = {
@@ -237,6 +315,10 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A08G045_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A08G045_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A08G045_SSI3_RST_M2_REG, 0x870, 3),
DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
@@ -248,9 +330,26 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_I2C2_MRST, 0x880, 2),
DEF_RST(R9A08G045_I2C3_MRST, 0x880, 3),
DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A08G045_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A08G045_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A08G045_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A08G045_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A08G045_SCIF5_RST_SYSTEM_N, 0x884, 5),
DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0),
DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1),
DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A08G045_TSU_PRESETN, 0x8ac, 0),
+ DEF_RST(R9A08G045_PCI_ARESETN, 0x908, 0),
+ DEF_RST(R9A08G045_PCI_RST_B, 0x908, 1),
+ DEF_RST(R9A08G045_PCI_RST_GP_B, 0x908, 2),
+ DEF_RST(R9A08G045_PCI_RST_PS_B, 0x908, 3),
+ DEF_RST(R9A08G045_PCI_RST_RSM_B, 0x908, 4),
+ DEF_RST(R9A08G045_PCI_RST_CFG_B, 0x908, 5),
+ DEF_RST(R9A08G045_PCI_RST_LOAD_B, 0x908, 6),
+ DEF_RST(R9A08G045_I3C_TRESETN, 0x910, 0),
+ DEF_RST(R9A08G045_I3C_PRESETN, 0x910, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -262,65 +361,8 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_VBAT_BCLK,
};
-static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
- /* Keep always-on domain on the first position for proper domains registration. */
- DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON,
- DEF_REG_CONF(0, 0),
- RZG2L_PD_F_ALWAYS_ON),
- DEF_PD("gic", R9A08G045_PD_GIC,
- DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)),
- RZG2L_PD_F_ALWAYS_ON),
- DEF_PD("ia55", R9A08G045_PD_IA55,
- DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)),
- RZG2L_PD_F_ALWAYS_ON),
- DEF_PD("dmac", R9A08G045_PD_DMAC,
- DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)),
- RZG2L_PD_F_ALWAYS_ON),
- DEF_PD("wdt0", R9A08G045_PD_WDT0,
- DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)),
- RZG2L_PD_F_NONE),
- DEF_PD("sdhi0", R9A08G045_PD_SDHI0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)),
- RZG2L_PD_F_NONE),
- DEF_PD("sdhi1", R9A08G045_PD_SDHI1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)),
- RZG2L_PD_F_NONE),
- DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
- RZG2L_PD_F_NONE),
- DEF_PD("usb0", R9A08G045_PD_USB0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)),
- RZG2L_PD_F_NONE),
- DEF_PD("usb1", R9A08G045_PD_USB1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)),
- RZG2L_PD_F_NONE),
- DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)),
- RZG2L_PD_F_NONE),
- DEF_PD("eth0", R9A08G045_PD_ETHER0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
- RZG2L_PD_F_NONE),
- DEF_PD("eth1", R9A08G045_PD_ETHER1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)),
- RZG2L_PD_F_NONE),
- DEF_PD("i2c0", R9A08G045_PD_I2C0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)),
- RZG2L_PD_F_NONE),
- DEF_PD("i2c1", R9A08G045_PD_I2C1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)),
- RZG2L_PD_F_NONE),
- DEF_PD("i2c2", R9A08G045_PD_I2C2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)),
- RZG2L_PD_F_NONE),
- DEF_PD("i2c3", R9A08G045_PD_I2C3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)),
- RZG2L_PD_F_NONE),
- DEF_PD("scif0", R9A08G045_PD_SCIF0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)),
- RZG2L_PD_F_NONE),
- DEF_PD("vbat", R9A08G045_PD_VBAT,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
- RZG2L_PD_F_ALWAYS_ON),
+static const unsigned int r9a08g045_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A08G045_PCI_CLKL1PM,
};
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
@@ -339,13 +381,13 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.num_mod_clks = ARRAY_SIZE(r9a08g045_mod_clks),
.num_hw_mod_clks = R9A08G045_VBAT_BCLK + 1,
+	/* No PM module clocks */
+ .no_pm_mod_clks = r9a08g045_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a08g045_no_pm_mod_clks),
+
/* Resets */
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
- /* Power domains */
- .pm_domains = r9a08g045_pm_domains,
- .num_pm_domains = ARRAY_SIZE(r9a08g045_pm_domains),
-
.has_clk_mon_regs = true,
};
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index dda9f29dff33..ba25429c244d 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -98,7 +98,6 @@ static const struct clk_div_table dtable_divd[] = {
{0, 0},
};
-
static const struct clk_div_table dtable_divw[] = {
{0, 6},
{1, 7},
@@ -152,64 +151,64 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
- DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
- DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
- DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0),
- DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1),
- DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2),
- DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3),
- DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4),
- DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5),
- DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6),
- DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7),
- DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8),
- DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9),
- DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10),
- DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11),
- DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
- DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
- DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
- DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4),
- DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5),
- DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6),
- DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
- DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
- DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0),
- DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4),
- DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5),
- DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6),
- DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7),
- DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8),
- DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9),
- DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10),
- DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11),
- DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
- DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0),
- DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4),
- DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5),
- DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6),
- DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7),
- DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8),
- DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9),
- DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10),
- DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11),
- DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
- DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
- DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0),
- DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4),
- DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5),
- DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6),
- DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7),
- DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8),
- DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9),
- DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10),
- DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0),
- DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1),
- DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
- DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
- DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8),
- DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12),
- DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2, 0),
+ DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5, 0),
+ DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0, 0),
+ DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1, 0),
+ DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2, 0),
+ DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3, 0),
+ DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4, 0),
+ DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5, 0),
+ DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6, 0),
+ DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7, 0),
+ DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8, 0),
+ DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9, 0),
+ DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10, 0),
+ DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11, 0),
+ DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8, 0),
+ DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8, 0),
+ DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9, 0),
+ DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4, 0),
+ DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5, 0),
+ DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6, 0),
+ DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12, 0),
+ DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12, 0),
+ DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0, 0),
+ DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4, 0),
+ DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5, 0),
+ DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6, 0),
+ DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7, 0),
+ DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8, 0),
+ DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9, 0),
+ DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10, 0),
+ DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11, 0),
+ DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12, 0),
+ DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0, 0),
+ DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4, 0),
+ DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5, 0),
+ DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6, 0),
+ DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7, 0),
+ DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8, 0),
+ DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9, 0),
+ DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10, 0),
+ DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11, 0),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12, 0),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13, 0),
+ DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0, 0),
+ DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4, 0),
+ DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5, 0),
+ DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6, 0),
+ DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7, 0),
+ DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8, 0),
+ DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9, 0),
+ DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10, 0),
+ DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0, 0),
+ DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1, 0),
+ DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4, 0),
+ DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5, 0),
+ DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8, 0),
+ DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12, 0),
+ DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
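Editorial note: every DEF_MOD/DEF_COUPLED row in the r9a09g011 table above gains a trailing 0. A sketch of the macro pattern that makes such a table-wide change mechanical: the macro forwards the extra argument into a new struct member, so existing rows only need a default appended. The member name ext_conf is a placeholder, not the real rzg2l-cpg field.

/* Illustrative module-clock descriptor; member names are assumptions. */
struct mod_clk {
	const char *name;
	unsigned int id;
	unsigned int parent;
	unsigned int off;	/* control register offset */
	unsigned int bit;	/* enable bit within the register */
	unsigned int ext_conf;	/* new trailing field, 0 = no extra config */
};

#define DEF_MOD(_name, _id, _parent, _off, _bit, _ext)	\
	{						\
		.name = (_name),			\
		.id = (_id),				\
		.parent = (_parent),			\
		.off = (_off),				\
		.bit = (_bit),				\
		.ext_conf = (_ext),			\
	}

static const struct mod_clk example_mod_clks[] = {
	DEF_MOD("pfc", 1, 0, 0x400, 2, 0),	/* same shape as the rows above */
};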
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
new file mode 100644
index 000000000000..ef115f9ec0e6
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3E CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G047_USB3_0_CLKCORE,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+ CLK_PLLVDO,
+ CLK_PLLETH,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLCLN_DIV20,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
+ CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_GPU,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
+static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+ DEF_FIXED(".pllcln_div20", CLK_PLLCLN_DIV20, CLK_PLLCLN, 1, 20),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G047_CA55_0_CORECLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G047_CA55_0_CORECLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G047_CA55_0_CORECLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G047_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G047_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G047_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gpt_0_pclk_sfr", CLK_PLLCLN_DIV8, 3, 1, 1, 17,
+ BUS_MSTOP(6, BIT(11))),
+ DEF_MOD("gpt_1_pclk_sfr", CLK_PLLCLN_DIV8, 3, 2, 1, 18,
+ BUS_MSTOP(6, BIT(12))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("canfd_0_pclk", CLK_PLLCLN_DIV16, 9, 12, 4, 28,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("canfd_0_clk_ram", CLK_PLLCLN_DIV8, 9, 13, 4, 29,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
+ BUS_MSTOP(2, BIT(15))),
+};
+
+static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
+ DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(5, 9, 2, 10), /* GPT_0_RST_P_REG */
+ DEF_RST(5, 10, 2, 11), /* GPT_0_RST_S_REG */
+ DEF_RST(5, 11, 2, 12), /* GPT_1_RST_P_REG */
+ DEF_RST(5, 12, 2, 13), /* GPT_1_RST_S_REG */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
+ DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */
+ DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
+};
+
+const struct rzv2h_cpg_info r9a09g047_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g047_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g047_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g047_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g047_mod_clks),
+ .num_hw_mod_clks = 28 * 16,
+
+ /* Resets */
+ .resets = r9a09g047_resets,
+ .num_resets = ARRAY_SIZE(r9a09g047_resets),
+
+ .num_mstop_bits = 208,
+};
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
new file mode 100644
index 000000000000..55f056359dd7
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2N CPG driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G056_SPI_CLK_SPI,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+ CLK_PLLETH,
+ CLK_PLLGPU,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
+ CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_PLLGPU_GEAR,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
+static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G056_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G056_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G056_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G056_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
+};
+
+static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
+};
+
+static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+ DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+ DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+ DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+ DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+ DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+ DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
+};
+
+const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g056_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g056_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g056_resets,
+ .num_resets = ARRAY_SIZE(r9a09g056_resets),
+
+ .num_mstop_bits = 192,
+};
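Editorial note: the dtable_* arrays in these files pair a register field value with a divisor and end with a {0, 0} sentinel, following the struct clk_div_table convention from <linux/clk-provider.h>. A standalone sketch of the lookup they imply; the local helper is illustrative, not the common clock framework's own code.

struct clk_div_table {		/* mirrors <linux/clk-provider.h> */
	unsigned int val;	/* value written to the divider field */
	unsigned int div;	/* resulting divisor; 0 terminates the table */
};

static const struct clk_div_table dtable_2_100[] = {
	{0, 2}, {1, 10}, {2, 100}, {0, 0},
};

/* Translate a register field value into a rate divisor (0 = not found). */
static unsigned int divider_from_val(const struct clk_div_table *t,
				     unsigned int val)
{
	for (; t->div; t++)
		if (t->val == val)
			return t->div;
	return 0;
}

/*
 * e.g. ".plleth_gbe0" divides the 250 MHz fixed parent: field value 0
 * selects /2, giving the 125 MHz GbE reference.
 */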
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 3ee32db5c0af..6389c4b6a523 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G057_SPI_CLK_SPI,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -28,19 +28,69 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
+ CLK_PLLETH,
+ CLK_PLLGPU,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_CRU2,
+ CLK_PLLVDO_CRU3,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
};
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -50,6 +100,21 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -60,65 +125,270 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G057_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G057_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G057_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G057_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G057_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
- DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
- DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
- DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
- DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
- DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
- DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
- DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
- DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
- DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
- DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
- DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
- DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
- DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
- DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
- DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
- DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
- DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
- DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
- DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
- DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
- DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
- DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
- DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
- DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
- DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
- DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
- DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
- DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
- DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
- DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
- DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
- DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
- DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
- DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
- DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
- DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
- DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rspi_0_pclk", CLK_PLLCLN_DIV8, 5, 4, 2, 20,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_pclk_sfr", CLK_PLLCLN_DIV8, 5, 5, 2, 21,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_tclk", CLK_PLLCLN_DIV8, 5, 6, 2, 22,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_1_pclk", CLK_PLLCLN_DIV8, 5, 7, 2, 23,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_pclk_sfr", CLK_PLLCLN_DIV8, 5, 8, 2, 24,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_tclk", CLK_PLLCLN_DIV8, 5, 9, 2, 25,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_2_pclk", CLK_PLLCLN_DIV8, 5, 10, 2, 26,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_pclk_sfr", CLK_PLLCLN_DIV8, 5, 11, 2, 27,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_tclk", CLK_PLLCLN_DIV8, 5, 12, 2, 28,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_2_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 8, 6, 24,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD_NO_PM("cru_2_vclk", CLK_PLLVDO_CRU2, 13, 9, 6, 25,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_2_pclk", CLK_PLLDTY_DIV16, 13, 10, 6, 26,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_3_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 11, 6, 27,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD_NO_PM("cru_3_vclk", CLK_PLLVDO_CRU3, 13, 12, 6, 28,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
+ DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
@@ -131,7 +401,15 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(7, 11, 3, 12), /* RSPI_0_PRESETN */
+ DEF_RST(7, 12, 3, 13), /* RSPI_0_TRESETN */
+ DEF_RST(7, 13, 3, 14), /* RSPI_1_PRESETN */
+ DEF_RST(7, 14, 3, 15), /* RSPI_1_TRESETN */
+ DEF_RST(7, 15, 3, 16), /* RSPI_2_PRESETN */
+ DEF_RST(8, 0, 3, 17), /* RSPI_2_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -141,9 +419,32 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(12, 11, 5, 28), /* CRU_2_PRESETN */
+ DEF_RST(12, 12, 5, 29), /* CRU_2_ARESETN */
+ DEF_RST(12, 13, 5, 30), /* CRU_2_S_RESETN */
+ DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
+ DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
+ DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
@@ -161,4 +462,6 @@ const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
/* Resets */
.resets = r9a09g057_resets,
.num_resets = ARRAY_SIZE(r9a09g057_resets),
+
+ .num_mstop_bits = 192,
};
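Editorial note: each DEF_RST row names a reset-register index and bit plus a monitor-register index and bit. A sketch of the descriptor such a macro plausibly fills in; the member names are assumptions. The rows above also obey a checkable invariant, assuming 16-bit reset registers and 32-bit monitor registers: (reg * 16 + bit) - 15 == mon_reg * 32 + mon_bit, e.g. SYS_0_PRESETN gives 3 * 16 + 0 - 15 = 33 = 1 * 32 + 1.

/* Illustrative expansion of DEF_RST; member names are assumptions. */
struct rzv2h_reset_desc {
	unsigned int reset_index;	/* RSTn register index */
	unsigned int reset_bit;		/* assert bit within that register */
	unsigned int mon_index;		/* RSTMONn register index */
	unsigned int mon_bit;		/* status bit within it */
};

#define DEF_RST(_rst, _rst_bit, _mon, _mon_bit)	\
	{					\
		.reset_index = (_rst),		\
		.reset_bit = (_rst_bit),	\
		.mon_index = (_mon),		\
		.mon_bit = (_mon_bit),		\
	}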
diff --git a/drivers/clk/renesas/r9a09g077-cpg.c b/drivers/clk/renesas/r9a09g077-cpg.c
new file mode 100644
index 000000000000..af3ef6d58c87
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g077-cpg.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r9a09g077 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h>
+#include <dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h>
+#include "renesas-cpg-mssr.h"
+
+#define RZT2H_REG_BLOCK_SHIFT 11
+#define RZT2H_REG_OFFSET_MASK GENMASK(10, 0)
+#define RZT2H_REG_CONF(block, offset) (((block) << RZT2H_REG_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_REG_OFFSET_MASK))
+
+#define RZT2H_REG_BLOCK(x) ((x) >> RZT2H_REG_BLOCK_SHIFT)
+#define RZT2H_REG_OFFSET(x) ((x) & RZT2H_REG_OFFSET_MASK)
+
+#define SCKCR RZT2H_REG_CONF(0, 0x00)
+#define SCKCR2 RZT2H_REG_CONF(1, 0x04)
+#define SCKCR3 RZT2H_REG_CONF(0, 0x08)
+
+#define OFFSET_MASK GENMASK(31, 20)
+#define SHIFT_MASK GENMASK(19, 12)
+#define WIDTH_MASK GENMASK(11, 8)
+
+#define CONF_PACK(offset, shift, width) \
+ (FIELD_PREP_CONST(OFFSET_MASK, (offset)) | \
+ FIELD_PREP_CONST(SHIFT_MASK, (shift)) | \
+ FIELD_PREP_CONST(WIDTH_MASK, (width)))
+
+#define GET_SHIFT(val) FIELD_GET(SHIFT_MASK, val)
+#define GET_WIDTH(val) FIELD_GET(WIDTH_MASK, val)
+#define GET_REG_OFFSET(val) FIELD_GET(OFFSET_MASK, val)
+
+#define DIVCA55C0 CONF_PACK(SCKCR2, 8, 1)
+#define DIVCA55C1 CONF_PACK(SCKCR2, 9, 1)
+#define DIVCA55C2 CONF_PACK(SCKCR2, 10, 1)
+#define DIVCA55C3 CONF_PACK(SCKCR2, 11, 1)
+#define DIVCA55S CONF_PACK(SCKCR2, 12, 1)
+#define DIVSCI5ASYNC CONF_PACK(SCKCR2, 18, 2)
+
+#define DIVSCI0ASYNC CONF_PACK(SCKCR3, 6, 2)
+#define DIVSCI1ASYNC CONF_PACK(SCKCR3, 8, 2)
+#define DIVSCI2ASYNC CONF_PACK(SCKCR3, 10, 2)
+#define DIVSCI3ASYNC CONF_PACK(SCKCR3, 12, 2)
+#define DIVSCI4ASYNC CONF_PACK(SCKCR3, 14, 2)
+
+#define SEL_PLL CONF_PACK(SCKCR, 22, 1)
+
+enum rzt2h_clk_types {
+ CLK_TYPE_RZT2H_DIV = CLK_TYPE_CUSTOM, /* Clock with divider */
+ CLK_TYPE_RZT2H_MUX, /* Clock with clock source selector */
+};
+
+#define DEF_DIV(_name, _id, _parent, _conf, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_DIV, .conf = _conf, \
+ .parent = _parent, .dtable = _dtable, .flag = 0)
+#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _mux_flags) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents, \
+ .flag = 0, .mux_flags = _mux_flags)
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G077_ETCLKE,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_LOCO,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL4,
+ CLK_SEL_CLK_PLL0,
+ CLK_SEL_CLK_PLL1,
+ CLK_SEL_CLK_PLL2,
+ CLK_SEL_CLK_PLL4,
+ CLK_PLL4D1,
+ CLK_SCI0ASYNC,
+ CLK_SCI1ASYNC,
+ CLK_SCI2ASYNC,
+ CLK_SCI3ASYNC,
+ CLK_SCI4ASYNC,
+ CLK_SCI5ASYNC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_2[] = {
+ {0, 2},
+ {1, 1},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_24_25_30_32[] = {
+ {0, 32},
+ {1, 30},
+ {2, 25},
+ {3, 24},
+ {0, 0},
+};
+
+/* Mux clock tables */
+
+static const char * const sel_clk_pll0[] = { ".loco", ".pll0" };
+static const char * const sel_clk_pll1[] = { ".loco", ".pll1" };
+static const char * const sel_clk_pll2[] = { ".loco", ".pll2" };
+static const char * const sel_clk_pll4[] = { ".loco", ".pll4" };
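+/*
+ * All four PLL muxes are switched by the same SEL_PLL bit
+ * (SCKCR bit 22), so they are registered read-only.
+ */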
+
+static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_RATE(".loco", CLK_LOCO, 1000 * 1000),
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_EXTAL, 1, 48),
+ DEF_FIXED(".pll1", CLK_PLL1, CLK_EXTAL, 1, 40),
+ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 1, 32),
+ DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 1, 96),
+
+ DEF_MUX(".sel_clk_pll0", CLK_SEL_CLK_PLL0, SEL_PLL,
+ sel_clk_pll0, ARRAY_SIZE(sel_clk_pll0), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll1", CLK_SEL_CLK_PLL1, SEL_PLL,
+ sel_clk_pll1, ARRAY_SIZE(sel_clk_pll1), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll2", CLK_SEL_CLK_PLL2, SEL_PLL,
+ sel_clk_pll2, ARRAY_SIZE(sel_clk_pll2), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll4", CLK_SEL_CLK_PLL4, SEL_PLL,
+ sel_clk_pll4, ARRAY_SIZE(sel_clk_pll4), CLK_MUX_READ_ONLY),
+
+ DEF_FIXED(".pll4d1", CLK_PLL4D1, CLK_SEL_CLK_PLL4, 1, 1),
+ DEF_DIV(".sci0async", CLK_SCI0ASYNC, CLK_PLL4D1, DIVSCI0ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci1async", CLK_SCI1ASYNC, CLK_PLL4D1, DIVSCI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci2async", CLK_SCI2ASYNC, CLK_PLL4D1, DIVSCI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci3async", CLK_SCI3ASYNC, CLK_PLL4D1, DIVSCI3ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci4async", CLK_SCI4ASYNC, CLK_PLL4D1, DIVSCI4ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci5async", CLK_SCI5ASYNC, CLK_PLL4D1, DIVSCI5ASYNC,
+ dtable_24_25_30_32),
+
+ /* Core output clk */
+ DEF_DIV("CA55C0", R9A09G077_CLK_CA55C0, CLK_SEL_CLK_PLL0, DIVCA55C0,
+ dtable_1_2),
+ DEF_DIV("CA55C1", R9A09G077_CLK_CA55C1, CLK_SEL_CLK_PLL0, DIVCA55C1,
+ dtable_1_2),
+ DEF_DIV("CA55C2", R9A09G077_CLK_CA55C2, CLK_SEL_CLK_PLL0, DIVCA55C2,
+ dtable_1_2),
+ DEF_DIV("CA55C3", R9A09G077_CLK_CA55C3, CLK_SEL_CLK_PLL0, DIVCA55C3,
+ dtable_1_2),
+ DEF_DIV("CA55S", R9A09G077_CLK_CA55S, CLK_SEL_CLK_PLL0, DIVCA55S,
+ dtable_1_2),
+ DEF_FIXED("PCLKGPTL", R9A09G077_CLK_PCLKGPTL, CLK_SEL_CLK_PLL1, 2, 1),
+ DEF_FIXED("PCLKH", R9A09G077_CLK_PCLKH, CLK_SEL_CLK_PLL1, 4, 1),
+ DEF_FIXED("PCLKM", R9A09G077_CLK_PCLKM, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("PCLKL", R9A09G077_CLK_PCLKL, CLK_SEL_CLK_PLL1, 16, 1),
+ DEF_FIXED("PCLKAH", R9A09G077_CLK_PCLKAH, CLK_PLL4D1, 6, 1),
+ DEF_FIXED("PCLKAM", R9A09G077_CLK_PCLKAM, CLK_PLL4D1, 12, 1),
+ DEF_FIXED("SDHI_CLKHS", R9A09G077_SDHI_CLKHS, CLK_SEL_CLK_PLL2, 1, 1),
+ DEF_FIXED("USB_CLK", R9A09G077_USB_CLK, CLK_PLL4D1, 48, 1),
+ DEF_FIXED("ETCLKA", R9A09G077_ETCLKA, CLK_SEL_CLK_PLL1, 5, 1),
+ DEF_FIXED("ETCLKB", R9A09G077_ETCLKB, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("ETCLKC", R9A09G077_ETCLKC, CLK_SEL_CLK_PLL1, 10, 1),
+ DEF_FIXED("ETCLKD", R9A09G077_ETCLKD, CLK_SEL_CLK_PLL1, 20, 1),
+ DEF_FIXED("ETCLKE", R9A09G077_ETCLKE, CLK_SEL_CLK_PLL1, 40, 1),
+};
+
+static const struct mssr_mod_clk r9a09g077_mod_clks[] __initconst = {
+ DEF_MOD("sci0fck", 8, CLK_SCI0ASYNC),
+ DEF_MOD("sci1fck", 9, CLK_SCI1ASYNC),
+ DEF_MOD("sci2fck", 10, CLK_SCI2ASYNC),
+ DEF_MOD("sci3fck", 11, CLK_SCI3ASYNC),
+ DEF_MOD("sci4fck", 12, CLK_SCI4ASYNC),
+ DEF_MOD("iic0", 100, R9A09G077_CLK_PCLKL),
+ DEF_MOD("iic1", 101, R9A09G077_CLK_PCLKL),
+ DEF_MOD("gmac0", 400, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethsw", 401, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethss", 403, R9A09G077_CLK_PCLKM),
+ DEF_MOD("usb", 408, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac1", 416, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac2", 417, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sci5fck", 600, CLK_SCI5ASYNC),
+ DEF_MOD("iic2", 601, R9A09G077_CLK_PCLKL),
+ DEF_MOD("sdhi0", 1212, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sdhi1", 1213, R9A09G077_CLK_PCLKAM),
+};
+
+static struct clk * __init
+r9a09g077_cpg_div_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+
+ parent = pub->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ if (core->dtable)
+ clk_hw = clk_hw_register_divider_table(dev, core->name,
+ parent_name, 0,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag,
+ core->dtable,
+ &pub->rmw_lock);
+ else
+ clk_hw = clk_hw_register_divider(dev, core->name,
+ parent_name, 0,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag, &pub->rmw_lock);
+
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_mux_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->mux_flags, &pub->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct cpg_mssr_pub *pub)
+{
+ u32 offset = GET_REG_OFFSET(core->conf);
+ void __iomem *base = RZT2H_REG_BLOCK(offset) ? pub->base1 : pub->base0;
+ void __iomem *addr = base + RZT2H_REG_OFFSET(offset);
+
+ switch (core->type) {
+ case CLK_TYPE_RZT2H_DIV:
+ return r9a09g077_cpg_div_clk_register(dev, core, addr, pub);
+ case CLK_TYPE_RZT2H_MUX:
+ return r9a09g077_cpg_mux_clk_register(dev, core, addr, pub);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+const struct cpg_mssr_info r9a09g077_cpg_mssr_info = {
+ /* Core Clocks */
+ .core_clks = r9a09g077_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g077_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g077_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g077_mod_clks),
+ .num_hw_mod_clks = 14 * 32,
+
+ .reg_layout = CLK_REG_LAYOUT_RZ_T2H,
+ .cpg_clk_register = r9a09g077_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index 42b126ea3e13..a45f8e7e9ab6 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -206,4 +206,3 @@ struct clk * __init cpg_rpcd2_clk_register(const char *name,
return clk;
}
-
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index 4c3764972bad..ab34bb8c3e07 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -274,10 +274,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table = NULL;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
const char *parent_name;
unsigned int mult = 1;
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index bdcd4a38d48d..3d4b127fdeaf 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -32,8 +32,7 @@ struct rcar_gen2_cpg_pll_config {
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 20b89eb6c35c..10ae20489df9 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -335,7 +335,6 @@ static u32 cpg_quirks __initdata;
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7796", .revision = "ES1.0",
@@ -346,9 +345,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index bfdc649bdf12..d15a5d1df71c 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -81,8 +81,7 @@ struct rcar_gen3_cpg_pll_config {
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 31aa790fd003..fb9a876aaba5 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -418,9 +418,11 @@ static const struct clk_div_table cpg_rpcsrc_div_table[] = {
struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 717fd148464f..6c8280b37c37 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -78,8 +78,7 @@ struct rcar_gen4_cpg_pll_config {
struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1b421b809796..de1cf7ba45b7 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -27,6 +27,7 @@
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -39,7 +40,6 @@
#define WARN_DEBUG(x) do { } while (0)
#endif
-
/*
 * Module Standby and Software Reset register offsets.
*
@@ -81,6 +81,37 @@ static const u16 mstpcr_for_gen4[] = {
};
/*
+ * Module Stop Control Register (RZ/T2H)
+ * RZ/T2H has two register blocks;
+ * bit 12 is used to differentiate them.
+ */
+
+#define RZT2H_MSTPCR_BLOCK_SHIFT 12
+#define RZT2H_MSTPCR_OFFSET_MASK GENMASK(11, 0)
+#define RZT2H_MSTPCR(block, offset) (((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_MSTPCR_OFFSET_MASK))
+
+#define RZT2H_MSTPCR_BLOCK(x) ((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
+#define RZT2H_MSTPCR_OFFSET(x) ((x) & RZT2H_MSTPCR_OFFSET_MASK)
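+/* e.g. RZT2H_MSTPCR(1, 0x318) = 0x1318: block 1, offset 0x318 (MSTPCRG) */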
+
+static const u16 mstpcr_for_rzt2h[] = {
+ RZT2H_MSTPCR(0, 0x300), /* MSTPCRA */
+ RZT2H_MSTPCR(0, 0x304), /* MSTPCRB */
+ RZT2H_MSTPCR(0, 0x308), /* MSTPCRC */
+ RZT2H_MSTPCR(0, 0x30c), /* MSTPCRD */
+ RZT2H_MSTPCR(0, 0x310), /* MSTPCRE */
+ 0,
+ RZT2H_MSTPCR(1, 0x318), /* MSTPCRG */
+ 0,
+ RZT2H_MSTPCR(1, 0x320), /* MSTPCRI */
+ RZT2H_MSTPCR(0, 0x324), /* MSTPCRJ */
+ RZT2H_MSTPCR(0, 0x328), /* MSTPCRK */
+ RZT2H_MSTPCR(0, 0x32c), /* MSTPCRL */
+ RZT2H_MSTPCR(0, 0x330), /* MSTPCRM */
+ RZT2H_MSTPCR(1, 0x334), /* MSTPCRN */
+};
+
+/*
* Standby Control Register offsets (RZ/A)
* Base address is FRQCR register
*/
@@ -126,16 +157,14 @@ static const u16 srstclr_for_gen4[] = {
* struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
* and Software Reset Private Data
*
+ * @pub: Data passed to clock registration callback
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
- * @base: CPG/MSSR register block base address
* @reg_layout: CPG/MSSR register layout
- * @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @notifiers: Notifier chain to save/restore clock state for system resume
* @status_regs: Pointer to status registers array
* @control_regs: Pointer to control registers array
* @reset_regs: Pointer to reset registers array
@@ -147,20 +176,18 @@ static const u16 srstclr_for_gen4[] = {
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
+ struct cpg_mssr_pub pub;
#ifdef CONFIG_RESET_CONTROLLER
struct reset_controller_dev rcdev;
#endif
struct device *dev;
- void __iomem *base;
enum clk_reg_layout reg_layout;
- spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
- struct raw_notifier_head notifiers;
const u16 *status_regs;
const u16 *control_regs;
const u16 *reset_regs;
@@ -192,6 +219,26 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
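+/*
+ * RZ/T2H MSTPCR "offsets" encode the register block in bit 12
+ * (see RZT2H_MSTPCR()); select base0 or base1 accordingly.
+ */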
+static u32 cpg_rzt2h_mstp_read(struct clk_hw *hw, u16 offset)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ return readl(base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
+static void cpg_rzt2h_mstp_write(struct clk_hw *hw, u16 offset, u32 value)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@@ -205,39 +252,53 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
- enable ? "ON" : "OFF");
- spin_lock_irqsave(&priv->rmw_lock, flags);
+ str_on_off(enable));
+ spin_lock_irqsave(&priv->pub.rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- value = readb(priv->base + priv->control_regs[reg]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writeb(value, priv->base + priv->control_regs[reg]);
+ writeb(value, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
+
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ value = cpg_rzt2h_mstp_read(hw,
+ priv->control_regs[reg]);
+
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+
+ cpg_rzt2h_mstp_write(hw,
+ priv->control_regs[reg],
+ value);
} else {
- value = readl(priv->base + priv->control_regs[reg]);
+ value = readl(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writel(value, priv->base + priv->control_regs[reg]);
+ writel(value, priv->pub.base0 + priv->control_regs[reg]);
}
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
- if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
return 0;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
value, !(value & bitmask), 0, 10);
if (error)
dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
- priv->base + priv->control_regs[reg], bit);
+ priv->pub.base0 + priv->control_regs[reg], bit);
return error;
}
@@ -256,12 +317,16 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
struct mstp_clock *clock = to_mstp_clock(hw);
struct cpg_mssr_priv *priv = clock->priv;
+ unsigned int reg = clock->index / 32;
u32 value;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- value = readb(priv->base + priv->control_regs[clock->index / 32]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
+ else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
+ value = cpg_rzt2h_mstp_read(hw,
+ priv->control_regs[reg]);
else
- value = readl(priv->base + priv->status_regs[clock->index / 32]);
+ value = readl(priv->pub.base0 + priv->status_regs[reg]);
return !(value & BIT(clock->index % 32));
}
@@ -339,11 +404,6 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!core->name) {
- /* Skip NULLified clock */
- return;
- }
-
switch (core->type) {
case CLK_TYPE_IN:
clk = of_clk_get_by_name(priv->np, core->name);
@@ -353,7 +413,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DIV6P1:
case CLK_TYPE_DIV6_RO:
WARN_DEBUG(core->parent >= priv->num_core_clks);
- parent = priv->clks[core->parent];
+ parent = priv->pub.clks[core->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -363,12 +423,12 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (core->type == CLK_TYPE_DIV6_RO)
/* Multiply with the DIV6 register value */
- div *= (readl(priv->base + core->offset) & 0x3f) + 1;
+ div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;
if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
- priv->base + core->offset,
- &priv->notifiers);
+ priv->pub.base0 + core->offset,
+ &priv->pub.notifiers);
} else {
clk = clk_register_fixed_factor(NULL, core->name,
parent_name, 0,
@@ -384,8 +444,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
- priv->clks, priv->base,
- &priv->notifiers);
+ &priv->pub);
else
dev_err(dev, "%s has unsupported core clock type %u\n",
core->name, core->type);
@@ -396,7 +455,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
+ priv->pub.clks[id] = clk;
return;
fail:
@@ -419,14 +478,14 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
WARN_DEBUG(id < priv->num_core_clks);
WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
- WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+ WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);
if (!mod->name) {
/* Skip NULLified clock */
return;
}
- parent = priv->clks[mod->parent];
+ parent = priv->pub.clks[mod->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -628,13 +687,13 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- writel(bitmask, priv->base + priv->reset_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
/* Release module from reset state */
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
return 0;
}
@@ -648,7 +707,7 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + priv->reset_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
return 0;
}
@@ -662,7 +721,7 @@ static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
return 0;
}
@@ -674,7 +733,7 @@ static int cpg_mssr_status(struct reset_controller_dev *rcdev,
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
+ return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
}
static const struct reset_control_ops cpg_mssr_reset_ops = {
@@ -716,7 +775,6 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
}
#endif /* !CONFIG_RESET_CONTROLLER */
-
static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
{
@@ -878,6 +936,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779h0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G077
+ {
+ .compatible = "renesas,r9a09g077-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G087
+ {
+ .compatible = "renesas,r9a09g087-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -901,12 +971,12 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
if (priv->smstpcr_saved[reg].mask)
priv->smstpcr_saved[reg].val =
priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
- readb(priv->base + priv->control_regs[reg]) :
- readl(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]) :
+ readl(priv->pub.base0 + priv->control_regs[reg]);
}
/* Save core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);
return 0;
}
@@ -923,7 +993,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
return 0;
/* Restore core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);
/* Restore module clocks */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
@@ -932,29 +1002,29 @@ static int cpg_mssr_resume_noirq(struct device *dev)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- oldval = readb(priv->base + priv->control_regs[reg]);
+ oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
else
- oldval = readl(priv->base + priv->control_regs[reg]);
+ oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
newval = oldval & ~mask;
newval |= priv->smstpcr_saved[reg].val & mask;
if (newval == oldval)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- writeb(newval, priv->base + priv->control_regs[reg]);
+ writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
continue;
} else
- writel(newval, priv->base + priv->control_regs[reg]);
+ writel(newval, priv->pub.base0 + priv->control_regs[reg]);
/* Wait until enabled clocks are really enabled */
mask &= ~priv->smstpcr_saved[reg].val;
if (!mask)
continue;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
oldval, !(oldval & mask), 0, 10);
if (error)
dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
@@ -981,7 +1051,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
const struct cpg_mssr_info *info)
{
- struct device_node *soc = of_find_node_by_path("/soc");
+ struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
struct device_node *node;
uint32_t args[MAX_PHANDLE_ARGS];
unsigned int *ids = NULL;
@@ -1012,6 +1082,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
int idx;
+ unsigned int *new_ids;
if (it.node != priv->np)
continue;
@@ -1022,11 +1093,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
if (args[0] != CPG_MOD)
continue;
- ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
- if (!ids) {
+ new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+ if (!new_ids) {
of_node_put(it.node);
+ kfree(ids);
return -ENOMEM;
}
+ ids = new_ids;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
@@ -1064,20 +1137,28 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (!priv)
return -ENOMEM;
+ priv->pub.clks = priv->clks;
priv->np = np;
priv->dev = dev;
- spin_lock_init(&priv->rmw_lock);
+ spin_lock_init(&priv->pub.rmw_lock);
- priv->base = of_iomap(np, 0);
- if (!priv->base) {
+ priv->pub.base0 = of_iomap(np, 0);
+ if (!priv->pub.base0) {
error = -ENOMEM;
goto out_err;
}
+ if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->pub.base1 = of_iomap(np, 1);
+ if (!priv->pub.base1) {
+ error = -ENOMEM;
+ goto out_err;
+ }
+ }
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
- RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
+ RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
priv->reg_layout = info->reg_layout;
if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
priv->status_regs = mstpsr;
@@ -1086,6 +1167,8 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->reset_clear_regs = srstclr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
priv->control_regs = stbcr;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->control_regs = mstpcr_for_rzt2h;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
priv->status_regs = mstpsr_for_gen4;
priv->control_regs = mstpcr_for_gen4;
@@ -1097,7 +1180,7 @@ static int __init cpg_mssr_common_init(struct device *dev,
}
for (i = 0; i < nclks; i++)
- priv->clks[i] = ERR_PTR(-ENOENT);
+ priv->pub.clks[i] = ERR_PTR(-ENOENT);
error = cpg_mssr_reserved_init(priv, info);
if (error)
@@ -1114,8 +1197,10 @@ static int __init cpg_mssr_common_init(struct device *dev,
reserve_err:
cpg_mssr_reserved_exit(priv);
out_err:
- if (priv->base)
- iounmap(priv->base);
+ if (priv->pub.base0)
+ iounmap(priv->pub.base0);
+ if (priv->pub.base1)
+ iounmap(priv->pub.base1);
kfree(priv);
return error;
@@ -1180,7 +1265,8 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
goto reserve_exit;
/* Reset Controller not supported for Standby Control SoCs */
- if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
goto reserve_exit;
error = cpg_mssr_reset_controller_register(priv);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index a1d6e0cbcff9..ad11ab5f0069 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -8,6 +8,8 @@
#ifndef __CLK_RENESAS_CPG_MSSR_H__
#define __CLK_RENESAS_CPG_MSSR_H__
+#include <linux/notifier.h>
+
/*
* Definitions of CPG Core Clocks
*
@@ -27,6 +29,31 @@ struct cpg_core_clk {
unsigned int div;
unsigned int mult;
unsigned int offset;
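+	/* a core clock uses either parent_names (mux) or dtable (divider), hence the union */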
+ union {
+ const char * const *parent_names;
+ const struct clk_div_table *dtable;
+ };
+ u32 conf;
+ u16 flag;
+ u8 mux_flags;
+ u8 num_parents;
+};
+
+/**
+ * struct cpg_mssr_pub - data shared with device-specific clk registration code
+ *
+ * @base0: CPG/MSSR register block base0 address
+ * @base1: CPG/MSSR register block base1 address
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @rmw_lock: protects RMW register accesses
+ * @clks: pointer to the array of all core and module clocks
+ */
+struct cpg_mssr_pub {
+ void __iomem *base0;
+ void __iomem *base1;
+ struct raw_notifier_head notifiers;
+ spinlock_t rmw_lock;
+ struct clk **clks;
};
enum clk_types {
@@ -89,6 +116,7 @@ enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
CLK_REG_LAYOUT_RCAR_GEN4,
+ CLK_REG_LAYOUT_RZ_T2H,
};
/**
@@ -153,8 +181,7 @@ struct cpg_mssr_info {
struct clk *(*cpg_clk_register)(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
};
extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
@@ -181,6 +208,7 @@ extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779h0_cpg_mssr_info;
+extern const struct cpg_mssr_info r9a09g077_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 88bf39e8c79c..07909e80bae2 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -11,10 +11,13 @@
* Copyright (C) 2015 Renesas Electronics Corp.
*/
+#include <linux/atomic.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -27,6 +30,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -51,6 +55,7 @@
#define RZG3S_DIV_M GENMASK(25, 22)
#define RZG3S_DIV_NI GENMASK(21, 13)
#define RZG3S_DIV_NF GENMASK(12, 1)
+#define RZG3S_SEL_PLL BIT(0)
#define CLK_ON_R(reg) (reg)
#define CLK_MON_R(reg) (0x180 + (reg))
@@ -60,11 +65,15 @@
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
+#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff)
#define CPG_WEN_BIT BIT(16)
#define MAX_VCLK_FREQ (148500000)
+#define MSTOP_OFF(conf) FIELD_GET(GENMASK(31, 16), (conf))
+#define MSTOP_MASK(conf) FIELD_GET(GENMASK(15, 0), (conf))
+
/**
* struct clk_hw_data - clock hardware data
* @hw: clock hw
@@ -139,6 +148,7 @@ struct rzg2l_pll5_mux_dsi_div_param {
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @info: Pointer to platform data
+ * @genpd: PM domain
* @mux_dsi_div_params: pll5 mux and dsi div parameters
*/
struct rzg2l_cpg_priv {
@@ -155,6 +165,8 @@ struct rzg2l_cpg_priv {
const struct rzg2l_cpg_info *info;
+ struct generic_pm_domain genpd;
+
struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
@@ -548,7 +560,7 @@ static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
unsigned long rate)
{
- unsigned long foutpostdiv_rate;
+ unsigned long foutpostdiv_rate, foutvco_rate;
params->pl5_intin = rate / MEGA;
params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
@@ -557,10 +569,11 @@ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
params->pl5_postdiv2 = 1;
params->pl5_spread = 0x16;
- foutpostdiv_rate =
- EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
- ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
- (params->pl5_postdiv1 * params->pl5_postdiv2);
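+	/*
+	 * FOUTVCO = EXTAL / REFDIV * (INTIN + FRACIN / 2^24)
+	 * FOUTPOSTDIV = FOUTVCO / (POSTDIV1 * POSTDIV2)
+	 */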
+ foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
+ (params->pl5_intin << 24) + params->pl5_fracin),
+ params->pl5_refdiv) >> 24;
+ foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
+ params->pl5_postdiv1 * params->pl5_postdiv2);
return foutpostdiv_rate;
}
@@ -811,11 +824,10 @@ static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
return pll5_rate;
}
-static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rzg2l_cpg_sipll5_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
@@ -889,7 +901,7 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
- .round_rate = rzg2l_cpg_sipll5_round_rate,
+ .determine_rate = rzg2l_cpg_sipll5_determine_rate,
.set_rate = rzg2l_cpg_sipll5_set_rate,
};
@@ -942,6 +954,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct pll_clk {
struct clk_hw hw;
+ unsigned long default_rate;
unsigned int conf;
unsigned int type;
void __iomem *base;
@@ -979,12 +992,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzg2l_cpg_priv *priv = pll_clk->priv;
- u32 nir, nfr, mr, pr, val;
+ u32 nir, nfr, mr, pr, val, setting;
u64 rate;
if (pll_clk->type != CLK_TYPE_G3S_PLL)
return parent_rate;
+ setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
+ if (setting) {
+ val = readl(priv->base + setting);
+ if (val & RZG3S_SEL_PLL)
+ return pll_clk->default_rate;
+ }
+
val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
@@ -1037,6 +1057,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
+ pll_clk->default_rate = core->default_rate;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -1104,11 +1125,6 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!core->name) {
- /* Skip NULLified clock */
- return;
- }
-
switch (core->type) {
case CLK_TYPE_IN:
clk = of_clk_get_by_name(priv->dev->of_node, core->name);
@@ -1174,29 +1190,146 @@ fail:
}
/**
- * struct mstp_clock - MSTP gating clock
+ * struct mstop - MSTOP specific data structure
+ * @usecnt: Usage counter for MSTOP settings (when zero the settings
+ * are applied to the register)
+ * @conf: MSTOP configuration (register offset, setup bits)
+ */
+struct mstop {
+ atomic_t usecnt;
+ u32 conf;
+};
+
+/**
+ * struct mod_clock - Module clock
*
* @hw: handle between common and hardware-specific interfaces
+ * @priv: CPG/MSTP private data
+ * @sibling: pointer to the other coupled clock
+ * @mstop: MSTOP configuration
+ * @shared_mstop_clks: clocks sharing the MSTOP with this clock
* @off: register offset
* @bit: ON/MON bit
+ * @num_shared_mstop_clks: number of clocks sharing the MSTOP with this clock
* @enabled: soft state of the clock, if it is coupled with another clock
- * @priv: CPG/MSTP private data
- * @sibling: pointer to the other coupled clock
*/
-struct mstp_clock {
+struct mod_clock {
struct clk_hw hw;
+ struct rzg2l_cpg_priv *priv;
+ struct mod_clock *sibling;
+ struct mstop *mstop;
+ struct mod_clock **shared_mstop_clks;
u16 off;
u8 bit;
+ u8 num_shared_mstop_clks;
bool enabled;
- struct rzg2l_cpg_priv *priv;
- struct mstp_clock *sibling;
};
-#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
+
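+/* Iterate over all registered module clocks, skipping unpopulated (-ENOENT) slots */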
+#define for_each_mod_clock(mod_clock, hw, priv) \
+ for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
+ if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
+ continue; \
+ else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
+ ((mod_clock) = to_mod_clock(hw)))
+
+/* Must be called with priv->rmw_lock held to avoid concurrent access to mstop->usecnt. */
+static void rzg2l_mod_clock_module_set_state(struct mod_clock *clock,
+ bool standby)
+{
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ struct mstop *mstop = clock->mstop;
+ bool update = false;
+ u32 value;
+
+ if (!mstop)
+ return;
+
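+	/* the upper 16 bits of an MSTOP register act as a write mask for the lower 16 bits */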
+ value = MSTOP_MASK(mstop->conf) << 16;
+
+ if (standby) {
+ unsigned int criticals = 0;
+
+ for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
+ struct mod_clock *clk = clock->shared_mstop_clks[i];
+
+ if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
+ criticals++;
+ }
+
+ if (!clock->num_shared_mstop_clks &&
+ clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
+ criticals++;
+
+ /*
+ * If this is a shared MSTOP and it is shared with critical clocks,
+ * and the system boots up with this clock enabled but no driver
+	 * uses it, the CCF will disable it (as it is unused). As we don't
+	 * increment the reference counter for it at registration (to avoid
+	 * messing with clocks enabled at probe but later used by drivers),
+	 * do not set the MSTOP here either if it is shared with critical
+ * clocks and ref counted only by those critical clocks.
+ */
+ if (criticals && criticals == atomic_read(&mstop->usecnt))
+ return;
+
+ value |= MSTOP_MASK(mstop->conf);
+
+ /* Allow updates on probe when usecnt = 0. */
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ else
+ update = atomic_dec_and_test(&mstop->usecnt);
+ } else {
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ atomic_inc(&mstop->usecnt);
+ }
+
+ if (update)
+ writel(value, priv->base + MSTOP_OFF(mstop->conf));
+}
+
+static int rzg2l_mod_clock_mstop_show(struct seq_file *s, void *what)
+{
+ struct rzg2l_cpg_priv *priv = s->private;
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "clk_name", "cnt", "cnt", "off", "val", "shared");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "--------", "-----", "-----", "-----", "------", "------");
+
+ for_each_mod_clock(clk, hw, priv) {
+ u32 val;
+
+ if (!clk->mstop)
+ continue;
+
+ val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
+ MSTOP_MASK(clk->mstop->conf);
+
+ seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
+ __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
+ MSTOP_OFF(clk->mstop->conf), val);
+
+ for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
+ seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rzg2l_mod_clock_mstop);
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
unsigned int reg = clock->off;
struct device *dev = priv->dev;
@@ -1210,13 +1343,21 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
}
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
value = bitmask << 16;
if (enable)
value |= bitmask;
- writel(value, priv->base + CLK_ON_R(reg));
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (enable) {
+ writel(value, priv->base + CLK_ON_R(reg));
+ rzg2l_mod_clock_module_set_state(clock, false);
+ } else {
+ rzg2l_mod_clock_module_set_state(clock, true);
+ writel(value, priv->base + CLK_ON_R(reg));
+ }
+ }
if (!enable)
return 0;
@@ -1227,15 +1368,15 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
value & bitmask, 0, 10);
if (error)
- dev_err(dev, "Failed to enable CLK_ON %p\n",
- priv->base + CLK_ON_R(reg));
+ dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
+ CLK_ON_R(reg), hw->clk);
return error;
}
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1255,7 +1396,7 @@ static int rzg2l_mod_clock_enable(struct clk_hw *hw)
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1275,7 +1416,7 @@ static void rzg2l_mod_clock_disable(struct clk_hw *hw)
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
u32 bitmask = BIT(clock->bit);
u32 value;
@@ -1302,34 +1443,104 @@ static const struct clk_ops rzg2l_mod_clock_ops = {
.is_enabled = rzg2l_mod_clock_is_enabled,
};
-static struct mstp_clock
-*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
+static struct mod_clock
+*rzg2l_mod_clock_get_sibling(struct mod_clock *clock,
struct rzg2l_cpg_priv *priv)
{
+ struct mod_clock *clk;
struct clk_hw *hw;
- unsigned int i;
- for (i = 0; i < priv->num_mod_clks; i++) {
- struct mstp_clock *clk;
+ for_each_mod_clock(clk, hw, priv) {
+ if (clock->off == clk->off && clock->bit == clk->bit)
+ return clk;
+ }
+
+ return NULL;
+}
+
+static struct mstop *rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv *priv, u32 conf)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
- if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
continue;
- hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
- clk = to_mod_clock(hw);
- if (clock->off == clk->off && clock->bit == clk->bit)
- return clk;
+ if (clk->mstop->conf == conf)
+ return clk->mstop;
}
return NULL;
}
+static void rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv *priv)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
+ continue;
+
+ /*
+		 * Out of reset, all modules are enabled. Set the module state in
+		 * case the associated clocks are disabled at probe; otherwise the
+		 * module is left in an invalid HW state.
+ */
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (!rzg2l_mod_clock_is_enabled(&clk->hw))
+ rzg2l_mod_clock_module_set_state(clk, true);
+ }
+ }
+}
+
+static int rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv *priv,
+ struct mod_clock *clock)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ if (!clock->mstop)
+ return 0;
+
+ for_each_mod_clock(clk, hw, priv) {
+ int num_shared_mstop_clks, incr = 1;
+ struct mod_clock **new_clks;
+
+ if (clk->mstop != clock->mstop)
+ continue;
+
+ num_shared_mstop_clks = clk->num_shared_mstop_clks;
+ if (!num_shared_mstop_clks)
+ incr++;
+
+ new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
+ (num_shared_mstop_clks + incr) * sizeof(*new_clks),
+ GFP_KERNEL);
+ if (!new_clks)
+ return -ENOMEM;
+
+ if (!num_shared_mstop_clks)
+ new_clks[num_shared_mstop_clks++] = clk;
+ new_clks[num_shared_mstop_clks++] = clock;
+
+ for (unsigned int i = 0; i < num_shared_mstop_clks; i++) {
+ new_clks[i]->shared_mstop_clks = new_clks;
+ new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
+ }
+ break;
+ }
+
+ return 0;
+}
+
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
const struct rzg2l_cpg_info *info,
struct rzg2l_cpg_priv *priv)
{
- struct mstp_clock *clock = NULL;
+ struct mod_clock *clock = NULL;
struct device *dev = priv->dev;
unsigned int id = mod->id;
struct clk_init_data init;
@@ -1343,11 +1554,6 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!mod->name) {
- /* Skip NULLified clock */
- return;
- }
-
parent = priv->clks[mod->parent];
if (IS_ERR(parent)) {
clk = parent;
@@ -1380,18 +1586,29 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
clock->priv = priv;
clock->hw.init = &init;
+ if (mod->mstop_conf) {
+ struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
+
+ if (!mstop) {
+ mstop = devm_kzalloc(dev, sizeof(*mstop), GFP_KERNEL);
+ if (!mstop) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ mstop->conf = mod->mstop_conf;
+ atomic_set(&mstop->usecnt, 0);
+ }
+ clock->mstop = mstop;
+ }
+
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
clk = ERR_PTR(ret);
goto fail;
}
- clk = clock->hw.clk;
- dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
-
if (mod->is_coupled) {
- struct mstp_clock *sibling;
+ struct mod_clock *sibling;
clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
sibling = rzg2l_mod_clock_get_sibling(clock, priv);
@@ -1401,6 +1618,17 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
}
}
+ /* Keep this before priv->clks[id] is updated. */
+ ret = rzg2l_mod_clock_update_shared_mstop_clks(priv, clock);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto fail;
+ }
+
+ clk = clock->hw.clk;
+ dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+
return;
fail:
@@ -1410,8 +1638,8 @@ fail:
#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
-static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
@@ -1419,9 +1647,13 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(info->resets[id].bit);
s8 monbit = info->resets[id].monbit;
u32 value = mask << 16;
+ int ret;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, CLK_RST_R(reg));
+ if (!assert)
+ value |= mask;
writel(value, priv->base + CLK_RST_R(reg));
if (info->has_clk_mon_regs) {
@@ -1435,38 +1667,26 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
return 0;
}
- return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
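+	/* deassert timed out: put the module back into reset */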
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + CLK_RST_R(info->resets[id].off));
+ }
+
+ return ret;
+}
+
+static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzg2l_cpg_assert(rcdev, id, true);
}
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 mask = BIT(info->resets[id].bit);
- s8 monbit = info->resets[id].monbit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
- CLK_RST_R(reg));
-
- writel(value, priv->base + CLK_RST_R(reg));
-
- if (info->has_clk_mon_regs) {
- reg = CLK_MRST_R(reg);
- } else if (monbit >= 0) {
- reg = CPG_RST_MON;
- mask = BIT(monbit);
- } else {
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
- return 0;
- }
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzg2l_cpg_assert(rcdev, id, false);
}
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
@@ -1540,88 +1760,69 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
const struct of_phandle_args *clkspec)
{
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int id;
- unsigned int i;
-
- if (clkspec->args_count != 2)
+ if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
return false;
- if (clkspec->args[0] != CPG_MOD)
- return false;
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ const struct rzg2l_cpg_info *info = priv->info;
+ unsigned int id = clkspec->args[1];
- id = clkspec->args[1] + info->num_total_core_clks;
- for (i = 0; i < info->num_no_pm_mod_clks; i++) {
- if (info->no_pm_mod_clks[i] == id)
+ if (id >= priv->num_mod_clks)
return false;
- }
- return true;
-}
+ id += info->num_total_core_clks;
-/**
- * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
- * @onecell_data: cell data
- * @domains: generic PM domains
- */
-struct rzg2l_cpg_pm_domains {
- struct genpd_onecell_data onecell_data;
- struct generic_pm_domain *domains[];
-};
+ for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
+ if (info->no_pm_mod_clks[i] == id)
+ return false;
+ }
-/**
- * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
- * @genpd: generic PM domain
- * @priv: pointer to CPG private data structure
- * @conf: CPG PM domain configuration info
- * @id: RZ/G2L power domain ID
- */
-struct rzg2l_cpg_pd {
- struct generic_pm_domain genpd;
- struct rzg2l_cpg_priv *priv;
- struct rzg2l_cpg_pm_domain_conf conf;
- u16 id;
-};
+ return true;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_priv *priv = pd->priv;
+ struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
-
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
- if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
- if (once) {
- once = false;
- error = pm_clk_create(dev);
- if (error) {
- of_node_put(clkspec.np);
- goto err;
- }
- }
- clk = of_clk_get_from_provider(&clkspec);
+
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
of_node_put(clkspec.np);
- if (IS_ERR(clk)) {
- error = PTR_ERR(clk);
- goto fail_destroy;
- }
+ continue;
+ }
- error = pm_clk_add_clk(dev, clk);
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
if (error) {
- dev_err(dev, "pm_clk_add_clk failed %d\n",
- error);
- goto fail_put;
+ of_node_put(clkspec.np);
+ goto err;
}
- } else {
- of_node_put(clkspec.np);
}
- i++;
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ goto fail_put;
+ }
}
return 0;
@@ -1643,179 +1844,30 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
static void rzg2l_cpg_genpd_remove(void *data)
{
- struct genpd_onecell_data *celldata = data;
-
- for (unsigned int i = 0; i < celldata->num_domains; i++)
- pm_genpd_remove(celldata->domains[i]);
-}
-
-static void rzg2l_cpg_genpd_remove_simple(void *data)
-{
pm_genpd_remove(data);
}
-static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask << 16, priv->base + mstop.off);
-
- return 0;
-}
-
-static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);
-
- return 0;
-}
-
-static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
-{
- struct dev_power_governor *governor;
-
- pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
- pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
- if (always_on) {
- pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
- governor = &pm_domain_always_on_gov;
- } else {
- pd->genpd.power_on = rzg2l_cpg_power_on;
- pd->genpd.power_off = rzg2l_cpg_power_off;
- governor = &simple_qos_governor;
- }
-
- return pm_genpd_init(&pd->genpd, governor, !always_on);
-}
-
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pd *pd;
- int ret;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = np->name;
- pd->priv = priv;
- ret = rzg2l_cpg_pd_setup(pd, true);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
- if (ret)
- return ret;
-
- return of_genpd_add_provider_simple(np, &pd->genpd);
-}
-
-static struct generic_pm_domain *
-rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
-{
- struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
- struct genpd_onecell_data *genpd = data;
-
- if (spec->args_count != 1)
- return ERR_PTR(-EINVAL);
-
- for (unsigned int i = 0; i < genpd->num_domains; i++) {
- struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
- genpd);
-
- if (pd->id == spec->args[0]) {
- domain = &pd->genpd;
- break;
- }
- }
-
- return domain;
-}
-
-static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
-{
- const struct rzg2l_cpg_info *info = priv->info;
- struct device *dev = priv->dev;
- struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pm_domains *domains;
- struct generic_pm_domain *parent;
- u32 ncells;
+ struct generic_pm_domain *genpd = &priv->genpd;
int ret;
- ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
+ genpd->name = np->name;
+ genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+ GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = rzg2l_cpg_attach_dev;
+ genpd->detach_dev = rzg2l_cpg_detach_dev;
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
if (ret)
return ret;
- /* For backward compatibility. */
- if (!ncells)
- return rzg2l_cpg_add_clk_domain(priv);
-
- domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
- GFP_KERNEL);
- if (!domains)
- return -ENOMEM;
-
- domains->onecell_data.domains = domains->domains;
- domains->onecell_data.num_domains = info->num_pm_domains;
- domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
-
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
if (ret)
return ret;
- for (unsigned int i = 0; i < info->num_pm_domains; i++) {
- bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
- struct rzg2l_cpg_pd *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = info->pm_domains[i].name;
- pd->conf = info->pm_domains[i].conf;
- pd->id = info->pm_domains[i].id;
- pd->priv = priv;
-
- ret = rzg2l_cpg_pd_setup(pd, always_on);
- if (ret)
- return ret;
-
- if (always_on) {
- ret = rzg2l_cpg_power_on(&pd->genpd);
- if (ret)
- return ret;
- }
-
- domains->domains[i] = &pd->genpd;
- /* Parent should be on the very first entry of info->pm_domains[]. */
- if (!i) {
- parent = &pd->genpd;
- continue;
- }
-
- ret = pm_genpd_add_subdomain(parent, &pd->genpd);
- if (ret)
- return ret;
- }
-
- ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
- if (ret)
- return ret;
-
- return 0;
+ return of_genpd_add_provider_simple(np, genpd);
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
@@ -1863,6 +1915,13 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
for (i = 0; i < info->num_mod_clks; i++)
rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
+ /*
+ * Initialize MSTOP after all the clocks have been registered, to avoid
+ * invalid reference counting when multiple clocks (critical and
+ * non-critical) share the same MSTOP.
+ */
+ rzg2l_mod_clock_init_mstop(priv);
+
error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
if (error)
return error;
@@ -1871,7 +1930,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
- error = rzg2l_cpg_add_pm_domains(priv);
+ error = rzg2l_cpg_add_clk_domain(priv);
if (error)
return error;
@@ -1879,9 +1938,23 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
+ debugfs_create_file("mstop", 0444, NULL, priv, &rzg2l_mod_clock_mstop_fops);
return 0;
}
+static int rzg2l_cpg_resume(struct device *dev)
+{
+ struct rzg2l_cpg_priv *priv = dev_get_drvdata(dev);
+
+ rzg2l_mod_clock_init_mstop(priv);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rzg2l_cpg_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, rzg2l_cpg_resume)
+};
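Presumably the MSTOP register state does not survive a system suspend, which is why the noirq resume hook simply re-runs the same initialization as probe. As a hedged sketch of the idea behind rzg2l_mod_clock_init_mstop() (its body is not part of this hunk, so every helper name below is an assumption for illustration, not the driver's actual code):

static void example_init_mstop(struct rzg2l_cpg_priv *priv)
{
	unsigned int i;

	/*
	 * Hedged sketch only: walk the registered module clocks and
	 * re-assert MSTOP for those currently enabled, so the MSTOP
	 * bookkeeping matches the hardware state after resume.
	 */
	for (i = 0; i < priv->num_mod_clks; i++) {
		struct clk *clk = priv->clks[priv->num_core_clks + i];

		if (IS_ERR_OR_NULL(clk))
			continue;

		if (clk_hw_is_enabled(__clk_get_hw(clk)))
			example_mstop_enable(priv, __clk_get_hw(clk)); /* hypothetical */
	}
}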
+
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
{
@@ -1920,6 +1993,7 @@ static struct platform_driver rzg2l_cpg_driver = {
.driver = {
.name = "rzg2l-cpg",
.of_match_table = rzg2l_cpg_match,
+ .pm = pm_sleep_ptr(&rzg2l_cpg_pm_ops),
},
};
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index ecfe7e7ea8a1..55e815be16c8 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -21,6 +21,7 @@
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
#define CPG_PL6_DDIV (0x210)
+#define CPG_PL3C_SDIV (0x214)
#define CPG_CLKSTATUS (0x280)
#define CPG_PL3_SSEL (0x408)
#define CPG_PL6_SSEL (0x414)
@@ -33,6 +34,7 @@
#define CPG_BUS_PERI_COM_MSTOP (0xB6C)
#define CPG_BUS_PERI_CPU_MSTOP (0xB70)
#define CPG_BUS_PERI_DDR_MSTOP (0xB74)
+#define CPG_BUS_PERI_VIDEO_MSTOP (0xB78)
#define CPG_BUS_REG0_MSTOP (0xB7C)
#define CPG_BUS_REG1_MSTOP (0xB80)
#define CPG_BUS_TZCDDR_MSTOP (0xB84)
@@ -70,6 +72,7 @@
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+#define DIVPL3E DDIV_PACK(CPG_PL3C_SDIV, 8, 5)
#define DIVGPU DDIV_PACK(CPG_PL6_DDIV, 0, 2)
#define SEL_PLL_PACK(offset, bitpos, size) \
@@ -80,6 +83,8 @@
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
+#define MSTOP(name, bitmask) ((CPG_##name##_MSTOP) << 16 | (bitmask))
+
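A quick worked example of the packing above, grounded in the macro definition itself: the MSTOP register offset is stored in the upper 16 bits and the bit mask in the lower 16, so MSTOP(BUS_REG1, BIT(3)) expands to (0x0B80 << 16) | 0x0008 = 0x0B800008. The example_* helpers below are illustrative only:

static inline u16 example_mstop_off(u32 conf)
{
	return conf >> 16;			/* register offset */
}

static inline u16 example_mstop_mask(u32 conf)
{
	return conf & GENMASK(15, 0);		/* bit mask */
}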
#define EXTAL_FREQ_IN_MEGA_HZ (24)
/**
@@ -102,7 +107,10 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const u32 *mtable;
const unsigned long invalid_rate;
- const unsigned long max_rate;
+ union {
+ const unsigned long max_rate;
+ const unsigned long default_rate;
+ };
const char * const *parent_names;
notifier_fn_t notifier;
u32 flag;
@@ -144,8 +152,9 @@ enum clk_types {
DEF_TYPE(_name, _id, _type, .parent = _parent)
#define DEF_SAMPLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf)
-#define DEF_G3S_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf)
+#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \
+ DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \
+ .default_rate = _default_rate)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -195,6 +204,7 @@ enum clk_types {
* @name: handle between common and hardware-specific interfaces
* @id: clock index in array containing all Core and Module Clocks
* @parent: id of parent clock
+ * @mstop_conf: MSTOP configuration
* @off: register offset
* @bit: ON/MON bit
* @is_coupled: flag to indicate coupled clock
@@ -203,26 +213,28 @@ struct rzg2l_mod_clk {
const char *name;
unsigned int id;
unsigned int parent;
+ u32 mstop_conf;
u16 off;
u8 bit;
bool is_coupled;
};
-#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \
+#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, _is_coupled) \
{ \
.name = _name, \
.id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
+ .mstop_conf = (_mstop_conf), \
.off = (_off), \
.bit = (_bit), \
.is_coupled = (_is_coupled), \
}
-#define DEF_MOD(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false)
+#define DEF_MOD(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, false)
-#define DEF_COUPLED(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true)
+#define DEF_COUPLED(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, true)
/**
* struct rzg2l_reset - Reset definitions
@@ -247,55 +259,6 @@ struct rzg2l_reset {
DEF_RST_MON(_id, _off, _bit, -1)
/**
- * struct rzg2l_cpg_reg_conf - RZ/G2L register configuration data structure
- * @off: register offset
- * @mask: register mask
- */
-struct rzg2l_cpg_reg_conf {
- u16 off;
- u16 mask;
-};
-
-#define DEF_REG_CONF(_off, _mask) ((struct rzg2l_cpg_reg_conf) { .off = (_off), .mask = (_mask) })
-
-/**
- * struct rzg2l_cpg_pm_domain_conf - PM domain configuration data structure
- * @mstop: MSTOP register configuration
- */
-struct rzg2l_cpg_pm_domain_conf {
- struct rzg2l_cpg_reg_conf mstop;
-};
-
-/**
- * struct rzg2l_cpg_pm_domain_init_data - PM domain init data
- * @name: PM domain name
- * @conf: PM domain configuration
- * @flags: RZG2L PM domain flags (see RZG2L_PD_F_*)
- * @id: PM domain ID (similar to the ones defined in
- * include/dt-bindings/clock/<soc-id>-cpg.h)
- */
-struct rzg2l_cpg_pm_domain_init_data {
- const char * const name;
- struct rzg2l_cpg_pm_domain_conf conf;
- u32 flags;
- u16 id;
-};
-
-#define DEF_PD(_name, _id, _mstop_conf, _flags) \
- { \
- .name = (_name), \
- .id = (_id), \
- .conf = { \
- .mstop = (_mstop_conf), \
- }, \
- .flags = (_flags), \
- }
-
-/* Power domain flags. */
-#define RZG2L_PD_F_ALWAYS_ON BIT(0)
-#define RZG2L_PD_F_NONE (0)
-
-/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
*
* @core_clks: Array of Core Clock definitions
@@ -313,8 +276,6 @@ struct rzg2l_cpg_pm_domain_init_data {
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
- * @pm_domains: PM domains init data array
- * @num_pm_domains: Number of PM domains
* @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers
*/
struct rzg2l_cpg_info {
@@ -341,10 +302,6 @@ struct rzg2l_cpg_info {
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
- /* Power domain. */
- const struct rzg2l_cpg_pm_domain_init_data *pm_domains;
- unsigned int num_pm_domains;
-
bool has_clk_mon_regs;
};
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index b524a9d33610..2197d1d2453a 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -23,7 +23,9 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/refcount.h>
#include <linux/reset-controller.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -40,10 +42,21 @@
#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
-#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_BUS_1_MSTOP (0xd00)
+#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
+
+#define CPG_PLL_STBY(x) ((x))
+#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_CLK1(x) ((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x) ((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x) ((x) + 0x010)
+#define CPG_PLL_MON_RESETB BIT(0)
+#define CPG_PLL_MON_LOCK BIT(4)
#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
@@ -64,6 +77,8 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @ff_mod_status_ops: Fixed Factor Module Status Clock operations
+ * @mstop_count: Array of mstop values
* @rcdev: Reset controller entity
*/
struct rzv2h_cpg_priv {
@@ -78,6 +93,10 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ struct clk_ops *ff_mod_status_ops;
+
+ atomic_t *mstop_count;
+
struct reset_controller_dev rcdev;
};
@@ -85,10 +104,8 @@ struct rzv2h_cpg_priv {
struct pll_clk {
struct rzv2h_cpg_priv *priv;
- void __iomem *base;
struct clk_hw hw;
- unsigned int conf;
- unsigned int type;
+ struct pll pll;
};
#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
@@ -97,19 +114,25 @@ struct pll_clk {
* struct mod_clock - Module clock
*
* @priv: CPG private data
+ * @mstop_data: mstop data relating to module clock
* @hw: handle between common and hardware-specific interfaces
+ * @no_pm: flag to indicate PM is not supported
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
+ unsigned int mstop_data;
struct clk_hw hw;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
@@ -129,27 +152,94 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+/**
+ * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock
+ *
+ * @priv: CPG private data
+ * @conf: fixed mod configuration
+ * @fix: fixed factor clock
+ */
+struct rzv2h_ff_mod_status_clk {
+ struct rzv2h_cpg_priv *priv;
+ struct fixed_mod_conf conf;
+ struct clk_fixed_factor fix;
+};
+
+#define to_rzv2h_ff_mod_status_clk(_hw) \
+ container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)
+
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+ /* Ensure both RESETB and LOCK bits are set */
+ return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
+ u32 stby_offset;
+ u32 mon_offset;
+ u32 val;
+ int ret;
+
+ if (rzv2h_cpg_pll_clk_is_enabled(hw))
+ return 0;
+
+ stby_offset = CPG_PLL_STBY(pll.offset);
+ mon_offset = CPG_PLL_MON(pll.offset);
+
+ writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + stby_offset);
+
+ /*
+ * Ensure the PLL enters normal mode.
+ *
+ * Note: there is no HW information about the worst-case latency.
+ *
+ * Since this latency might depend on the external crystal or PLL rate,
+ * use a generously safe timeout value.
+ */
+ ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+ (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+ stby_offset, hw->clk);
+
+ return ret;
+}
+
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
unsigned int clk1, clk2;
u64 rate;
- if (!PLL_CLK_ACCESS(pll_clk->conf))
+ if (!pll.has_clkn)
return 0;
- clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
- clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+ clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+ clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
- 16 + SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+ CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}
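Spelled out, the recalc above computes (mul_u64_u32_shr() multiplies and then shifts right by 16 + SDIV, and the final division rounds to nearest):

	rate = parent_rate * (MDIV + KDIV / 2^16) / (2^SDIV * PDIV)

For instance, with hypothetical register values MDIV = 200, KDIV = 0, PDIV = 3, SDIV = 0 and a 24 MHz parent, this yields 24 MHz * 200 / 3 = 1.6 GHz.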
static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+ .enable = rzv2h_cpg_pll_clk_enable,
.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
@@ -158,7 +248,6 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct rzv2h_cpg_priv *priv,
const struct clk_ops *ops)
{
- void __iomem *base = priv->base;
struct device *dev = priv->dev;
struct clk_init_data init;
const struct clk *parent;
@@ -182,10 +271,8 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->conf = core->cfg.conf;
- pll_clk->base = base;
+ pll_clk->pll = core->cfg.pll;
pll_clk->priv = priv;
- pll_clk->type = core->type;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -207,15 +294,6 @@ static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -230,6 +308,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
u32 bitmask = BIT(mon);
u32 val;
+ if (mon == CSDIV_NO_MON)
+ return 0;
+
return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
@@ -261,12 +342,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
writel(val, divider->reg);
ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
- if (ret)
- goto ddiv_timeout;
-
- spin_unlock_irqrestore(divider->lock, flags);
-
- return 0;
ddiv_timeout:
spin_unlock_irqrestore(divider->lock, flags);
@@ -275,7 +350,6 @@ ddiv_timeout:
static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
.recalc_rate = rzv2h_ddiv_recalc_rate,
- .round_rate = rzv2h_ddiv_round_rate,
.determine_rate = rzv2h_ddiv_determine_rate,
.set_rate = rzv2h_ddiv_set_rate,
};
@@ -309,9 +383,13 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return ERR_PTR(-ENOMEM);
init.name = core->name;
- init.ops = &rzv2h_ddiv_clk_divider_ops;
+ if (cfg_ddiv.no_rmw)
+ init.ops = &clk_divider_ops;
+ else
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
ddiv->priv = priv;
ddiv->mon = cfg_ddiv.monbit;
@@ -331,6 +409,83 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return div->hw.clk;
}
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct smuxed mux = core->cfg.smux;
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag, priv->base + mux.offset,
+ mux.shift, mux.width,
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static int
+rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw)
+{
+ struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw);
+ struct rzv2h_cpg_priv *priv = fix->priv;
+ u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index);
+ u32 bitmask = BIT(fix->conf.mon_bit);
+ u32 val;
+
+ val = readl(priv->base + offset);
+ return !!(val & bitmask);
+}
+
+static struct clk * __init
+rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct rzv2h_ff_mod_status_clk *clk_hw_data;
+ struct clk_init_data init = { };
+ struct clk_fixed_factor *fix;
+ const struct clk *parent;
+ const char *parent_name;
+ int ret;
+
+ WARN_DEBUG(core->parent >= priv->num_core_clks);
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->cfg.fixed_mod;
+
+ init.name = core->name;
+ init.ops = priv->ff_mod_status_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ fix = &clk_hw_data->fix;
+ fix->hw.init = &init;
+ fix->mult = core->mult;
+ fix->div = core->div;
+
+ ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_data->fix.hw.clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -409,12 +564,29 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
else
clk = clk_hw->clk;
break;
+ case CLK_TYPE_FF_MOD_STATUS:
+ if (!priv->ff_mod_status_ops) {
+ priv->ff_mod_status_ops =
+ devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL);
+ if (!priv->ff_mod_status_ops) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops,
+ sizeof(const struct clk_ops));
+ priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled;
+ }
+ clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv);
+ break;
case CLK_TYPE_PLL:
clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
break;
case CLK_TYPE_DDIV:
clk = rzv2h_cpg_ddiv_clk_register(core, priv);
break;
+ case CLK_TYPE_SMUX:
+ clk = rzv2h_cpg_mux_clk_register(core, priv);
+ break;
default:
goto fail;
}
@@ -431,8 +603,95 @@ fail:
core->name, PTR_ERR(clk));
}
+static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]))
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]) ||
+ atomic_dec_and_test(&mstop[i]))
+ val |= BIT(i) << 16 | BIT(i);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
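Both helpers depend on the write-enable convention visible in the writes above: bits 31:16 of a CPG_BUS_m_MSTOP register are per-bit write enables for bits 15:0, so a single writel() can update selected bits without a read-modify-write. The enable path writes BIT(i) << 16 (value bit clear, releasing the bus stop) and the disable path writes BIT(i) << 16 | BIT(i) (value bit set, stopping the bus). A minimal sketch, with a hypothetical helper name:

/* Update only MSTOP bit 'i'; 'stop' mirrors the two paths above. */
static inline void example_mstop_update(void __iomem *reg, unsigned int i,
					bool stop)
{
	writel(BIT(i) << 16 | (stop ? BIT(i) : 0), reg);
}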
+
+static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw;
+ struct clk *parent_clk;
+ struct clk_mux *mux;
+ u32 val;
+
+ /* This will always succeed, so no need to check for IS_ERR() */
+ parent_clk = clk_get_parent(hw->clk);
+
+ parent_hw = __clk_get_hw(parent_clk);
+ mux = to_clk_mux(parent_hw);
+
+ val = readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+ return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ int mon_index = clock->mon_index;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->ext_clk_mux_index >= 0 &&
+ rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index)
+ mon_index = -1;
+
+ if (mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(mon_index);
+ bitmask = BIT(clock->mon_bit);
+
+ if (!(readl(priv->base + offset) & bitmask))
+ return 0;
+ }
+
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+
+ return readl(priv->base + offset) & bitmask;
+}
+
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
+ bool enabled = rzv2h_mod_clock_is_enabled(hw);
struct mod_clock *clock = to_mod_clock(hw);
unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
struct rzv2h_cpg_priv *priv = clock->priv;
@@ -442,13 +701,22 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
+
+ if (enabled == enable)
+ return 0;
value = bitmask << 16;
- if (enable)
+ if (enable) {
value |= bitmask;
-
- writel(value, priv->base + reg);
+ writel(value, priv->base + reg);
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else {
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
+ writel(value, priv->base + reg);
+ }
if (!enable || clock->mon_index < 0)
return 0;
@@ -458,8 +726,8 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
error = readl_poll_timeout_atomic(priv->base + reg, value,
value & bitmask, 0, 10);
if (error)
- dev_err(dev, "Failed to enable CLK_ON %p\n",
- priv->base + reg);
+ dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
+ GET_CLK_ON_OFFSET(clock->on_index), hw->clk);
return error;
}
@@ -474,24 +742,6 @@ static void rzv2h_mod_clock_disable(struct clk_hw *hw)
rzv2h_mod_clock_endisable(hw, false);
}
-static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
-{
- struct mod_clock *clock = to_mod_clock(hw);
- struct rzv2h_cpg_priv *priv = clock->priv;
- u32 bitmask;
- u32 offset;
-
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
- bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
- }
-
- return readl(priv->base + offset) & bitmask;
-}
-
static const struct clk_ops rzv2h_mod_clock_ops = {
.enable = rzv2h_mod_clock_enable,
.disable = rzv2h_mod_clock_disable,
@@ -541,8 +791,11 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->on_bit = mod->on_bit;
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
+ clock->no_pm = mod->no_pm;
+ clock->ext_clk_mux_index = mod->ext_clk_mux_index;
clock->priv = priv;
clock->hw.init = &init;
+ clock->mstop_data = mod->mstop_data;
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
@@ -552,6 +805,40 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
priv->clks[id] = clock->hw.clk;
+ /*
+ * Ensure the module clocks and MSTOP bits are synchronized when they are
+ * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
+ * turned ON in an earlier boot stage.
+ */
+ if (clock->mstop_data != BUS_MSTOP_NONE &&
+ !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ /*
+ * Critical clocks are turned ON immediately upon registration, and the
+ * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
+ * However, if the critical clocks were already turned ON by the initial
+ * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
+ */
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (atomic_read(&mstop[i]))
+ continue;
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ }
+
return;
fail:
@@ -559,44 +846,46 @@ fail:
mod->name, PTR_ERR(clk));
}
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
u32 mask = BIT(priv->resets[id].reset_bit);
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
+ int ret;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, reg);
+ if (!assert)
+ value |= mask;
writel(value, priv->base + reg);
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
- return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
+ }
+
+ return ret;
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzv2h_cpg_assert(rcdev, id, true);
}
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
- u32 mask = BIT(priv->resets[id].reset_bit);
- u8 monbit = priv->resets[id].mon_bit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
- writel(value, priv->base + reg);
-
- reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
- mask = BIT(monbit);
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzv2h_cpg_assert(rcdev, id, false);
}
static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -668,17 +957,51 @@ struct rzv2h_cpg_pd {
struct generic_pm_domain genpd;
};
+static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzv2h_cpg_priv *priv = pd->priv;
+ unsigned int id = clkspec->args[1];
+ struct mod_clock *clock;
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
+ return false;
+
+ clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));
+
+ return !clock->no_pm;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
+ struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
+ of_node_put(clkspec.np);
+ continue;
+ }
+
if (once) {
once = false;
error = pm_clk_create(dev);
@@ -700,7 +1023,6 @@ static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device
error);
goto fail_put;
}
- i++;
}
return 0;
@@ -786,8 +1108,16 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
- priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
- info->num_resets, GFP_KERNEL);
+ priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
+ sizeof(*priv->mstop_count), GFP_KERNEL);
+ if (!priv->mstop_count)
+ return -ENOMEM;
+
+ /* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
+ priv->mstop_count -= 16;
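A worked check of the rebasing above: CPG_BUS_m_MSTOP numbering starts at m = 1, so after priv->mstop_count -= 16, the expression &priv->mstop_count[mstop_index * 16] used by rzv2h_mod_clock_mstop_enable()/_disable() maps mstop_index = 1 to allocated element 0, mstop_index = 2 to element 16, and so on; rebased indexes 0..15 are never dereferenced.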
+
+ priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets,
+ sizeof(*info->resets), GFP_KERNEL);
if (!priv->resets)
return -ENOMEM;
@@ -827,6 +1157,18 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzv2h_cpg_match[] = {
+#ifdef CONFIG_CLK_R9A09G047
+ {
+ .compatible = "renesas,r9a09g047-cpg",
+ .data = &r9a09g047_cpg_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G056
+ {
+ .compatible = "renesas,r9a09g056-cpg",
+ .data = &r9a09g056_cpg_info,
+ },
+#endif
#ifdef CONFIG_CLK_R9A09G057
{
.compatible = "renesas,r9a09g057-cpg",
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 1bd406c69015..840eed25aeda 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -8,6 +8,29 @@
#ifndef __RENESAS_RZV2H_CPG_H__
#define __RENESAS_RZV2H_CPG_H__
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+/**
+ * struct pll - Structure for PLL configuration
+ *
+ * @offset: STBY register offset
+ * @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ */
+struct pll {
+ unsigned int offset:9;
+ unsigned int has_clkn:1;
+};
+
+#define PLL_PACK(_offset, _has_clkn) \
+ ((struct pll){ \
+ .offset = _offset, \
+ .has_clkn = _has_clkn \
+ })
+
+#define PLLCA55 PLL_PACK(0x60, 1)
+#define PLLGPU PLL_PACK(0x120, 1)
+
/**
* struct ddiv - Structure for dynamic switching divider
*
@@ -15,14 +38,25 @@
* @shift: position of the divider bit
* @width: width of the divider
* @monbit: monitor bit in CPG_CLKSTATUS0 register
+ * @no_rmw: flag to indicate whether the register is updated without
+ * read-modify-write (1: no RMW, 0: RMW)
*/
struct ddiv {
unsigned int offset:11;
unsigned int shift:4;
unsigned int width:4;
unsigned int monbit:5;
+ unsigned int no_rmw:1;
};
+/*
+ * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits,
+ * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value
+ * `0x1f` to indicate that monitor bits are not supported for static divider
+ * clocks.
+ */
+#define CSDIV_NO_MON (0x1f)
+
#define DDIV_PACK(_offset, _shift, _width, _monbit) \
((struct ddiv){ \
.offset = _offset, \
@@ -31,9 +65,91 @@ struct ddiv {
.monbit = _monbit \
})
+#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .monbit = (_monbit), \
+ .no_rmw = 1 \
+ })
+
+/**
+ * struct smuxed - Structure for static muxed clocks
+ *
+ * @offset: register offset
+ * @shift: position of the mux selector field
+ * @width: width of the mux selector field
+ */
+struct smuxed {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+};
+
+#define SMUX_PACK(_offset, _shift, _width) \
+ ((struct smuxed){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ })
+
+/**
+ * struct fixed_mod_conf - Structure for fixed module configuration
+ *
+ * @mon_index: monitor index
+ * @mon_bit: monitor bit
+ */
+struct fixed_mod_conf {
+ u8 mon_index;
+ u8 mon_bit;
+};
+
+#define FIXED_MOD_CONF_PACK(_index, _bit) \
+ ((struct fixed_mod_conf){ \
+ .mon_index = (_index), \
+ .mon_bit = (_bit), \
+ })
+
+#define CPG_SSEL0 (0x300)
+#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
+#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV3 (0x40C)
+#define CPG_CDDIV4 (0x410)
+#define CPG_CSDIV0 (0x500)
+#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
+#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4)
+#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
+#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
+#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
+#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
+#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
+#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
+#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
+#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+
+#define CSDIV0_DIVCTL0 DDIV_PACK(CPG_CSDIV0, 0, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL1 DDIV_PACK(CPG_CSDIV0, 4, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+
+#define SSEL0_SELCTL2 SMUX_PACK(CPG_SSEL0, 8, 1)
+#define SSEL0_SELCTL3 SMUX_PACK(CPG_SSEL0, 12, 1)
+#define SSEL1_SELCTL0 SMUX_PACK(CPG_SSEL1, 0, 1)
+#define SSEL1_SELCTL1 SMUX_PACK(CPG_SSEL1, 4, 1)
+#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
+#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
+
+#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
+#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
+#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
+ FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
+#define BUS_MSTOP_NONE GENMASK(31, 0)
+
+#define FIXED_MOD_CONF_XSPI FIXED_MOD_CONF_PACK(5, 1)
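Tying the BUS_MSTOP() packing to its consumers, which unpack the same two fields with FIELD_GET() in the mstop enable/disable paths; a minimal sketch using hypothetical values:

	u32 data = BUS_MSTOP(3, BIT(0) | BIT(5));		/* 0x00030021 */
	u16 idx  = FIELD_GET(BUS_MSTOP_IDX_MASK, data);		/* 3 */
	u16 mask = FIELD_GET(BUS_MSTOP_BITS_MASK, data);	/* 0x0021 */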
/**
* Definitions of CPG Core Clocks
@@ -53,8 +169,14 @@ struct cpg_core_clk {
union {
unsigned int conf;
struct ddiv ddiv;
+ struct pll pll;
+ struct smuxed smux;
+ struct fixed_mod_conf fixed_mod;
} cfg;
const struct clk_div_table *dtable;
+ const char * const *parent_names;
+ unsigned int num_parents;
+ u8 mux_flags;
u32 flag;
};
@@ -62,70 +184,96 @@ enum clk_types {
/* Generic */
CLK_TYPE_IN, /* External Clock Input */
CLK_TYPE_FF, /* Fixed Factor Clock */
+ CLK_TYPE_FF_MOD_STATUS, /* Fixed Factor Clock which can report the status of module clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+ CLK_TYPE_SMUX, /* Static Mux */
};
-/* BIT(31) indicates if CLK1/2 are accessible or not */
-#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
-#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
-#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
-#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
-
#define DEF_TYPE(_name, _id, _type...) \
{ .name = _name, .id = _id, .type = _type }
#define DEF_BASE(_name, _id, _type, _parent...) \
DEF_TYPE(_name, _id, _type, .parent = _parent)
-#define DEF_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_PLL(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
+#define DEF_FIXED_MOD_STATUS(_name, _id, _parent, _mult, _div, _gate) \
+ DEF_BASE(_name, _id, CLK_TYPE_FF_MOD_STATUS, _parent, .div = _div, \
+ .mult = _mult, .cfg.fixed_mod = _gate)
#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \
.cfg.ddiv = _ddiv_packed, \
.parent = _parent, \
.dtable = _dtable, \
.flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable)
+#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \
+ .cfg.smux = _smux_packed, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .flag = CLK_SET_RATE_PARENT, \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
*
* @name: handle between common and hardware-specific interfaces
+ * @mstop_data: packed data mstop register offset and mask
* @parent: id of parent clock
* @critical: flag to indicate the clock is critical
+ * @no_pm: flag to indicate PM is not supported
* @on_index: control register index
* @on_bit: ON bit
* @mon_index: monitor register index
* @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct rzv2h_mod_clk {
const char *name;
+ u32 mstop_data;
u16 parent;
bool critical;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
-#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, \
+ _onbit, _monindex, _monbit, _ext_clk_mux_index) \
{ \
.name = (_name), \
+ .mstop_data = (_mstop), \
.parent = (_parent), \
.critical = (_critical), \
+ .no_pm = (_no_pm), \
.on_index = (_onindex), \
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
+ .ext_clk_mux_index = (_ext_clk_mux_index), \
}
-#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, -1)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit, -1)
+
+#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit, -1)
-#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD_MUX_EXTERNAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop, \
+ _ext_clk_mux_index) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, \
+ _ext_clk_mux_index)
/**
* struct rzv2h_reset - Reset definitions
@@ -167,6 +315,9 @@ struct rzv2h_reset {
*
* @resets: Array of Module Reset definitions
* @num_resets: Number of entries in resets[]
+ *
+ * @num_mstop_bits: Maximum number of MSTOP bits supported, equivalent to the
+ * number of CPG_BUS_m_MSTOP registers multiplied by 16.
*/
struct rzv2h_cpg_info {
/* Core Clocks */
@@ -183,8 +334,12 @@ struct rzv2h_cpg_info {
/* Resets */
const struct rzv2h_reset *resets;
unsigned int num_resets;
+
+ unsigned int num_mstop_bits;
};
+extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
+extern const struct rzv2h_cpg_info r9a09g056_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 570ad90835d3..febb7944f34b 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -93,6 +93,20 @@ config CLK_RK3399
help
Build the driver for RK3399 Clock Driver.
+config CLK_RK3528
+ bool "Rockchip RK3528 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3528 Clock Controller.
+
+config CLK_RK3562
+ bool "Rockchip RK3562 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3562 Clock Controller.
+
config CLK_RK3568
bool "Rockchip RK3568 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index af2ade54a7ef..c281a9738d9f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,11 +8,13 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
clk-rockchip-y += clk.o
clk-rockchip-y += clk-pll.o
clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-gate-grf.o
clk-rockchip-y += clk-half-divider.o
clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
clk-rockchip-y += clk-muxgrf.o
clk-rockchip-y += clk-ddr.o
+clk-rockchip-y += gate-link.o
clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
@@ -27,6 +29,8 @@ obj-$(CONFIG_CLK_RK3308) += clk-rk3308.o
obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
+obj-$(CONFIG_CLK_RK3528) += clk-rk3528.o rst-rk3528.o
+obj-$(CONFIG_CLK_RK3562) += clk-rk3562.o rst-rk3562.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
obj-$(CONFIG_CLK_RK3576) += clk-rk3576.o rst-rk3576.o
obj-$(CONFIG_CLK_RK3588) += clk-rk3588.o rst-rk3588.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 398a226ad34e..dcc9dcb597ae 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -16,14 +16,14 @@
* of the SoC or supplied after the SoC characterization.
*
* The below implementation of the CPU clock allows the rate changes of the CPU
- * clock and the corresponding rate changes of the auxillary clocks of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
* domain. The platform clock driver provides a clock register configuration
* for each configurable rate which is then used to program the clock hardware
- * registers to acheive a fast co-oridinated rate change for all the CPU domain
+ * registers to achieve a fast coordinated rate change for all the CPU domain
* clocks.
*
* On a rate change request for the CPU clock, the rate change is propagated
- * upto the PLL supplying the clock to the CPU domain clock blocks. While the
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
* CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
* alternate clock source. If required, the alternate clock source is divided
* down in order to keep the output clock rate within the previous OPP limits.
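Condensed into a sequence (restating the comment above, not new driver behavior):

/*
 * 1. Remux the CPU domain clocks to the alternate source so the CPU
 *    keeps running while the PLL is reprogrammed.
 * 2. If required, divide the alternate clock down to stay within the
 *    previous OPP limits.
 * 3. Reprogram the CPU domain PLL to the new rate and wait for relock.
 * 4. Remux the CPU domain clocks back to the PLL output.
 */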
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 86718c54e56b..8866a65982a0 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -55,17 +55,18 @@ rockchip_ddrclk_sip_recalc_rate(struct clk_hw *hw,
return res.a0;
}
-static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int rockchip_ddrclk_sip_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct arm_smccc_res res;
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0,
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, req->rate, 0,
ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE,
0, 0, 0, 0, &res);
- return res.a0;
+ req->rate = res.a0;
+
+ return 0;
}
static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
@@ -83,7 +84,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
static const struct clk_ops rockchip_ddrclk_sip_ops = {
.recalc_rate = rockchip_ddrclk_sip_recalc_rate,
.set_rate = rockchip_ddrclk_sip_set_rate,
- .round_rate = rockchip_ddrclk_sip_round_rate,
+ .determine_rate = rockchip_ddrclk_sip_determine_rate,
.get_parent = rockchip_ddrclk_get_parent,
};
diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c
new file mode 100644
index 000000000000..8122f471f391
--- /dev/null
+++ b/drivers/clk/rockchip/clk-gate-grf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ *
+ * Certain clocks on Rockchip are "gated" behind an additional bit write
+ * in a GRF register, such as the SAI MCLKs on RK3576. This code
+ * implements a clock driver for these types of gates, based on regmaps.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+struct rockchip_gate_grf {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int shift;
+ u8 flags;
+};
+
+#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw)
+
+static int rockchip_gate_grf_enable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0;
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+ int ret;
+
+ ret = regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+
+ return ret;
+}
+
+static void rockchip_gate_grf_disable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : BIT(gate->shift);
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+
+ regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+}
+
+static int rockchip_gate_grf_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE);
+ int ret;
+
+ ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift));
+ if (ret < 0)
+ ret = 0;
+
+ return invert ? 1 - ret : ret;
+}
+
+static const struct clk_ops rockchip_gate_grf_ops = {
+ .enable = rockchip_gate_grf_enable,
+ .disable = rockchip_gate_grf_disable,
+ .is_enabled = rockchip_gate_grf_is_enabled,
+};
+
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg, unsigned int shift,
+ u8 gate_flags)
+{
+ struct rockchip_gate_grf *gate;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (IS_ERR(regmap)) {
+ pr_err("%s: regmap not available\n", __func__);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.num_parents = parent_name ? 1 : 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.ops = &rockchip_gate_grf_ops;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = gate_flags;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
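A hedged usage sketch; the clock names, regmap handle, register offset and shift below are hypothetical placeholders rather than values from a real Rockchip branch table:

	/* Gate a hypothetical "mclk_example" behind bit 4 of a GRF register. */
	struct clk *clk = rockchip_clk_register_gate_grf("mclk_example_gate",
							 "mclk_example", 0,
							 grf, 0x0318, 4,
							 CLK_GATE_HIWORD_MASK);
	if (IS_ERR(clk))
		pr_warn("%s: could not register GRF gate: %ld\n", __func__,
			PTR_ERR(clk));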
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 64f7faad2148..fbc018e8afa4 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -92,17 +92,19 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_half_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
int div;
- div = clk_half_divider_bestdiv(hw, rate, prate,
+ div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate,
divider->width,
divider->flags);
- return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+ req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3);
+
+ return 0;
}
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -141,7 +143,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
- .round_rate = clk_half_divider_round_rate,
+ .determine_rate = clk_half_divider_determine_rate,
.set_rate = clk_half_divider_set_rate,
};
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 91012078681b..8b1292c56863 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -9,11 +9,14 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
+ struct regmap *grf;
+ int grf_reg;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
@@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
if (!rate)
return 0;
- raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ if (mmc_clock->grf)
+ regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value);
+ else
+ raw_value = readl(mmc_clock->reg);
+
+ raw_value >>= mmc_clock->shift;
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
- mmc_clock->reg);
+ raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift);
+
+ if (mmc_clock->grf)
+ regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value);
+ else
+ writel(raw_value, mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -162,11 +174,11 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
/*
* rockchip_mmc_clk is mostly used by mmc controllers to sample
- * the intput data, which expects the fixed phase after the tuning
+ * the input data, which expects the fixed phase after the tuning
* process. However if the clock rate is changed, the phase is stale
* and may break the data sampling. So here we try to restore the phase
* for that case, except that
- * (1) cached_phase is invaild since we inevitably cached it when the
+ * (1) cached_phase is invalid since we inevitably cached it when the
* clock provider be reparented from orphan to its real parent in the
* first place. Otherwise we may mess up the initialization of MMC cards
* since we only set the default sample phase and drive phase later on.
@@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift)
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
@@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->hw.init = &init;
mmc_clock->reg = reg;
+ mmc_clock->grf = grf;
+ mmc_clock->grf_reg = grf_reg;
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index fe76756e592e..86dba3826a77 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -61,21 +61,26 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
return NULL;
}
-static long rockchip_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int rockchip_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
/*
@@ -204,10 +209,12 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
rockchip_rk3036_pll_get_params(pll, &cur);
cur.rate = 0;
- cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
- if (cur_parent == PLL_MODE_NORM) {
- pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
- rate_change_remuxed = 1;
+ if (!(pll->flags & ROCKCHIP_PLL_FIXED_MODE)) {
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
}
/* update pll values */
@@ -350,7 +357,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.recalc_rate = rockchip_rk3036_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3036_pll_set_rate,
.enable = rockchip_rk3036_pll_enable,
.disable = rockchip_rk3036_pll_disable,
@@ -569,7 +576,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3066_pll_set_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
@@ -834,7 +841,7 @@ static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
.recalc_rate = rockchip_rk3399_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3399_pll_set_rate,
.enable = rockchip_rk3399_pll_enable,
.disable = rockchip_rk3399_pll_disable,
@@ -1025,16 +1032,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3588_PLLCON1_PWRDOWN);
}
-static int rockchip_rk3588_pll_init(struct clk_hw *hw)
-{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-
- if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return 0;
-
- return 0;
-}
-
static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
.enable = rockchip_rk3588_pll_enable,
@@ -1044,12 +1041,11 @@ static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3588_pll_set_rate,
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
.is_enabled = rockchip_rk3588_pll_is_enabled,
- .init = rockchip_rk3588_pll_init,
};
/*
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d341ce0708aa..df9330958c83 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
@@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
};
static const char *const rk3036_critical_clocks[] __initconst = {
@@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
+ "ddrphy",
};
static void __init rk3036_clk_init(struct device_node *np)
@@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 30e670c8afba..318c8ddc8a76 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -337,7 +337,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclkin_cif0", "ext_cif0", 0,
RK2928_CLKGATE_CON(3), 3, GFLAGS),
- INVERTER(0, "pclk_cif0", "pclkin_cif0",
+ INVERTER(PCLK_CIF0, "pclk_cif0", "pclkin_cif0",
RK2928_CLKSEL_CON(30), 8, IFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
@@ -595,7 +595,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "pclkin_cif1", "ext_cif1", 0,
RK2928_CLKGATE_CON(3), 4, GFLAGS),
- INVERTER(0, "pclk_cif1", "pclkin_cif1",
+ INVERTER(PCLK_CIF1, "pclk_cif1", "pclkin_cif1",
RK2928_CLKSEL_CON(30), 12, IFLAGS),
COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 90d329216064..0a1e017df7c6 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
- RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 3bb87b27b662..cd5f65b6cdf5 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri",
"gpll_peri",
"hdmiphy_peri" };
PNAME(mux_ref_usb3otg_src_p) = { "xin24m",
- "clk_usb3otg_ref" };
+ "clk_ref_usb3otg_src" };
PNAME(mux_xin24m_32k_p) = { "xin24m",
"clk_rtc32k" };
PNAME(mux_mac2io_src_p) = { "clk_mac2io_src",
@@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 5, GFLAGS),
MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON1, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys),
MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_SOC_CON4, 14, 1, MFLAGS),
+ RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys),
COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0,
RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(26), 8, 2, DFLAGS,
RK3328_CLKGATE_CON(9), 2, GFLAGS),
MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON2, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 04391e4e2874..95e6996adbae 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -526,7 +526,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(3), 1, GFLAGS),
- GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(SCLK_MIPIDSI_24M, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
/*
* Clock-Architecture Diagram 4
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
new file mode 100644
index 000000000000..a5ff64b93f8f
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/rockchip,rk3528-cru.h>
+
+#include "clk.h"
+
+#define RK3528_GRF_SOC_STATUS0 0x1a0
+
+enum rk3528_plls {
+ apll, cpll, gpll, ppll, dpll,
+};
+
+static struct rockchip_pll_rate_table rk3528_pll_rates[] = {
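+	/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */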
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0), /* GPLL */
+ RK3036_PLL_RATE(1092000000, 2, 91, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 42, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 1, 125, 3, 1, 1, 0), /* PPLL */
+ RK3036_PLL_RATE(996000000, 2, 83, 1, 1, 1, 0), /* CPLL */
+ RK3036_PLL_RATE(960000000, 1, 40, 1, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 50, 2, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 24, 3, 2, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK3528_DIV_ACLK_M_CORE_MASK 0x1f
+#define RK3528_DIV_ACLK_M_CORE_SHIFT 11
+#define RK3528_DIV_PCLK_DBG_MASK 0x1f
+#define RK3528_DIV_PCLK_DBG_SHIFT 1
+
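+/*
+ * Rockchip CRU registers keep a write-enable mask in the upper 16 bits,
+ * so HIWORD_UPDATE() pairs each value with its mask and only the
+ * addressed field is changed. These helpers pre-compute the aclk_m_core
+ * and pclk_dbg divider writes applied on each cpuclk rate switch.
+ */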
+#define RK3528_CLKSEL39(_aclk_m_core) \
+{ \
+ .reg = RK3528_CLKSEL_CON(39), \
+ .val = HIWORD_UPDATE(_aclk_m_core, RK3528_DIV_ACLK_M_CORE_MASK, \
+ RK3528_DIV_ACLK_M_CORE_SHIFT), \
+}
+
+#define RK3528_CLKSEL40(_pclk_dbg) \
+{ \
+ .reg = RK3528_CLKSEL_CON(40), \
+ .val = HIWORD_UPDATE(_pclk_dbg, RK3528_DIV_PCLK_DBG_MASK, \
+ RK3528_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define RK3528_CPUCLK_RATE(_prate, _aclk_m_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RK3528_CLKSEL39(_aclk_m_core), \
+ RK3528_CLKSEL40(_pclk_dbg), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3528_cpuclk_rates[] __initdata = {
+ RK3528_CPUCLK_RATE(1896000000, 1, 13),
+ RK3528_CPUCLK_RATE(1800000000, 1, 12),
+ RK3528_CPUCLK_RATE(1704000000, 1, 11),
+ RK3528_CPUCLK_RATE(1608000000, 1, 11),
+ RK3528_CPUCLK_RATE(1512000000, 1, 11),
+ RK3528_CPUCLK_RATE(1416000000, 1, 9),
+ RK3528_CPUCLK_RATE(1296000000, 1, 8),
+ RK3528_CPUCLK_RATE(1200000000, 1, 8),
+ RK3528_CPUCLK_RATE(1188000000, 1, 8),
+ RK3528_CPUCLK_RATE(1092000000, 1, 7),
+ RK3528_CPUCLK_RATE(1008000000, 1, 6),
+ RK3528_CPUCLK_RATE(1000000000, 1, 6),
+ RK3528_CPUCLK_RATE(996000000, 1, 6),
+ RK3528_CPUCLK_RATE(960000000, 1, 6),
+ RK3528_CPUCLK_RATE(912000000, 1, 6),
+ RK3528_CPUCLK_RATE(816000000, 1, 5),
+ RK3528_CPUCLK_RATE(600000000, 1, 3),
+ RK3528_CPUCLK_RATE(594000000, 1, 3),
+ RK3528_CPUCLK_RATE(408000000, 1, 2),
+ RK3528_CPUCLK_RATE(312000000, 1, 2),
+ RK3528_CPUCLK_RATE(216000000, 1, 1),
+ RK3528_CPUCLK_RATE(96000000, 1, 0),
+};
+
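+/*
+ * While apll is being reprogrammed, armclk is temporarily re-parented to
+ * the alternate mux input (gpll, index 1) so the cores keep a stable
+ * clock; mux_core_main then selects apll again once it has relocked.
+ */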
+static const struct rockchip_cpuclk_reg_data rk3528_cpuclk_data = {
+ .core_reg[0] = RK3528_CLKSEL_CON(39),
+ .div_core_shift[0] = 5,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 10,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_armclk) = { "apll", "gpll" };
+PNAME(mux_24m_32k_p) = { "xin24m", "clk_32k" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_100m_50m_24m_p) = { "clk_100m_src", "clk_50m_src",
+ "xin24m" };
+PNAME(mux_150m_100m_24m_p) = { "clk_150m_src", "clk_100m_src",
+ "xin24m" };
+PNAME(mux_200m_100m_24m_p) = { "clk_200m_src", "clk_100m_src",
+ "xin24m" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_200m_src", "clk_100m_src",
+ "clk_50m_src", "xin24m" };
+PNAME(mux_300m_200m_100m_24m_p) = { "clk_300m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_339m_200m_100m_24m_p) = { "clk_339m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_500m_200m_100m_24m_p) = { "clk_500m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_500m_300m_100m_24m_p) = { "clk_500m_src", "clk_300m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_600m_300m_200m_24m_p) = { "clk_600m_src", "clk_300m_src",
+ "clk_200m_src", "xin24m" };
+PNAME(aclk_gpu_p) = { "aclk_gpu_root",
+ "clk_gpu_pvtpll_src" };
+PNAME(aclk_rkvdec_pvtmux_root_p) = { "aclk_rkvdec_root",
+ "clk_rkvdec_pvtpll_src" };
+PNAME(clk_i2c2_p) = { "clk_200m_src", "clk_100m_src",
+ "xin24m", "clk_32k" };
+PNAME(clk_ref_pcie_inner_phy_p) = { "clk_ppll_100m_src", "xin24m" };
+PNAME(dclk_vop0_p) = { "dclk_vop_src0",
+ "clk_hdmiphy_pixel_io" };
+PNAME(mclk_i2s0_2ch_sai_src_p) = { "clk_i2s0_2ch_src",
+ "clk_i2s0_2ch_frac", "xin12m" };
+PNAME(mclk_i2s1_8ch_sai_src_p) = { "clk_i2s1_8ch_src",
+ "clk_i2s1_8ch_frac", "xin12m" };
+PNAME(mclk_i2s2_2ch_sai_src_p) = { "clk_i2s2_2ch_src",
+ "clk_i2s2_2ch_frac", "xin12m" };
+PNAME(mclk_i2s3_8ch_sai_src_p) = { "clk_i2s3_8ch_src",
+ "clk_i2s3_8ch_frac", "xin12m" };
+PNAME(mclk_sai_i2s0_p) = { "mclk_i2s0_2ch_sai_src",
+ "i2s0_mclkin" };
+PNAME(mclk_sai_i2s1_p) = { "mclk_i2s1_8ch_sai_src",
+ "i2s1_mclkin" };
+PNAME(mclk_spdif_src_p) = { "clk_spdif_src", "clk_spdif_frac",
+ "xin12m" };
+PNAME(sclk_uart0_src_p) = { "clk_uart0_src", "clk_uart0_frac",
+ "xin24m" };
+PNAME(sclk_uart1_src_p) = { "clk_uart1_src", "clk_uart1_frac",
+ "xin24m" };
+PNAME(sclk_uart2_src_p) = { "clk_uart2_src", "clk_uart2_frac",
+ "xin24m" };
+PNAME(sclk_uart3_src_p) = { "clk_uart3_src", "clk_uart3_frac",
+ "xin24m" };
+PNAME(sclk_uart4_src_p) = { "clk_uart4_src", "clk_uart4_frac",
+ "xin24m" };
+PNAME(sclk_uart5_src_p) = { "clk_uart5_src", "clk_uart5_frac",
+ "xin24m" };
+PNAME(sclk_uart6_src_p) = { "clk_uart6_src", "clk_uart6_frac",
+ "xin24m" };
+PNAME(sclk_uart7_src_p) = { "clk_uart7_src", "clk_uart7_frac",
+ "xin24m" };
+PNAME(clk_32k_p) = { "xin_osc0_div", "clk_pvtm_32k" };
+
+static struct rockchip_pll_clock rk3528_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(0),
+ RK3528_MODE_CON, 0, 0, 0, rk3528_pll_rates),
+
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(8),
+ RK3528_MODE_CON, 2, 0, 0, rk3528_pll_rates),
+
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(24),
+ RK3528_MODE_CON, 4, 0, 0, rk3528_pll_rates),
+
+ [ppll] = PLL(pll_rk3328, PLL_PPLL, "ppll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PCIE_PLL_CON(32),
+ RK3528_MODE_CON, 6, 0, ROCKCHIP_PLL_FIXED_MODE, rk3528_pll_rates),
+
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3528_DDRPHY_PLL_CON(16),
+ RK3528_DDRPHY_MODE_CON, 0, 0, 0, rk3528_pll_rates),
+};
+
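+/* Mux/divider fields use hiword-masked writes; gate bits disable when set. */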
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
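+/*
+ * The UART, I2S and SPDIF branches below pair an integer pre-divider
+ * with a fractional divider; each fracmux picks between the two and the
+ * fixed oscillator. COMPOSITE_FRACMUX registers the mux together with
+ * its fractional divider so rate changes and reparenting stay in step.
+ */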
+static struct rockchip_clk_branch rk3528_uart0_fracmux __initdata =
+ MUX(CLK_UART0, "clk_uart0", sclk_uart0_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(6), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart1_fracmux __initdata =
+ MUX(CLK_UART1, "clk_uart1", sclk_uart1_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(8), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart2_fracmux __initdata =
+ MUX(CLK_UART2, "clk_uart2", sclk_uart2_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(10), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart3_fracmux __initdata =
+ MUX(CLK_UART3, "clk_uart3", sclk_uart3_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(12), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart4_fracmux __initdata =
+ MUX(CLK_UART4, "clk_uart4", sclk_uart4_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(14), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart5_fracmux __initdata =
+ MUX(CLK_UART5, "clk_uart5", sclk_uart5_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(16), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart6_fracmux __initdata =
+ MUX(CLK_UART6, "clk_uart6", sclk_uart6_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(18), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart7_fracmux __initdata =
+ MUX(CLK_UART7, "clk_uart7", sclk_uart7_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(20), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s0_2ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S0_2CH_SAI_SRC_PRE, "mclk_i2s0_2ch_sai_src_pre", mclk_i2s0_2ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(22), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s1_8ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S1_8CH_SAI_SRC_PRE, "mclk_i2s1_8ch_sai_src_pre", mclk_i2s1_8ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(26), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s2_2ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S2_2CH_SAI_SRC_PRE, "mclk_i2s2_2ch_sai_src_pre", mclk_i2s2_2ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(28), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s3_8ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S3_8CH_SAI_SRC_PRE, "mclk_i2s3_8ch_sai_src_pre", mclk_i2s3_8ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(24), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_spdif_src_fracmux __initdata =
+ MUX(MCLK_SDPDIF_SRC_PRE, "mclk_spdif_src_pre", mclk_spdif_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(32), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
+ /* top */
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE(CLK_MATRIX_250M_SRC, "clk_250m_src", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 15, 1, MFLAGS, 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE(CLK_MATRIX_500M_SRC, "clk_500m_src", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(3), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_50M_SRC, "clk_50m_src", "cpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(0), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_100M_SRC, "clk_100m_src", "cpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(0), 7, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_150M_SRC, "clk_150m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_200M_SRC, "clk_200m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_300M_SRC, "clk_300m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(CLK_MATRIX_339M_SRC, "clk_339m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_400M_SRC, "clk_400m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_600M_SRC, "clk_600m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(4), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE(DCLK_VOP_SRC0, "dclk_vop_src0", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(32), 10, 1, MFLAGS, 2, 8, DFLAGS,
+ RK3528_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(DCLK_VOP_SRC1, "dclk_vop_src1", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 8, DFLAGS,
+ RK3528_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_HSM, "clk_hsm", "xin24m", 0,
+ RK3528_CLKSEL_CON(36), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(3), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART0_SRC, "clk_uart0_src", "gpll", 0,
+ RK3528_CLKSEL_CON(4), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART0_FRAC, "clk_uart0_frac", "clk_uart0_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(5), 0,
+ RK3528_CLKGATE_CON(0), 13, GFLAGS,
+ &rk3528_uart0_fracmux),
+ GATE(SCLK_UART0, "sclk_uart0", "clk_uart0", 0,
+ RK3528_CLKGATE_CON(0), 14, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART1_SRC, "clk_uart1_src", "gpll", 0,
+ RK3528_CLKSEL_CON(6), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART1_FRAC, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(7), 0,
+ RK3528_CLKGATE_CON(1), 0, GFLAGS,
+ &rk3528_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "clk_uart1", 0,
+ RK3528_CLKGATE_CON(1), 1, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART2_SRC, "clk_uart2_src", "gpll", 0,
+ RK3528_CLKSEL_CON(8), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART2_FRAC, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(9), 0,
+ RK3528_CLKGATE_CON(1), 3, GFLAGS,
+ &rk3528_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "clk_uart2", 0,
+ RK3528_CLKGATE_CON(1), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART3_SRC, "clk_uart3_src", "gpll", 0,
+ RK3528_CLKSEL_CON(10), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART3_FRAC, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(11), 0,
+ RK3528_CLKGATE_CON(1), 6, GFLAGS,
+ &rk3528_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "clk_uart3", 0,
+ RK3528_CLKGATE_CON(1), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART4_SRC, "clk_uart4_src", "gpll", 0,
+ RK3528_CLKSEL_CON(12), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART4_FRAC, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(13), 0,
+ RK3528_CLKGATE_CON(1), 9, GFLAGS,
+ &rk3528_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "clk_uart4", 0,
+ RK3528_CLKGATE_CON(1), 10, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART5_SRC, "clk_uart5_src", "gpll", 0,
+ RK3528_CLKSEL_CON(14), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART5_FRAC, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(15), 0,
+ RK3528_CLKGATE_CON(1), 12, GFLAGS,
+ &rk3528_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "clk_uart5", 0,
+ RK3528_CLKGATE_CON(1), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART6_SRC, "clk_uart6_src", "gpll", 0,
+ RK3528_CLKSEL_CON(16), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART6_FRAC, "clk_uart6_frac", "clk_uart6_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(17), 0,
+ RK3528_CLKGATE_CON(1), 15, GFLAGS,
+ &rk3528_uart6_fracmux),
+ GATE(SCLK_UART6, "sclk_uart6", "clk_uart6", 0,
+ RK3528_CLKGATE_CON(2), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART7_SRC, "clk_uart7_src", "gpll", 0,
+ RK3528_CLKSEL_CON(18), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART7_FRAC, "clk_uart7_frac", "clk_uart7_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(19), 0,
+ RK3528_CLKGATE_CON(2), 2, GFLAGS,
+ &rk3528_uart7_fracmux),
+ GATE(SCLK_UART7, "sclk_uart7", "clk_uart7", 0,
+ RK3528_CLKGATE_CON(2), 3, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S0_2CH_SRC, "clk_i2s0_2ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(20), 8, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S0_2CH_FRAC, "clk_i2s0_2ch_frac", "clk_i2s0_2ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(21), 0,
+ RK3528_CLKGATE_CON(2), 6, GFLAGS,
+ &mclk_i2s0_2ch_sai_src_fracmux),
+ GATE(MCLK_I2S0_2CH_SAI_SRC, "mclk_i2s0_2ch_sai_src", "mclk_i2s0_2ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S1_8CH_SRC, "clk_i2s1_8ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(24), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S1_8CH_FRAC, "clk_i2s1_8ch_frac", "clk_i2s1_8ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(25), 0,
+ RK3528_CLKGATE_CON(2), 12, GFLAGS,
+ &mclk_i2s1_8ch_sai_src_fracmux),
+ GATE(MCLK_I2S1_8CH_SAI_SRC, "mclk_i2s1_8ch_sai_src", "mclk_i2s1_8ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S2_2CH_SRC, "clk_i2s2_2ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(26), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S2_2CH_FRAC, "clk_i2s2_2ch_frac", "clk_i2s2_2ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(27), 0,
+ RK3528_CLKGATE_CON(2), 15, GFLAGS,
+ &mclk_i2s2_2ch_sai_src_fracmux),
+ GATE(MCLK_I2S2_2CH_SAI_SRC, "mclk_i2s2_2ch_sai_src", "mclk_i2s2_2ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(3), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S3_8CH_SRC, "clk_i2s3_8ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(22), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S3_8CH_FRAC, "clk_i2s3_8ch_frac", "clk_i2s3_8ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(23), 0,
+ RK3528_CLKGATE_CON(2), 9, GFLAGS,
+ &mclk_i2s3_8ch_sai_src_fracmux),
+ GATE(MCLK_I2S3_8CH_SAI_SRC, "mclk_i2s3_8ch_sai_src", "mclk_i2s3_8ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 10, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_SPDIF_SRC, "clk_spdif_src", "gpll", 0,
+ RK3528_CLKSEL_CON(30), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SPDIF_FRAC, "clk_spdif_frac", "clk_spdif_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(31), 0,
+ RK3528_CLKGATE_CON(3), 5, GFLAGS,
+ &mclk_spdif_src_fracmux),
+ GATE(MCLK_SPDIF_SRC, "mclk_spdif_src", "mclk_spdif_src_pre", 0,
+ RK3528_CLKGATE_CON(3), 6, GFLAGS),
+
+ /* bus */
+ COMPOSITE_NODIV(ACLK_BUS_M_ROOT, "aclk_bus_m_root", mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 7, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_m_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(9), 1, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_BUS_ROOT, "aclk_bus_root", mux_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 4, GFLAGS),
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 4, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 11, GFLAGS),
+ COMPOSITE(ACLK_BUS_VOPGL_ROOT, "aclk_bus_vopgl_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RK3528_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_BUS_H_ROOT, "aclk_bus_h_root", mux_500m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_bus_h_root", 0,
+ RK3528_CLKGATE_CON(10), 14, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 5, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_BUS_ROOT, "pclk_bus_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 6, GFLAGS),
+ GATE(PCLK_DFT2APB, "pclk_dft2apb", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(8), 13, GFLAGS),
+ GATE(PCLK_BUS_GRF, "pclk_bus_grf", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(8), 15, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(PCLK_JDBCK_DAP, "pclk_jdbck_dap", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 12, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(10), 13, GFLAGS),
+ GATE(PCLK_SCR, "pclk_scr", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 10, GFLAGS),
+ GATE(PCLK_INTMUX, "pclk_intmux", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(11), 12, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_PWM0, "clk_pwm0", mux_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(44), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(11), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1, "clk_pwm1", mux_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(44), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(11), 8, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM1, "clk_capture_pwm1", "xin24m", 0,
+ RK3528_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(CLK_CAPTURE_PWM0, "clk_capture_pwm0", "xin24m", 0,
+ RK3528_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(CLK_JDBCK_DAP, "clk_jdbck_dap", "xin24m", 0,
+ RK3528_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(TCLK_WDT_NS, "tclk_wdt_ns", "xin24m", 0,
+ RK3528_CLKGATE_CON(10), 0, GFLAGS),
+
+ GATE(CLK_TIMER_ROOT, "clk_timer_root", "xin24m", 0,
+ RK3528_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 8, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 9, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 10, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 11, GFLAGS),
+
+ /* pmu */
+ GATE(HCLK_PMU_ROOT, "hclk_pmu_root", "clk_100m_src", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_PMU_ROOT, "pclk_pmu_root", "clk_100m_src", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ GATE(FCLK_MCU, "fclk_mcu", "hclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(HCLK_PMU_SRAM, "hclk_pmu_sram", "hclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(5), 4, GFLAGS),
+
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PMU_IOC, "pclk_pmu_ioc", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_PMU_CRU, "pclk_pmu_cru", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(PCLK_PMU_GRF, "pclk_pmu_grf", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_PMU_WDT, "pclk_pmu_wdt", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(0), 13, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_OSCCHK, "pclk_oscchk", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_PMU_MAILBOX, "pclk_pmu_mailbox", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_SCRKEYGEN, "pclk_scrkeygen", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(PCLK_PVTM_PMU, "pclk_pvtm_pmu", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(5), 1, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_I2C2, "clk_i2c2", clk_i2c2_p, 0,
+ RK3528_PMU_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(0), 3, GFLAGS),
+
+ GATE(CLK_REFOUT, "clk_refout", "xin24m", 0,
+ RK3528_PMU_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0,
+ RK3528_PMU_CLKSEL_CON(5), 0, 5, DFLAGS,
+ RK3528_PMU_CLKGATE_CON(5), 0, GFLAGS),
+
+ COMPOSITE_FRAC(XIN_OSC0_DIV, "xin_osc0_div", "xin24m", 0,
+ RK3528_PMU_CLKSEL_CON(1), 0,
+ RK3528_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ /* clk_32k: internal! No path from external osc 32k */
+ MUX(CLK_DEEPSLOW, "clk_32k", clk_32k_p, CLK_IS_CRITICAL,
+ RK3528_PMU_CLKSEL_CON(2), 0, 1, MFLAGS),
+ GATE(RTC_CLK_MCU, "rtc_clk_mcu", "clk_32k", 0,
+ RK3528_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_DDR_FAIL_SAFE, "clk_ddr_fail_safe", "xin24m", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(1), 1, GFLAGS),
+
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_24m_32k_p, 0,
+ RK3528_PMU_CLKSEL_CON(0), 2, 1, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PMU_WDT, "tclk_pmu_wdt", mux_24m_32k_p, 0,
+ RK3528_PMU_CLKSEL_CON(2), 1, 1, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(1), 11, GFLAGS),
+
+ /* core */
+ COMPOSITE_NOMUX(ACLK_M_CORE_BIU, "aclk_m_core", "armclk", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(39), 11, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3528_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_DBG, "pclk_dbg", "armclk", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(40), 1, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3528_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(PCLK_CPU_ROOT, "pclk_cpu_root", "pclk_dbg", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_CORE_GRF, "pclk_core_grf", "pclk_cpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(6), 2, GFLAGS),
+
+ /* ddr */
+ GATE(CLK_DDRC_SRC, "clk_ddrc_src", "dpll", CLK_IS_CRITICAL,
+ RK3528_DDRPHY_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(CLK_DDR_PHY, "clk_ddr_phy", "dpll", CLK_IS_CRITICAL,
+ RK3528_DDRPHY_CLKGATE_CON(0), 1, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_DDR_ROOT, "pclk_ddr_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(90), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(45), 0, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 3, GFLAGS),
+ GATE(PCLK_DDR_HWLP, "pclk_ddr_hwlp", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 8, GFLAGS),
+ GATE(CLK_TIMER_DDRMON, "clk_timer_ddrmon", "xin24m", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 4, GFLAGS),
+
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 2, GFLAGS),
+ GATE(PCLK_DDR_GRF, "pclk_ddr_grf", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 6, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 9, GFLAGS),
+
+ GATE(ACLK_DDR_UPCTL, "aclk_ddr_upctl", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 11, GFLAGS),
+ GATE(CLK_DDR_UPCTL, "clk_ddr_upctl", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 12, GFLAGS),
+ GATE(CLK_DDRMON, "clk_ddrmon", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 13, GFLAGS),
+ GATE(ACLK_DDR_SCRAMBLE, "aclk_ddr_scramble", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 14, GFLAGS),
+ GATE(ACLK_SPLIT, "aclk_split", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 15, GFLAGS),
+
+ /* gpu */
+ COMPOSITE_NODIV(ACLK_GPU_ROOT, "aclk_gpu_root", mux_500m_300m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(76), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(34), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_GPU, "aclk_gpu", aclk_gpu_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(76), 6, 1, MFLAGS,
+ RK3528_CLKGATE_CON(34), 7, GFLAGS),
+ GATE(ACLK_GPU_MALI, "aclk_gpu_mali", "aclk_gpu", 0,
+ RK3528_CLKGATE_CON(34), 8, GFLAGS),
+ COMPOSITE_NODIV(PCLK_GPU_ROOT, "pclk_gpu_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(76), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(34), 2, GFLAGS),
+
+ /* rkvdec */
+ COMPOSITE_NODIV(ACLK_RKVDEC_ROOT_NDFT, "aclk_rkvdec_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(88), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 3, GFLAGS),
+ COMPOSITE_NODIV(HCLK_RKVDEC_ROOT, "hclk_rkvdec_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(88), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 2, GFLAGS),
+ GATE(PCLK_DDRPHY_CRU, "pclk_ddrphy_cru", "hclk_rkvdec_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(44), 4, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_root", 0,
+ RK3528_CLKGATE_CON(44), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_HEVC_CA_RKVDEC, "clk_hevc_ca_rkvdec", mux_600m_300m_200m_24m_p, 0,
+ RK3528_CLKSEL_CON(88), 11, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 11, GFLAGS),
+ MUX(ACLK_RKVDEC_PVTMUX_ROOT, "aclk_rkvdec_pvtmux_root", aclk_rkvdec_pvtmux_root_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(88), 13, 1, MFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pvtmux_root", 0,
+ RK3528_CLKGATE_CON(44), 8, GFLAGS),
+
+ /* rkvenc */
+ COMPOSITE_NODIV(ACLK_RKVENC_ROOT, "aclk_rkvenc_root", mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 1, GFLAGS),
+ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 7, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_RKVENC_ROOT, "pclk_rkvenc_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 2, GFLAGS),
+ GATE(PCLK_RKVENC_IOC, "pclk_rkvenc_ioc", "pclk_rkvenc_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(37), 10, GFLAGS),
+ GATE(PCLK_RKVENC_GRF, "pclk_rkvenc_grf", "pclk_rkvenc_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(38), 6, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 11, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 13, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 2, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 8, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 2, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 4, GFLAGS),
+ GATE(PCLK_CAN0, "pclk_can0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 7, GFLAGS),
+ GATE(PCLK_CAN1, "pclk_can1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 9, GFLAGS),
+
+ COMPOSITE_NODIV(MCLK_PDM, "mclk_pdm", mux_150m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(80), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(38), 1, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(81), 6, 1, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(38), 8, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(81), 13, 1, MFLAGS, 7, 6, DFLAGS,
+ RK3528_CLKGATE_CON(38), 10, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_RKVENC_ROOT, "hclk_rkvenc_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 0, GFLAGS),
+ GATE(HCLK_SAI_I2S1, "hclk_sai_i2s1", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 9, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 14, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 0, GFLAGS),
+ GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 6, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_CORE_RKVENC, "clk_core_rkvenc", mux_300m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C0, "clk_i2c0", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 11, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C1, "clk_i2c1", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 9, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 12, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 13, 2, MFLAGS,
+ RK3528_CLKGATE_CON(37), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI_I2S1, "mclk_sai_i2s1", mclk_sai_i2s1_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(79), 8, 1, MFLAGS,
+ RK3528_CLKGATE_CON(36), 10, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RK3528_CLKGATE_CON(37), 9, GFLAGS),
+
+ /* vo */
+ COMPOSITE_NODIV(HCLK_VO_ROOT, "hclk_vo_root", mux_150m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 1, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 2, GFLAGS),
+ GATE(HCLK_USBHOST, "hclk_usbhost", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 3, GFLAGS),
+ GATE(HCLK_JPEG_DECODER, "hclk_jpeg_decoder", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 7, GFLAGS),
+ GATE(HCLK_VDPP, "hclk_vdpp", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 10, GFLAGS),
+ GATE(HCLK_CVBS, "hclk_cvbs", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 3, GFLAGS),
+ GATE(HCLK_USBHOST_ARB, "hclk_usbhost_arb", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 4, GFLAGS),
+ GATE(HCLK_SAI_I2S3, "hclk_sai_i2s3", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 1, GFLAGS),
+ GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 1, GFLAGS),
+ GATE(HCLK_RGA2E, "hclk_rga2e", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 7, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 9, GFLAGS),
+ GATE(HCLK_HDCP_KEY, "hclk_hdcp_key", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 15, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VO_L_ROOT, "aclk_vo_l_root", mux_150m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(84), 1, 2, MFLAGS,
+ RK3528_CLKGATE_CON(41), 8, GFLAGS),
+ GATE(ACLK_MAC_VO, "aclk_gmac0", "aclk_vo_l_root", 0,
+ RK3528_CLKGATE_CON(41), 10, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_VO_ROOT, "pclk_vo_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 2, GFLAGS),
+ GATE(PCLK_MAC_VO, "pclk_gmac0", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 11, GFLAGS),
+ GATE(PCLK_VCDCPHY, "pclk_vcdcphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 4, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 5, GFLAGS),
+ GATE(PCLK_VO_IOC, "pclk_vo_ioc", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(42), 7, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 7, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 9, GFLAGS),
+ GATE(PCLK_I2C7, "pclk_i2c7", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 11, GFLAGS),
+
+ GATE(PCLK_USBPHY, "pclk_usbphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 13, GFLAGS),
+
+ GATE(PCLK_VO_GRF, "pclk_vo_grf", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(39), 13, GFLAGS),
+ GATE(PCLK_CRU, "pclk_cru", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(39), 15, GFLAGS),
+ GATE(PCLK_HDMI, "pclk_hdmi", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 6, GFLAGS),
+ GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 14, GFLAGS),
+ GATE(PCLK_HDCP, "pclk_hdcp", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 2, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_CORE_VDPP, "clk_core_vdpp", mux_339m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(83), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_RGA2E, "clk_core_rga2e", mux_339m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(83), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 9, GFLAGS),
+ COMPOSITE_NODIV(ACLK_JPEG_ROOT, "aclk_jpeg_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(84), 9, 2, MFLAGS,
+ RK3528_CLKGATE_CON(41), 15, GFLAGS),
+ GATE(ACLK_JPEG_DECODER, "aclk_jpeg_decoder", "aclk_jpeg_root", 0,
+ RK3528_CLKGATE_CON(41), 6, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VO_ROOT, "aclk_vo_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 0, GFLAGS),
+ GATE(ACLK_RGA2E, "aclk_rga2e", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 8, GFLAGS),
+ GATE(ACLK_VDPP, "aclk_vdpp", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 11, GFLAGS),
+ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 0, GFLAGS),
+
+ COMPOSITE(CCLK_SRC_SDMMC0, "cclk_src_sdmmc0", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(85), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(42), 8, GFLAGS),
+
+ COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 15, 1, MFLAGS, 12, 3, DFLAGS,
+ RK3528_CLKGATE_CON(40), 0, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_root", 0,
+ RK3528_CLKGATE_CON(40), 5, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_I2C4, "clk_i2c4", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(85), 13, 2, MFLAGS,
+ RK3528_CLKGATE_CON(43), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C7, "clk_i2c7", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(86), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(43), 12, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 6, GFLAGS),
+
+ GATE(CLK_HDMIHDP0, "clk_hdmihdp0", "xin24m", 0,
+ RK3528_CLKGATE_CON(43), 2, GFLAGS),
+ GATE(CLK_MACPHY, "clk_macphy", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 3, GFLAGS),
+ GATE(CLK_REF_USBPHY, "clk_ref_usbphy", "xin24m", 0,
+ RK3528_CLKGATE_CON(43), 14, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 12, GFLAGS),
+ FACTOR(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "clk_sbpi_otpc_ns",
+ 0, 1, 2),
+
+ GATE(MCLK_SAI_I2S3, "mclk_sai_i2s3", "mclk_i2s3_8ch_sai_src", 0,
+ RK3528_CLKGATE_CON(42), 2, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VOP0, "dclk_vop0", dclk_vop0_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3528_CLKSEL_CON(84), 0, 1, MFLAGS,
+ RK3528_CLKGATE_CON(40), 3, GFLAGS),
+ GATE(DCLK_VOP1, "dclk_vop1", "dclk_vop_src1", CLK_SET_RATE_PARENT,
+ RK3528_CLKGATE_CON(40), 4, GFLAGS),
+ FACTOR_GATE(DCLK_CVBS, "dclk_cvbs", "dclk_vop1", 0, 1, 4,
+ RK3528_CLKGATE_CON(41), 4, GFLAGS),
+ GATE(DCLK_4X_CVBS, "dclk_4x_cvbs", "dclk_vop1", 0,
+ RK3528_CLKGATE_CON(41), 5, GFLAGS),
+
+ FACTOR_GATE(CLK_SFR_HDMI, "clk_sfr_hdmi", "dclk_vop_src1", 0, 1, 4,
+ RK3528_CLKGATE_CON(40), 7, GFLAGS),
+
+ GATE(CLK_SPDIF_HDMI, "clk_spdif_hdmi", "mclk_spdif_src", 0,
+ RK3528_CLKGATE_CON(40), 10, GFLAGS),
+ GATE(MCLK_SPDIF, "mclk_spdif", "mclk_spdif_src", 0,
+ RK3528_CLKGATE_CON(37), 15, GFLAGS),
+ GATE(CLK_CEC_HDMI, "clk_cec_hdmi", "clk_32k", 0,
+ RK3528_CLKGATE_CON(40), 8, GFLAGS),
+
+ /* vpu */
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RK3528_CLKGATE_CON(26), 5, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RK3528_CLKGATE_CON(27), 1, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG, "clk_suspend_usb3otg", "xin24m", 0,
+ RK3528_CLKGATE_CON(33), 4, GFLAGS),
+ GATE(CLK_PCIE_AUX, "clk_pcie_aux", "xin24m", 0,
+ RK3528_CLKGATE_CON(30), 2, GFLAGS),
+ GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 0,
+ RK3528_CLKGATE_CON(26), 3, GFLAGS),
+ GATE(CLK_REF_USB3OTG, "clk_ref_usb3otg", "xin24m", 0,
+ RK3528_CLKGATE_CON(33), 2, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO0, "cclk_src_sdio0", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(72), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 1, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_VPU_ROOT, "pclk_vpu_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(PCLK_VPU_GRF, "pclk_vpu_grf", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(25), 12, GFLAGS),
+ GATE(PCLK_CRU_PCIE, "pclk_cru_pcie", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(25), 11, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 11, GFLAGS),
+ GATE(PCLK_CAN2, "pclk_can2", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 7, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 4, GFLAGS),
+ GATE(PCLK_CAN3, "pclk_can3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 9, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 0, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 11, GFLAGS),
+ GATE(PCLK_ACODEC, "pclk_acodec", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 13, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 13, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 9, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 14, GFLAGS),
+ GATE(PCLK_PCIE, "pclk_pcie", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 1, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 7, GFLAGS),
+ GATE(PCLK_VPU_IOC, "pclk_vpu_ioc", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(26), 8, GFLAGS),
+ GATE(PCLK_PIPE_GRF, "pclk_pipe_grf", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(30), 7, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(28), 1, GFLAGS),
+ GATE(PCLK_PCIE_PHY, "pclk_pcie_phy", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 6, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 15, GFLAGS),
+ GATE(PCLK_MAC_VPU, "pclk_gmac1", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(28), 6, GFLAGS),
+ GATE(PCLK_I2C6, "pclk_i2c6", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(28), 3, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VPU_L_ROOT, "aclk_vpu_l_root", mux_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(60), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 0, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(26), 1, GFLAGS),
+ GATE(ACLK_MAC_VPU, "aclk_gmac1", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(28), 5, GFLAGS),
+ GATE(ACLK_PCIE, "aclk_pcie", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(30), 3, GFLAGS),
+
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(33), 1, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_VPU_ROOT, "hclk_vpu_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 10, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 13, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 0, GFLAGS),
+ GATE(HCLK_SAI_I2S0, "hclk_sai_i2s0", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 9, GFLAGS),
+ GATE(HCLK_SAI_I2S2, "hclk_sai_i2s2", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 11, GFLAGS),
+
+ GATE(HCLK_PCIE_SLV, "hclk_pcie_slv", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 4, GFLAGS),
+ GATE(HCLK_PCIE_DBI, "hclk_pcie_dbi", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 5, GFLAGS),
+ GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 2, GFLAGS),
+ GATE(HCLK_SDIO1, "hclk_sdio1", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_GMAC1_VPU_25M, "clk_gmac1_25m", "ppll", 0,
+ RK3528_CLKSEL_CON(60), 2, 8, DFLAGS,
+ RK3528_CLKGATE_CON(25), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PPLL_125M_MATRIX, "clk_ppll_125m_src", "ppll", 0,
+ RK3528_CLKSEL_CON(60), 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(25), 2, GFLAGS),
+
+ COMPOSITE(CLK_CAN3, "clk_can3", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(73), 13, 1, MFLAGS, 7, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C6, "clk_i2c6", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(64), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 4, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(61), 12, 2, MFLAGS, 6, 6, DFLAGS,
+ RK3528_CLKGATE_CON(25), 14, GFLAGS),
+ COMPOSITE(CCLK_SRC_EMMC, "cclk_src_emmc", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(62), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(25), 15, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VPU_ROOT, "aclk_vpu_root",
+ mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 3, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 9, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(27), 5, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO1, "cclk_src_sdio1", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(72), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 3, GFLAGS),
+ COMPOSITE(CLK_CAN2, "clk_can2", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(73), 6, 1, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(32), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 0, 3, DFLAGS,
+ RK3528_CLKGATE_CON(32), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 8, 5, DFLAGS,
+ RK3528_CLKGATE_CON(33), 0, GFLAGS),
+ COMPOSITE_NODIV(BCLK_EMMC, "bclk_emmc", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(62), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(26), 2, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_ACODEC_TX, "mclk_acodec_tx", "mclk_i2s2_2ch_sai_src", 0,
+ RK3528_CLKSEL_CON(63), 0, 8, DFLAGS,
+ RK3528_CLKGATE_CON(26), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C3, "clk_i2c3", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C5, "clk_i2c5", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 14, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 2, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI_I2S0, "mclk_sai_i2s0", mclk_sai_i2s0_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(62), 10, 1, MFLAGS,
+ RK3528_CLKGATE_CON(26), 10, GFLAGS),
+ GATE(MCLK_SAI_I2S2, "mclk_sai_i2s2", "mclk_i2s2_2ch_sai_src", 0,
+ RK3528_CLKGATE_CON(26), 12, GFLAGS),
+
+ /* pcie */
+ COMPOSITE_NOMUX(CLK_PPLL_100M_MATRIX, "clk_ppll_100m_src", "ppll", CLK_IS_CRITICAL,
+ RK3528_PCIE_CLKSEL_CON(1), 2, 5, DFLAGS,
+ RK3528_PCIE_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PPLL_50M_MATRIX, "clk_ppll_50m_src", "ppll", CLK_IS_CRITICAL,
+ RK3528_PCIE_CLKSEL_CON(1), 7, 5, DFLAGS,
+ RK3528_PCIE_CLKGATE_CON(0), 2, GFLAGS),
+ MUX(CLK_REF_PCIE_INNER_PHY, "clk_ref_pcie_inner_phy", clk_ref_pcie_inner_phy_p, 0,
+ RK3528_PCIE_CLKSEL_CON(1), 13, 1, MFLAGS),
+ FACTOR(CLK_REF_PCIE_100M_PHY, "clk_ref_pcie_100m_phy", "clk_ppll_100m_src",
+ 0, 1, 1),
+
+ /* gmac */
+ DIV(CLK_GMAC0_SRC, "clk_gmac0_src", "gmac0", 0,
+ RK3528_CLKSEL_CON(84), 3, 6, DFLAGS),
+ GATE(CLK_GMAC0_TX, "clk_gmac0_tx", "clk_gmac0_src", 0,
+ RK3528_CLKGATE_CON(41), 13, GFLAGS),
+ GATE(CLK_GMAC0_RX, "clk_gmac0_rx", "clk_gmac0_src", 0,
+ RK3528_CLKGATE_CON(41), 14, GFLAGS),
+ GATE(CLK_GMAC0_RMII_50M, "clk_gmac0_rmii_50m", "gmac0", 0,
+ RK3528_CLKGATE_CON(41), 12, GFLAGS),
+
+ FACTOR(CLK_GMAC1_RMII_VPU, "clk_gmac1_50m", "clk_ppll_50m_src",
+ 0, 1, 1),
+ FACTOR(CLK_GMAC1_SRC_VPU, "clk_gmac1_125m", "clk_ppll_125m_src",
+ 0, 1, 1),
+};
+
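+/*
+ * The MMC drive/sample phase clocks sit in the VO and VPU GRFs rather
+ * than the CRU block, so they are kept in separate tables and only
+ * registered once the matching syscon has been located in probe.
+ */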
+static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(0), 1, grf_type_vo),
+ MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(1), 1, grf_type_vo),
+};
+
+static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(1), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(1), 1, grf_type_vpu),
+};
+
+static int __init clk_rk3528_probe(struct platform_device *pdev)
+{
+ unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches);
+ unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches);
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks, nr_vo_clks, nr_vpu_clks;
+ struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e;
+ struct regmap *vo_grf, *vpu_grf;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return dev_err_probe(dev, PTR_ERR(reg_base),
+ "could not map cru region");
+
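+	/*
+	 * Size the provider for the highest clock ID used by any branch
+	 * table; the GRF-backed tables may define larger IDs than the
+	 * CRU ones, hence the max() below.
+	 */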
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf");
+ if (!IS_ERR(vo_grf)) {
+ nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches,
+ nr_vo_branches) + 1;
+ nr_clks = max(nr_clks, nr_vo_clks);
+ } else if (PTR_ERR(vo_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vo_grf),
+ "failed to look up VO GRF\n");
+ }
+
+ vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf");
+ if (!IS_ERR(vpu_grf)) {
+ nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches,
+ nr_vpu_branches) + 1;
+ nr_clks = max(nr_clks, nr_vpu_clks);
+ } else if (PTR_ERR(vpu_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vpu_grf),
+ "failed to look up VPU GRF\n");
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, nr_clks);
+ if (IS_ERR(ctx))
+ return dev_err_probe(dev, PTR_ERR(ctx),
+ "rockchip clk init failed");
+
+ rockchip_clk_register_plls(ctx, rk3528_pll_clks,
+ ARRAY_SIZE(rk3528_pll_clks),
+ RK3528_GRF_SOC_STATUS0);
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk, ARRAY_SIZE(mux_armclk),
+ &rk3528_cpuclk_data, rk3528_cpuclk_rates,
+ ARRAY_SIZE(rk3528_cpuclk_rates));
+ rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+
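+	/*
+	 * Hash each auxiliary GRF regmap under its grf_type so branch
+	 * registration can look up the right regmap for MMC_GRF entries.
+	 */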
+ if (!IS_ERR(vo_grf)) {
+ vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL);
+ if (!vo_grf_e)
+ return -ENOMEM;
+
+ vo_grf_e->grf = vo_grf;
+ vo_grf_e->type = grf_type_vo;
+ hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
+
+ rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches,
+ nr_vo_branches);
+ }
+
+ if (!IS_ERR(vpu_grf)) {
+ vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL);
+ if (!vpu_grf_e)
+ return -ENOMEM;
+
+ vpu_grf_e->grf = vpu_grf;
+ vpu_grf_e->type = grf_type_vpu;
+ hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
+
+ rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches,
+ nr_vpu_branches);
+ }
+
+ rk3528_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
+}
+
+static const struct of_device_id clk_rk3528_match_table[] = {
+ { .compatible = "rockchip,rk3528-cru" },
+ { /* end */ }
+};
+
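+/*
+ * Probe-only driver: the clock provider is never torn down, so unbind
+ * attributes are suppressed and no remove callback is provided.
+ */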
+static struct platform_driver clk_rk3528_driver = {
+ .driver = {
+ .name = "clk-rk3528",
+ .of_match_table = clk_rk3528_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3528_driver, clk_rk3528_probe);
diff --git a/drivers/clk/rockchip/clk-rk3562.c b/drivers/clk/rockchip/clk-rk3562.c
new file mode 100644
index 000000000000..b8858e5d5530
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3562.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rk3562-cru.h>
+#include "clk.h"
+
+#define RK3562_GRF_SOC_STATUS0 0x430
+#define ROCKCHIP_PLL_ALLOW_POWER_DOWN BIT(2)
+
+enum rk3562_plls {
+ apll, gpll, vpll, hpll, cpll, dpll,
+};
+
+static struct rockchip_pll_rate_table rk3562_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(2208000000, 1, 92, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2184000000, 1, 91, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2160000000, 1, 90, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2088000000, 1, 87, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2064000000, 1, 86, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2040000000, 1, 85, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2016000000, 1, 84, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1992000000, 1, 83, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1920000000, 1, 80, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 132, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 130, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 128, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 126, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 124, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 122, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 120, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 118, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1400000000, 3, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 116, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 114, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 112, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 110, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 108, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 106, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 104, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 92, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 3, 275, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
+ RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+ RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
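+/*
+ * The "dummy_*" entries below are assumed to be placeholder parents for
+ * PLLs this driver does not control directly (a convention seen in other
+ * Rockchip clock drivers); the file itself does not spell this out.
+ */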
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(gpll_cpll_hpll_p) = { "gpll", "cpll", "hpll" };
+PNAME(gpll_cpll_pvtpll_dmyapll_p) = { "gpll", "cpll", "log_pvtpll", "dummy_apll" };
+PNAME(gpll_cpll_hpll_xin24m_p) = { "gpll", "cpll", "hpll", "xin24m" };
+PNAME(gpll_cpll_vpll_dmyhpll_p) = { "gpll", "cpll", "vpll", "dummy_hpll" };
+PNAME(gpll_dmyhpll_vpll_apll_p) = { "gpll", "dummy_hpll", "vpll", "apll" };
+PNAME(gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(gpll_cpll_xin24m_dmyapll_p) = { "gpll", "cpll", "xin24m", "dummy_apll" };
+PNAME(gpll_cpll_xin24m_dmyhpll_p) = { "gpll", "cpll", "xin24m", "dummy_hpll" };
+PNAME(vpll_dmyhpll_gpll_cpll_p) = { "vpll", "dummy_hpll", "gpll", "cpll" };
+PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc_32k" };
+PNAME(mux_50m_xin24m_p) = { "clk_matrix_50m_src", "xin24m" };
+PNAME(mux_100m_50m_xin24m_p) = { "clk_matrix_100m_src", "clk_matrix_50m_src", "xin24m" };
+PNAME(mux_125m_xin24m_p) = { "clk_matrix_125m_src", "xin24m" };
+PNAME(mux_200m_xin24m_32k_p) = { "clk_200m_pmu", "xin24m", "clk_rtc_32k" };
+PNAME(mux_200m_100m_p) = { "clk_matrix_200m_src", "clk_matrix_100m_src" };
+PNAME(mux_200m_100m_50m_xin24m_p) = { "clk_matrix_200m_src", "clk_matrix_100m_src", "clk_matrix_50m_src", "xin24m" };
+PNAME(clk_sai0_p) = { "clk_sai0_src", "clk_sai0_frac", "xin_osc0_half", "mclk_sai0_from_io" };
+PNAME(mclk_sai0_out2io_p) = { "mclk_sai0", "xin_osc0_half" };
+PNAME(clk_sai1_p) = { "clk_sai1_src", "clk_sai1_frac", "xin_osc0_half", "mclk_sai1_from_io" };
+PNAME(mclk_sai1_out2io_p) = { "mclk_sai1", "xin_osc0_half" };
+PNAME(clk_sai2_p) = { "clk_sai2_src", "clk_sai2_frac", "xin_osc0_half", "mclk_sai2_from_io" };
+PNAME(mclk_sai2_out2io_p) = { "mclk_sai2", "xin_osc0_half" };
+PNAME(clk_spdif_p) = { "clk_spdif_src", "clk_spdif_frac", "xin_osc0_half" };
+PNAME(clk_uart1_p) = { "clk_uart1_src", "clk_uart1_frac", "xin24m" };
+PNAME(clk_uart2_p) = { "clk_uart2_src", "clk_uart2_frac", "xin24m" };
+PNAME(clk_uart3_p) = { "clk_uart3_src", "clk_uart3_frac", "xin24m" };
+PNAME(clk_uart4_p) = { "clk_uart4_src", "clk_uart4_frac", "xin24m" };
+PNAME(clk_uart5_p) = { "clk_uart5_src", "clk_uart5_frac", "xin24m" };
+PNAME(clk_uart6_p) = { "clk_uart6_src", "clk_uart6_frac", "xin24m" };
+PNAME(clk_uart7_p) = { "clk_uart7_src", "clk_uart7_frac", "xin24m" };
+PNAME(clk_uart8_p) = { "clk_uart8_src", "clk_uart8_frac", "xin24m" };
+PNAME(clk_uart9_p) = { "clk_uart9_src", "clk_uart9_frac", "xin24m" };
+PNAME(clk_rtc32k_pmu_p) = { "clk_rtc32k_frac", "xin32k", "clk_32k_pvtm" };
+PNAME(clk_pmu1_uart0_p) = { "clk_pmu1_uart0_src", "clk_pmu1_uart0_frac", "xin24m" };
+PNAME(clk_pipephy_ref_p) = { "clk_pipephy_div", "clk_pipephy_xin24m" };
+PNAME(clk_usbphy_ref_p) = { "clk_usb2phy_xin24m", "clk_24m_sscsrc" };
+PNAME(clk_mipidsi_ref_p) = { "clk_mipidsiphy_xin24m", "clk_24m_sscsrc" };
+
+static struct rockchip_pll_clock rk3562_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, RK3562_PLL_CON(0),
+ RK3562_MODE_CON, 0, 0,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ 0, RK3562_PLL_CON(24),
+ RK3562_MODE_CON, 2, 3, 0, rk3562_pll_rates),
+ [vpll] = PLL(pll_rk3328, PLL_VPLL, "vpll", mux_pll_p,
+ 0, RK3562_PLL_CON(32),
+ RK3562_MODE_CON, 6, 4,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [hpll] = PLL(pll_rk3328, PLL_HPLL, "hpll", mux_pll_p,
+ 0, RK3562_PLL_CON(40),
+ RK3562_MODE_CON, 8, 5,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, RK3562_PMU1_PLL_CON(0),
+ RK3562_PMU1_MODE_CON, 0, 2, 0, rk3562_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3562_SUBDDR_PLL_CON(0),
+ RK3562_SUBDDR_MODE_CON, 0, 1, 0, NULL),
+};
+
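+/*
+ * All mux/divider/gate fields live in hiword-mask registers: the upper 16
+ * bits act as a write enable for the lower 16, so field updates need no
+ * read-modify-write cycle.
+ */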
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
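+/*
+ * Each *_fracmux below is the output mux sitting behind a fractional
+ * divider; it selects between the integer source, the fractional output
+ * and, where wired, an external IO clock. The matching COMPOSITE_FRACMUX
+ * entries in rk3562_clk_branches register it together with the divider.
+ */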
+static struct rockchip_clk_branch rk3562_clk_sai0_fracmux __initdata =
+ MUX(CLK_SAI0, "clk_sai0", clk_sai0_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(3), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_sai1_fracmux __initdata =
+ MUX(CLK_SAI1, "clk_sai1", clk_sai1_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(5), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_sai2_fracmux __initdata =
+ MUX(CLK_SAI2, "clk_sai2", clk_sai2_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(8), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_spdif_fracmux __initdata =
+ MUX(CLK_SPDIF, "clk_spdif", clk_spdif_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(15), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart1_fracmux __initdata =
+ MUX(CLK_UART1, "clk_uart1", clk_uart1_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(21), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart2_fracmux __initdata =
+ MUX(CLK_UART2, "clk_uart2", clk_uart2_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(23), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart3_fracmux __initdata =
+ MUX(CLK_UART3, "clk_uart3", clk_uart3_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(25), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart4_fracmux __initdata =
+ MUX(CLK_UART4, "clk_uart4", clk_uart4_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(27), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart5_fracmux __initdata =
+ MUX(CLK_UART5, "clk_uart5", clk_uart5_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(29), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart6_fracmux __initdata =
+ MUX(CLK_UART6, "clk_uart6", clk_uart6_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(31), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart7_fracmux __initdata =
+ MUX(CLK_UART7, "clk_uart7", clk_uart7_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(33), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart8_fracmux __initdata =
+ MUX(CLK_UART8, "clk_uart8", clk_uart8_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(35), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart9_fracmux __initdata =
+ MUX(CLK_UART9, "clk_uart9", clk_uart9_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(37), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_rtc32k_pmu_fracmux __initdata =
+ MUX(CLK_RTC_32K, "clk_rtc_32k", clk_rtc32k_pmu_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3562_PMU0_CLKSEL_CON(1), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_pmu1_uart0_fracmux __initdata =
+ MUX(CLK_PMU1_UART0, "clk_pmu1_uart0", clk_pmu1_uart0_p, CLK_SET_RATE_PARENT,
+ RK3562_PMU1_CLKSEL_CON(2), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* PD_TOP */
+ COMPOSITE(CLK_MATRIX_50M_SRC, "clk_matrix_50m_src", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(0), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE(CLK_MATRIX_100M_SRC, "clk_matrix_100m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE(CLK_MATRIX_125M_SRC, "clk_matrix_125m_src", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(1), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE(CLK_MATRIX_200M_SRC, "clk_matrix_200m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE(CLK_MATRIX_300M_SRC, "clk_matrix_300m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(3), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE(ACLK_TOP, "aclk_top", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(5), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(ACLK_TOP_VIO, "aclk_top_vio", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(5), 15, 1, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(CLK_24M_SSCSRC, "clk_24m_sscsrc", vpll_dmyhpll_gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(6), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE(CLK_CAM0_OUT2IO, "clk_cam0_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(8), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 12, GFLAGS),
+ COMPOSITE(CLK_CAM1_OUT2IO, "clk_cam1_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(8), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(CLK_CAM2_OUT2IO, "clk_cam2_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE(CLK_CAM3_OUT2IO, "clk_cam3_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(9), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 15, GFLAGS),
+ FACTOR(0, "xin_osc0_half", "xin24m", 0, 1, 2),
+
+ /* PD_BUS */
+ COMPOSITE(ACLK_BUS, "aclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(40), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(18), 0, GFLAGS),
+ COMPOSITE(HCLK_BUS, "hclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(40), 15, 1, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(18), 1, GFLAGS),
+ COMPOSITE(PCLK_BUS, "pclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(41), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C, "clk_i2c", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(41), 8, 2, MFLAGS,
+ RK3562_CLKGATE_CON(19), 5, GFLAGS),
+ GATE(CLK_I2C1, "clk_i2c1", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 6, GFLAGS),
+ GATE(CLK_I2C2, "clk_i2c2", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(CLK_I2C3, "clk_i2c3", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 8, GFLAGS),
+ GATE(CLK_I2C4, "clk_i2c4", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(CLK_I2C5, "clk_i2c5", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 10, GFLAGS),
+ COMPOSITE_NODIV(DCLK_BUS_GPIO, "dclk_bus_gpio", mux_xin24m_32k_p, 0,
+ RK3562_CLKSEL_CON(41), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(DCLK_BUS_GPIO3, "dclk_bus_gpio3", "dclk_bus_gpio", 0,
+ RK3562_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(DCLK_BUS_GPIO4, "dclk_bus_gpio4", "dclk_bus_gpio", 0,
+ RK3562_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 3, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 5, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 6, GFLAGS),
+ GATE(PCLK_STIMER, "pclk_stimer", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 7, GFLAGS),
+ GATE(CLK_STIMER0, "clk_stimer0", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(CLK_STIMER1, "clk_stimer1", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(PCLK_WDTNS, "pclk_wdtns", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 0, GFLAGS),
+ GATE(CLK_WDTNS, "clk_wdtns", "xin24m", 0,
+ RK3562_CLKGATE_CON(22), 1, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 4, GFLAGS),
+ GATE(PCLK_INTC, "pclk_intc", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 5, GFLAGS),
+ GATE(ACLK_BUS_GIC400, "aclk_bus_gic400", "aclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 6, GFLAGS),
+ GATE(ACLK_BUS_SPINLOCK, "aclk_bus_spinlock", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(23), 0, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(FCLK_BUS_CM0_CORE, "fclk_bus_cm0_core", "hclk_bus", 0,
+ RK3562_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(CLK_BUS_CM0_RTC, "clk_bus_cm0_rtc", "clk_rtc_32k", 0,
+ RK3562_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(HCLK_ICACHE, "hclk_icache", "hclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(HCLK_DCACHE, "hclk_dcache", "hclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(24), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3562_CLKSEL_CON(43), 0, 11, DFLAGS,
+ RK3562_CLKGATE_CON(24), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m", 0,
+ RK3562_CLKSEL_CON(43), 11, 5, DFLAGS,
+ RK3562_CLKGATE_CON(24), 3, GFLAGS),
+ GATE(PCLK_DFT2APB, "pclk_dft2apb", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(24), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC_VCCIO156, "clk_saradc_vccio156", "xin24m", 0,
+ RK3562_CLKSEL_CON(44), 0, 12, DFLAGS,
+ RK3562_CLKGATE_CON(24), 9, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 0, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_GMAC_125M_CRU_I, "clk_gmac_125m_cru_i", mux_125m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(45), 8, 1, MFLAGS,
+ RK3562_CLKGATE_CON(25), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_GMAC_50M_CRU_I, "clk_gmac_50m_cru_i", mux_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(45), 7, 1, MFLAGS,
+ RK3562_CLKGATE_CON(25), 3, GFLAGS),
+ COMPOSITE(CLK_GMAC_ETH_OUT2IO, "clk_gmac_eth_out2io", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(46), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(PCLK_APB2ASB_VCCIO156, "pclk_apb2asb_vccio156", "pclk_bus", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(PCLK_TO_VCCIO156, "pclk_to_vccio156", "pclk_bus", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(25), 6, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 8, GFLAGS),
+ GATE(PCLK_DSITX, "pclk_dsitx", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 9, GFLAGS),
+ GATE(PCLK_CPU_EMA_DET, "pclk_cpu_ema_det", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(25), 10, GFLAGS),
+ GATE(PCLK_HASH, "pclk_hash", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 11, GFLAGS),
+ GATE(PCLK_TOPCRU, "pclk_topcru", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(25), 15, GFLAGS),
+ GATE(PCLK_ASB2APB_VCCIO156, "pclk_asb2apb_vccio156", "pclk_to_vccio156", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(26), 0, GFLAGS),
+ GATE(PCLK_IOC_VCCIO156, "pclk_ioc_vccio156", "pclk_to_vccio156", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(26), 1, GFLAGS),
+ GATE(PCLK_GPIO3_VCCIO156, "pclk_gpio3_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 2, GFLAGS),
+ GATE(PCLK_GPIO4_VCCIO156, "pclk_gpio4_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 3, GFLAGS),
+ GATE(PCLK_SARADC_VCCIO156, "pclk_saradc_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(PCLK_MAC100, "pclk_mac100", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(27), 0, GFLAGS),
+ GATE(ACLK_MAC100, "aclk_mac100", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(27), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_MAC100_50M_MATRIX, "clk_mac100_50m_matrix", mux_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(47), 7, 1, MFLAGS,
+ RK3562_CLKGATE_CON(27), 2, GFLAGS),
+
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "aclk_core_pre", "scmi_clk_cpu", CLK_IGNORE_UNUSED,
+ RK3562_CLKSEL_CON(11), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3562_CLKGATE_CON(4), 3, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg_pre", "scmi_clk_cpu", CLK_IGNORE_UNUSED,
+ RK3562_CLKSEL_CON(12), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3562_CLKGATE_CON(4), 5, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_CORE, "hclk_core", "gpll", CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(13), 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(0, "pclk_dbg_daplite", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(4), 10, GFLAGS),
+
+ /* PD_DDR */
+ FACTOR_GATE(0, "clk_gpll_mux_to_ddr", "gpll", 0, 1, 4,
+ RK3562_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_DDR, "pclk_ddr", "clk_gpll_mux_to_ddr", CLK_IS_CRITICAL,
+ RK3562_DDR_CLKSEL_CON(1), 8, 5, DFLAGS,
+ RK3562_DDR_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MSCH_BRG_BIU, "clk_msch_brg_biu", "clk_gpll_mux_to_ddr", CLK_IS_CRITICAL,
+ RK3562_DDR_CLKSEL_CON(1), 0, 4, DFLAGS,
+ RK3562_DDR_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_DDR_HWLP, "pclk_ddr_hwlp", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_DDR_UPCTL, "pclk_ddr_upctl", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_DDR_PHY, "pclk_ddr_phy", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_DDR_DFICTL, "pclk_ddr_dfictl", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_DDR_DMA2DDR, "pclk_ddr_dma2ddr", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(TMCLK_DDR_MON, "tmclk_ddr_mon", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(PCLK_DDR_GRF, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_DDR_CRU, "pclk_ddr_cru", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_SUBDDR_CRU, "pclk_subddr_cru", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 4, GFLAGS),
+
+ /* PD_GPU */
+ COMPOSITE(CLK_GPU_PRE, "clk_gpu_pre", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(18), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_GPU_PRE, "aclk_gpu_pre", "clk_gpu_pre", 0,
+ RK3562_CLKSEL_CON(19), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(CLK_GPU, "clk_gpu", "clk_gpu_pre", 0,
+ RK3562_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_GPU_BRG, "clk_gpu_brg", mux_200m_100m_p, 0,
+ RK3562_CLKSEL_CON(19), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(8), 8, GFLAGS),
+
+ /* PD_NPU */
+ COMPOSITE(CLK_NPU_PRE, "clk_npu_pre", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_NPU_PRE, "hclk_npu_pre", "clk_npu_pre", 0,
+ RK3562_CLKSEL_CON(16), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(ACLK_RKNN, "aclk_rknn", "clk_npu_pre", 0,
+ RK3562_CLKGATE_CON(6), 4, GFLAGS),
+ GATE(HCLK_RKNN, "hclk_rknn", "hclk_npu_pre", 0,
+ RK3562_CLKGATE_CON(6), 5, GFLAGS),
+
+ /* PD_PERI */
+ COMPOSITE(ACLK_PERI, "aclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(0), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(HCLK_PERI, "hclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(PCLK_PERI, "pclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(1), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PERICRU, "pclk_pericru", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(CLK_SAI0_SRC, "clk_sai0_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI0_FRAC, "clk_sai0_frac", "clk_sai0_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(2), 0,
+ RK3562_PERI_CLKGATE_CON(2), 2, GFLAGS,
+ &rk3562_clk_sai0_fracmux),
+ GATE(MCLK_SAI0, "mclk_sai0", "clk_sai0", 0,
+ RK3562_PERI_CLKGATE_CON(2), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI0_OUT2IO, "mclk_sai0_out2io", mclk_sai0_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(3), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE(CLK_SAI1_SRC, "clk_sai1_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(3), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI1_FRAC, "clk_sai1_frac", "clk_sai1_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(4), 0,
+ RK3562_PERI_CLKGATE_CON(2), 7, GFLAGS,
+ &rk3562_clk_sai1_fracmux),
+ GATE(MCLK_SAI1, "mclk_sai1", "clk_sai1", 0,
+ RK3562_PERI_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI1_OUT2IO, "mclk_sai1_out2io", mclk_sai1_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(5), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 9, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE(CLK_SAI2_SRC, "clk_sai2_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(6), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI2_FRAC, "clk_sai2_frac", "clk_sai2_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(7), 0,
+ RK3562_PERI_CLKGATE_CON(2), 12, GFLAGS,
+ &rk3562_clk_sai2_fracmux),
+ GATE(MCLK_SAI2, "mclk_sai2", "clk_sai2", 0,
+ RK3562_PERI_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI2_OUT2IO, "mclk_sai2_out2io", mclk_sai2_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(8), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 14, GFLAGS),
+ GATE(HCLK_DSM, "hclk_dsm", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(CLK_DSM, "clk_dsm", "mclk_sai1", 0,
+ RK3562_PERI_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", gpll_cpll_hpll_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(3), 5, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE(CLK_SPDIF_SRC, "clk_spdif_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(13), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(3), 9, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SPDIF_FRAC, "clk_spdif_frac", "clk_spdif_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(14), 0,
+ RK3562_PERI_CLKGATE_CON(3), 10, GFLAGS,
+ &rk3562_clk_spdif_fracmux),
+ GATE(MCLK_SPDIF, "mclk_spdif", "clk_spdif", 0,
+ RK3562_PERI_CLKGATE_CON(3), 11, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE(CCLK_SDMMC0, "cclk_sdmmc0", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 1, GFLAGS),
+ MMC(SCLK_SDMMC0_DRV, "sdmmc0_drv", "cclk_sdmmc0", RK3562_SDMMC0_CON0, 1),
+ MMC(SCLK_SDMMC0_SAMPLE, "sdmmc0_sample", "cclk_sdmmc0", RK3562_SDMMC0_CON1, 1),
+ GATE(HCLK_SDMMC1, "hclk_sdmmc1", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CCLK_SDMMC1, "cclk_sdmmc1", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(17), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 3, GFLAGS),
+ MMC(SCLK_SDMMC1_DRV, "sdmmc1_drv", "cclk_sdmmc1", RK3562_SDMMC1_CON0, 1),
+ MMC(SCLK_SDMMC1_SAMPLE, "sdmmc1_sample", "cclk_sdmmc1", RK3562_SDMMC1_CON1, 1),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 9, GFLAGS),
+ COMPOSITE(CCLK_EMMC, "cclk_emmc", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 10, GFLAGS),
+ COMPOSITE(BCLK_EMMC, "bclk_emmc", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(19), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(TMCLK_EMMC, "tmclk_emmc", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(4), 12, GFLAGS),
+ COMPOSITE(SCLK_SFC, "sclk_sfc", gpll_cpll_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(HCLK_USB2HOST, "hclk_usb2host", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(HCLK_USB2HOST_ARB, "hclk_usb2host_arb", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 12, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(SCLK_IN_SPI1, "sclk_in_spi1", "sclk_in_spi1_io", 0,
+ RK3562_PERI_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI2, "clk_spi2", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 14, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(6), 4, GFLAGS),
+ GATE(SCLK_IN_SPI2, "sclk_in_spi2", "sclk_in_spi2_io", 0,
+ RK3562_PERI_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(PCLK_UART8, "pclk_uart8", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(PCLK_UART9, "pclk_uart9", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_UART1_SRC, "clk_uart1_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(21), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART1_FRAC, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(22), 0,
+ RK3562_PERI_CLKGATE_CON(7), 10, GFLAGS,
+ &rk3562_clk_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "clk_uart1", 0,
+ RK3562_PERI_CLKGATE_CON(7), 11, GFLAGS),
+ COMPOSITE(CLK_UART2_SRC, "clk_uart2_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(23), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART2_FRAC, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(24), 0,
+ RK3562_PERI_CLKGATE_CON(7), 13, GFLAGS,
+ &rk3562_clk_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "clk_uart2", 0,
+ RK3562_PERI_CLKGATE_CON(7), 14, GFLAGS),
+ COMPOSITE(CLK_UART3_SRC, "clk_uart3_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 15, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART3_FRAC, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(26), 0,
+ RK3562_PERI_CLKGATE_CON(8), 0, GFLAGS,
+ &rk3562_clk_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "clk_uart3", 0,
+ RK3562_PERI_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE(CLK_UART4_SRC, "clk_uart4_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(27), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART4_FRAC, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(28), 0,
+ RK3562_PERI_CLKGATE_CON(8), 3, GFLAGS,
+ &rk3562_clk_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "clk_uart4", 0,
+ RK3562_PERI_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE(CLK_UART5_SRC, "clk_uart5_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(29), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART5_FRAC, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(30), 0,
+ RK3562_PERI_CLKGATE_CON(8), 6, GFLAGS,
+ &rk3562_clk_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "clk_uart5", 0,
+ RK3562_PERI_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE(CLK_UART6_SRC, "clk_uart6_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(31), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART6_FRAC, "clk_uart6_frac", "clk_uart6_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(32), 0,
+ RK3562_PERI_CLKGATE_CON(8), 9, GFLAGS,
+ &rk3562_clk_uart6_fracmux),
+ GATE(SCLK_UART6, "sclk_uart6", "clk_uart6", 0,
+ RK3562_PERI_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(CLK_UART7_SRC, "clk_uart7_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART7_FRAC, "clk_uart7_frac", "clk_uart7_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(34), 0,
+ RK3562_PERI_CLKGATE_CON(8), 12, GFLAGS,
+ &rk3562_clk_uart7_fracmux),
+ GATE(SCLK_UART7, "sclk_uart7", "clk_uart7", 0,
+ RK3562_PERI_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE(CLK_UART8_SRC, "clk_uart8_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(35), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART8_FRAC, "clk_uart8_frac", "clk_uart8_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(36), 0,
+ RK3562_PERI_CLKGATE_CON(8), 15, GFLAGS,
+ &rk3562_clk_uart8_fracmux),
+ GATE(SCLK_UART8, "sclk_uart8", "clk_uart8", 0,
+ RK3562_PERI_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE(CLK_UART9_SRC, "clk_uart9_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(37), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(9), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART9_FRAC, "clk_uart9_frac", "clk_uart9_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(38), 0,
+ RK3562_PERI_CLKGATE_CON(9), 2, GFLAGS,
+ &rk3562_clk_uart9_fracmux),
+ GATE(SCLK_UART9, "sclk_uart9", "clk_uart9", 0,
+ RK3562_PERI_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(PCLK_PWM1_PERI, "pclk_pwm1_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1_PERI, "clk_pwm1_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 0, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(CLK_CAPTURE_PWM1_PERI, "clk_capture_pwm1_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 2, GFLAGS),
+ GATE(PCLK_PWM2_PERI, "pclk_pwm2_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2_PERI, "clk_pwm2_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 6, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(CLK_CAPTURE_PWM2_PERI, "clk_capture_pwm2_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(PCLK_PWM3_PERI, "pclk_pwm3_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM3_PERI, "clk_pwm3_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 8, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(CLK_CAPTURE_PWM3_PERI, "clk_capture_pwm3_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 8, GFLAGS),
+ GATE(PCLK_CAN0, "pclk_can0", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(41), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(PCLK_CAN1, "pclk_can1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(11), 2, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(41), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(11), 3, GFLAGS),
+ GATE(PCLK_PERI_WDT, "pclk_peri_wdt", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PERI_WDT, "tclk_peri_wdt", mux_xin24m_32k_p, 0,
+ RK3562_PERI_CLKSEL_CON(43), 15, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(ACLK_SYSMEM, "aclk_sysmem", "aclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(HCLK_BOOTROM, "hclk_bootrom", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(PCLK_PERI_GRF, "pclk_peri_grf", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(ACLK_RKDMAC, "aclk_rkdmac", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(14), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "xin24m", 0,
+ RK3562_PERI_CLKSEL_CON(44), 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(PCLK_OTPC_S, "pclk_otpc_s", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(CLK_SBPI_OTPC_S, "clk_sbpi_otpc_s", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(14), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_S, "clk_user_otpc_s", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKSEL_CON(44), 8, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(CLK_OTPC_ARB, "clk_otpc_arb", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_OTPPHY, "pclk_otpphy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(PCLK_USB2PHY, "pclk_usb2phy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(PCLK_PIPEPHY, "pclk_pipephy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RK3562_PERI_CLKSEL_CON(46), 0, 12, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(PCLK_IOC_VCCIO234, "pclk_ioc_vccio234", "pclk_peri", CLK_IS_CRITICAL,
+ RK3562_PERI_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(PCLK_PERI_GPIO1, "pclk_peri_gpio1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(PCLK_PERI_GPIO2, "pclk_peri_gpio2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(17), 1, GFLAGS),
+ COMPOSITE_NODIV(DCLK_PERI_GPIO, "dclk_peri_gpio", mux_xin24m_32k_p, 0,
+ RK3562_PERI_CLKSEL_CON(47), 8, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(DCLK_PERI_GPIO1, "dclk_peri_gpio1", "dclk_peri_gpio", 0,
+ RK3562_PERI_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(DCLK_PERI_GPIO2, "dclk_peri_gpio2", "dclk_peri_gpio", 0,
+ RK3562_PERI_CLKGATE_CON(17), 3, GFLAGS),
+
+ /* PD_PHP */
+ COMPOSITE(ACLK_PHP, "aclk_php", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(16), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PHP, "pclk_php", "aclk_php", 0,
+ RK3562_CLKSEL_CON(36), 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(ACLK_PCIE20_MST, "aclk_pcie20_mst", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(ACLK_PCIE20_SLV, "aclk_pcie20_slv", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(ACLK_PCIE20_DBI, "aclk_pcie20_dbi", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_PCIE20, "pclk_pcie20", "pclk_php", 0,
+ RK3562_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(CLK_PCIE20_AUX, "clk_pcie20_aux", "xin24m", 0,
+ RK3562_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_USB3OTG_SUSPEND, "clk_usb3otg_suspend", mux_xin24m_32k_p, 0,
+ RK3562_CLKSEL_CON(36), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(CLK_USB3OTG_REF, "clk_usb3otg_ref", "xin24m", 0,
+ RK3562_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(CLK_PIPEPHY_REF_FUNC, "clk_pipephy_ref_func", "pclk_pcie20", 0,
+ RK3562_CLKGATE_CON(17), 3, GFLAGS),
+
+ /* PD_PMU1 */
+ COMPOSITE_NOMUX(CLK_200M_PMU, "clk_200m_pmu", "cpll", CLK_IS_CRITICAL,
+ RK3562_PMU1_CLKSEL_CON(0), 0, 5, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(0), 1, GFLAGS),
+ /* PD_PMU0 */
+ COMPOSITE_FRACMUX(CLK_RTC32K_FRAC, "clk_rtc32k_frac", "xin24m", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKSEL_CON(0), 0,
+ RK3562_PMU0_CLKGATE_CON(0), 15, GFLAGS,
+ &rk3562_rtc32k_pmu_fracmux),
+ COMPOSITE_NOMUX(BUSCLK_PDPMU0, "busclk_pdpmu0", "clk_200m_pmu", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKSEL_CON(1), 3, 2, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_PMU0_CRU, "pclk_pmu0_cru", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(PCLK_PMU0_PMU, "pclk_pmu0_pmu", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(CLK_PMU0_PMU, "clk_pmu0_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU0_HP_TIMER, "pclk_pmu0_hp_timer", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_PMU0_HP_TIMER, "clk_pmu0_hp_timer", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(CLK_PMU0_32K_HP_TIMER, "clk_pmu0_32k_hp_timer", "clk_rtc_32k", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_PMU0_PVTM, "pclk_pmu0_pvtm", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(CLK_PMU0_PVTM, "clk_pmu0_pvtm", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_IOC_PMUIO, "pclk_ioc_pmuio", "busclk_pdpmu0", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_PMU0_GPIO0, "pclk_pmu0_gpio0", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(DBCLK_PMU0_GPIO0, "dbclk_pmu0_gpio0", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_PMU0_GRF, "pclk_pmu0_grf", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_PMU0_SGRF, "pclk_pmu0_sgrf", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 12, GFLAGS),
+ GATE(CLK_DDR_FAIL_SAFE, "clk_ddr_fail_safe", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(PCLK_PMU0_SCRKEYGEN, "pclk_pmu0_scrkeygen", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PIPEPHY_DIV, "clk_pipephy_div", "cpll", 0,
+ RK3562_PMU0_CLKSEL_CON(2), 0, 6, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(CLK_PIPEPHY_XIN24M, "clk_pipephy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_PIPEPHY_REF, "clk_pipephy_ref", clk_pipephy_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 7, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(CLK_USB2PHY_XIN24M, "clk_usb2phy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_USB2PHY_REF, "clk_usb2phy_ref", clk_usbphy_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 8, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(CLK_MIPIDSIPHY_XIN24M, "clk_mipidsiphy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", clk_mipidsi_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 15, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 7, GFLAGS),
+ GATE(PCLK_PMU0_I2C0, "pclk_pmu0_i2c0", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE(CLK_PMU0_I2C0, "clk_pmu0_i2c0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU0_CLKSEL_CON(3), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 9, GFLAGS),
+ /* PD_PMU1 */
+ GATE(PCLK_PMU1_CRU, "pclk_pmu1_cru", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU1_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(HCLK_PMU1_MEM, "hclk_pmu1_mem", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU1_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU1_UART0, "pclk_pmu1_uart0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PMU1_UART0_SRC, "clk_pmu1_uart0_src", "cpll", 0,
+ RK3562_PMU1_CLKSEL_CON(2), 0, 4, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_PMU1_UART0_FRAC, "clk_pmu1_uart0_frac", "clk_pmu1_uart0_src", CLK_SET_RATE_PARENT,
+ RK3562_PMU1_CLKSEL_CON(3), 0,
+ RK3562_PMU1_CLKGATE_CON(0), 9, GFLAGS,
+ &rk3562_clk_pmu1_uart0_fracmux),
+ GATE(SCLK_PMU1_UART0, "sclk_pmu1_uart0", "clk_pmu1_uart0", 0,
+ RK3562_PMU1_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_PMU1_SPI0, "pclk_pmu1_spi0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PMU1_SPI0, "clk_pmu1_spi0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU1_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 2, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(SCLK_IN_PMU1_SPI0, "sclk_in_pmu1_spi0", "sclk_in_pmu1_spi0_io", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PMU1_PWM0, "pclk_pmu1_pwm0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_PMU1_PWM0, "clk_pmu1_pwm0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU1_CLKSEL_CON(4), 14, 2, MFLAGS, 8, 2, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_CAPTURE_PMU1_PWM0, "clk_capture_pmu1_pwm0", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(CLK_PMU1_WIFI, "clk_pmu1_wifi", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(FCLK_PMU1_CM0_CORE, "fclk_pmu1_cm0_core", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(CLK_PMU1_CM0_RTC, "clk_pmu1_cm0_rtc", "clk_rtc_32k", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(PCLK_PMU1_WDTNS, "pclk_pmu1_wdtns", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_PMU1_WDTNS, "clk_pmu1_wdtns", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_PMU1_MAILBOX, "pclk_pmu1_mailbox", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(3), 8, GFLAGS),
+
+ /* PD_RGA */
+ COMPOSITE(ACLK_RGA_PRE, "aclk_rga_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(32), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(14), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_RGA_PRE, "hclk_rga_pre", "aclk_rga_jdec", 0,
+ RK3562_CLKSEL_CON(32), 8, 3, DFLAGS,
+ RK3562_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_jdec", 0,
+ RK3562_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 0,
+ RK3562_CLKGATE_CON(14), 7, GFLAGS),
+ COMPOSITE(CLK_RGA_CORE, "clk_rga_core", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(33), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(ACLK_JDEC, "aclk_jdec", "aclk_rga_jdec", 0,
+ RK3562_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(HCLK_JDEC, "hclk_jdec", "hclk_rga_pre", 0,
+ RK3562_CLKGATE_CON(14), 10, GFLAGS),
+
+ /* PD_VDPU */
+ COMPOSITE(ACLK_VDPU_PRE, "aclk_vdpu_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(22), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(23), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VDPU_PRE, "hclk_vdpu_pre", "aclk_vdpu", 0,
+ RK3562_CLKSEL_CON(24), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_vdpu", 0,
+ RK3562_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_vdpu_pre", 0,
+ RK3562_CLKGATE_CON(10), 8, GFLAGS),
+
+ /* PD_VEPU */
+ COMPOSITE(CLK_RKVENC_CORE, "clk_rkvenc_core", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(20), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU_PRE, "aclk_vepu_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(20), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_CLKGATE_CON(9), 1, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VEPU_PRE, "hclk_vepu_pre", "aclk_vepu", 0,
+ RK3562_CLKSEL_CON(21), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_vepu", 0,
+ RK3562_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_vepu_pre", 0,
+ RK3562_CLKGATE_CON(9), 6, GFLAGS),
+
+ /* PD_VI */
+ COMPOSITE(ACLK_VI, "aclk_vi", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VI, "hclk_vi", "aclk_vi_isp", 0,
+ RK3562_CLKSEL_CON(26), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VI, "pclk_vi", "aclk_vi_isp", 0,
+ RK3562_CLKSEL_CON(26), 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_isp", 0,
+ RK3562_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi", 0,
+ RK3562_CLKGATE_CON(11), 7, GFLAGS),
+ COMPOSITE(CLK_ISP, "clk_isp", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(27), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 8, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_isp", 0,
+ RK3562_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi", 0,
+ RK3562_CLKGATE_CON(11), 10, GFLAGS),
+ COMPOSITE(DCLK_VICAP, "dclk_vicap", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(CSIRX0_CLK_DATA, "csirx0_clk_data", "csirx0_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 12, GFLAGS),
+ GATE(CSIRX1_CLK_DATA, "csirx1_clk_data", "csirx1_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 13, GFLAGS),
+ GATE(CSIRX2_CLK_DATA, "csirx2_clk_data", "csirx2_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 14, GFLAGS),
+ GATE(CSIRX3_CLK_DATA, "csirx3_clk_data", "csirx3_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_CSIHOST0, "pclk_csihost0", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(PCLK_CSIHOST1, "pclk_csihost1", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_CSIHOST2, "pclk_csihost2", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(PCLK_CSIHOST3, "pclk_csihost3", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 5, GFLAGS),
+
+ /* PD_VO */
+ COMPOSITE(ACLK_VO_PRE, "aclk_vo_pre", gpll_cpll_vpll_dmyhpll_p, 0,
+ RK3562_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VO_PRE, "hclk_vo_pre", "aclk_vo", 0,
+ RK3562_CLKSEL_CON(29), 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vo", 0,
+ RK3562_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vo_pre", 0,
+ RK3562_CLKGATE_CON(13), 7, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", gpll_dmyhpll_vpll_apll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3562_CLKSEL_CON(30), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE(DCLK_VOP1, "dclk_vop1", gpll_dmyhpll_vpll_apll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3562_CLKSEL_CON(31), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_CLKGATE_CON(13), 9, GFLAGS),
+};
+
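+/*
+ * Init mirrors the other Rockchip CRU drivers: map the CRU region, register
+ * the PLLs and branch clocks, wire up the softreset controller and the
+ * restart notifier, then add the clock provider for the DT node.
+ */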
+static void __init rk3562_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3562_clk_branches,
+ ARRAY_SIZE(rk3562_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3562_pll_clks,
+ ARRAY_SIZE(rk3562_pll_clks),
+ RK3562_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, rk3562_clk_branches,
+ ARRAY_SIZE(rk3562_clk_branches));
+
+ rk3562_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3562_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3562_cru, "rockchip,rk3562-cru", rk3562_clk_init);
+
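+/*
+ * Besides the early CLK_OF_DECLARE path above, a minimal platform driver is
+ * registered so the CRU can also be brought up when the node is probed as a
+ * regular device.
+ */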
+struct clk_rk3562_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3562_inits clk_rk3562_cru_init = {
+ .inits = rk3562_clk_init,
+};
+
+static const struct of_device_id clk_rk3562_match_table[] = {
+ {
+ .compatible = "rockchip,rk3562-cru",
+ .data = &clk_rk3562_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3562_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3562_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3562_driver = {
+ .driver = {
+ .name = "clk-rk3562",
+ .of_match_table = clk_rk3562_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3562_driver, clk_rk3562_probe);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 53d10b1c627b..97d279399ae8 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -79,6 +79,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+ RK3036_PLL_RATE(132000000, 1, 66, 6, 2, 1, 0),
RK3036_PLL_RATE(128000000, 1, 16, 3, 1, 1, 0),
RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0),
RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
@@ -89,6 +90,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0),
{ /* sentinel */ },
};
@@ -590,7 +592,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3568_CLKGATE_CON(4), 0, GFLAGS),
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT,
- RK3568_CLKSEL_CON(9), 15, 1, MFLAGS),
+ RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys),
COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(10), 0, 2, DFLAGS,
@@ -1602,6 +1604,7 @@ static const char *const rk3568_cru_critical_clocks[] __initconst = {
"pclk_php",
"hclk_usb",
"pclk_usb",
+ "hclk_vi",
"hclk_vo",
};
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
index 595e010341f7..9bc0ef51ef68 100644
--- a/drivers/clk/rockchip/clk-rk3576.c
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -10,11 +10,13 @@
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
#include "clk.h"
#define RK3576_GRF_SOC_STATUS0 0x600
#define RK3576_PMU0_GRF_OSC_CON6 0x18
+#define RK3576_VCCIO_IOC_MISC_CON0 0x6400
enum rk3576_plls {
bpll, lpll, vpll, aupll, cpll, gpll, ppll,
@@ -541,6 +543,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(5), 14, GFLAGS),
GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
RK3576_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0,
+ RK3576_CLKGATE_CON(6), 0, GFLAGS),
COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3576_CLKGATE_CON(6), 3, GFLAGS),
@@ -1479,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(10), 0, GFLAGS),
GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
RK3576_CLKGATE_CON(10), 1, GFLAGS),
+ GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc),
/* sdgmac */
COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
@@ -1676,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
/* phy ref */
MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0),
/* secure ns */
COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
@@ -1725,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct regmap *grf;
+ struct rockchip_aux_grf *ioc_grf_e;
+ struct rockchip_aux_grf *pmu0_grf_e;
+ struct regmap *ioc_grf;
+ struct regmap *pmu0_grf;
clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
ARRAY_SIZE(rk3576_clk_branches)) + 1;
- grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
- if (IS_ERR(grf)) {
+ pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(pmu0_grf)) {
pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
return;
}
+ ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
+ if (IS_ERR(ioc_grf)) {
+ pr_err("%s: could not get IOC GRF syscon\n", __func__);
+ return;
+ }
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
@@ -1745,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
- iounmap(reg_base);
- return;
+ goto err_unmap;
}
- ctx->grf = grf;
+ pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL);
+ if (!pmu0_grf_e)
+ goto err_unmap;
+
+ pmu0_grf_e->grf = pmu0_grf;
+ pmu0_grf_e->type = grf_type_pmu0;
+ hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
+
+ ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL);
+ if (!ioc_grf_e)
+ goto err_free_pmu0;
+
+ ioc_grf_e->grf = ioc_grf;
+ ioc_grf_e->type = grf_type_ioc;
+ hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
rockchip_clk_register_plls(ctx, rk3576_pll_clks,
ARRAY_SIZE(rk3576_pll_clks),
@@ -1772,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
rockchip_clk_of_add_provider(np, ctx);
+
+ return;
+
+err_free_pmu0:
+ kfree(pmu0_grf_e);
+err_unmap:
+ iounmap(reg_base);
+ return;
}
CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 0ffaf639f807..1694223f4f84 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -12,28 +12,6 @@
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include "clk.h"
-/*
- * Recent Rockchip SoCs have a new hardware block called Native Interface
- * Unit (NIU), which gates clocks to devices behind them. These effectively
- * need two parent clocks.
- *
- * Downstream enables the linked clock via runtime PM whenever the gate is
- * enabled. This implementation uses separate clock nodes for each of the
- * linked gate clocks, which leaks parts of the clock tree into DT.
- *
- * The GATE_LINK macro instead takes the second parent via 'linkname', but
- * ignores the information. Once the clock framework is ready to handle it, the
- * information should be passed on here. But since these clocks are required to
- * access multiple relevant IP blocks, such as PCIe or USB, we mark all linked
- * clocks critical until a better solution is available. This will waste some
- * power, but avoids leaking implementation details into DT or hanging the
- * system.
- */
-#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
- GATE(_id, cname, pname, f, o, b, gf)
-#define RK3588_LINKED_CLK CLK_IS_CRITICAL
-
-
#define RK3588_GRF_SOC_STATUS0 0x600
#define RK3588_PHYREF_ALT_GATE 0xc38
@@ -86,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1500000000, 2, 250, 1, 0),
RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
@@ -266,6 +245,8 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
}, \
}
+static struct rockchip_clk_provider *early_ctx;
+
static struct rockchip_cpuclk_rate_table rk3588_cpub0clk_rates[] __initdata = {
RK3588_CPUB01CLK_RATE(2496000000, 1),
RK3588_CPUB01CLK_RATE(2400000000, 1),
@@ -694,7 +675,7 @@ static struct rockchip_pll_clock rk3588_pll_clks[] __initdata = {
RK3588_MODE_CON0, 10, 15, 0, rk3588_pll_rates),
};
-static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
+static struct rockchip_clk_branch rk3588_early_clk_branches[] __initdata = {
/*
* CRU Clock-Architecture
*/
@@ -792,10 +773,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(MCLK_GMAC0_OUT, "mclk_gmac0_out", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 3, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(15), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 4, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(16), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 5, GFLAGS),
COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
@@ -1456,7 +1437,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(77), 0, 2, MFLAGS,
RK3588_CLKGATE_CON(31), 0, GFLAGS),
- COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(77), 7, 1, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(31), 1, GFLAGS),
GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
@@ -1685,13 +1666,13 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(42), 9, GFLAGS),
/* vdpu */
- COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, 0,
RK3588_CLKSEL_CON(98), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(44), 0, GFLAGS),
COMPOSITE_NODIV(ACLK_VDPU_LOW_ROOT, "aclk_vdpu_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(98), 7, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(98), 9, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 2, GFLAGS),
COMPOSITE(ACLK_JPEG_DECODER_ROOT, "aclk_jpeg_decoder_root", gpll_cpll_aupll_spll_p, 0,
@@ -1742,9 +1723,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_RKVENC0_ROOT, "aclk_rkvenc0_root", gpll_cpll_npll_p, 0,
RK3588_CLKSEL_CON(102), 7, 2, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(47), 1, GFLAGS),
- GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 4, GFLAGS),
- GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 5, GFLAGS),
COMPOSITE(CLK_RKVENC0_CORE, "clk_rkvenc0_core", gpll_cpll_aupll_npll_p, 0,
RK3588_CLKSEL_CON(102), 14, 2, MFLAGS, 9, 5, DFLAGS,
@@ -1754,10 +1735,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(48), 6, GFLAGS),
/* vi */
- COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, 0,
RK3588_CLKSEL_CON(106), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(49), 0, GFLAGS),
- COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(106), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(49), 1, GFLAGS),
COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
@@ -1927,10 +1908,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_dmyaupll_npll_spll_p, 0,
RK3588_CLKSEL_CON(110), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(52), 0, GFLAGS),
- COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(110), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(110), 10, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 2, GFLAGS),
COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
@@ -2428,10 +2409,12 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(68), 5, GFLAGS),
GATE(ACLK_AV1, "aclk_av1", "aclk_av1_pre", 0,
RK3588_CLKGATE_CON(68), 2, GFLAGS),
+};
+static struct rockchip_clk_branch rk3588_clk_branches[] = {
GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", ACLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", HCLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, 0, RK3588_CLKGATE_CON(31), 2, GFLAGS),
GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
@@ -2443,9 +2426,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", ACLK_VOP_LOW_ROOT, 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
- GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(55), 5, GFLAGS),
+ GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, 0, RK3588_CLKGATE_CON(55), 5, GFLAGS),
GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
- GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(59), 9, GFLAGS),
+ GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 9, GFLAGS),
GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", HCLK_NVM, 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
@@ -2453,26 +2436,31 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(PCLK_VO1GRF, "pclk_vo1grf", "pclk_vo1_root", HCLK_VO1, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(59), 12, GFLAGS),
};
-static void __init rk3588_clk_init(struct device_node *np)
+static void __init rk3588_clk_early_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
- unsigned long clk_nr_clks;
+ unsigned long clk_nr_clks, max_clk_id1, max_clk_id2;
void __iomem *reg_base;
- clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches)) + 1;
+ max_clk_id1 = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
+ max_clk_id2 = rockchip_clk_find_max_clk_id(rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+ clk_nr_clks = max(max_clk_id1, max_clk_id2) + 1;
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ ctx = rockchip_clk_init_early(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
return;
}
+ early_ctx = ctx;
rockchip_clk_register_plls(ctx, rk3588_pll_clks,
ARRAY_SIZE(rk3588_pll_clks),
@@ -2491,14 +2479,55 @@ static void __init rk3588_clk_init(struct device_node *np)
&rk3588_cpub1clk_data, rk3588_cpub1clk_rates,
ARRAY_SIZE(rk3588_cpub1clk_rates));
- rockchip_clk_register_branches(ctx, rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches));
+ rockchip_clk_register_branches(ctx, rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE_DRIVER(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_early_init);
+
+static int clk_rk3588_probe(struct platform_device *pdev)
+{
+ struct rockchip_clk_provider *ctx = early_ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ rockchip_clk_register_late_branches(dev, ctx, rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
- rk3588_rst_init(np, reg_base);
+ rockchip_clk_finalize(ctx);
+ rk3588_rst_init(np, ctx->reg_base);
rockchip_register_restart_notifier(ctx, RK3588_GLB_SRST_FST, NULL);
+ /*
+ * Re-add clock provider, so that the newly added clocks are also
+ * re-parented and get their defaults configured.
+ */
+ of_clk_del_provider(np);
rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
}
-CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
+static const struct of_device_id clk_rk3588_match_table[] = {
+ {
+ .compatible = "rockchip,rk3588-cru",
+ },
+ { }
+};
+
+static struct platform_driver clk_rk3588_driver = {
+ .probe = clk_rk3588_probe,
+ .driver = {
+ .name = "clk-rk3588",
+ .of_match_table = clk_rk3588_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_clk_rk3588_drv_register(void)
+{
+ return platform_driver_register(&clk_rk3588_driver);
+}
+core_initcall(rockchip_clk_rk3588_drv_register);
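
A sketch of the consumer-visible effect of the early/late split (assumed behaviour, following the -EPROBE_DEFER placeholders set up by rockchip_clk_init_early(); 'consumer_np' and the clock-names entry are illustrative):

	/* before clk_rk3588_probe() has run, GATE_LINK ids make consumers defer */
	clk = of_clk_get_by_name(consumer_np, "hclk_vo0");
	if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* probe again once the late branches exist */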
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
index fc19c5522490..15e7bfe84506 100644
--- a/drivers/clk/rockchip/clk-rv1126.c
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
RV1126_GMAC_CON, 5, 1, MFLAGS),
MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT,
- RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys),
GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
RV1126_CLKGATE_CON(20), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 88629a9abc9c..19caf26c991b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
@@ -197,12 +198,6 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}
-static void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
- struct clk *clk, unsigned int id)
-{
- ctx->clk_data.clks[id] = clk;
-}
-
static struct clk *rockchip_clk_register_frac_branch(
struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
@@ -292,7 +287,7 @@ static struct clk *rockchip_clk_register_frac_branch(
return mux_clk;
}
- rockchip_clk_add_lookup(ctx, mux_clk, child->id);
+ rockchip_clk_set_lookup(ctx, mux_clk, child->id);
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
@@ -359,14 +354,17 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
return hw->clk;
}
-struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
- void __iomem *base,
- unsigned long nr_clks)
+static struct rockchip_clk_provider *rockchip_clk_init_base(
+ struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, bool has_late_clocks)
{
struct rockchip_clk_provider *ctx;
struct clk **clk_table;
+ struct clk *default_clk_val;
int i;
+ default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);
+
ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -376,7 +374,7 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
goto err_free;
for (i = 0; i < nr_clks; ++i)
- clk_table[i] = ERR_PTR(-ENOENT);
+ clk_table[i] = default_clk_val;
ctx->reg_base = base;
ctx->clk_data.clks = clk_table;
@@ -384,6 +382,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
ctx->cru_node = np;
spin_lock_init(&ctx->lock);
+ hash_init(ctx->aux_grf_table);
+
ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf");
@@ -393,8 +393,33 @@ err_free:
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, false);
+}
EXPORT_SYMBOL_GPL(rockchip_clk_init);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, true);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_init_early);
+
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->clk_data.clk_num; ++i)
+ if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
+ ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_finalize);
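
Intended pairing, as used by the RK3588 probe path above (a minimal sketch):

	/* at device probe time, once the driver core is available */
	rockchip_clk_register_late_branches(dev, ctx, late_branches, nr_clk);
	rockchip_clk_finalize(ctx);	/* remaining defers turn into -ENOENT */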
+
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx)
{
@@ -424,7 +449,7 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
@@ -446,10 +471,35 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
}
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+static struct platform_device *rockchip_clk_register_gate_link(
+ struct device *parent_dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ struct rockchip_gate_link_platdata gate_link_pdata = {
+ .ctx = ctx,
+ .clkbr = clkbr,
+ };
+
+ struct platform_device_info pdevinfo = {
+ .parent = parent_dev,
+ .name = "rockchip-gate-link-clk",
+ .id = clkbr->id,
+ .fwnode = dev_fwnode(parent_dev),
+ .of_node_reused = true,
+ .data = &gate_link_pdata,
+ .size_data = sizeof(gate_link_pdata),
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
+ struct regmap *grf = ctx->grf;
+ struct rockchip_aux_grf *agrf;
struct clk *clk;
unsigned int idx;
unsigned long flags;
@@ -458,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
flags = list->flags;
clk = NULL;
+ /* for GRF-dependent branches, choose the right grf first */
+ if ((list->branch_type == branch_grf_mux ||
+ list->branch_type == branch_grf_gate ||
+ list->branch_type == branch_grf_mmc) &&
+ list->grf_type != grf_type_sys) {
+ hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
+ if (agrf->type == list->grf_type) {
+ grf = agrf->grf;
+ break;
+ }
+ }
+ }
+
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
@@ -477,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->mux_shift, list->mux_width,
list->mux_flags, &ctx->lock);
break;
- case branch_muxgrf:
+ case branch_grf_mux:
clk = rockchip_clk_register_muxgrf(list->name,
list->parent_names, list->num_parents,
- flags, ctx->grf, list->muxdiv_offset,
+ flags, grf, list->muxdiv_offset,
list->mux_shift, list->mux_width,
list->mux_flags);
break;
@@ -527,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &ctx->lock);
break;
+ case branch_grf_gate:
+ flags |= CLK_SET_RATE_PARENT;
+ clk = rockchip_clk_register_gate_grf(list->name,
+ list->parent_names[0], flags, grf,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags);
+ break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
@@ -544,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->name,
list->parent_names, list->num_parents,
ctx->reg_base + list->muxdiv_offset,
+ NULL, 0,
+ list->div_shift
+ );
+ break;
+ case branch_grf_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ NULL,
+ grf, list->muxdiv_offset,
list->div_shift
);
break;
@@ -571,6 +651,9 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->div_width, list->div_flags,
ctx->reg_base, &ctx->lock);
break;
+ case branch_linked_gate:
+ /* must be registered late, fall-through for error message */
+ break;
}
/* none of the cases above matched */
@@ -586,11 +669,36 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ struct platform_device *pdev = NULL;
+
+ switch (list->branch_type) {
+ case branch_linked_gate:
+ pdev = rockchip_clk_register_gate_link(dev, ctx, list);
+ break;
+ default:
+ dev_err(dev, "unknown clock type %d\n", list->branch_type);
+ break;
+ }
+
+ if (!pdev)
+ dev_err(dev, "failed to register device for clock %s\n", list->name);
+ }
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);
+
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
unsigned int lookup_id,
const char *name, const char *const *parent_names,
@@ -610,7 +718,7 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
return;
}
- rockchip_clk_add_lookup(ctx, clk, lookup_id);
+ rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index f1957e1c1178..7c5e74c7a2e2 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include <linux/hashtable.h>
struct clk;
@@ -207,6 +208,68 @@ struct clk;
#define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
#define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
+#define RK3528_PMU_CRU_BASE 0x10000
+#define RK3528_PCIE_CRU_BASE 0x20000
+#define RK3528_DDRPHY_CRU_BASE 0x28000
+#define RK3528_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3528_PCIE_PLL_CON(x) ((x) * 0x4 + RK3528_PCIE_CRU_BASE)
+#define RK3528_DDRPHY_PLL_CON(x) ((x) * 0x4 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_MODE_CON 0x280
+#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24)
+#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4)
+#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc)
+#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
+#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
+#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
+#define RK3528_PCIE_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PCIE_CRU_BASE)
+#define RK3528_DDRPHY_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_DDRPHY_MODE_CON (0x280 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_GLB_CNT_TH 0xc00
+#define RK3528_GLB_SRST_FST 0xc08
+#define RK3528_GLB_SRST_SND 0xc0c
+
+#define RK3562_PMU0_CRU_BASE 0x10000
+#define RK3562_PMU1_CRU_BASE 0x18000
+#define RK3562_DDR_CRU_BASE 0x20000
+#define RK3562_SUBDDR_CRU_BASE 0x28000
+#define RK3562_PERI_CRU_BASE 0x30000
+
+#define RK3562_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3562_PMU1_PLL_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x40)
+#define RK3562_SUBDDR_PLL_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x20)
+#define RK3562_MODE_CON 0x600
+#define RK3562_PMU1_MODE_CON (RK3562_PMU1_CRU_BASE + 0x380)
+#define RK3562_SUBDDR_MODE_CON (RK3562_SUBDDR_CRU_BASE + 0x380)
+#define RK3562_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3562_CLKGATE_CON(x) ((x) * 0x4 + 0x300)
+#define RK3562_SOFTRST_CON(x) ((x) * 0x4 + 0x400)
+#define RK3562_DDR_CLKSEL_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x100)
+#define RK3562_DDR_CLKGATE_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x180)
+#define RK3562_DDR_SOFTRST_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x200)
+#define RK3562_SUBDDR_CLKSEL_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x100)
+#define RK3562_SUBDDR_CLKGATE_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x180)
+#define RK3562_SUBDDR_SOFTRST_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x200)
+#define RK3562_PERI_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x100)
+#define RK3562_PERI_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x300)
+#define RK3562_PERI_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x400)
+#define RK3562_PMU0_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x100)
+#define RK3562_PMU0_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x180)
+#define RK3562_PMU0_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x200)
+#define RK3562_PMU1_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x100)
+#define RK3562_PMU1_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x180)
+#define RK3562_PMU1_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x200)
+#define RK3562_GLB_SRST_FST 0x614
+#define RK3562_GLB_SRST_SND 0x618
+#define RK3562_GLB_RST_CON 0x61c
+#define RK3562_GLB_RST_ST 0x620
+#define RK3562_SDMMC0_CON0 0x624
+#define RK3562_SDMMC0_CON1 0x628
+#define RK3562_SDMMC1_CON0 0x62c
+#define RK3562_SDMMC1_CON1 0x630
+
#define RK3568_PLL_CON(x) RK2928_PLL_CON(x)
#define RK3568_MODE_CON0 0xc0
#define RK3568_MISC_CON0 0xc4
@@ -381,12 +444,37 @@ enum rockchip_pll_type {
.k = _k, \
}
+enum rockchip_grf_type {
+ grf_type_sys = 0,
+ grf_type_pmu0,
+ grf_type_pmu1,
+ grf_type_ioc,
+ grf_type_vo,
+ grf_type_vpu,
+};
+
+/* two-bit hash order: 2^2 = 4 buckets, enough for the few auxiliary GRF types */
+#define GRF_HASH_ORDER 2
+
+/**
+ * struct rockchip_aux_grf - entry for the aux_grf_table hashtable
+ * @grf: pointer to the grf this entry references
+ * @type: what type of GRF this is
+ * @node: hlist node
+ */
+struct rockchip_aux_grf {
+ struct regmap *grf;
+ enum rockchip_grf_type type;
+ struct hlist_node node;
+};
+
/**
* struct rockchip_clk_provider - information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
* @cru_node: device-node of the clock-provider
* @grf: regmap of the general-register-files syscon
+ * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type
* @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct rockchip_clk_provider {
@@ -394,6 +482,7 @@ struct rockchip_clk_provider {
struct clk_onecell_data clk_data;
struct device_node *cru_node;
struct regmap *grf;
+ DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER);
spinlock_t lock;
};
@@ -443,7 +532,8 @@ struct rockchip_pll_rate_table {
*
* Flags:
* ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
- * rate_table parameters and ajust them if necessary.
+ * rate_table parameters and adjust them if necessary.
+ * ROCKCHIP_PLL_FIXED_MODE - the pll operates in normal mode only
*/
struct rockchip_pll_clock {
unsigned int id;
@@ -461,6 +551,7 @@ struct rockchip_pll_clock {
};
#define ROCKCHIP_PLL_SYNC_RATE BIT(0)
+#define ROCKCHIP_PLL_FIXED_MODE BIT(1)
#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift, \
_lshift, _pflags, _rtable) \
@@ -533,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift);
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift);
/*
* DDRCLK flags, including method of setting the rate
@@ -561,16 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
int flags, struct regmap *grf, int reg,
int shift, int width, int mux_flags);
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg,
+ unsigned int shift, u8 gate_flags);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
branch_composite,
branch_mux,
- branch_muxgrf,
+ branch_grf_mux,
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_grf_gate,
+ branch_linked_gate,
branch_mmc,
+ branch_grf_mmc,
branch_inverter,
branch_factor,
branch_ddrclk,
@@ -597,6 +698,8 @@ struct rockchip_clk_branch {
int gate_offset;
u8 gate_shift;
u8 gate_flags;
+ unsigned int linked_clk_id;
+ enum rockchip_grf_type grf_type;
struct rockchip_clk_branch *child;
};
@@ -837,10 +940,10 @@ struct rockchip_clk_branch {
.mux_table = mt, \
}
-#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
+#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \
{ \
.id = _id, \
- .branch_type = branch_muxgrf, \
+ .branch_type = branch_grf_mux, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -850,6 +953,7 @@ struct rockchip_clk_branch {
.mux_width = w, \
.mux_flags = mf, \
.gate_offset = -1, \
+ .grf_type = gt, \
}
#define DIV(_id, cname, pname, f, o, s, w, df) \
@@ -895,6 +999,34 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ .grf_type = gt, \
+ }
+
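
A hypothetical GATE_GRF instantiation (id, parent and register offset invented for illustration; note that rockchip_clk_register_branches() additionally forces CLK_SET_RATE_PARENT for branch_grf_gate):

	GATE_GRF(CLK_FOO_EN, "clk_foo_en", "xin24m", 0,
		 0x0810, 2, GFLAGS, grf_type_pmu0),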
+#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_linked_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .linked_clk_id = linkedclk, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ }
+
#define MMC(_id, cname, pname, offset, shift) \
{ \
.id = _id, \
@@ -906,6 +1038,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ .grf_type = grftype, \
+ }
+
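
Likewise a hypothetical MMC_GRF entry, for an MMC phase register that lives in a GRF rather than in the CRU's own address space (names and offset invented):

	MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_sdmmc",
		0x0024, 1, grf_type_vo),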
#define INVERTER(_id, cname, pname, io, is, if) \
{ \
.id = _id, \
@@ -1022,8 +1166,28 @@ struct rockchip_clk_branch {
#define SGRF_GATE(_id, cname, pname) \
FACTOR(_id, cname, pname, 0, 1, 1)
+static inline struct clk *rockchip_clk_get_lookup(struct rockchip_clk_provider *ctx,
+ unsigned int id)
+{
+ return ctx->clk_data.clks[id];
+}
+
+static inline void rockchip_clk_set_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id)
+{
+ ctx->clk_data.clks[id] = clk;
+}
+
+struct rockchip_gate_link_platdata {
+ struct rockchip_clk_provider *ctx;
+ struct rockchip_clk_branch *clkbr;
+};
+
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks);
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
@@ -1031,6 +1195,10 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
struct rockchip_pll_clock *pll_list,
unsigned int nr_pll, int grf_lock_offset);
@@ -1078,6 +1246,8 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rk3528_rst_init(struct device_node *np, void __iomem *reg_base);
+void rk3562_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3588_rst_init(struct device_node *np, void __iomem *reg_base);
diff --git a/drivers/clk/rockchip/gate-link.c b/drivers/clk/rockchip/gate-link.c
new file mode 100644
index 000000000000..cd0f7a2d30ab
--- /dev/null
+++ b/drivers/clk/rockchip/gate-link.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include "clk.h"
+
+static int rk_clk_gate_link_register(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ unsigned long flags = clkbr->flags | CLK_SET_RATE_PARENT;
+ struct clk *clk;
+
+ clk = clk_register_gate(dev, clkbr->name, clkbr->parent_names[0],
+ flags, ctx->reg_base + clkbr->gate_offset,
+ clkbr->gate_shift, clkbr->gate_flags,
+ &ctx->lock);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rockchip_clk_set_lookup(ctx, clk, clkbr->id);
+ return 0;
+}
+
+static int rk_clk_gate_link_probe(struct platform_device *pdev)
+{
+ struct rockchip_gate_link_platdata *pdata;
+ struct device *dev = &pdev->dev;
+ struct clk *linked_clk;
+ int ret;
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "missing platform data\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ linked_clk = rockchip_clk_get_lookup(pdata->ctx, pdata->clkbr->linked_clk_id);
+ ret = pm_clk_add_clk(dev, linked_clk);
+ if (ret)
+ return ret;
+
+ ret = rk_clk_gate_link_register(dev, pdata->ctx, pdata->clkbr);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ pm_clk_remove_clk(dev, linked_clk);
+ return ret;
+}
+
+static const struct dev_pm_ops rk_clk_gate_link_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver rk_clk_gate_link_driver = {
+ .probe = rk_clk_gate_link_probe,
+ .driver = {
+ .name = "rockchip-gate-link-clk",
+ .pm = &rk_clk_gate_link_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rk_clk_gate_link_drv_register(void)
+{
+ return platform_driver_register(&rk_clk_gate_link_driver);
+}
+core_initcall(rk_clk_gate_link_drv_register);
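
Rough runtime flow for a clock registered through this driver (assumed, based on the pm_clk wiring in probe; the clk core runtime-resumes the registering device when the gate is prepared):

	clk_prepare_enable(gate_clk);	/* device resumes; pm_clk_resume() enables the linked clock */
	/* ... peripheral in use ... */
	clk_disable_unprepare(gate_clk);	/* device idles; pm_clk_suspend() gates the linked clock again */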
diff --git a/drivers/clk/rockchip/rst-rk3528.c b/drivers/clk/rockchip/rst-rk3528.c
new file mode 100644
index 000000000000..b24f2c367929
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3528.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ * Based on Sebastian Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3528-cru.h>
+#include "clk.h"
+
+/* 0xFF4A0000 + 0x0A00 */
+#define RK3528_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+
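
Worked example (value taken from the table below): SRST_NL2 sits in CRU_SOFTRST_CON03, bit 8, so its lookup value is 3 * 16 + 8 = 56, which the softrst code decodes back into register index (value / 16) and bit (value % 16):

	RK3528_CRU_RESET_OFFSET(SRST_NL2, 3, 8)	/* -> 3 * 16 + 8 = 56 */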
+/* mapping table for reset ID to register offset */
+static const int rk3528_register_offset[] = {
+ /* CRU_SOFTRST_CON03 */
+ RK3528_CRU_RESET_OFFSET(SRST_CORE0_PO, 3, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE1_PO, 3, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE2_PO, 3, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE3_PO, 3, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE0, 3, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE1, 3, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE2, 3, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE3, 3, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_NL2, 3, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_BIU, 3, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_CRYPTO, 3, 10),
+
+ /* CRU_SOFTRST_CON05 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_DBG, 5, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_POT_DBG, 5, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_NT_DBG, 5, 15),
+
+ /* CRU_SOFTRST_CON06 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_CORE_GRF, 6, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DAPLITE_BIU, 6, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CPU_BIU, 6, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 6, 7),
+
+ /* CRU_SOFTRST_CON08 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_VOPGL_BIU, 8, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_H_BIU, 8, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SYSMEM_BIU, 8, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 8, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 8, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 8, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DFT2APB, 8, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_BUS_GRF, 8, 15),
+
+ /* CRU_SOFTRST_CON09 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_M_BIU, 9, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_A_GIC, 9, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SPINLOCK, 9, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DMAC, 9, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_TIMER, 9, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER0, 9, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER1, 9, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER2, 9, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER3, 9, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER4, 9, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER5, 9, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_JDBCK_DAP, 9, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_JDBCK_DAP, 9, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_WDT_NS, 9, 15),
+
+ /* CRU_SOFTRST_CON10 */
+ RK3528_CRU_RESET_OFFSET(SRST_T_WDT_NS, 10, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_TRNG_NS, 10, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART0, 10, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART0, 10, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_PKA_CRYPTO, 10, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_CRYPTO, 10, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_H_CRYPTO, 10, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 10, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 10, 14),
+
+ /* CRU_SOFTRST_CON11 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_PWM0, 11, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_PWM0, 11, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PWM1, 11, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_PWM1, 11, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SCR, 11, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DCF, 11, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_INTMUX, 11, 12),
+
+ /* CRU_SOFTRST_CON25 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU_BIU, 25, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VPU_BIU, 25, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_BIU, 25, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU, 25, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VPU, 25, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CRU_PCIE, 25, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_GRF, 25, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SFC, 25, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_S_SFC, 25, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_C_EMMC, 25, 15),
+
+ /* CRU_SOFTRST_CON26 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_EMMC, 26, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_A_EMMC, 26, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_B_EMMC, 26, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_T_EMMC, 26, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO1, 26, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO1, 26, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU_L_BIU, 26, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_IOC, 26, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S0, 26, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S0, 26, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S2, 26, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S2, 26, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_ACODEC, 26, 13),
+
+ /* CRU_SOFTRST_CON27 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO3, 27, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO3, 27, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SPI1, 27, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_SPI1, 27, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART2, 27, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART2, 27, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART5, 27, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART5, 27, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART6, 27, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART6, 27, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART7, 27, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART7, 27, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C3, 27, 15),
+
+ /* CRU_SOFTRST_CON28 */
+ RK3528_CRU_RESET_OFFSET(SRST_I2C3, 28, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C5, 28, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C5, 28, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C6, 28, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C6, 28, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_A_MAC, 28, 5),
+
+ /* CRU_SOFTRST_CON30 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_PCIE, 30, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_PCIE_PIPE_PHY, 30, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_PCIE_POWER_UP, 30, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PCIE_PHY, 30, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PIPE_GRF, 30, 7),
+
+ /* CRU_SOFTRST_CON32 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDIO0, 32, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDIO1, 32, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_TS_0, 32, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_TS_1, 32, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN2, 32, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN2, 32, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN3, 32, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN3, 32, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SARADC, 32, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_SARADC, 32, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_SARADC_PHY, 32, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_TSADC, 32, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_TSADC, 32, 15),
+
+ /* CRU_SOFTRST_CON33 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_USB3OTG, 33, 1),
+
+ /* CRU_SOFTRST_CON34 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_GPU_BIU, 34, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPU_BIU, 34, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_GPU, 34, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_GPU, 34, 9),
+
+ /* CRU_SOFTRST_CON36 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVENC_BIU, 36, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVENC_BIU, 36, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_BIU, 36, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVENC, 36, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVENC, 36, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_RKVENC, 36, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S1, 36, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S1, 36, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C1, 36, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C1, 36, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C0, 36, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C0, 36, 14),
+
+ /* CRU_SOFTRST_CON37 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_SPI0, 37, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_SPI0, 37, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO4, 37, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO4, 37, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_IOC, 37, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SPDIF, 37, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SPDIF, 37, 15),
+
+ /* CRU_SOFTRST_CON38 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_PDM, 38, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_M_PDM, 38, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART1, 38, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART1, 38, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART3, 38, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART3, 38, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_GRF, 38, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN0, 38, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN0, 38, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN1, 38, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN1, 38, 10),
+
+ /* CRU_SOFTRST_CON39 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VO_BIU, 39, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VO_BIU, 39, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_BIU, 39, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RGA2E, 39, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RGA2E, 39, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_RGA2E, 39, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VDPP, 39, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VDPP, 39, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_VDPP, 39, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_GRF, 39, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CRU, 39, 15),
+
+ /* CRU_SOFTRST_CON40 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VOP_BIU, 40, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VOP, 40, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_D_VOP0, 40, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_D_VOP1, 40, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VOP, 40, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDMI, 40, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_HDMI, 40, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDMIPHY, 40, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_H_HDCP_KEY, 40, 15),
+
+ /* CRU_SOFTRST_CON41 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_HDCP, 41, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_HDCP, 41, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDCP, 41, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_CVBS, 41, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_D_CVBS_VOP, 41, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_D_4X_CVBS_VOP, 41, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_JPEG_DECODER, 41, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_H_JPEG_DECODER, 41, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VO_L_BIU, 41, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_A_MAC_VO, 41, 10),
+
+ /* CRU_SOFTRST_CON42 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_JPEG_BIU, 42, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S3, 42, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S3, 42, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_MACPHY, 42, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VCDCPHY, 42, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO2, 42, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO2, 42, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_IOC, 42, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDMMC0, 42, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_P_OTPC_NS, 42, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 42, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_USER_OTPC_NS, 42, 13),
+
+ /* CRU_SOFTRST_CON43 */
+ RK3528_CRU_RESET_OFFSET(SRST_HDMIHDP0, 43, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_USBHOST, 43, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_H_USBHOST_ARB, 43, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_HOST_UTMI, 43, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART4, 43, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART4, 43, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C4, 43, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C4, 43, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C7, 43, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C7, 43, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_USBPHY, 43, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_POR, 43, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_OTG, 43, 15),
+
+ /* CRU_SOFTRST_CON44 */
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_HOST, 44, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRPHY_CRU, 44, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVDEC_BIU, 44, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 44, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVDEC, 44, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVDEC, 44, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_HEVC_CA_RKVDEC, 44, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_RKVDEC, 44, 12),
+
+ /* CRU_SOFTRST_CON45 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_BIU, 45, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRC, 45, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRMON, 45, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER_DDRMON, 45, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_MSCH_BIU, 45, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_GRF, 45, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_HWLP, 45, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRPHY, 45, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_MSCH_BIU, 45, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL, 45, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_DDR_UPCTL, 45, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_DDRMON, 45, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DDR_SCRAMBLE, 45, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SPLIT, 45, 15),
+
+ /* CRU_SOFTRST_CON46 */
+ RK3528_CRU_RESET_OFFSET(SRST_DDR_PHY, 46, 0),
+};
+
+void rk3528_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3528_register_offset,
+ ARRAY_SIZE(rk3528_register_offset),
+ reg_base + RK3528_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
diff --git a/drivers/clk/rockchip/rst-rk3562.c b/drivers/clk/rockchip/rst-rk3562.c
new file mode 100644
index 000000000000..a3854eaef3be
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3562.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ * Based on Sebastian Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3562-cru.h>
+#include "clk.h"
+
+/* 0xff100000 + 0x0A00 */
+#define RK3562_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+/* 0xff110000 + 0x0A00 */
+#define RK3562_PMU0CRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000*4 + reg * 16 + bit)
+/* 0xff118000 + 0x0A00 */
+#define RK3562_PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x18000*4 + reg * 16 + bit)
+/* 0xff120000 + 0x0A00 */
+#define RK3562_DDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000*4 + reg * 16 + bit)
+/* 0xff128000 + 0x0A00 */
+#define RK3562_SUBDDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x28000*4 + reg * 16 + bit)
+/* 0xff130000 + 0x0A00 */
+#define RK3562_PERICRU_RESET_OFFSET(id, reg, bit) [id] = (0x30000*4 + reg * 16 + bit)
+
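
Worked example for the banked variants (assuming the LUT encoding of 16 slots per 4-byte register, so a bank at byte offset 0x10000 contributes 0x10000 / 4 * 16 = 0x10000*4 slots):

	RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_CRU, 0, 0)	/* -> 0x10000*4 = 0x40000 */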
+/* mapping table for reset ID to register offset */
+static const int rk3562_register_offset[] = {
+ /* SOFTRST_CON01 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_TOP_BIU, 1, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_TOP_VIO_BIU, 1, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_LOGIC, 1, 2),
+
+ /* SOFTRST_CON03 */
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET0, 3, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET1, 3, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET2, 3, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET3, 3, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET0, 3, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET1, 3, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET2, 3, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET3, 3, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_NL2RESET, 3, 8),
+
+ /* SOFTRST_CON04 */
+ RK3562_CRU_RESET_OFFSET(SRST_DAP, 4, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DBG_DAPLITE, 4, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 4, 13),
+
+ /* SOFTRST_CON05 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_CORE_BIU, 5, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CORE_BIU, 5, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_H_CORE_BIU, 5, 2),
+
+ /* SOFTRST_CON06 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_NPU_BIU, 6, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_H_NPU_BIU, 6, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKNN, 6, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKNN, 6, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_NPU, 6, 6),
+
+ /* SOFTRST_CON08 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_GPU_BIU, 8, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_GPU, 8, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_GPU, 8, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_GPU_BRG_BIU, 8, 8),
+
+ /* SOFTRST_CON09 */
+ RK3562_CRU_RESET_OFFSET(SRST_RKVENC_CORE, 9, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VEPU_BIU, 9, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VEPU_BIU, 9, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKVENC, 9, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKVENC, 9, 6),
+
+ /* SOFTRST_CON10 */
+ RK3562_CRU_RESET_OFFSET(SRST_RKVDEC_HEVC_CA, 10, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VDPU_BIU, 10, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VDPU_BIU, 10, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKVDEC, 10, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKVDEC, 10, 8),
+
+ /* SOFTRST_CON11 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_VI_BIU, 11, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VI_BIU, 11, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_VI_BIU, 11, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_ISP, 11, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VICAP, 11, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VICAP, 11, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VICAP, 11, 11),
+ RK3562_CRU_RESET_OFFSET(SRST_I0_VICAP, 11, 12),
+ RK3562_CRU_RESET_OFFSET(SRST_I1_VICAP, 11, 13),
+ RK3562_CRU_RESET_OFFSET(SRST_I2_VICAP, 11, 14),
+ RK3562_CRU_RESET_OFFSET(SRST_I3_VICAP, 11, 15),
+
+ /* SOFTRST_CON12 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST0, 12, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST1, 12, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST2, 12, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST3, 12, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIPHY0, 12, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIPHY1, 12, 5),
+
+ /* SOFTRST_CON13 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_VO_BIU, 13, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VO_BIU, 13, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VOP, 13, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VOP, 13, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VOP, 13, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VOP1, 13, 9),
+
+ /* SOFTRST_CON14 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_RGA_BIU, 14, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RGA_BIU, 14, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RGA, 14, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RGA, 14, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_RGA_CORE, 14, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_A_JDEC, 14, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_H_JDEC, 14, 10),
+
+ /* SOFTRST_CON15 */
+ RK3562_CRU_RESET_OFFSET(SRST_B_EBK_BIU, 15, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_EBK_BIU, 15, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_AHB2AXI_EBC, 15, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_H_EBC, 15, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_D_EBC, 15, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_EINK, 15, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_P_EINK, 15, 8),
+
+ /* SOFTRST_CON16 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_PHP_BIU, 16, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_A_PHP_BIU, 16, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_PCIE20, 16, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_PCIE20_POWERUP, 16, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_USB3OTG, 16, 10),
+
+ /* SOFTRST_CON17 */
+ RK3562_CRU_RESET_OFFSET(SRST_PIPEPHY, 17, 3),
+
+ /* SOFTRST_CON18 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 18, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 18, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 18, 5),
+
+ /* SOFTRST_CON19 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C1, 19, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C2, 19, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C3, 19, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C4, 19, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C5, 19, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C1, 19, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C2, 19, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C3, 19, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C4, 19, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C5, 19, 10),
+
+ /* SOFTRST_CON20 */
+ RK3562_CRU_RESET_OFFSET(SRST_BUS_GPIO3, 20, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_BUS_GPIO4, 20, 6),
+
+ /* SOFTRST_CON21 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_TIMER, 21, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER0, 21, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER1, 21, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER2, 21, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER3, 21, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER4, 21, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER5, 21, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_P_STIMER, 21, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_STIMER0, 21, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_STIMER1, 21, 9),
+
+ /* SOFTRST_CON22 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_WDTNS, 22, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_WDTNS, 22, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GRF, 22, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_SGRF, 22, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_MAILBOX, 22, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_INTC, 22, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_GIC400, 22, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_GIC400_DEBUG, 22, 7),
+
+ /* SOFTRST_CON23 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_SPINLOCK, 23, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_DCF, 23, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DCF, 23, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_F_BUS_CM0_CORE, 23, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_T_BUS_CM0_JTAG, 23, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_ICACHE, 23, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_H_DCACHE, 23, 9),
+
+ /* SOFTRST_CON24 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_TSADC, 24, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_TSADC, 24, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_TSADCPHY, 24, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DFT2APB, 24, 4),
+
+ /* SOFTRST_CON25 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_GMAC, 25, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_APB2ASB_VCCIO156, 25, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DSIPHY, 25, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DSITX, 25, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CPU_EMA_DET, 25, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_P_HASH, 25, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_P_TOPCRU, 25, 11),
+
+ /* SOFTRST_CON26 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_ASB2APB_VCCIO156, 26, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_IOC_VCCIO156, 26, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GPIO3_VCCIO156, 26, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GPIO4_VCCIO156, 26, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_SARADC_VCCIO156, 26, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_SARADC_VCCIO156, 26, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_SARADC_VCCIO156_PHY, 26, 6),
+
+ /* SOFTRST_CON27 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_MAC100, 27, 1),
+
+ /* PMU0_SOFTRST_CON00 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_CRU, 0, 0),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_PMU, 0, 1),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_PMU, 0, 2),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_HP_TIMER, 0, 3),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_HP_TIMER, 0, 4),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_32K_HP_TIMER, 0, 5),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_PVTM, 0, 6),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_PVTM, 0, 7),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_IOC_PMUIO, 0, 8),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_GPIO0, 0, 9),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_GPIO0, 0, 10),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_GRF, 0, 11),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_SGRF, 0, 12),
+
+ /* PMU0_SOFTRST_CON01 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_DDR_FAIL_SAFE, 1, 0),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_SCRKEYGEN, 1, 1),
+
+ /* PMU0_SOFTRST_CON02 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_I2C0, 2, 8),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_I2C0, 2, 9),
+
+ /* PMU1_SOFTRST_CON00 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_CRU, 0, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_MEM, 0, 2),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 0, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_BIU, 0, 4),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_UART0, 0, 7),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_S_PMU1_UART0, 0, 10),
+
+ /* PMU1_SOFTRST_CON01 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_SPI0, 1, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_SPI0, 1, 1),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_PWM0, 1, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_PWM0, 1, 4),
+
+ /* PMU1_SOFTRST_CON02 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_F_PMU1_CM0_CORE, 2, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_T_PMU1_CM0_JTAG, 2, 2),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_WDTNS, 2, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_WDTNS, 2, 4),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_MAILBOX, 2, 8),
+
+ /* DDR_SOFTRST_CON00 */
+ RK3562_DDRCRU_RESET_OFFSET(SRST_MSCH_BRG_BIU, 0, 4),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_MSCH_BIU, 0, 5),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_HWLP, 0, 6),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_PHY, 0, 8),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_DFICTL, 0, 9),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_DMA2DDR, 0, 10),
+
+ /* DDR_SOFTRST_CON01 */
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_MON, 1, 0),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_TM_DDR_MON, 1, 1),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_GRF, 1, 2),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_CRU, 1, 3),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_SUBDDR_CRU, 1, 4),
+
+ /* SUBDDR_SOFTRST_CON00 */
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_MSCH_BIU, 0, 1),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_PHY, 0, 4),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_DFICTL, 0, 5),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_SCRAMBLE, 0, 6),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_MON, 0, 7),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_A_DDR_SPLIT, 0, 8),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_DMA2DDR, 0, 9),
+
+ /* PERI_SOFTRST_CON01 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_PERI_BIU, 1, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_PERI_BIU, 1, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_BIU, 1, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERICRU, 1, 6),
+
+ /* PERI_SOFTRST_CON02 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI0_8CH, 2, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI0_8CH, 2, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI1_8CH, 2, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI1_8CH, 2, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI2_2CH, 2, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI2_2CH, 2, 13),
+
+ /* PERI_SOFTRST_CON03 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_DSM, 3, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_DSM, 3, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_PDM, 3, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_PDM, 3, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SPDIF, 3, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SPDIF, 3, 11),
+
+ /* PERI_SOFTRST_CON04 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SDMMC0, 4, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SDMMC1, 4, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_EMMC, 4, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_EMMC, 4, 9),
+ RK3562_PERICRU_RESET_OFFSET(SRST_C_EMMC, 4, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_B_EMMC, 4, 11),
+ RK3562_PERICRU_RESET_OFFSET(SRST_T_EMMC, 4, 12),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_SFC, 4, 13),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SFC, 4, 14),
+
+ /* PERI_SOFTRST_CON05 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_USB2HOST, 5, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_USB2HOST_ARB, 5, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2HOST_UTMI, 5, 2),
+
+ /* PERI_SOFTRST_CON06 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SPI1, 6, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SPI1, 6, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SPI2, 6, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SPI2, 6, 4),
+
+ /* PERI_SOFTRST_CON07 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART1, 7, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART2, 7, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART3, 7, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART4, 7, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART5, 7, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART6, 7, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART7, 7, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART8, 7, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART9, 7, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART1, 7, 11),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART2, 7, 14),
+
+ /* PERI_SOFTRST_CON08 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART3, 8, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART4, 8, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART5, 8, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART6, 8, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART7, 8, 13),
+
+ /* PERI_SOFTRST_CON09 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART8, 9, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART9, 9, 3),
+
+ /* PERI_SOFTRST_CON10 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM1_PERI, 10, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM1_PERI, 10, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM2_PERI, 10, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM2_PERI, 10, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM3_PERI, 10, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM3_PERI, 10, 7),
+
+ /* PERI_SOFTRST_CON11 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CAN0, 11, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CAN0, 11, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CAN1, 11, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CAN1, 11, 3),
+
+ /* PERI_SOFTRST_CON12 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_CRYPTO, 12, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_CRYPTO, 12, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CRYPTO, 12, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CORE_CRYPTO, 12, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PKA_CRYPTO, 12, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_KLAD, 12, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_KEY_READER, 12, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_RK_RNG_NS, 12, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_RK_RNG_S, 12, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_TRNG_NS, 12, 9),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_TRNG_S, 12, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_CRYPTO_S, 12, 11),
+
+ /* PERI_SOFTRST_CON13 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_WDT, 13, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_T_PERI_WDT, 13, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_SYSMEM, 13, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_BOOTROM, 13, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GRF, 13, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_DMAC, 13, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_RKDMAC, 13, 6),
+
+ /* PERI_SOFTRST_CON14 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPC_NS, 14, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 14, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USER_OTPC_NS, 14, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPC_S, 14, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SBPI_OTPC_S, 14, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USER_OTPC_S, 14, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_OTPC_ARB, 14, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPPHY, 14, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_OTP_NPOR, 14, 8),
+
+ /* PERI_SOFTRST_CON15 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_USB2PHY, 15, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_POR, 15, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_OTG, 15, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_HOST, 15, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PIPEPHY, 15, 7),
+
+ /* PERI_SOFTRST_CON16 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SARADC, 16, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SARADC, 16, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SARADC_PHY, 16, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_IOC_VCCIO234, 16, 12),
+
+ /* PERI_SOFTRST_CON17 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GPIO1, 17, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GPIO2, 17, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PERI_GPIO1, 17, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PERI_GPIO2, 17, 3),
+};
+
+void rk3562_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3562_register_offset,
+ ARRAY_SIZE(rk3562_register_offset),
+ reg_base + RK3562_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index f1ba48758c78..ef464f434740 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -13,13 +13,18 @@ obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-artpec8.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos2200.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7870.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
diff --git a/drivers/clk/samsung/clk-artpec8.c b/drivers/clk/samsung/clk-artpec8.c
new file mode 100644
index 000000000000..0ea7c8b58674
--- /dev/null
+++ b/drivers/clk/samsung/clk-artpec8.c
@@ -0,0 +1,1055 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Common Clock Framework support for ARTPEC-8 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/axis,artpec8-clk.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CMU_CMU_NR_CLK (CLK_DOUT_CMU_VPP_CORE + 1)
+#define CMU_BUS_NR_CLK (CLK_DOUT_BUS_PCLK + 1)
+#define CMU_CORE_NR_CLK (CLK_DOUT_CORE_PCLK + 1)
+#define CMU_CPUCL_NR_CLK (CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK + 1)
+#define CMU_FSYS_NR_CLK (CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK + 1)
+#define CMU_IMEM_NR_CLK (CLK_GOUT_IMEM_PCLK_TMU0_APBIF + 1)
+#define CMU_PERI_NR_CLK (CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK + 1)
+
+/* Register Offset definitions for CMU_CMU (0x12400000) */
+#define PLL_LOCKTIME_PLL_AUDIO 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_AUDIO 0x0100
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define CLK_CON_MUX_CLKCMU_2D 0x1000
+#define CLK_CON_MUX_CLKCMU_3D 0x1004
+#define CLK_CON_MUX_CLKCMU_BUS 0x1008
+#define CLK_CON_MUX_CLKCMU_BUS_DLP 0x100c
+#define CLK_CON_MUX_CLKCMU_CDC_CORE 0x1010
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN0 0x1014
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN1 0x1018
+#define CLK_CON_MUX_CLKCMU_IMEM_JPEG 0x101c
+#define CLK_CON_MUX_CLKCMU_PERI_DISP 0x1020
+#define CLK_CON_MUX_CLKCMU_CORE_BUS 0x1024
+#define CLK_CON_MUX_CLKCMU_CORE_DLP 0x1028
+#define CLK_CON_MUX_CLKCMU_CPUCL_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_DLP_CORE 0x1034
+#define CLK_CON_MUX_CLKCMU_FSYS_BUS 0x1038
+#define CLK_CON_MUX_CLKCMU_FSYS_IP 0x103c
+#define CLK_CON_MUX_CLKCMU_IMEM_ACLK 0x1054
+#define CLK_CON_MUX_CLKCMU_MIF_BUSP 0x1080
+#define CLK_CON_MUX_CLKCMU_MIF_SWITCH 0x1084
+#define CLK_CON_MUX_CLKCMU_PERI_IP 0x1088
+#define CLK_CON_MUX_CLKCMU_RSP_CORE 0x108c
+#define CLK_CON_MUX_CLKCMU_TRFM_CORE 0x1090
+#define CLK_CON_MUX_CLKCMU_VCA_ACE 0x1094
+#define CLK_CON_MUX_CLKCMU_VCA_OD 0x1098
+#define CLK_CON_MUX_CLKCMU_VIO_CORE 0x109c
+#define CLK_CON_MUX_CLKCMU_VIP0_CORE 0x10a0
+#define CLK_CON_MUX_CLKCMU_VIP1_CORE 0x10a4
+#define CLK_CON_MUX_CLKCMU_VPP_CORE 0x10a8
+
+#define CLK_CON_DIV_CLKCMU_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_BUS_DLP 0x1804
+#define CLK_CON_DIV_CLKCMU_CDC_CORE 0x1808
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN0 0x180c
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN1 0x1810
+#define CLK_CON_DIV_CLKCMU_IMEM_JPEG 0x1814
+#define CLK_CON_DIV_CLKCMU_MIF_SWITCH 0x1818
+#define CLK_CON_DIV_CLKCMU_CORE_DLP 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_MAIN 0x1820
+#define CLK_CON_DIV_CLKCMU_PERI_DISP 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_DLP_CORE 0x182c
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_FSYS_IP 0x1834
+#define CLK_CON_DIV_CLKCMU_VIO_AUDIO 0x1838
+#define CLK_CON_DIV_CLKCMU_GPU_2D 0x1848
+#define CLK_CON_DIV_CLKCMU_GPU_3D 0x184c
+#define CLK_CON_DIV_CLKCMU_IMEM_ACLK 0x1854
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_AUDIO 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1894
+#define CLK_CON_DIV_CLKCMU_RSP_CORE 0x1898
+#define CLK_CON_DIV_CLKCMU_TRFM_CORE 0x189c
+#define CLK_CON_DIV_CLKCMU_VCA_ACE 0x18a0
+#define CLK_CON_DIV_CLKCMU_VCA_OD 0x18a4
+#define CLK_CON_DIV_CLKCMU_VIO_CORE 0x18ac
+#define CLK_CON_DIV_CLKCMU_VIP0_CORE 0x18b0
+#define CLK_CON_DIV_CLKCMU_VIP1_CORE 0x18b4
+#define CLK_CON_DIV_CLKCMU_VPP_CORE 0x18b8
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18bc
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18c0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18c4
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c8
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18cc
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18d0
+
+static const unsigned long cmu_cmu_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUDIO,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_AUDIO,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_CLKCMU_2D,
+ CLK_CON_MUX_CLKCMU_3D,
+ CLK_CON_MUX_CLKCMU_BUS,
+ CLK_CON_MUX_CLKCMU_BUS_DLP,
+ CLK_CON_MUX_CLKCMU_CDC_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG,
+ CLK_CON_MUX_CLKCMU_PERI_DISP,
+ CLK_CON_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_CLKCMU_CORE_DLP,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_MUX_CLKCMU_DLP_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_CLKCMU_FSYS_IP,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK,
+ CLK_CON_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_CLKCMU_RSP_CORE,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE,
+ CLK_CON_MUX_CLKCMU_VCA_ACE,
+ CLK_CON_MUX_CLKCMU_VCA_OD,
+ CLK_CON_MUX_CLKCMU_VIO_CORE,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE,
+ CLK_CON_MUX_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_CLKCMU_BUS,
+ CLK_CON_DIV_CLKCMU_BUS_DLP,
+ CLK_CON_DIV_CLKCMU_CDC_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN0,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN1,
+ CLK_CON_DIV_CLKCMU_IMEM_JPEG,
+ CLK_CON_DIV_CLKCMU_MIF_SWITCH,
+ CLK_CON_DIV_CLKCMU_CORE_DLP,
+ CLK_CON_DIV_CLKCMU_CORE_MAIN,
+ CLK_CON_DIV_CLKCMU_PERI_DISP,
+ CLK_CON_DIV_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_DIV_CLKCMU_DLP_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_IP,
+ CLK_CON_DIV_CLKCMU_VIO_AUDIO,
+ CLK_CON_DIV_CLKCMU_GPU_2D,
+ CLK_CON_DIV_CLKCMU_GPU_3D,
+ CLK_CON_DIV_CLKCMU_IMEM_ACLK,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_PERI_AUDIO,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_RSP_CORE,
+ CLK_CON_DIV_CLKCMU_TRFM_CORE,
+ CLK_CON_DIV_CLKCMU_VCA_ACE,
+ CLK_CON_DIV_CLKCMU_VCA_OD,
+ CLK_CON_DIV_CLKCMU_VIO_CORE,
+ CLK_CON_DIV_CLKCMU_VIP0_CORE,
+ CLK_CON_DIV_CLKCMU_VIP1_CORE,
+ CLK_CON_DIV_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+};
+
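+/* Each entry is PLL_36XX_RATE(fin, rate, m, p, s, k), assuming the 25 MHz fin_pll */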
+static const struct samsung_pll_rate_table artpec8_pll_audio_rates[] __initconst = {
+ PLL_36XX_RATE(25 * MHZ, 589823913U, 47, 1, 1, 12184),
+ PLL_36XX_RATE(25 * MHZ, 393215942U, 47, 3, 0, 12184),
+ PLL_36XX_RATE(25 * MHZ, 294911956U, 47, 1, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 100000000U, 32, 2, 2, 0),
+ PLL_36XX_RATE(25 * MHZ, 98303985U, 47, 3, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 49151992U, 47, 3, 3, 12184),
+};
+
+static const struct samsung_pll_clock cmu_cmu_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_SHARED0_PLL, "fout_pll_shared0", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0, NULL),
+ PLL(pll_1017x, CLK_FOUT_SHARED1_PLL, "fout_pll_shared1", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1, NULL),
+ PLL(pll_1031x, CLK_FOUT_AUDIO_PLL, "fout_pll_audio", "fin_pll",
+ PLL_LOCKTIME_PLL_AUDIO, PLL_CON0_PLL_AUDIO, artpec8_pll_audio_rates),
+};
+
+PNAME(mout_clkcmu_bus_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_bus_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_core_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared0_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_core_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cpucl_switch_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_ip_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_fsys_scan0_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_fsys_scan1_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_imem_imem_p) = { "dout_pll_shared1_div4", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div2" };
+PNAME(mout_clkcmu_imem_jpeg_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cdc_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_dlp_core_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_3d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_2d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_mif_switch_p) = { "dout_pll_shared0", "dout_pll_shared1",
+ "dout_pll_shared0_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_mif_busp_p) = { "dout_pll_shared0_div3", "dout_pll_shared1_div4",
+ "dout_pll_shared0_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_peri_disp_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_peri_ip_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_rsp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_trfm_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_ace_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_od_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vio_core_p) = { "dout_pll_shared0_div3", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_vip0_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vip1_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vpp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_pll_shared0_p) = { "fin_pll", "fout_pll_shared0" };
+PNAME(mout_clkcmu_pll_shared1_p) = { "fin_pll", "fout_pll_shared1" };
+PNAME(mout_clkcmu_pll_audio_p) = { "fin_pll", "fout_pll_audio" };
+
+static const struct samsung_fixed_factor_clock cmu_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_clkcmu_otp", "fin_pll", 1, 8, 0),
+};
+
+static const struct samsung_mux_clock cmu_cmu_mux_clks[] __initconst = {
+ MUX(0, "mout_clkcmu_pll_shared0", mout_clkcmu_pll_shared0_p, PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(0, "mout_clkcmu_pll_shared1", mout_clkcmu_pll_shared1_p, PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(0, "mout_clkcmu_pll_audio", mout_clkcmu_pll_audio_p, PLL_CON0_PLL_AUDIO, 4, 1),
+ MUX(0, "mout_clkcmu_bus_bus", mout_clkcmu_bus_bus_p, CLK_CON_MUX_CLKCMU_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_bus_dlp", mout_clkcmu_bus_dlp_p, CLK_CON_MUX_CLKCMU_BUS_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_core_bus", mout_clkcmu_core_bus_p, CLK_CON_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_core_dlp", mout_clkcmu_core_dlp_p, CLK_CON_MUX_CLKCMU_CORE_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_cpucl_switch", mout_clkcmu_cpucl_switch_p,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH, 0, 3),
+ MUX(0, "mout_clkcmu_fsys_bus", mout_clkcmu_fsys_bus_p, CLK_CON_MUX_CLKCMU_FSYS_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_ip", mout_clkcmu_fsys_ip_p, CLK_CON_MUX_CLKCMU_FSYS_IP, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_scan0", mout_clkcmu_fsys_scan0_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0, 0, 1),
+ MUX(0, "mout_clkcmu_fsys_scan1", mout_clkcmu_fsys_scan1_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1, 0, 1),
+ MUX(0, "mout_clkcmu_imem_imem", mout_clkcmu_imem_imem_p,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK, 0, 2),
+ MUX(0, "mout_clkcmu_imem_jpeg", mout_clkcmu_imem_jpeg_p,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG, 0, 2),
+ nMUX(0, "mout_clkcmu_cdc_core", mout_clkcmu_cdc_core_p, CLK_CON_MUX_CLKCMU_CDC_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_dlp_core", mout_clkcmu_dlp_core_p, CLK_CON_MUX_CLKCMU_DLP_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_3d", mout_clkcmu_3d_p, CLK_CON_MUX_CLKCMU_3D, 0, 2),
+ MUX(0, "mout_clkcmu_2d", mout_clkcmu_2d_p, CLK_CON_MUX_CLKCMU_2D, 0, 2),
+ MUX(0, "mout_clkcmu_mif_switch", mout_clkcmu_mif_switch_p,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH, 0, 2),
+ MUX(0, "mout_clkcmu_mif_busp", mout_clkcmu_mif_busp_p, CLK_CON_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_disp", mout_clkcmu_peri_disp_p,
+ CLK_CON_MUX_CLKCMU_PERI_DISP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_ip", mout_clkcmu_peri_ip_p, CLK_CON_MUX_CLKCMU_PERI_IP, 0, 2),
+ MUX(0, "mout_clkcmu_rsp_core", mout_clkcmu_rsp_core_p, CLK_CON_MUX_CLKCMU_RSP_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_trfm_core", mout_clkcmu_trfm_core_p,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_ace", mout_clkcmu_vca_ace_p, CLK_CON_MUX_CLKCMU_VCA_ACE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_od", mout_clkcmu_vca_od_p, CLK_CON_MUX_CLKCMU_VCA_OD, 0, 2),
+ MUX(0, "mout_clkcmu_vio_core", mout_clkcmu_vio_core_p, CLK_CON_MUX_CLKCMU_VIO_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip0_core", mout_clkcmu_vip0_core_p,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip1_core", mout_clkcmu_vip1_core_p,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vpp_core", mout_clkcmu_vpp_core_p, CLK_CON_MUX_CLKCMU_VPP_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cmu_cmu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_pll_shared0_div2",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_pll_shared0_div3",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_pll_shared0_div4",
+ "dout_pll_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_pll_shared1_div2",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_pll_shared1_div3",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_pll_shared1_div4",
+ "dout_pll_shared1_div2", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_BUS, "dout_clkcmu_bus",
+ "mout_clkcmu_bus_bus", CLK_CON_DIV_CLKCMU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS_DLP, "dout_clkcmu_bus_dlp",
+ "mout_clkcmu_bus_dlp", CLK_CON_DIV_CLKCMU_BUS_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_MAIN, "dout_clkcmu_core_main",
+ "mout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_MAIN, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_DLP, "dout_clkcmu_core_dlp",
+ "mout_clkcmu_core_dlp", CLK_CON_DIV_CLKCMU_CORE_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL_SWITCH, "dout_clkcmu_cpucl_switch",
+ "mout_clkcmu_cpucl_switch", CLK_CON_DIV_CLKCMU_CPUCL_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS_BUS, "dout_clkcmu_fsys_bus",
+ "mout_clkcmu_fsys_bus", CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_IP, "dout_clkcmu_fsys_ip",
+ "mout_clkcmu_fsys_ip", CLK_CON_DIV_CLKCMU_FSYS_IP, 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN0, "dout_clkcmu_fsys_scan0",
+ "mout_clkcmu_fsys_scan0", CLK_CON_DIV_CLKCMU_FSYS_SCAN0, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN1, "dout_clkcmu_fsys_scan1",
+ "mout_clkcmu_fsys_scan1", CLK_CON_DIV_CLKCMU_FSYS_SCAN1, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_ACLK, "dout_clkcmu_imem_aclk",
+ "mout_clkcmu_imem_imem", CLK_CON_DIV_CLKCMU_IMEM_ACLK, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_JPEG, "dout_clkcmu_imem_jpeg",
+ "mout_clkcmu_imem_jpeg", CLK_CON_DIV_CLKCMU_IMEM_JPEG, 0, 4),
+ DIV_F(CLK_DOUT_CMU_CDC_CORE, "dout_clkcmu_cdc_core",
+ "mout_clkcmu_cdc_core", CLK_CON_DIV_CLKCMU_CDC_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_DLP_CORE, "dout_clkcmu_dlp_core",
+ "mout_clkcmu_dlp_core", CLK_CON_DIV_CLKCMU_DLP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_GPU_3D, "dout_clkcmu_gpu_3d",
+ "mout_clkcmu_3d", CLK_CON_DIV_CLKCMU_GPU_3D, 0, 3),
+ DIV(CLK_DOUT_CMU_GPU_2D, "dout_clkcmu_gpu_2d",
+ "mout_clkcmu_2d", CLK_CON_DIV_CLKCMU_GPU_2D, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_SWITCH, "dout_clkcmu_mif_switch",
+ "mout_clkcmu_mif_switch", CLK_CON_DIV_CLKCMU_MIF_SWITCH, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_clkcmu_mif_busp",
+ "mout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 3),
+ DIV(CLK_DOUT_CMU_PERI_DISP, "dout_clkcmu_peri_disp",
+ "mout_clkcmu_peri_disp", CLK_CON_DIV_CLKCMU_PERI_DISP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_IP, "dout_clkcmu_peri_ip",
+ "mout_clkcmu_peri_ip", CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_AUDIO, "dout_clkcmu_peri_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_PERI_AUDIO, 0, 4),
+ DIV(CLK_DOUT_CMU_RSP_CORE, "dout_clkcmu_rsp_core",
+ "mout_clkcmu_rsp_core", CLK_CON_DIV_CLKCMU_RSP_CORE, 0, 4),
+ DIV_F(CLK_DOUT_CMU_TRFM_CORE, "dout_clkcmu_trfm_core",
+ "mout_clkcmu_trfm_core", CLK_CON_DIV_CLKCMU_TRFM_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_VCA_ACE, "dout_clkcmu_vca_ace",
+ "mout_clkcmu_vca_ace", CLK_CON_DIV_CLKCMU_VCA_ACE, 0, 4),
+ DIV(CLK_DOUT_CMU_VCA_OD, "dout_clkcmu_vca_od",
+ "mout_clkcmu_vca_od", CLK_CON_DIV_CLKCMU_VCA_OD, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_CORE, "dout_clkcmu_vio_core",
+ "mout_clkcmu_vio_core", CLK_CON_DIV_CLKCMU_VIO_CORE, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_AUDIO, "dout_clkcmu_vio_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_VIO_AUDIO, 0, 4),
+ DIV_F(CLK_DOUT_CMU_VIP0_CORE, "dout_clkcmu_vip0_core",
+ "mout_clkcmu_vip0_core", CLK_CON_DIV_CLKCMU_VIP0_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VIP1_CORE, "dout_clkcmu_vip1_core",
+ "mout_clkcmu_vip1_core", CLK_CON_DIV_CLKCMU_VIP1_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VPP_CORE, "dout_clkcmu_vpp_core",
+ "mout_clkcmu_vpp_core", CLK_CON_DIV_CLKCMU_VPP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info cmu_cmu_info __initconst = {
+ .pll_clks = cmu_cmu_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cmu_pll_clks),
+ .fixed_factor_clks = cmu_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_fixed_factor_clks),
+ .mux_clks = cmu_cmu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cmu_mux_clks),
+ .div_clks = cmu_cmu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cmu_div_clks),
+ .nr_clk_ids = CMU_CMU_NR_CLK,
+ .clk_regs = cmu_cmu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cmu_clk_regs),
+};
+
+/* Register Offset definitions for CMU_BUS (0x12c10000) */
+#define PLL_CON0_MUX_CLK_BUS_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_BUS_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_BUS_PCLK 0x1800
+
+static const unsigned long cmu_bus_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_BUS_ACLK_USER,
+ PLL_CON0_MUX_CLK_BUS_DLP_USER,
+ CLK_CON_DIV_CLK_BUS_PCLK,
+};
+
+PNAME(mout_clk_bus_aclk_user_p) = { "fin_pll", "dout_clkcmu_bus" };
+PNAME(mout_clk_bus_dlp_user_p) = { "fin_pll", "dout_clkcmu_bus_dlp" };
+
+static const struct samsung_mux_clock cmu_bus_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_BUS_ACLK_USER, "mout_clk_bus_aclk_user",
+ mout_clk_bus_aclk_user_p, PLL_CON0_MUX_CLK_BUS_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_BUS_DLP_USER, "mout_clk_bus_dlp_user",
+ mout_clk_bus_dlp_user_p, PLL_CON0_MUX_CLK_BUS_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_bus_div_clks[] __initconst = {
+ DIV(CLK_DOUT_BUS_PCLK, "dout_clk_bus_pclk", "mout_clk_bus_aclk_user",
+ CLK_CON_DIV_CLK_BUS_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_bus_info __initconst = {
+ .mux_clks = cmu_bus_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_bus_mux_clks),
+ .div_clks = cmu_bus_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_bus_div_clks),
+ .nr_clk_ids = CMU_BUS_NR_CLK,
+ .clk_regs = cmu_bus_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_bus_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CORE (0x12410000) */
+#define PLL_CON0_MUX_CLK_CORE_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_CORE_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_CORE_PCLK 0x1800
+
+static const unsigned long cmu_core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_CORE_ACLK_USER,
+ PLL_CON0_MUX_CLK_CORE_DLP_USER,
+ CLK_CON_DIV_CLK_CORE_PCLK,
+};
+
+PNAME(mout_clk_core_aclk_user_p) = { "fin_pll", "dout_clkcmu_core_main" };
+PNAME(mout_clk_core_dlp_user_p) = { "fin_pll", "dout_clkcmu_core_dlp" };
+
+static const struct samsung_mux_clock cmu_core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_ACLK_USER, "mout_clk_core_aclk_user",
+ mout_clk_core_aclk_user_p, PLL_CON0_MUX_CLK_CORE_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_DLP_USER, "mout_clk_core_dlp_user",
+ mout_clk_core_dlp_user_p, PLL_CON0_MUX_CLK_CORE_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_PCLK, "dout_clk_core_pclk",
+ "mout_clk_core_aclk_user", CLK_CON_DIV_CLK_CORE_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_core_info __initconst = {
+ .mux_clks = cmu_core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_core_mux_clks),
+ .div_clks = cmu_core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_core_div_clks),
+ .nr_clk_ids = CMU_CORE_NR_CLK,
+ .clk_regs = cmu_core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_core_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CPUCL (0x11410000) */
+#define PLL_LOCKTIME_PLL_CPUCL 0x0000
+#define PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER 0x0120
+#define PLL_CON0_PLL_CPUCL 0x0140
+#define CLK_CON_MUX_CLK_CPUCL_PLL 0x1000
+#define CLK_CON_DIV_CLK_CLUSTER_ACLK 0x1800
+#define CLK_CON_DIV_CLK_CLUSTER_CNTCLK 0x1804
+#define CLK_CON_DIV_CLK_CLUSTER_PCLKDBG 0x1808
+#define CLK_CON_DIV_CLK_CPUCL_CMUREF 0x180c
+#define CLK_CON_DIV_CLK_CPUCL_PCLK 0x1814
+#define CLK_CON_DIV_CLK_CLUSTER_ATCLK 0x1818
+#define CLK_CON_DIV_CLK_CPUCL_DBG 0x181c
+#define CLK_CON_DIV_CLK_CPUCL_PCLKDBG 0x1820
+#define CLK_CON_GAT_CLK_CLUSTER_CPU 0x2008
+#define CLK_CON_GAT_CLK_CPUCL_SHORTSTOP 0x200c
+#define CLK_CON_DMYQCH_CON_CSSYS_QCH 0x3008
+
+static const unsigned long cmu_cpucl_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL,
+ PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER,
+ PLL_CON0_PLL_CPUCL,
+ CLK_CON_MUX_CLK_CPUCL_PLL,
+ CLK_CON_DIV_CLK_CLUSTER_ACLK,
+ CLK_CON_DIV_CLK_CLUSTER_CNTCLK,
+ CLK_CON_DIV_CLK_CLUSTER_PCLKDBG,
+ CLK_CON_DIV_CLK_CPUCL_CMUREF,
+ CLK_CON_DIV_CLK_CPUCL_PCLK,
+ CLK_CON_DIV_CLK_CLUSTER_ATCLK,
+ CLK_CON_DIV_CLK_CPUCL_DBG,
+ CLK_CON_DIV_CLK_CPUCL_PCLKDBG,
+ CLK_CON_GAT_CLK_CLUSTER_CPU,
+ CLK_CON_GAT_CLK_CPUCL_SHORTSTOP,
+ CLK_CON_DMYQCH_CON_CSSYS_QCH,
+};
+
+static const struct samsung_pll_clock cmu_cpucl_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_CPUCL_PLL, "fout_pll_cpucl", "fin_pll",
+ PLL_LOCKTIME_PLL_CPUCL, PLL_CON0_PLL_CPUCL, NULL),
+};
+
+PNAME(mout_clkcmu_cpucl_switch_user_p) = { "fin_pll", "dout_clkcmu_cpucl_switch" };
+PNAME(mout_pll_cpucl_p) = { "fin_pll", "fout_pll_cpucl" };
+PNAME(mout_clk_cpucl_pll_p) = { "mout_pll_cpucl", "mout_clkcmu_cpucl_switch_user" };
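+/*
+ * mout_clk_cpucl_pll selects between the local CPU PLL and the switch
+ * clock from CMU_CMU; in the usual Samsung CPUCL layout the switch clock
+ * serves as a temporary parent while the PLL is reprogrammed.
+ */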
+
+static const struct samsung_mux_clock cmu_cpucl_mux_clks[] __initconst = {
+ MUX_F(0, "mout_pll_cpucl", mout_pll_cpucl_p, PLL_CON0_PLL_CPUCL, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX(CLK_MOUT_CPUCL_SWITCH_USER, "mout_clkcmu_cpucl_switch_user",
+ mout_clkcmu_cpucl_switch_user_p, PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL_PLL, "mout_clk_cpucl_pll", mout_clk_cpucl_pll_p,
+ CLK_CON_MUX_CLK_CPUCL_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_fixed_factor_clock cpucl_ffactor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CPUCL_CPU, "dout_clk_cpucl_cpu",
+ "mout_clk_cpucl_pll", 1, 1, CLK_SET_RATE_PARENT),
+};
+
+static const struct samsung_div_clock cmu_cpucl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ACLK, "dout_clk_cluster_aclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ACLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_PCLKDBG, "dout_clk_cluster_pclkdbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_PCLKDBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_CNTCLK, "dout_clk_cluster_cntclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_CNTCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ATCLK, "dout_clk_cluster_atclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLK, "dout_clk_cpucl_pclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_PCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CMUREF, "dout_clk_cpucl_cmuref",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_CMUREF, 0, 3),
+ DIV(CLK_DOUT_CPUCL_DBG, "dout_clk_cpucl_dbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_DBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLKDBG, "dout_clk_cpucl_pclkdbg",
+ "dout_clk_cpucl_dbg", CLK_CON_DIV_CLK_CPUCL_PCLKDBG, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_cpucl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL_CLUSTER_CPU, "clk_con_gat_clk_cluster_cpu",
+ "clk_con_gat_clk_cpucl_shortstop", CLK_CON_GAT_CLK_CLUSTER_CPU, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_SHORTSTOP, "clk_con_gat_clk_cpucl_shortstop",
+ "dout_clk_cpucl_cpu", CLK_CON_GAT_CLK_CPUCL_SHORTSTOP, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG, "cssys_ipclkport_pclkdbg",
+ "dout_clk_cpucl_pclkdbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK, "cssys_ipclkport_atclk",
+ "dout_clk_cpucl_dbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_cpucl_info __initconst = {
+ .pll_clks = cmu_cpucl_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cpucl_pll_clks),
+ .fixed_factor_clks = cpucl_ffactor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cpucl_ffactor_clks),
+ .mux_clks = cmu_cpucl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cpucl_mux_clks),
+ .div_clks = cmu_cpucl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cpucl_div_clks),
+ .gate_clks = cmu_cpucl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_cpucl_gate_clks),
+ .nr_clk_ids = CMU_CPUCL_NR_CLK,
+ .clk_regs = cmu_cpucl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cpucl_clk_regs),
+};
+
+/* Register Offset definitions for CMU_FSYS (0x16c10000) */
+#define PLL_LOCKTIME_PLL_FSYS 0x0004
+#define PLL_CON0_MUX_CLK_FSYS_BUS_USER 0x0120
+#define PLL_CON0_MUX_CLK_FSYS_MMC_USER 0x0140
+#define PLL_CON0_MUX_CLK_FSYS_SCAN0_USER 0x0160
+#define PLL_CON0_MUX_CLK_FSYS_SCAN1_USER 0x0180
+#define PLL_CON0_PLL_FSYS 0x01c0
+#define CLK_CON_DIV_CLK_FSYS_ADC 0x1804
+#define CLK_CON_DIV_CLK_FSYS_BUS300 0x1808
+#define CLK_CON_DIV_CLK_FSYS_BUS_QSPI 0x180c
+#define CLK_CON_DIV_CLK_FSYS_EQOS_25 0x1810
+#define CLK_CON_DIV_CLK_FSYS_EQOS_2P5 0x1814
+#define CLK_CON_DIV_CLK_FSYS_EQOS_500 0x1818
+#define CLK_CON_DIV_CLK_FSYS_EQOS_INT125 0x181c
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD0 0x1820
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD1 0x1824
+#define CLK_CON_DIV_CLK_FSYS_OTP_MEM 0x1828
+#define CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL 0x182c
+#define CLK_CON_DIV_CLK_FSYS_QSPI 0x1830
+#define CLK_CON_DIV_CLK_FSYS_SCLK_UART 0x1834
+#define CLK_CON_DIV_CLK_FSYS_SFMC_NAND 0x1838
+#define CLK_CON_DIV_SCAN_CLK_FSYS_125 0x183c
+#define CLK_CON_DIV_SCAN_CLK_FSYS_MMC 0x1840
+#define CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE 0x1844
+#define CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK 0x204c
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART 0x2050
+#define CLK_CON_MMC0_IPCLKPORT_I_ACLK 0x2070
+#define CLK_CON_MMC1_IPCLKPORT_I_ACLK 0x2078
+#define CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x208c
+#define CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2090
+#define CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2094
+#define CLK_CON_PWM_IPCLKPORT_I_PCLK_S0 0x20a0
+#define CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 0x20bc
+#define CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 0x20c0
+#define CLK_CON_XHB_AHBBR_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_XHB_USB_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK 0x201c
+#define CLK_CON_DMYQCH_CON_EQOS_TOP_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_MMC0_QCH 0x300c
+#define CLK_CON_DMYQCH_CON_MMC1_QCH 0x3010
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH 0x3018
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF 0x301c
+#define CLK_CON_DMYQCH_CON_QSPI_QCH 0x3020
+#define CLK_CON_DMYQCH_CON_SFMC_QCH 0x3024
+
+static const unsigned long cmu_fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_FSYS,
+ PLL_CON0_MUX_CLK_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLK_FSYS_MMC_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN0_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN1_USER,
+ PLL_CON0_PLL_FSYS,
+ CLK_CON_DIV_CLK_FSYS_ADC,
+ CLK_CON_DIV_CLK_FSYS_BUS300,
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_EQOS_25,
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5,
+ CLK_CON_DIV_CLK_FSYS_EQOS_500,
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1,
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM,
+ CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL,
+ CLK_CON_DIV_CLK_FSYS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART,
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND,
+ CLK_CON_DIV_SCAN_CLK_FSYS_125,
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC,
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE,
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK,
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK,
+ CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20,
+ CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK,
+ CLK_CON_XHB_USB_IPCLKPORT_CLK,
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK,
+ CLK_CON_DMYQCH_CON_EQOS_TOP_QCH,
+ CLK_CON_DMYQCH_CON_MMC0_QCH,
+ CLK_CON_DMYQCH_CON_MMC1_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF,
+ CLK_CON_DMYQCH_CON_QSPI_QCH,
+ CLK_CON_DMYQCH_CON_SFMC_QCH,
+};
+
+static const struct samsung_pll_clock cmu_fsys_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_FSYS_PLL, "fout_pll_fsys", "fin_pll",
+ PLL_LOCKTIME_PLL_FSYS, PLL_CON0_PLL_FSYS, NULL),
+};
+
+PNAME(mout_fsys_scan0_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan0" };
+PNAME(mout_fsys_scan1_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan1" };
+PNAME(mout_fsys_bus_user_p) = { "fin_pll", "dout_clkcmu_fsys_bus" };
+PNAME(mout_fsys_mmc_user_p) = { "fin_pll", "dout_clkcmu_fsys_ip" };
+PNAME(mout_fsys_pll_fsys_p) = { "fin_pll", "fout_pll_fsys" };
+
+static const struct samsung_mux_clock cmu_fsys_mux_clks[] __initconst = {
+ MUX(0, "mout_clk_pll_fsys", mout_fsys_pll_fsys_p, PLL_CON0_PLL_FSYS, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN0_USER, "mout_fsys_scan0_user",
+ mout_fsys_scan0_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN0_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN1_USER, "mout_fsys_scan1_user",
+ mout_fsys_scan1_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN1_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user",
+ mout_fsys_bus_user_p, PLL_CON0_MUX_CLK_FSYS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_MMC_USER, "mout_fsys_mmc_user",
+ mout_fsys_mmc_user_p, PLL_CON0_MUX_CLK_FSYS_MMC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_fsys_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS_PCIE_PIPE, "dout_fsys_pcie_pipe", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE, 0, 4),
+ DIV(CLK_DOUT_FSYS_ADC, "dout_fsys_adc", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_ADC, 0, 7),
+ DIV(CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL, "dout_fsys_pcie_phy_refclk_syspll",
+ "mout_clk_pll_fsys", CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL, 0, 8),
+ DIV(CLK_DOUT_FSYS_QSPI, "dout_fsys_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_INT125, "dout_fsys_eqos_int125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125, 0, 4),
+ DIV(CLK_DOUT_FSYS_OTP_MEM, "dout_fsys_otp_mem", "fin_pll",
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM, 0, 9),
+ DIV(CLK_DOUT_FSYS_SCLK_UART, "dout_fsys_sclk_uart", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART, 0, 10),
+ DIV(CLK_DOUT_FSYS_SFMC_NAND, "dout_fsys_sfmc_nand", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND, 0, 4),
+ DIV(CLK_DOUT_SCAN_CLK_FSYS_125, "dout_scan_clk_fsys_125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_125, 0, 4),
+ DIV(CLK_DOUT_FSYS_SCAN_CLK_MMC, "dout_scan_clk_fsys_mmc", "fout_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_25, "dout_fsys_eqos_25", "dout_fsys_eqos_int125",
+ CLK_CON_DIV_CLK_FSYS_EQOS_25, 0, 4),
+ DIV_F(CLK_DOUT_FSYS_EQOS_2p5, "dout_fsys_eqos_2p5", "dout_fsys_eqos_25",
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(0, "dout_fsys_eqos_500", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_500, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS300, "dout_fsys_bus300", "mout_fsys_bus_user",
+ CLK_CON_DIV_CLK_FSYS_BUS300, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS_QSPI, "dout_fsys_bus_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD0, "dout_fsys_mmc_card0", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0, 0, 10),
+ DIV(CLK_DOUT_FSYS_MMC_CARD1, "dout_fsys_mmc_card1", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1, 0, 10),
+};
+
+static const struct samsung_gate_clock cmu_fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN, "pcie_sub_ctrl_inst_0_phy_refclk_in",
+ "dout_fsys_pcie_phy_refclk_syspll", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5,
+ "eqos_top_ipclkport_i_rgmii_txclk_2p5",
+ "dout_fsys_eqos_2p5", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I, "eqos_top_ipclkport_aclk_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I, "eqos_top_ipclkport_clk_csr_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK, "pipe_pal_inst_0_i_apb_pclk",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK, "qspi_ipclkport_hclk",
+ "dout_fsys_bus_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK, "qspi_ipclkport_ssi_clk",
+ "dout_fsys_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN, "mmc0_ipclkport_sdclkin",
+ "dout_fsys_mmc_card0", CLK_CON_DMYQCH_CON_MMC0_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN, "mmc1_ipclkport_sdclkin",
+ "dout_fsys_mmc_card1", CLK_CON_DMYQCH_CON_MMC1_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND, "sfmc_ipclkport_i_aclk_nand",
+ "dout_fsys_sfmc_nand", CLK_CON_DMYQCH_CON_SFMC_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UART0_SCLK_UART, "uart0_sclk", "dout_fsys_sclk_uart",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, "dwc_pcie_ctl_inst_0_mstr_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG, "dwc_pcie_ctl_inst_0_slv_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK, "fsys_i2c0_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK, "fsys_i2c1_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_UART0_PCLK, "uart0_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK, "mmc0_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK, "mmc1_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, "dwc_pcie_ctl_inst_0_dbi_aclk_ug",
+ "dout_fsys_bus300", CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0, "pwm_ipclkport_i_pclk_s0", "dout_fsys_bus300",
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, "usb20drd_ipclkport_aclk_phyctrl_20",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, "usb20drd_ipclkport_bus_clk_early",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK, "xhb_ahbbr_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK, "xhb_usb_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_USB_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_BUS_QSPI, "bus_p_fsys_ipclkport_qspiclk", "dout_fsys_bus_qspi",
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_fsys_info __initconst = {
+ .pll_clks = cmu_fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_fsys_pll_clks),
+ .mux_clks = cmu_fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_fsys_mux_clks),
+ .div_clks = cmu_fsys_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_fsys_div_clks),
+ .gate_clks = cmu_fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_fsys_gate_clks),
+ .nr_clk_ids = CMU_FSYS_NR_CLK,
+ .clk_regs = cmu_fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_fsys_clk_regs),
+};
+
+/* Register Offset definitions for CMU_IMEM (0x10010000) */
+#define PLL_CON0_MUX_CLK_IMEM_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_IMEM_JPEG_USER 0x0120
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA53 0x1000
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA5 0x1008
+#define CLK_CON_MCT_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK 0x2044
+
+static const unsigned long cmu_imem_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_IMEM_ACLK_USER,
+ PLL_CON0_MUX_CLK_IMEM_JPEG_USER,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA53,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA5,
+ CLK_CON_MCT_IPCLKPORT_PCLK,
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_imem_aclk_user_p) = { "fin_pll", "dout_clkcmu_imem_aclk" };
+PNAME(mout_imem_gic_ca53_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_gic_ca5_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_jpeg_user_p) = { "fin_pll", "dout_clkcmu_imem_jpeg" };
+
+static const struct samsung_mux_clock cmu_imem_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IMEM_ACLK_USER, "mout_imem_aclk_user",
+ mout_imem_aclk_user_p, PLL_CON0_MUX_CLK_IMEM_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA53, "mout_imem_gic_ca53",
+ mout_imem_gic_ca53_p, CLK_CON_MUX_CLK_IMEM_GIC_CA53, 0, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA5, "mout_imem_gic_ca5",
+ mout_imem_gic_ca5_p, CLK_CON_MUX_CLK_IMEM_GIC_CA5, 0, 1),
+ MUX(CLK_MOUT_IMEM_JPEG_USER, "mout_imem_jpeg_user",
+ mout_imem_jpeg_user_p, PLL_CON0_MUX_CLK_IMEM_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock cmu_imem_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_IMEM_MCT_PCLK, "mct_pclk", "mout_imem_aclk_user",
+ CLK_CON_MCT_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IMEM_PCLK_TMU0_APBIF, "sfrif_tmu_imem_ipclkport_pclk", "mout_imem_aclk_user",
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_imem_info __initconst = {
+ .mux_clks = cmu_imem_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_imem_mux_clks),
+ .gate_clks = cmu_imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_imem_gate_clks),
+ .nr_clk_ids = CMU_IMEM_NR_CLK,
+ .clk_regs = cmu_imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_imem_clk_regs),
+};
+
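+/*
+ * CMU_IMEM gates the MCT timer clock, which is needed before the
+ * platform bus is up, so this CMU is registered early via
+ * CLK_OF_DECLARE instead of the platform driver used for the others.
+ */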
+static void __init artpec8_clk_cmu_imem_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cmu_imem_info);
+}
+
+CLK_OF_DECLARE(artpec8_clk_cmu_imem, "axis,artpec8-cmu-imem", artpec8_clk_cmu_imem_init);
+
+/* Register Offset definitions for CMU_PERI (0x16410000) */
+#define PLL_CON0_MUX_CLK_PERI_AUDIO_USER 0x0100
+#define PLL_CON0_MUX_CLK_PERI_DISP_USER 0x0120
+#define PLL_CON0_MUX_CLK_PERI_IP_USER 0x0140
+#define CLK_CON_MUX_CLK_PERI_I2S0 0x1000
+#define CLK_CON_MUX_CLK_PERI_I2S1 0x1004
+#define CLK_CON_DIV_CLK_PERI_DSIM 0x1800
+#define CLK_CON_DIV_CLK_PERI_I2S0 0x1804
+#define CLK_CON_DIV_CLK_PERI_I2S1 0x1808
+#define CLK_CON_DIV_CLK_PERI_PCLK 0x180c
+#define CLK_CON_DIV_CLK_PERI_SPI 0x1810
+#define CLK_CON_DIV_CLK_PERI_UART1 0x1814
+#define CLK_CON_DIV_CLK_PERI_UART2 0x1818
+#define CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 0x2004
+#define CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI 0x204c
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART 0x2054
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART 0x205c
+#define CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH 0x3000
+#define CLK_CON_DMYQCH_CON_DMA4DSIM_QCH 0x3004
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH 0x300c
+
+static const unsigned long cmu_peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_PERI_AUDIO_USER,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER,
+ PLL_CON0_MUX_CLK_PERI_IP_USER,
+ CLK_CON_MUX_CLK_PERI_I2S0,
+ CLK_CON_MUX_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_DSIM,
+ CLK_CON_DIV_CLK_PERI_I2S0,
+ CLK_CON_DIV_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_PCLK,
+ CLK_CON_DIV_CLK_PERI_SPI,
+ CLK_CON_DIV_CLK_PERI_UART1,
+ CLK_CON_DIV_CLK_PERI_UART2,
+ CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS,
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH,
+ CLK_CON_DMYQCH_CON_DMA4DSIM_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH,
+};
+
+static const struct samsung_fixed_rate_clock peri_fixed_clks[] __initconst = {
+ FRATE(0, "clk_peri_audio", NULL, 0, 100000000),
+};
+
+PNAME(mout_peri_ip_user_p) = { "fin_pll", "dout_clkcmu_peri_ip" };
+PNAME(mout_peri_audio_user_p) = { "fin_pll", "dout_clkcmu_peri_audio" };
+PNAME(mout_peri_disp_user_p) = { "fin_pll", "dout_clkcmu_peri_disp" };
+PNAME(mout_peri_i2s0_p) = { "dout_peri_i2s0", "clk_peri_audio" };
+PNAME(mout_peri_i2s1_p) = { "dout_peri_i2s1", "clk_peri_audio" };
+
+static const struct samsung_mux_clock cmu_peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_IP_USER, "mout_peri_ip_user", mout_peri_ip_user_p,
+ PLL_CON0_MUX_CLK_PERI_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_AUDIO_USER, "mout_peri_audio_user",
+ mout_peri_audio_user_p, PLL_CON0_MUX_CLK_PERI_AUDIO_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_DISP_USER, "mout_peri_disp_user", mout_peri_disp_user_p,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_I2S0, "mout_peri_i2s0", mout_peri_i2s0_p,
+ CLK_CON_MUX_CLK_PERI_I2S0, 0, 1),
+ MUX(CLK_MOUT_PERI_I2S1, "mout_peri_i2s1", mout_peri_i2s1_p,
+ CLK_CON_MUX_CLK_PERI_I2S1, 0, 1),
+};
+
+static const struct samsung_div_clock cmu_peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_SPI, "dout_peri_spi", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_SPI, 0, 10),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART1, 0, 10),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART2, 0, 10),
+ DIV(CLK_DOUT_PERI_PCLK, "dout_peri_pclk", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_PCLK, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S0, "dout_peri_i2s0", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S0, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S1, "dout_peri_i2s1", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S1, 0, 4),
+ DIV(CLK_DOUT_PERI_DSIM, "dout_peri_dsim", "mout_peri_disp_user",
+ CLK_CON_DIV_CLK_PERI_DSIM, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK, "dma4dsim_ipclkport_clk_apb_clk",
+ "dout_peri_pclk", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST, "i2ssc0_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST, "i2ssc1_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK, "audio_out_ipclkport_clk",
+ "mout_peri_audio_user", CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK, "peri_i2ssc0_ipclkport_clk", "mout_peri_i2s0",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK, "peri_i2ssc1_ipclkport_clk", "mout_peri_i2s1",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK, "dma4dsim_ipclkport_clk_axi_clk",
+ "mout_peri_disp_user", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SCLK_SPI, "peri_spi0_ipclkport_i_sclk_spi", "dout_peri_spi",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_SCLK_UART, "uart1_sclk", "dout_peri_uart1",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_SCLK_UART, "uart2_sclk", "dout_peri_uart2",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, "apb_async_dsim_ipclkport_pclks",
+ "dout_peri_pclk", CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK, "peri_i2c2_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK, "peri_i2c3_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "peri_spi0_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "uart1_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "uart2_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_peri_info __initconst = {
+ .mux_clks = cmu_peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_peri_mux_clks),
+ .div_clks = cmu_peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_peri_div_clks),
+ .gate_clks = cmu_peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_peri_gate_clks),
+ .fixed_clks = peri_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(peri_fixed_clks),
+ .nr_clk_ids = CMU_PERI_NR_CLK,
+ .clk_regs = cmu_peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_peri_clk_regs),
+};
+
+/**
+ * artpec8_cmu_probe - Probe function for ARTPEC-8 platform clocks
+ * @pdev: Pointer to the platform device
+ *
+ * Configure the clock hierarchy for the clock domains of the ARTPEC-8 SoC.
+ */
+static int __init artpec8_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id artpec8_cmu_of_match[] = {
+ {
+ .compatible = "axis,artpec8-cmu-cmu",
+ .data = &cmu_cmu_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-bus",
+ .data = &cmu_bus_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-core",
+ .data = &cmu_core_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-cpucl",
+ .data = &cmu_cpucl_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-fsys",
+ .data = &cmu_fsys_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-peri",
+ .data = &cmu_peri_info,
+ }, {
+ },
+};
+
+static struct platform_driver artpec8_cmu_driver __refdata = {
+ .driver = {
+ .name = "artpec8-cmu",
+ .of_match_table = artpec8_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = artpec8_cmu_probe,
+};
+
+static int __init artpec8_cmu_init(void)
+{
+ return platform_driver_register(&artpec8_cmu_driver);
+}
+core_initcall(artpec8_cmu_init);
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index dfa149e648aa..300f8d5d3c48 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -133,7 +133,7 @@ static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
if (!(readl(div_reg) & mask))
return;
- pr_err("%s: timeout in divider stablization\n", __func__);
+ pr_err("%s: timeout in divider stabilization\n", __func__);
}
/*
@@ -243,7 +243,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
/*
* In Exynos4210, ATB clock parent is also mout_core. So
- * ATB clock also needs to be mantained at safe speed.
+ * ATB clock also needs to be maintained at safe speed.
*/
alt_div |= E4210_DIV0_ATB_MASK;
alt_div_mask |= E4210_DIV0_ATB_MASK;
@@ -567,12 +567,14 @@ static int exynos850_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
/* -------------------------------------------------------------------------- */
/* Common round rate callback usable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int exynos_cpuclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
- *prate = clk_hw_round_rate(parent, drate);
- return *prate;
+ req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
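
/*
 * Background on this conversion (a hedged note, not part of the original
 * change description): .determine_rate is the modern replacement for
 * .round_rate in the common clock framework. It receives a
 * struct clk_rate_request, so a callback can honour req->min_rate /
 * req->max_rate and report the required parent rate through
 * req->best_parent_rate instead of an in/out prate pointer. The body
 * above is the mechanical 1:1 translation of the old round_rate logic.
 */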
/* Common recalc rate callback usable for all types of CPU clocks */
@@ -591,7 +593,7 @@ static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
static const struct clk_ops exynos_cpuclk_clk_ops = {
.recalc_rate = exynos_cpuclk_recalc_rate,
- .round_rate = exynos_cpuclk_round_rate,
+ .determine_rate = exynos_cpuclk_determine_rate,
};
/*
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index abd49edcf707..0f5ae3e8d000 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -4,13 +4,14 @@
* Author: Padmavathi Venna <padma.v@samsung.com>
*
* Common Clock Framework support for Audio Subsystem Clock Controller.
-*/
+ */
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 2ef5748c139b..5f1a4f5e2e59 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos2200.c b/drivers/clk/samsung/clk-exynos2200.c
new file mode 100644
index 000000000000..eab9f5eecfa3
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos2200.c
@@ -0,0 +1,3928 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Common Clock Framework support for Exynos2200 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos2200-cmu.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_DOUT_TCXO_DIV4 + 1)
+#define CLKS_NR_ALIVE (CLK_DOUT_ALIVE_DSP_NOC + 1)
+#define CLKS_NR_PERIS (CLK_DOUT_PERIS_DDD_CTRL + 1)
+#define CLKS_NR_CMGP (CLK_DOUT_CMGP_USI6 + 1)
+#define CLKS_NR_HSI0 (CLK_DOUT_DIV_CLK_HSI0_EUSB + 1)
+#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_USI04 + 1)
+#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_USI10 + 1)
+#define CLKS_NR_PERIC2 (CLK_DOUT_PERIC2_USI11 + 1)
+#define CLKS_NR_UFS (CLK_MOUT_UFS_UFS_EMBD_USER + 1)
+#define CLKS_NR_VTS (CLK_DOUT_CLKVTS_SERIAL_LIF_CORE + 1)
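+
+/*
+ * Worked example of the convention above (IDs come from
+ * dt-bindings/clock/samsung,exynos2200-cmu.h): if that header defines
+ * CLK_DOUT_TCXO_DIV4 as the highest CMU_TOP clock ID, then
+ * CLKS_NR_TOP = CLK_DOUT_TCXO_DIV4 + 1 is exactly the number of slots
+ * needed in the provider's clk_hw table, because clock IDs are used
+ * directly as array indices.
+ */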
+
+/* ---- CMU_TOP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_TOP (0x1a320000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0
+#define PLL_LOCKTIME_PLL_SHARED0 0x4
+#define PLL_LOCKTIME_PLL_SHARED1 0x8
+#define PLL_LOCKTIME_PLL_SHARED2 0xc
+#define PLL_LOCKTIME_PLL_SHARED3 0x10
+#define PLL_LOCKTIME_PLL_SHARED4 0x14
+#define PLL_LOCKTIME_PLL_SHARED_MIF 0x18
+#define PLL_CON3_PLL_MMC 0x10c
+#define PLL_CON8_PLL_MMC 0x120
+#define PLL_CON3_PLL_SHARED0 0x14c
+#define PLL_CON8_PLL_SHARED0 0x160
+#define PLL_CON3_PLL_SHARED1 0x18c
+#define PLL_CON8_PLL_SHARED1 0x1a0
+#define PLL_CON3_PLL_SHARED2 0x1cc
+#define PLL_CON8_PLL_SHARED2 0x1e0
+#define PLL_CON3_PLL_SHARED3 0x20c
+#define PLL_CON8_PLL_SHARED3 0x220
+#define PLL_CON3_PLL_SHARED4 0x24c
+#define PLL_CON8_PLL_SHARED4 0x260
+#define PLL_CON3_PLL_SHARED_MIF 0x28c
+#define PLL_CON8_PLL_SHARED_MIF 0x2a0
+#define PLL_CON0_MUX_CP_MPLL_CLK_D2_USER 0x600
+#define PLL_CON1_MUX_CP_MPLL_CLK_D2_USER 0x604
+#define PLL_CON0_MUX_CP_MPLL_CLK_USER 0x610
+#define PLL_CON1_MUX_CP_MPLL_CLK_USER 0x614
+#define CLK_CON_MUX_CLKCMU_AUD_AUDIF0 0x1000
+#define CLK_CON_MUX_CLKCMU_AUD_AUDIF1 0x1004
+#define CLK_CON_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC 0x100c
+#define CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH 0x1010
+#define CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH 0x1014
+#define CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH 0x1018
+#define CLK_CON_MUX_CLKCMU_DNC_NOC 0x101c
+#define CLK_CON_MUX_CLKCMU_DPUB_NOC 0x1020
+#define CLK_CON_MUX_CLKCMU_DPUF_NOC 0x1024
+#define CLK_CON_MUX_CLKCMU_DSP_NOC 0x102c
+#define CLK_CON_MUX_CLKCMU_DSU_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_G3D_SWITCH 0x1034
+#define CLK_CON_MUX_CLKCMU_GNPU_NOC 0x103c
+#define CLK_CON_MUX_CLKCMU_UFS_MMC_CARD 0x1040
+#define CLK_CON_MUX_CLKCMU_M2M_NOC 0x1044
+#define CLK_CON_MUX_CLKCMU_NOCL0_NOC 0x1048
+#define CLK_CON_MUX_CLKCMU_NOCL1A_NOC 0x104c
+#define CLK_CON_MUX_CLKCMU_NOCL1B_NOC0 0x1050
+#define CLK_CON_MUX_CLKCMU_NOCL1C_NOC 0x1054
+#define CLK_CON_MUX_CLKCMU_SDMA_NOC 0x1058
+#define CLK_CON_MUX_CP_HISPEEDY_CLK 0x105c
+#define CLK_CON_MUX_CP_SHARED0_CLK 0x1060
+#define CLK_CON_MUX_CP_SHARED2_CLK 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_NOC 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_BRP_NOC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_NOC 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT 0x10e8
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_NOC 0x10f8
+#define CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH 0x10fc
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP 0x1100
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1104
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC 0x110c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x1114
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC 0x1118
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC 0x111c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD 0x1120
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD 0x1124
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC 0x1128
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x112c
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD 0x1130
+#define CLK_CON_MUX_MUX_CLKCMU_LME_LME 0x1134
+#define CLK_CON_MUX_MUX_CLKCMU_LME_NOC 0x1138
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_NOC 0x1140
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC 0x1148
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC 0x114c
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x1150
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x1154
+#define CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1 0x1158
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP 0x115c
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x1160
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC 0x1164
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC 0x1168
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0 0x116c
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1 0x1170
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC 0x1174
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0 0x1178
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1 0x117c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC 0x1180
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0 0x1184
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1 0x1188
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC 0x118c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0 0x1190
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1 0x1194
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC 0x1198
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC 0x119c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC 0x11a0
+#define CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC 0x11a8
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_NOC 0x11ac
+#define CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC 0x11b0
+#define CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC 0x11b4
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x11b8
+#define CLK_CON_MUX_MUX_CP_HISPEEDY_CLK 0x11bc
+#define CLK_CON_MUX_MUX_CP_SHARED0_CLK 0x11c0
+#define CLK_CON_MUX_MUX_CP_SHARED1_CLK 0x11c4
+#define CLK_CON_MUX_MUX_CP_SHARED2_CLK 0x11c8
+#define CLK_CON_MUX_CLKCMU_M2M_FRC 0x11cc
+#define CLK_CON_MUX_CLKCMU_MCSC_MCSC 0x11d0
+#define CLK_CON_MUX_CLKCMU_MCSC_NOC 0x11d4
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_FRC 0x11d8
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_NOC 0x11dc
+#define CLK_CON_DIV_CLKCMU_ALIVE_NOC 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_NOC 0x1804
+#define CLK_CON_DIV_CLKCMU_BRP_NOC 0x1808
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x180c
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM 0x1810
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x1814
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF 0x1818
+#define CLK_CON_DIV_CLKCMU_CPUCL0_NOCP 0x181c
+#define CLK_CON_DIV_CLKCMU_CSIS_DCPHY 0x1820
+#define CLK_CON_DIV_CLKCMU_CSIS_NOC 0x1824
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x1828
+#define CLK_CON_DIV_CLKCMU_CSTAT_NOC 0x182c
+#define CLK_CON_DIV_CLKCMU_DPUB_DSIM 0x1830
+#define CLK_CON_DIV_CLKCMU_LME_LME 0x1834
+#define CLK_CON_DIV_CLKCMU_G3D_NOCP 0x1838
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1840
+#define CLK_CON_DIV_CLKCMU_HSI0_DPOSC 0x1844
+#define CLK_CON_DIV_CLKCMU_HSI0_NOC 0x1848
+#define CLK_CON_DIV_CLKCMU_HSI0_USB32DRD 0x184c
+#define CLK_CON_DIV_CLKCMU_HSI1_NOC 0x1850
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1854
+#define CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD 0x1858
+#define CLK_CON_DIV_CLKCMU_LME_NOC 0x1860
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x1874
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x1878
+#define CLK_CON_DIV_CLKCMU_MFC1_MFC1 0x187c
+#define CLK_CON_DIV_CLKCMU_MIF_NOCP 0x1880
+#define CLK_CON_DIV_CLKCMU_NOCL1B_NOC1 0x1884
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP0 0x1888
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP1 0x188c
+#define CLK_CON_DIV_CLKCMU_PERIC0_NOC 0x1890
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP0 0x1894
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP1 0x1898
+#define CLK_CON_DIV_CLKCMU_PERIC1_NOC 0x189c
+#define CLK_CON_DIV_CLKCMU_PERIC2_IP0 0x18a0
+#define CLK_CON_DIV_CLKCMU_PERIC2_IP1 0x18a4
+#define CLK_CON_DIV_CLKCMU_PERIC2_NOC 0x18a8
+#define CLK_CON_DIV_CLKCMU_PERIS_GIC 0x18ac
+#define CLK_CON_DIV_CLKCMU_PERIS_NOC 0x18b0
+#define CLK_CON_DIV_CLKCMU_SSP_NOC 0x18b8
+#define CLK_CON_DIV_CLKCMU_VTS_DMIC 0x18bc
+#define CLK_CON_DIV_CLKCMU_YUVP_NOC 0x18c0
+#define CLK_CON_DIV_CP_SHARED1_CLK 0x18c4
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0 0x18c8
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM 0x18cc
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1 0x18d0
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM 0x18d4
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_CPU 0x18d8
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM 0x18dc
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1 0x18e4
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3 0x18ec
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4 0x18f0
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5 0x18f4
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6 0x18f8
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7 0x18fc
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC 0x1900
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM 0x1904
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH 0x1908
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM 0x190c
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH 0x1910
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM 0x1914
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH 0x1918
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM 0x191c
+#define CLK_CON_DIV_DIV_CLKCMU_DNC_NOC 0x1920
+#define CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM 0x1924
+#define CLK_CON_DIV_DIV_CLKCMU_DPUB 0x1928
+#define CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT 0x192c
+#define CLK_CON_DIV_DIV_CLKCMU_DPUF 0x1930
+#define CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT 0x1934
+#define CLK_CON_DIV_DIV_CLKCMU_DSP_NOC 0x1940
+#define CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM 0x1944
+#define CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH 0x1948
+#define CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM 0x194c
+#define CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH 0x1950
+#define CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM 0x1954
+#define CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC 0x1960
+#define CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM 0x1964
+#define CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD 0x1968
+#define CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM 0x196c
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_NOC 0x1970
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM 0x1974
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC 0x1978
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM 0x197c
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC 0x1980
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM 0x1984
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0 0x1988
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM 0x198c
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC 0x1990
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM 0x1994
+#define CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC 0x1998
+#define CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM 0x199c
+#define CLK_CON_DIV_DIV_CP_HISPEEDY_CLK 0x19a0
+#define CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM 0x19a4
+#define CLK_CON_DIV_DIV_CP_SHARED0_CLK 0x19a8
+#define CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM 0x19ac
+#define CLK_CON_DIV_DIV_CP_SHARED2_CLK 0x19b0
+#define CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM 0x19b4
+#define CLK_CON_DIV_CLKCMU_UFS_NOC 0x19b8
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_FRC 0x19bc
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM 0x19c0
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC 0x19c4
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM 0x19c8
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC 0x19cc
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM 0x19d0
+#define CLK_CON_GAT_CLKCMU_MIF01_SWITCH 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF23_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_ALIVE_NOC 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0_SM 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1_SM 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU_SM 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_NOC 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_BRP_NOC 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CAM 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU_MIF 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC_SM 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_NOCP 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH_SM 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH_SM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH_SM 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_DCPHY 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_NOC 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_CSTAT_NOC 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_NOC 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_NOC_SM 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB_ALT 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB_DSIM 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_DPUF 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_DPUF_ALT 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_NOC 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_NOC_SM 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH_SM 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_NOCP 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH_SM 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC_SM 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x20ec
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPOSC 0x20f0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_NOC 0x20f4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB32DRD 0x20f8
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD 0x20fc
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD_SM 0x2100
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_NOC 0x2104
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2108
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_UFS_EMBD 0x210c
+#define CLK_CON_GAT_GATE_CLKCMU_LME_LME 0x2110
+#define CLK_CON_GAT_GATE_CLKCMU_LME_NOC 0x2114
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_NOC 0x2118
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_NOC_SM 0x211c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x212c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x2130
+#define CLK_CON_GAT_GATE_CLKCMU_MFC1_MFC1 0x2134
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_NOCP 0x2138
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC 0x213c
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC_SM 0x2140
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC 0x2144
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC_SM 0x2148
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0 0x214c
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0_SM 0x2150
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC1 0x2154
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC 0x2158
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC_SM 0x215c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP0 0x2160
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP1 0x2164
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_NOC 0x2168
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP0 0x216c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP1 0x2170
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_NOC 0x2174
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP0 0x2178
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP1 0x217c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_NOC 0x2180
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_GIC 0x2184
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_NOC 0x2188
+#define CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC 0x2190
+#define CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC_SM 0x2194
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_NOC 0x2198
+#define CLK_CON_GAT_GATE_CLKCMU_VTS_DMIC 0x219c
+#define CLK_CON_GAT_GATE_CLKCMU_YUVP_NOC 0x21a0
+#define CLK_CON_GAT_GATE_CP_HISPEEDY_CLK 0x21a4
+#define CLK_CON_GAT_GATE_CP_HISPEEDY_CLK_SM 0x21a8
+#define CLK_CON_GAT_GATE_CP_SHARED0_CLK 0x21ac
+#define CLK_CON_GAT_GATE_CP_SHARED0_CLK_SM 0x21b0
+#define CLK_CON_GAT_GATE_CP_SHARED1_CLK 0x21b4
+#define CLK_CON_GAT_GATE_CP_SHARED2_CLK 0x21b8
+#define CLK_CON_GAT_GATE_CP_SHARED2_CLK_SM 0x21bc
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_NOC 0x21c0
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_FRC 0x21c4
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_FRC_SM 0x21c8
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC 0x21cc
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC_SM 0x21d0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC 0x21d4
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC_SM 0x21d8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_LOCKTIME_PLL_SHARED_MIF,
+ PLL_CON3_PLL_MMC,
+ PLL_CON8_PLL_MMC,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON8_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON8_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON8_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON8_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED4,
+ PLL_CON8_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED_MIF,
+ PLL_CON8_PLL_SHARED_MIF,
+ PLL_CON0_MUX_CP_MPLL_CLK_D2_USER,
+ PLL_CON1_MUX_CP_MPLL_CLK_D2_USER,
+ PLL_CON0_MUX_CP_MPLL_CLK_USER,
+ PLL_CON1_MUX_CP_MPLL_CLK_USER,
+ CLK_CON_MUX_CLKCMU_AUD_AUDIF0,
+ CLK_CON_MUX_CLKCMU_AUD_AUDIF1,
+ CLK_CON_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_CLKCMU_DPUB_NOC,
+ CLK_CON_MUX_CLKCMU_DPUF_NOC,
+ CLK_CON_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_CLKCMU_DSU_SWITCH,
+ CLK_CON_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL1A_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_MUX_CLKCMU_NOCL1C_NOC,
+ CLK_CON_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_CP_HISPEEDY_CLK,
+ CLK_CON_MUX_CP_SHARED0_CLK,
+ CLK_CON_MUX_CP_SHARED2_CLK,
+ CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_BRP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_LME_LME,
+ CLK_CON_MUX_MUX_CLKCMU_LME_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC,
+ CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CP_HISPEEDY_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED0_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED1_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED2_CLK,
+ CLK_CON_MUX_CLKCMU_M2M_FRC,
+ CLK_CON_MUX_CLKCMU_MCSC_MCSC,
+ CLK_CON_MUX_CLKCMU_MCSC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_FRC,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_NOC,
+ CLK_CON_DIV_CLKCMU_ALIVE_NOC,
+ CLK_CON_DIV_CLKCMU_AUD_NOC,
+ CLK_CON_DIV_CLKCMU_BRP_NOC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF,
+ CLK_CON_DIV_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_DIV_CLKCMU_CSIS_DCPHY,
+ CLK_CON_DIV_CLKCMU_CSIS_NOC,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_CSTAT_NOC,
+ CLK_CON_DIV_CLKCMU_DPUB_DSIM,
+ CLK_CON_DIV_CLKCMU_LME_LME,
+ CLK_CON_DIV_CLKCMU_G3D_NOCP,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_DPOSC,
+ CLK_CON_DIV_CLKCMU_HSI0_NOC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_DIV_CLKCMU_HSI1_NOC,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_LME_NOC,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MFC1_MFC1,
+ CLK_CON_DIV_CLKCMU_MIF_NOCP,
+ CLK_CON_DIV_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC0_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC1_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC2_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC2_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC2_NOC,
+ CLK_CON_DIV_CLKCMU_PERIS_GIC,
+ CLK_CON_DIV_CLKCMU_PERIS_NOC,
+ CLK_CON_DIV_CLKCMU_SSP_NOC,
+ CLK_CON_DIV_CLKCMU_VTS_DMIC,
+ CLK_CON_DIV_CLKCMU_YUVP_NOC,
+ CLK_CON_DIV_CP_SHARED1_CLK,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DNC_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DPUB,
+ CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT,
+ CLK_CON_DIV_DIV_CLKCMU_DPUF,
+ CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT,
+ CLK_CON_DIV_DIV_CLKCMU_DSP_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM,
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK,
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM,
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK,
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM,
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK,
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM,
+ CLK_CON_DIV_CLKCMU_UFS_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_FRC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM,
+ CLK_CON_GAT_CLKCMU_MIF01_SWITCH,
+ CLK_CON_GAT_CLKCMU_MIF23_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ALIVE_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_BRP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU_MIF,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_DCPHY,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_CSTAT_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB_ALT,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB_DSIM,
+ CLK_CON_GAT_GATE_CLKCMU_DPUF,
+ CLK_CON_GAT_GATE_CLKCMU_DPUF_ALT,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPOSC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD_SM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_LME_LME,
+ CLK_CON_GAT_GATE_CLKCMU_LME_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MFC1_MFC1,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_GIC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_VTS_DMIC,
+ CLK_CON_GAT_GATE_CLKCMU_YUVP_NOC,
+ CLK_CON_GAT_GATE_CP_HISPEEDY_CLK,
+ CLK_CON_GAT_GATE_CP_HISPEEDY_CLK_SM,
+ CLK_CON_GAT_GATE_CP_SHARED0_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED0_CLK_SM,
+ CLK_CON_GAT_GATE_CP_SHARED1_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED2_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED2_CLK_SM,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_FRC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_FRC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC_SM,
+};
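+
+/*
+ * The clk_regs list is not used for registration itself: the common
+ * Samsung clock code saves these registers on suspend and restores them
+ * on resume (see samsung_clk_sleep_init() in clk.c), so every register
+ * the driver programs above should also appear in this list.
+ */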
+
+/* List of parent clocks for Muxes in CMU_TOP */
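+/*
+ * PNAME() (from the local clk.h) expands roughly to
+ *
+ *	static const char * const name[] __initconst = { ... };
+ *
+ * i.e. an __initconst array of parent-clock name strings indexed by the
+ * mux selector value. Where a selector value appears to be reserved or
+ * unused, "oscclk" is repeated as a harmless placeholder so the array
+ * still matches the register field width.
+ */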
+PNAME(mout_cmu_cp_mpll_clk_d2_user_parents) = { "oscclk" };
+PNAME(mout_cmu_cp_mpll_clk_user_parents) = { "oscclk" };
+PNAME(mout_cmu_aud_audif0_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_aud_audif1_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "mout_cmu_cp_mpll_clk_d2_user" };
+PNAME(mout_cmu_cpucl0_dbg_noc_p) = { "dout_shared2_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dnc_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_dpub_noc_p) = { "dout_cmu_div_dpub",
+						  "dout_cmu_div_dpub_alt" };
+PNAME(mout_cmu_dpuf_noc_p) = { "dout_cmu_div_dpuf",
+ "dout_cmu_div_dpuf_alt" };
+PNAME(mout_cmu_dsp_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsu_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g3d_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_gnpu_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_ufs_mmc_card_p) = { "oscclk",
+ "dout_shared2_div1",
+ "dout_mmc_div1",
+ "dout_shared0_div2" };
+PNAME(mout_cmu_m2m_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_nocl0_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "dout_shared_mif_div2" };
+PNAME(mout_cmu_nocl1a_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_nocl1b_noc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_nocl1c_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_sdma_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_cp_hispeedy_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_cp_shared0_clk_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_cp_shared2_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_alive_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_aud_audif0_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_aud_audif1_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_aud_cpu_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "mout_cmu_cp_mpll_clk_d2_user" };
+PNAME(mout_cmu_mux_aud_noc_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_brp_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_cis_clk0_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk1_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk2_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk3_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk4_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk5_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk6_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk7_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cmu_boost_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_cam_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_cpu_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_mif_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cpucl0_dbg_noc_p) = { "dout_shared2_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_cpucl0_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cpucl0_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_cpucl1_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_cpucl2_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_csis_dcphy_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_csis_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_csis_ois_mcu_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cstat_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dnc_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dpub_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpub_alt_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpub_dsim_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2" };
+PNAME(mout_cmu_mux_dpuf_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpuf_alt_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dsp_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dsu_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_g3d_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_g3d_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_gnpu_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_hsi0_dpgtc_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi0_dposc_p) = { "oscclk",
+ "dout_shared2_div1" };
+PNAME(mout_cmu_mux_hsi0_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi0_usb32drd_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_ufs_mmc_card_p) = { "oscclk",
+ "dout_shared2_div1",
+ "dout_mmc_div1",
+ "dout_shared0_div2" };
+PNAME(mout_cmu_mux_hsi1_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi1_pcie_p) = { "oscclk",
+ "dout_shared2_div1" };
+PNAME(mout_cmu_mux_ufs_ufs_embd_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_lme_lme_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_lme_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_m2m_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mcsc_mcsc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mcsc_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc0_mfc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc0_wfd_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc1_mfc1_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mif_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_mif_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_user",
+ "dout_shared_mif_div1" };
+PNAME(mout_cmu_mux_nocl0_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "dout_shared_mif_div2" };
+PNAME(mout_cmu_mux_nocl1a_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_nocl1b_noc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_nocl1b_noc1_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_nocl1c_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peric1_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric1_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric1_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peric2_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric2_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric2_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peris_gic_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peris_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_sdma_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_ssp_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_vts_dmic_p) = { "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_yuvp_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_boost" };
+PNAME(mout_cmu_mux_cp_hispeedy_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_mux_cp_shared0_clk_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_mux_cp_shared1_clk_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cp_shared2_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_m2m_frc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mcsc_mcsc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mcsc_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_m2m_frc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_ufs_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_4311, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_4311, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED_MIF_PLL, "fout_shared_mif_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED_MIF, PLL_CON3_PLL_SHARED_MIF, NULL),
+};
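+
+/*
+ * All PLLs here are registered with a NULL rate table. In the Samsung
+ * PLL code that typically selects the read-only ops variant (recalc_rate
+ * but no set_rate), so these PLLs run at whatever rate the bootloader
+ * programmed; a frequency table would have to be added before the kernel
+ * could reprogram them.
+ */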
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMU_CP_MPLL_CLK_D2_USER, "mout_cmu_cp_mpll_clk_d2_user",
+ mout_cmu_cp_mpll_clk_d2_user_parents,
+ PLL_CON0_MUX_CP_MPLL_CLK_D2_USER, 4, 1),
+ MUX(CLK_MOUT_CMU_CP_MPLL_CLK_USER, "mout_cmu_cp_mpll_clk_user",
+ mout_cmu_cp_mpll_clk_user_parents, PLL_CON0_MUX_CP_MPLL_CLK_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CMU_AUD_AUDIF0, "mout_cmu_aud_audif0",
+ mout_cmu_aud_audif0_p, CLK_CON_MUX_CLKCMU_AUD_AUDIF0, 0, 3),
+ MUX(CLK_MOUT_CMU_AUD_AUDIF1, "mout_cmu_aud_audif1",
+ mout_cmu_aud_audif1_p, CLK_CON_MUX_CLKCMU_AUD_AUDIF1, 0, 3),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu", mout_cmu_aud_cpu_p,
+ CLK_CON_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_NOC, "mout_cmu_cpucl0_dbg_noc",
+ mout_cmu_cpucl0_dbg_noc_p, CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_DNC_NOC, "mout_cmu_dnc_noc", mout_cmu_dnc_noc_p,
+ CLK_CON_MUX_CLKCMU_DNC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_DPUB_NOC, "mout_cmu_dpub_noc", mout_cmu_dpub_noc_p,
+ CLK_CON_MUX_CLKCMU_DPUB_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_DPUF_NOC, "mout_cmu_dpuf_noc", mout_cmu_dpuf_noc_p,
+ CLK_CON_MUX_CLKCMU_DPUF_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_DSP_NOC, "mout_cmu_dsp_noc", mout_cmu_dsp_noc_p,
+ CLK_CON_MUX_CLKCMU_DSP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_DSU_SWITCH, "mout_cmu_dsu_switch",
+ mout_cmu_dsu_switch_p, CLK_CON_MUX_CLKCMU_DSU_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_G3D_SWITCH, "mout_cmu_g3d_switch",
+ mout_cmu_g3d_switch_p, CLK_CON_MUX_CLKCMU_G3D_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_GNPU_NOC, "mout_cmu_gnpu_noc", mout_cmu_gnpu_noc_p,
+ CLK_CON_MUX_CLKCMU_GNPU_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_UFS_MMC_CARD, "mout_cmu_ufs_mmc_card",
+ mout_cmu_ufs_mmc_card_p, CLK_CON_MUX_CLKCMU_UFS_MMC_CARD, 0, 2),
+ MUX(CLK_MOUT_CMU_M2M_NOC, "mout_cmu_m2m_noc", mout_cmu_m2m_noc_p,
+ CLK_CON_MUX_CLKCMU_M2M_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL0_NOC, "mout_cmu_nocl0_noc", mout_cmu_nocl0_noc_p,
+ CLK_CON_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1A_NOC, "mout_cmu_nocl1a_noc",
+ mout_cmu_nocl1a_noc_p, CLK_CON_MUX_CLKCMU_NOCL1A_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1B_NOC0, "mout_cmu_nocl1b_noc0",
+ mout_cmu_nocl1b_noc0_p, CLK_CON_MUX_CLKCMU_NOCL1B_NOC0, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1C_NOC, "mout_cmu_nocl1c_noc",
+ mout_cmu_nocl1c_noc_p, CLK_CON_MUX_CLKCMU_NOCL1C_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_SDMA_NOC, "mout_cmu_sdma_noc", mout_cmu_sdma_noc_p,
+ CLK_CON_MUX_CLKCMU_SDMA_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_CP_HISPEEDY_CLK, "mout_cmu_cp_hispeedy_clk",
+ mout_cmu_cp_hispeedy_clk_p, CLK_CON_MUX_CP_HISPEEDY_CLK, 0, 1),
+ MUX(CLK_MOUT_CMU_CP_SHARED0_CLK, "mout_cmu_cp_shared0_clk",
+ mout_cmu_cp_shared0_clk_p, CLK_CON_MUX_CP_SHARED0_CLK, 0, 2),
+ MUX(CLK_MOUT_CMU_CP_SHARED2_CLK, "mout_cmu_cp_shared2_clk",
+ mout_cmu_cp_shared2_clk_p, CLK_CON_MUX_CP_SHARED2_CLK, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_ALIVE_NOC, "mout_cmu_mux_alive_noc",
+ mout_cmu_mux_alive_noc_p, CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_AUD_AUDIF0, "mout_cmu_mux_aud_audif0",
+ mout_cmu_mux_aud_audif0_p, CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_AUDIF1, "mout_cmu_mux_aud_audif1",
+ mout_cmu_mux_aud_audif1_p, CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_CPU, "mout_cmu_mux_aud_cpu",
+ mout_cmu_mux_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_NOC, "mout_cmu_mux_aud_noc",
+ mout_cmu_mux_aud_noc_p, CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_BRP_NOC, "mout_cmu_mux_brp_noc",
+ mout_cmu_mux_brp_noc_p, CLK_CON_MUX_MUX_CLKCMU_BRP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK0, "mout_cmu_mux_cis_clk0",
+ mout_cmu_mux_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK1, "mout_cmu_mux_cis_clk1",
+ mout_cmu_mux_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK2, "mout_cmu_mux_cis_clk2",
+ mout_cmu_mux_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK3, "mout_cmu_mux_cis_clk3",
+ mout_cmu_mux_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK4, "mout_cmu_mux_cis_clk4",
+ mout_cmu_mux_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK5, "mout_cmu_mux_cis_clk5",
+ mout_cmu_mux_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK6, "mout_cmu_mux_cis_clk6",
+ mout_cmu_mux_cis_clk6_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK7, "mout_cmu_mux_cis_clk7",
+ mout_cmu_mux_cis_clk7_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST, "mout_cmu_mux_cmu_boost",
+ mout_cmu_mux_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_CAM, "mout_cmu_mux_cmu_boost_cam",
+ mout_cmu_mux_cmu_boost_cam_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_CPU, "mout_cmu_mux_cmu_boost_cpu",
+ mout_cmu_mux_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_MIF, "mout_cmu_mux_cmu_boost_mif",
+ mout_cmu_mux_cmu_boost_mif_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_DBG_NOC, "mout_cmu_mux_cpucl0_dbg_noc",
+ mout_cmu_mux_cpucl0_dbg_noc_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_NOCP, "mout_cmu_mux_cpucl0_nocp",
+ mout_cmu_mux_cpucl0_nocp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_SWITCH, "mout_cmu_mux_cpucl0_switch",
+ mout_cmu_mux_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL1_SWITCH, "mout_cmu_mux_cpucl1_switch",
+ mout_cmu_mux_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL2_SWITCH, "mout_cmu_mux_cpucl2_switch",
+ mout_cmu_mux_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_DCPHY, "mout_cmu_mux_csis_dcphy",
+ mout_cmu_mux_csis_dcphy_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_NOC, "mout_cmu_mux_csis_noc",
+ mout_cmu_mux_csis_noc_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_OIS_MCU, "mout_cmu_mux_csis_ois_mcu",
+ mout_cmu_mux_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CSTAT_NOC, "mout_cmu_mux_cstat_noc",
+ mout_cmu_mux_cstat_noc_p, CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DNC_NOC, "mout_cmu_mux_dnc_noc",
+ mout_cmu_mux_dnc_noc_p, CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB, "mout_cmu_mux_dpub", mout_cmu_mux_dpub_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB_ALT, "mout_cmu_mux_dpub_alt",
+ mout_cmu_mux_dpub_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB_DSIM, "mout_cmu_mux_dpub_dsim",
+ mout_cmu_mux_dpub_dsim_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_DPUF, "mout_cmu_mux_dpuf", mout_cmu_mux_dpuf_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUF_ALT, "mout_cmu_mux_dpuf_alt",
+ mout_cmu_mux_dpuf_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DSP_NOC, "mout_cmu_mux_dsp_noc",
+ mout_cmu_mux_dsp_noc_p, CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DSU_SWITCH, "mout_cmu_mux_dsu_switch",
+ mout_cmu_mux_dsu_switch_p, CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_G3D_NOCP, "mout_cmu_mux_g3d_nocp",
+ mout_cmu_mux_g3d_nocp_p, CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_G3D_SWITCH, "mout_cmu_mux_g3d_switch",
+ mout_cmu_mux_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_GNPU_NOC, "mout_cmu_mux_gnpu_noc",
+ mout_cmu_mux_gnpu_noc_p, CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_DPGTC, "mout_cmu_mux_hsi0_dpgtc",
+ mout_cmu_mux_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_DPOSC, "mout_cmu_mux_hsi0_dposc",
+ mout_cmu_mux_hsi0_dposc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_NOC, "mout_cmu_mux_hsi0_noc",
+ mout_cmu_mux_hsi0_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_USB32DRD, "mout_cmu_mux_hsi0_usb32drd",
+ mout_cmu_mux_hsi0_usb32drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_UFS_MMC_CARD, "mout_cmu_mux_ufs_mmc_card",
+ mout_cmu_mux_ufs_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI1_NOC, "mout_cmu_mux_hsi1_noc",
+ mout_cmu_mux_hsi1_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI1_PCIE, "mout_cmu_mux_hsi1_pcie",
+ mout_cmu_mux_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_UFS_UFS_EMBD, "mout_cmu_mux_ufs_ufs_embd",
+ mout_cmu_mux_ufs_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_LME_LME, "mout_cmu_mux_lme_lme",
+ mout_cmu_mux_lme_lme_p, CLK_CON_MUX_MUX_CLKCMU_LME_LME, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_LME_NOC, "mout_cmu_mux_lme_noc",
+ mout_cmu_mux_lme_noc_p, CLK_CON_MUX_MUX_CLKCMU_LME_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_M2M_NOC, "mout_cmu_mux_m2m_noc",
+ mout_cmu_mux_m2m_noc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MCSC_MCSC, "mout_cmu_mux_mcsc_mcsc",
+ mout_cmu_mux_mcsc_mcsc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MCSC_NOC, "mout_cmu_mux_mcsc_noc",
+ mout_cmu_mux_mcsc_noc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC0_MFC0, "mout_cmu_mux_mfc0_mfc0",
+ mout_cmu_mux_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC0_WFD, "mout_cmu_mux_mfc0_wfd",
+ mout_cmu_mux_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC1_MFC1, "mout_cmu_mux_mfc1_mfc1",
+ mout_cmu_mux_mfc1_mfc1_p, CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MIF_NOCP, "mout_cmu_mux_mif_nocp",
+ mout_cmu_mux_mif_nocp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_MIF_SWITCH, "mout_cmu_mux_mif_switch",
+ mout_cmu_mux_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL0_NOC, "mout_cmu_mux_nocl0_noc",
+ mout_cmu_mux_nocl0_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1A_NOC, "mout_cmu_mux_nocl1a_noc",
+ mout_cmu_mux_nocl1a_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1B_NOC0, "mout_cmu_mux_nocl1b_noc0",
+ mout_cmu_mux_nocl1b_noc0_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1B_NOC1, "mout_cmu_mux_nocl1b_noc1",
+ mout_cmu_mux_nocl1b_noc1_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1C_NOC, "mout_cmu_mux_nocl1c_noc",
+ mout_cmu_mux_nocl1c_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_IP0, "mout_cmu_mux_peric0_ip0",
+ mout_cmu_mux_peric0_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_IP1, "mout_cmu_mux_peric0_ip1",
+ mout_cmu_mux_peric0_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_NOC, "mout_cmu_mux_peric0_noc",
+ mout_cmu_mux_peric0_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_IP0, "mout_cmu_mux_peric1_ip0",
+ mout_cmu_mux_peric1_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_IP1, "mout_cmu_mux_peric1_ip1",
+ mout_cmu_mux_peric1_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_NOC, "mout_cmu_mux_peric1_noc",
+ mout_cmu_mux_peric1_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_IP0, "mout_cmu_mux_peric2_ip0",
+ mout_cmu_mux_peric2_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_IP1, "mout_cmu_mux_peric2_ip1",
+ mout_cmu_mux_peric2_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_NOC, "mout_cmu_mux_peric2_noc",
+ mout_cmu_mux_peric2_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIS_GIC, "mout_cmu_mux_peris_gic",
+ mout_cmu_mux_peris_gic_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIS_NOC, "mout_cmu_mux_peris_noc",
+ mout_cmu_mux_peris_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_SDMA_NOC, "mout_cmu_mux_sdma_noc",
+ mout_cmu_mux_sdma_noc_p, CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_SSP_NOC, "mout_cmu_mux_ssp_noc",
+ mout_cmu_mux_ssp_noc_p, CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_VTS_DMIC, "mout_cmu_mux_vts_dmic",
+ mout_cmu_mux_vts_dmic_p, CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_YUVP_NOC, "mout_cmu_mux_yuvp_noc",
+ mout_cmu_mux_yuvp_noc_p, CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CMU_CMUREF, "mout_cmu_mux_cmu_cmuref",
+ mout_cmu_mux_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CP_HISPEEDY_CLK, "mout_cmu_mux_cp_hispeedy_clk",
+ mout_cmu_mux_cp_hispeedy_clk_p, CLK_CON_MUX_MUX_CP_HISPEEDY_CLK,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED0_CLK, "mout_cmu_mux_cp_shared0_clk",
+ mout_cmu_mux_cp_shared0_clk_p, CLK_CON_MUX_MUX_CP_SHARED0_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED1_CLK, "mout_cmu_mux_cp_shared1_clk",
+ mout_cmu_mux_cp_shared1_clk_p, CLK_CON_MUX_MUX_CP_SHARED1_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED2_CLK, "mout_cmu_mux_cp_shared2_clk",
+ mout_cmu_mux_cp_shared2_clk_p, CLK_CON_MUX_MUX_CP_SHARED2_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_M2M_FRC, "mout_cmu_m2m_frc", mout_cmu_m2m_frc_p,
+ CLK_CON_MUX_CLKCMU_M2M_FRC, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_MCSC, "mout_cmu_mcsc_mcsc", mout_cmu_mcsc_mcsc_p,
+ CLK_CON_MUX_CLKCMU_MCSC_MCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_NOC, "mout_cmu_mcsc_noc", mout_cmu_mcsc_noc_p,
+ CLK_CON_MUX_CLKCMU_MCSC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_M2M_FRC, "mout_cmu_mux_m2m_frc",
+ mout_cmu_mux_m2m_frc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_FRC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_UFS_NOC, "mout_cmu_mux_ufs_noc",
+ mout_cmu_mux_ufs_noc_p, CLK_CON_MUX_MUX_CLKCMU_UFS_NOC, 0, 2),
+};
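+
+/*
+ * MUX(id, name, parents, reg, shift, width): in each entry above, the
+ * width-bit field at bit "shift" of register offset "reg" selects one
+ * entry of "parents"; a 3-bit field can address up to eight inputs even
+ * where fewer are wired.
+ */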
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMU_ALIVE_NOC, "dout_cmu_alive_noc",
+ "mout_cmu_mux_alive_noc", CLK_CON_DIV_CLKCMU_ALIVE_NOC, 0, 2),
+ DIV(CLK_DOUT_CMU_AUD_NOC, "dout_cmu_aud_noc", "mout_cmu_mux_aud_noc",
+ CLK_CON_DIV_CLKCMU_AUD_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_BRP_NOC, "dout_cmu_brp_noc", "mout_cmu_mux_brp_noc",
+ CLK_CON_DIV_CLKCMU_BRP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost",
+ "mout_cmu_mux_cmu_boost", CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CAM, "dout_cmu_cmu_boost_cam",
+ "mout_cmu_mux_cmu_boost_cam", CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_mux_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_MIF, "dout_cmu_cmu_boost_mif",
+ "mout_cmu_mux_cmu_boost_mif", CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL0_NOCP, "dout_cmu_cpucl0_nocp",
+ "mout_cmu_mux_cpucl0_nocp", CLK_CON_DIV_CLKCMU_CPUCL0_NOCP, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_DCPHY, "dout_cmu_csis_dcphy",
+ "mout_cmu_mux_csis_dcphy", CLK_CON_DIV_CLKCMU_CSIS_DCPHY, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_NOC, "dout_cmu_csis_noc",
+ "mout_cmu_mux_csis_noc", CLK_CON_DIV_CLKCMU_CSIS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "mout_cmu_mux_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_CSTAT_NOC, "dout_cmu_cstat_noc",
+ "mout_cmu_mux_cstat_noc", CLK_CON_DIV_CLKCMU_CSTAT_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DPUB_DSIM, "dout_cmu_dpub_dsim",
+ "mout_cmu_mux_dpub_dsim", CLK_CON_DIV_CLKCMU_DPUB_DSIM, 0, 4),
+ DIV(CLK_DOUT_CMU_LME_LME, "dout_cmu_lme_lme", "mout_cmu_mux_lme_lme",
+ CLK_CON_DIV_CLKCMU_LME_LME, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_NOCP, "dout_cmu_g3d_nocp",
+ "mout_cmu_mux_g3d_nocp", CLK_CON_DIV_CLKCMU_G3D_NOCP, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc",
+ "mout_cmu_mux_hsi0_dpgtc", CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_DPOSC, "dout_cmu_hsi0_dposc",
+ "mout_cmu_mux_hsi0_dposc", CLK_CON_DIV_CLKCMU_HSI0_DPOSC, 0, 5),
+ DIV(CLK_DOUT_CMU_HSI0_NOC, "dout_cmu_hsi0_noc",
+ "mout_cmu_mux_hsi0_noc", CLK_CON_DIV_CLKCMU_HSI0_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USB32DRD, "dout_cmu_hsi0_usb32drd",
+ "mout_cmu_mux_hsi0_usb32drd", CLK_CON_DIV_CLKCMU_HSI0_USB32DRD,
+ 0, 5),
+ DIV(CLK_DOUT_CMU_HSI1_NOC, "dout_cmu_hsi1_noc",
+ "mout_cmu_mux_hsi1_noc", CLK_CON_DIV_CLKCMU_HSI1_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "mout_cmu_mux_hsi1_pcie", CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 8),
+ DIV(CLK_DOUT_CMU_UFS_UFS_EMBD, "dout_cmu_ufs_ufs_embd",
+ "mout_cmu_mux_ufs_ufs_embd", CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_LME_NOC, "dout_cmu_lme_noc", "mout_cmu_mux_lme_noc",
+ CLK_CON_DIV_CLKCMU_LME_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0",
+ "mout_cmu_mux_mfc0_mfc0", CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd",
+ "mout_cmu_mux_mfc0_wfd", CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC1_MFC1, "dout_cmu_mfc1_mfc1",
+ "mout_cmu_mux_mfc1_mfc1", CLK_CON_DIV_CLKCMU_MFC1_MFC1, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_NOCP, "dout_cmu_mif_nocp",
+ "mout_cmu_mux_mif_nocp", CLK_CON_DIV_CLKCMU_MIF_NOCP, 0, 4),
+ DIV(CLK_DOUT_CMU_NOCL1B_NOC1, "dout_cmu_nocl1b_noc1",
+ "mout_cmu_mux_nocl1b_noc1", CLK_CON_DIV_CLKCMU_NOCL1B_NOC1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP0, "dout_cmu_peric0_ip0",
+ "mout_cmu_mux_peric0_ip0", CLK_CON_DIV_CLKCMU_PERIC0_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP1, "dout_cmu_peric0_ip1",
+ "mout_cmu_mux_peric0_ip1", CLK_CON_DIV_CLKCMU_PERIC0_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_NOC, "dout_cmu_peric0_noc",
+ "mout_cmu_mux_peric0_noc", CLK_CON_DIV_CLKCMU_PERIC0_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP0, "dout_cmu_peric1_ip0",
+ "mout_cmu_mux_peric1_ip0", CLK_CON_DIV_CLKCMU_PERIC1_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP1, "dout_cmu_peric1_ip1",
+ "mout_cmu_mux_peric1_ip1", CLK_CON_DIV_CLKCMU_PERIC1_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_NOC, "dout_cmu_peric1_noc",
+ "mout_cmu_mux_peric1_noc", CLK_CON_DIV_CLKCMU_PERIC1_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_IP0, "dout_cmu_peric2_ip0",
+ "mout_cmu_mux_peric2_ip0", CLK_CON_DIV_CLKCMU_PERIC2_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_IP1, "dout_cmu_peric2_ip1",
+ "mout_cmu_mux_peric2_ip1", CLK_CON_DIV_CLKCMU_PERIC2_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_NOC, "dout_cmu_peric2_noc",
+ "mout_cmu_mux_peric2_noc", CLK_CON_DIV_CLKCMU_PERIC2_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_GIC, "dout_cmu_peris_gic",
+ "mout_cmu_mux_peris_gic", CLK_CON_DIV_CLKCMU_PERIS_GIC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_NOC, "dout_cmu_peris_noc",
+ "mout_cmu_mux_peris_noc", CLK_CON_DIV_CLKCMU_PERIS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_NOC, "dout_cmu_ssp_noc", "mout_cmu_mux_ssp_noc",
+ CLK_CON_DIV_CLKCMU_SSP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_VTS_DMIC, "dout_cmu_vts_dmic",
+ "mout_cmu_mux_vts_dmic", CLK_CON_DIV_CLKCMU_VTS_DMIC, 0, 6),
+ DIV(CLK_DOUT_CMU_YUVP_NOC, "dout_cmu_yuvp_noc",
+ "mout_cmu_mux_yuvp_noc", CLK_CON_DIV_CLKCMU_YUVP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CP_SHARED1_CLK, "dout_cmu_cp_shared1_clk",
+ "mout_cmu_mux_cp_shared1_clk", CLK_CON_DIV_CP_SHARED1_CLK, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF0, "dout_cmu_div_aud_audif0",
+ "mout_cmu_mux_aud_audif0", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0,
+ 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF0_SM, "dout_cmu_div_aud_audif0_sm",
+ "mout_cmu_aud_audif0", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM, 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF1, "dout_cmu_div_aud_audif1",
+ "mout_cmu_mux_aud_audif1", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1,
+ 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF1_SM, "dout_cmu_div_aud_audif1_sm",
+ "mout_cmu_aud_audif1", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM, 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_CPU, "dout_cmu_div_aud_cpu",
+ "mout_cmu_mux_aud_cpu", CLK_CON_DIV_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_AUD_CPU_SM, "dout_cmu_div_aud_cpu_sm",
+ "mout_cmu_aud_cpu", CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK0, "dout_cmu_div_cis_clk0",
+ "mout_cmu_mux_cis_clk0", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK1, "dout_cmu_div_cis_clk1",
+ "mout_cmu_mux_cis_clk1", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK2, "dout_cmu_div_cis_clk2",
+ "mout_cmu_mux_cis_clk2", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK3, "dout_cmu_div_cis_clk3",
+ "mout_cmu_mux_cis_clk3", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK4, "dout_cmu_div_cis_clk4",
+ "mout_cmu_mux_cis_clk4", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK5, "dout_cmu_div_cis_clk5",
+ "mout_cmu_mux_cis_clk5", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK6, "dout_cmu_div_cis_clk6",
+ "mout_cmu_mux_cis_clk6", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK7, "dout_cmu_div_cis_clk7",
+ "mout_cmu_mux_cis_clk7", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC, "dout_cmu_div_cpucl0_dbg_noc",
+ "mout_cmu_mux_cpucl0_dbg_noc",
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC_SM,
+ "dout_cmu_div_cpucl0_dbg_noc_sm", "mout_cmu_cpucl0_dbg_noc",
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_SWITCH, "dout_cmu_div_cpucl0_switch",
+ "mout_cmu_mux_cpucl0_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_SWITCH_SM, "dout_cmu_div_cpucl0_switch_sm",
+ "mout_cmu_cpucl0_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL1_SWITCH, "dout_cmu_div_cpucl1_switch",
+ "mout_cmu_mux_cpucl1_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL1_SWITCH_SM, "dout_cmu_div_cpucl1_switch_sm",
+ "mout_cmu_cpucl1_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL2_SWITCH, "dout_cmu_div_cpucl2_switch",
+ "mout_cmu_mux_cpucl2_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL2_SWITCH_SM, "dout_cmu_div_cpucl2_switch_sm",
+ "mout_cmu_cpucl2_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_DNC_NOC, "dout_cmu_div_dnc_noc",
+ "mout_cmu_mux_dnc_noc", CLK_CON_DIV_DIV_CLKCMU_DNC_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DNC_NOC_SM, "dout_cmu_div_dnc_noc_sm",
+ "mout_cmu_dnc_noc", CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUB, "dout_cmu_div_dpub", "mout_cmu_mux_dpub",
+ CLK_CON_DIV_DIV_CLKCMU_DPUB, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUB_ALT, "dout_cmu_div_dpub_alt",
+ "mout_cmu_mux_dpub_alt", CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUF, "dout_cmu_div_dpuf", "mout_cmu_mux_dpuf",
+ CLK_CON_DIV_DIV_CLKCMU_DPUF, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUF_ALT, "dout_cmu_div_dpuf_alt",
+ "mout_cmu_mux_dpuf_alt", CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSP_NOC, "dout_cmu_div_dsp_noc",
+ "mout_cmu_mux_dsp_noc", CLK_CON_DIV_DIV_CLKCMU_DSP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSP_NOC_SM, "dout_cmu_div_dsp_noc_sm",
+ "mout_cmu_dsp_noc", CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSU_SWITCH, "dout_cmu_div_dsu_switch",
+ "mout_cmu_mux_dsu_switch", CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_DSU_SWITCH_SM, "dout_cmu_div_dsu_switch_sm",
+ "mout_cmu_dsu_switch", CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_G3D_SWITCH, "dout_cmu_div_g3d_switch",
+ "mout_cmu_mux_g3d_switch", CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_G3D_SWITCH_SM, "dout_cmu_div_g3d_switch_sm",
+ "mout_cmu_g3d_switch", CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_GNPU_NOC, "dout_cmu_div_gnpu_noc",
+ "mout_cmu_mux_gnpu_noc", CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_GNPU_NOC_SM, "dout_cmu_div_gnpu_noc_sm",
+ "mout_cmu_gnpu_noc", CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_UFS_MMC_CARD, "dout_cmu_div_ufs_mmc_card",
+ "mout_cmu_mux_ufs_mmc_card", CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_DIV_UFS_MMC_CARD_SM, "dout_cmu_div_ufs_mmc_card_sm",
+ "mout_cmu_ufs_mmc_card", CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_DIV_M2M_NOC, "dout_cmu_div_m2m_noc",
+ "mout_cmu_mux_m2m_noc", CLK_CON_DIV_DIV_CLKCMU_M2M_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_NOC_SM, "dout_cmu_div_m2m_noc_sm",
+ "mout_cmu_m2m_noc", CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL0_NOC, "dout_cmu_div_nocl0_noc",
+ "mout_cmu_mux_nocl0_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL0_NOC_SM, "dout_cmu_div_nocl0_noc_sm",
+ "mout_cmu_nocl0_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1A_NOC, "dout_cmu_div_nocl1a_noc",
+ "mout_cmu_mux_nocl1a_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1A_NOC_SM, "dout_cmu_div_nocl1a_noc_sm",
+ "mout_cmu_nocl1a_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1B_NOC0, "dout_cmu_div_nocl1b_noc0",
+ "mout_cmu_mux_nocl1b_noc0", CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1B_NOC0_SM, "dout_cmu_div_nocl1b_noc0_sm",
+ "mout_cmu_nocl1b_noc0", CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1C_NOC, "dout_cmu_div_nocl1c_noc",
+ "mout_cmu_mux_nocl1c_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1C_NOC_SM, "dout_cmu_div_nocl1c_noc_sm",
+ "mout_cmu_nocl1c_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_SDMA_NOC, "dout_cmu_div_sdma_noc",
+ "mout_cmu_mux_sdma_noc", CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_SDMA_NOC_SM, "dout_cmu_div_sdma_noc_sm",
+ "mout_cmu_sdma_noc", CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK, "dout_cmu_div_cp_hispeedy_clk",
+ "mout_cmu_mux_cp_hispeedy_clk", CLK_CON_DIV_DIV_CP_HISPEEDY_CLK,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK_SM,
+ "dout_cmu_div_cp_hispeedy_clk_sm", "mout_cmu_cp_hispeedy_clk",
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED0_CLK, "dout_cmu_div_cp_shared0_clk",
+ "mout_cmu_mux_cp_shared0_clk", CLK_CON_DIV_DIV_CP_SHARED0_CLK,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED0_CLK_SM,
+ "dout_cmu_div_cp_shared0_clk_sm", "mout_cmu_cp_shared0_clk",
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED2_CLK, "dout_cmu_div_cp_shared2_clk",
+ "mout_cmu_mux_cp_shared2_clk", CLK_CON_DIV_DIV_CP_SHARED2_CLK,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED2_CLK_SM,
+ "dout_cmu_div_cp_shared2_clk_sm", "mout_cmu_cp_shared2_clk",
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_UFS_NOC, "dout_cmu_ufs_noc", "mout_cmu_mux_ufs_noc",
+ CLK_CON_DIV_CLKCMU_UFS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_FRC, "dout_cmu_div_m2m_frc",
+ "mout_cmu_mux_m2m_frc", CLK_CON_DIV_DIV_CLKCMU_M2M_FRC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_FRC_SM, "dout_cmu_div_m2m_frc_sm",
+ "mout_cmu_m2m_frc", CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_MCSC, "dout_cmu_div_mcsc_mcsc",
+ "mout_cmu_mux_mcsc_mcsc", CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_MCSC_SM, "dout_cmu_div_mcsc_mcsc_sm",
+ "mout_cmu_mcsc_mcsc", CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_NOC, "dout_cmu_div_mcsc_noc",
+ "mout_cmu_mux_mcsc_noc", CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_NOC_SM, "dout_cmu_div_mcsc_noc_sm",
+ "mout_cmu_mcsc_noc", CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM, 0, 4),
+};
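+
+/*
+ * Each DIV() entry above registers a standard clk divider: the output
+ * runs at parent_rate / (field_value + 1), so a 4-bit field covers
+ * divide ratios 1..16 and the 9-bit UFS/MMC card field covers 1..512.
+ */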
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_SHARED0_DIV1, "dout_shared0_div1",
+ "fout_shared0_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2",
+ "fout_shared0_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4",
+ "fout_shared0_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV1, "dout_shared1_div1",
+ "fout_shared1_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2",
+ "fout_shared1_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4",
+ "fout_shared1_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV1, "dout_shared2_div1",
+ "fout_shared2_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV2, "dout_shared2_div2",
+ "fout_shared2_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV4, "dout_shared2_div4",
+ "fout_shared2_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV1, "dout_shared3_div1",
+ "fout_shared3_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV2, "dout_shared3_div2",
+ "fout_shared3_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV4, "dout_shared3_div4",
+ "fout_shared3_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV1, "dout_shared4_div1",
+ "fout_shared4_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV2, "dout_shared4_div2",
+ "fout_shared4_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV4, "dout_shared4_div4",
+ "fout_shared4_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV1, "dout_shared_mif_div1",
+ "fout_shared_mif_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV2, "dout_shared_mif_div2",
+ "fout_shared_mif_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_shared_mif_div4",
+ "fout_shared_mif_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div1",
+ "fout_mmc_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div2",
+ "fout_mmc_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div4",
+ "fout_mmc_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_TCXO_DIV3, "dout_tcxo_div3",
+ "oscclk", 1, 3, 0),
+ FFACTOR(CLK_DOUT_TCXO_DIV4, "dout_tcxo_div4",
+ "oscclk", 1, 4, 0),
+};
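+
+/*
+ * The FFACTOR() taps above are fixed ratios of their parents:
+ * rate = parent_rate * mult / div, so e.g. "dout_shared0_div2" always
+ * runs at exactly half of "fout_shared0_pll".
+ */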
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos2200_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos2200_cmu_top, "samsung,exynos2200-cmu-top",
+ exynos2200_cmu_top_init);
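+
+/*
+ * For reference, the matching device-tree node looks roughly like the
+ * sketch below; the unit address and input clock are illustrative and
+ * not taken from this driver:
+ *
+ *	cmu_top: clock-controller@1a320000 {
+ *		compatible = "samsung,exynos2200-cmu-top";
+ *		reg = <0x1a320000 0x8000>;
+ *		#clock-cells = <1>;
+ *		clocks = <&oscclk>;
+ *		clock-names = "oscclk";
+ *	};
+ */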
+
+/* ---- CMU_ALIVE ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_ALIVE (0x15800000) */
+#define PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_ALIVE_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER 0x610
+#define PLL_CON1_MUX_CLKMUX_ALIVE_RCO_SPMI_USER 0x614
+#define PLL_CON0_MUX_CLK_RCO_ALIVE_USER 0x620
+#define PLL_CON1_MUX_CLK_RCO_ALIVE_USER 0x624
+#define CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI 0x1004
+#define CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC 0x1008
+#define CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI 0x100c
+#define CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC 0x1010
+#define CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC 0x1014
+#define CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC 0x1018
+#define CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC 0x101c
+#define CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC 0x1020
+#define CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC 0x1024
+#define CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC 0x1028
+#define CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART 0x1030
+#define CLK_CON_MUX_MUX_CLK_ALIVE_NOC 0x1034
+#define CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB 0x1038
+#define CLK_CON_MUX_MUX_CLK_ALIVE_SPMI 0x103c
+#define CLK_CON_MUX_MUX_CLK_ALIVE_TIMER 0x1040
+#define CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC 0x1044
+#define CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC 0x1048
+#define CLK_CON_DIV_CLKALIVE_CHUB_PERI 0x1804
+#define CLK_CON_DIV_CLKALIVE_CMGP_NOC 0x1808
+#define CLK_CON_DIV_CLKALIVE_CMGP_PERI 0x180c
+#define CLK_CON_DIV_CLKALIVE_DBGCORE_NOC 0x1810
+#define CLK_CON_DIV_CLKALIVE_DNC_NOC 0x1814
+#define CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC 0x1818
+#define CLK_CON_DIV_CLKALIVE_GNPU_NOC 0x181c
+#define CLK_CON_DIV_CLKALIVE_SDMA_NOC 0x1820
+#define CLK_CON_DIV_CLKALIVE_UFD_NOC 0x1824
+#define CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART 0x182c
+#define CLK_CON_DIV_DIV_CLK_ALIVE_NOC 0x1830
+#define CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB 0x1834
+#define CLK_CON_DIV_DIV_CLK_ALIVE_SPMI 0x1838
+#define CLK_CON_DIV_CLKALIVE_CSIS_NOC 0x183c
+#define CLK_CON_DIV_CLKALIVE_DSP_NOC 0x1840
+#define CLK_CON_GAT_CLKALIVE_CHUBVTS_RCO 0x2000
+#define CLK_CON_GAT_CLKALIVE_DNC_RCO 0x2004
+#define CLK_CON_GAT_CLKALIVE_CSIS_RCO 0x2008
+#define CLK_CON_GAT_CLKALIVE_GNPU_RCO 0x200c
+#define CLK_CON_GAT_CLKALIVE_GNSS_NOC 0x2010
+#define CLK_CON_GAT_CLKALIVE_SDMA_RCO 0x2014
+#define CLK_CON_GAT_CLKALIVE_UFD_RCO 0x2018
+#define CLK_CON_GAT_CLKALIVE_DSP_RCO 0x201c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_ALIVE_CMU_ALIVE_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2APM_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2AP_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2PMU_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APM_DMA_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_OSCCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CLKMON_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_IPCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_BLK_ALIVE_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DTZPC_ALIVE_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_GREBEINTEGRATION_IPCLKPORT_HCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_HW_SCANDUMP_CLKSTOP_CTRL_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_ACLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_LH_AXI_SI_D_APM_IPCLKPORT_I_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CHUB_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_GNSS_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_VTS_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CHUB_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_S_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_GNSS_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_CHUB_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_GNSS_IPCLKPORT_PCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_GNSS_CHUB_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_SHARED_SRAM_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AUD_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MCT_ALIVE_IPCLKPORT_I_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_INTR_GEN_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_ACLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_CLKIN_PMU_SUB 0x20b4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_1_IPCLKPORT_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_DESERIAL_ALIVE_IPCLKPORT_I_CLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_DBGCORE_UART_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_GREBE_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_NOC_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_SPMI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_TIMER_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_NOC_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RTC_IPCLKPORT_PCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_HCU_DESERIAL_IPCLKPORT_I_CLK 0x20e4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_GNSS_IPCLKPORT_I_CLK 0x20e8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LP_MODEM_IPCLKPORT_I_CLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_CHUBVTS_IPCLKPORT_I_CLK 0x20f0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_ID_DBGCORE_IPCLKPORT_I_CLK 0x20f4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_P_APM_IPCLKPORT_I_CLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CMGP_IPCLKPORT_I_CLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_IP_APM_IPCLKPORT_I_CLK 0x2100
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CHUBVTS_IPCLKPORT_I_CLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_PPU_IPCLKPORT_I_CLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_ALIVEDNC_IPCLKPORT_I_CLK 0x2110
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPC_ALIVE_IPCLKPORT_PCLK 0x2114
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_IPCLK 0x2118
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_PCLK 0x211c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SWEEPER_P_ALIVE_IPCLKPORT_ACLK 0x2120
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SYSREG_ALIVE_IPCLKPORT_PCLK 0x2124
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_OSCCLK 0x2128
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_PCLK 0x212c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_VGEN_LITE_ALIVE_IPCLKPORT_CLK 0x2130
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_WDT_ALIVE_IPCLKPORT_PCLK 0x2134
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_XIU_DP_ALIVE_IPCLKPORT_ACLK 0x2138
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK 0x213c
+#define CLK_CON_GAT_GATE_CLKALIVE_CHUB_PERI 0x2140
+#define CLK_CON_GAT_GATE_CLKALIVE_CMGP_NOC 0x2144
+#define CLK_CON_GAT_GATE_CLKALIVE_CMGP_PERI 0x2148
+#define CLK_CON_GAT_GATE_CLKALIVE_DBGCORE_NOC 0x214c
+#define CLK_CON_GAT_GATE_CLKALIVE_DNC_NOC 0x2150
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_SPMI_IPCLKPORT_CLK 0x2154
+#define CLK_CON_GAT_GATE_CLKALIVE_GNPU_NOC 0x2158
+#define CLK_CON_GAT_GATE_CLKALIVE_SDMA_NOC 0x215c
+#define CLK_CON_GAT_GATE_CLKALIVE_UFD_NOC 0x2160
+#define CLK_CON_GAT_GATE_CLKALIVE_CHUBVTS_NOC 0x2164
+#define CLK_CON_GAT_GATE_CLKALIVE_CSIS_NOC 0x2168
+#define CLK_CON_GAT_GATE_CLKALIVE_DSP_NOC 0x216c
+
+static const unsigned long alive_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_ALIVE_NOC_USER,
+ PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER,
+ PLL_CON1_MUX_CLKMUX_ALIVE_RCO_SPMI_USER,
+ PLL_CON0_MUX_CLK_RCO_ALIVE_USER,
+ PLL_CON1_MUX_CLK_RCO_ALIVE_USER,
+ CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI,
+ CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI,
+ CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC,
+ CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART,
+ CLK_CON_MUX_MUX_CLK_ALIVE_NOC,
+ CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB,
+ CLK_CON_MUX_MUX_CLK_ALIVE_SPMI,
+ CLK_CON_MUX_MUX_CLK_ALIVE_TIMER,
+ CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC,
+ CLK_CON_DIV_CLKALIVE_CHUB_PERI,
+ CLK_CON_DIV_CLKALIVE_CMGP_NOC,
+ CLK_CON_DIV_CLKALIVE_CMGP_PERI,
+ CLK_CON_DIV_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_DIV_CLKALIVE_DNC_NOC,
+ CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_DIV_CLKALIVE_GNPU_NOC,
+ CLK_CON_DIV_CLKALIVE_SDMA_NOC,
+ CLK_CON_DIV_CLKALIVE_UFD_NOC,
+ CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART,
+ CLK_CON_DIV_DIV_CLK_ALIVE_NOC,
+ CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB,
+ CLK_CON_DIV_DIV_CLK_ALIVE_SPMI,
+ CLK_CON_DIV_CLKALIVE_CSIS_NOC,
+ CLK_CON_DIV_CLKALIVE_DSP_NOC,
+ CLK_CON_GAT_CLKALIVE_CHUBVTS_RCO,
+ CLK_CON_GAT_CLKALIVE_DNC_RCO,
+ CLK_CON_GAT_CLKALIVE_CSIS_RCO,
+ CLK_CON_GAT_CLKALIVE_GNPU_RCO,
+ CLK_CON_GAT_CLKALIVE_GNSS_NOC,
+ CLK_CON_GAT_CLKALIVE_SDMA_RCO,
+ CLK_CON_GAT_CLKALIVE_UFD_RCO,
+ CLK_CON_GAT_CLKALIVE_DSP_RCO,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_ALIVE_CMU_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2PMU_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APM_DMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CLKMON_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_BLK_ALIVE_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DTZPC_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_GREBEINTEGRATION_IPCLKPORT_HCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_HW_SCANDUMP_CLKSTOP_CTRL_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_LH_AXI_SI_D_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_S_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_GNSS_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_SHARED_SRAM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AUD_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MCT_ALIVE_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_INTR_GEN_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_CLKIN_PMU_SUB,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_DESERIAL_ALIVE_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_DBGCORE_UART_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_GREBE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_SPMI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_TIMER_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_HCU_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_GNSS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LP_MODEM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_CHUBVTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_ID_DBGCORE_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_P_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CMGP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_IP_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CHUBVTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_PPU_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_ALIVEDNC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPC_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_IPCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SWEEPER_P_ALIVE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SYSREG_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_VGEN_LITE_ALIVE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_WDT_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_XIU_DP_ALIVE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK,
+ CLK_CON_GAT_GATE_CLKALIVE_CHUB_PERI,
+ CLK_CON_GAT_GATE_CLKALIVE_CMGP_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CMGP_PERI,
+ CLK_CON_GAT_GATE_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_DNC_NOC,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_SPMI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GATE_CLKALIVE_GNPU_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_SDMA_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_UFD_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CSIS_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_DSP_NOC,
+};
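+
+/*
+ * Like the other *_clk_regs tables, this list feeds the clock core's
+ * suspend/resume hooks: each listed register is saved on system sleep
+ * and restored on wake.
+ */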
+
+PNAME(mout_alive_noc_user_p) = { "oscclk", "dout_cmu_alive_noc" };
+PNAME(mout_alive_rco_spmi_user_p) = { "oscclk", "rco_i3c_pmic" };
+PNAME(mout_rco_alive_user_p) = { "oscclk", "rco_alive" };
+PNAME(mout_alive_chub_peri_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_cmgp_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_cmgp_peri_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dbgcore_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dnc_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_chubvts_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_gnpu_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_gnss_noc_p) = { "rco_400", "mout_alive_noc_user" };
+PNAME(mout_alive_sdma_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_ufd_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dbgcore_uart_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_pmu_sub_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_spmi_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_rco_spmi_user", "oscclk" };
+PNAME(mout_alive_timer_p) = { "oscclk", "oscclk" };
+PNAME(mout_alive_csis_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dsp_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+
+static const struct samsung_mux_clock alive_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_ALIVE_NOC_USER, "mout_alive_noc_user",
+ mout_alive_noc_user_p, PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_ALIVE_RCO_SPMI_USER, "mout_alive_rco_spmi_user",
+ mout_alive_rco_spmi_user_p,
+ PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_ALIVE_USER, "mout_rco_alive_user",
+ mout_rco_alive_user_p, PLL_CON0_MUX_CLK_RCO_ALIVE_USER, 4, 1),
+ MUX(CLK_MOUT_ALIVE_CHUB_PERI, "mout_alive_chub_peri",
+ mout_alive_chub_peri_p, CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CMGP_NOC, "mout_alive_cmgp_noc",
+ mout_alive_cmgp_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CMGP_PERI, "mout_alive_cmgp_peri",
+ mout_alive_cmgp_peri_p, CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DBGCORE_NOC, "mout_alive_dbgcore_noc",
+ mout_alive_dbgcore_noc_p, CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_DNC_NOC, "mout_alive_dnc_noc", mout_alive_dnc_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CHUBVTS_NOC, "mout_alive_chubvts_noc",
+ mout_alive_chubvts_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_GNPU_NOC, "mout_alive_gnpu_noc",
+ mout_alive_gnpu_noc_p, CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_GNSS_NOC, "mout_alive_gnss_noc",
+ mout_alive_gnss_noc_p, CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC, 0, 1),
+ MUX(CLK_MOUT_ALIVE_SDMA_NOC, "mout_alive_sdma_noc",
+ mout_alive_sdma_noc_p, CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_UFD_NOC, "mout_alive_ufd_noc", mout_alive_ufd_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DBGCORE_UART, "mout_alive_dbgcore_uart",
+ mout_alive_dbgcore_uart_p, CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_NOC, "mout_alive_noc", mout_alive_noc_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_PMU_SUB, "mout_alive_pmu_sub", mout_alive_pmu_sub_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB, 0, 2),
+ MUX(CLK_MOUT_ALIVE_SPMI, "mout_alive_spmi", mout_alive_spmi_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_SPMI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_TIMER, "mout_alive_timer", mout_alive_timer_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_TIMER, 0, 1),
+ MUX(CLK_MOUT_ALIVE_CSIS_NOC, "mout_alive_csis_noc",
+ mout_alive_csis_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DSP_NOC, "mout_alive_dsp_noc", mout_alive_dsp_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC, 0, 2),
+};
+
+static const struct samsung_div_clock alive_div_clks[] __initconst = {
+ DIV(CLK_DOUT_ALIVE_CHUB_PERI, "dout_alive_chub_peri",
+ "mout_alive_chub_peri", CLK_CON_DIV_CLKALIVE_CHUB_PERI, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CMGP_NOC, "dout_alive_cmgp_noc",
+ "mout_alive_cmgp_noc", CLK_CON_DIV_CLKALIVE_CMGP_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CMGP_PERI, "dout_alive_cmgp_peri",
+ "mout_alive_cmgp_peri", CLK_CON_DIV_CLKALIVE_CMGP_PERI, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DBGCORE_NOC, "dout_alive_dbgcore_noc",
+ "mout_alive_dbgcore_noc", CLK_CON_DIV_CLKALIVE_DBGCORE_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DNC_NOC, "dout_alive_dnc_noc", "mout_alive_dnc_noc",
+ CLK_CON_DIV_CLKALIVE_DNC_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CHUBVTS_NOC, "dout_alive_chubvts_noc",
+ "mout_alive_chubvts_noc", CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_GNPU_NOC, "dout_alive_gnpu_noc",
+ "mout_alive_gnpu_noc", CLK_CON_DIV_CLKALIVE_GNPU_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_SDMA_NOC, "dout_alive_sdma_noc",
+ "mout_alive_sdma_noc", CLK_CON_DIV_CLKALIVE_SDMA_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_UFD_NOC, "dout_alive_ufd_noc", "mout_alive_ufd_noc",
+ CLK_CON_DIV_CLKALIVE_UFD_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DBGCORE_UART, "dout_alive_dbgcore_uart",
+ "mout_alive_dbgcore_uart", CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART,
+ 0, 4),
+ DIV(CLK_DOUT_ALIVE_NOC, "dout_alive_noc", "mout_alive_noc",
+ CLK_CON_DIV_DIV_CLK_ALIVE_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_PMU_SUB, "dout_alive_pmu_sub", "mout_alive_pmu_sub",
+ CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB, 0, 3),
+ DIV(CLK_DOUT_ALIVE_SPMI, "dout_alive_spmi", "mout_alive_spmi",
+ CLK_CON_DIV_DIV_CLK_ALIVE_SPMI, 0, 5),
+ DIV(CLK_DOUT_ALIVE_CSIS_NOC, "dout_alive_csis_noc",
+ "mout_alive_csis_noc", CLK_CON_DIV_CLKALIVE_CSIS_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DSP_NOC, "dout_alive_dsp_noc",
+ "mout_alive_dsp_noc", CLK_CON_DIV_CLKALIVE_DSP_NOC, 0, 3),
+};
+
+static const struct samsung_fixed_rate_clock alive_fixed_clks[] __initconst = {
+ FRATE(0, "rco_i3c_pmic", NULL, 0, 49152000),
+ FRATE(0, "rco_alive", NULL, 0, 49152000),
+ FRATE(0, "rco_400", NULL, 0, 393216000),
+};
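+
+/*
+ * The RCOs are always-on internal oscillators with fixed rates; note
+ * that rco_400 (393.216 MHz) is exactly 8x the 49.152 MHz rate of
+ * rco_alive and rco_i3c_pmic.
+ */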
+
+static const struct samsung_cmu_info alive_cmu_info __initconst = {
+ .mux_clks = alive_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(alive_mux_clks),
+ .div_clks = alive_div_clks,
+ .nr_div_clks = ARRAY_SIZE(alive_div_clks),
+ .fixed_clks = alive_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(alive_fixed_clks),
+ .nr_clk_ids = CLKS_NR_ALIVE,
+ .clk_regs = alive_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(alive_clk_regs),
+ .clk_name = "noc",
+};
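+
+/*
+ * .clk_name names this CMU's bus clock in the DT node's clock-names;
+ * exynos_arm64_register_cmu() looks the "noc" clock up and enables it
+ * before the CMU registers are first touched.
+ */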
+
+static void __init exynos2200_cmu_alive_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &alive_cmu_info);
+}
+
+/* Register CMU_ALIVE early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos2200_cmu_alive, "samsung,exynos2200-cmu-alive",
+ exynos2200_cmu_alive_init);
+
+/* ---- CMU_PERIS ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIS_GIC_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIS_NOC_USER 0x614
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_DIV_CLKCMU_OTP 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIS_DDD_CTRL 0x1804
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_ATCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_DDD_PERIS_IPCLKPORT_CK_IN 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_GIC_IPCLKPORT_GICCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_MI_LD_ICC_CPUGIC_CLUSTER0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_SI_LD_IRI_GICCPU_CLUSTER0_IPCLKPORT_I_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_NOCP_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERISGIC_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERIS_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT0_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT1_IPCLKPORT_PCLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_DDD_CTRL_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_NOCP_IPCLKPORT_CLK 0x206c
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_GIC_USER,
+ PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_DIV_CLK_PERIS_DDD_CTRL,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_ATCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_DDD_PERIS_IPCLKPORT_CK_IN,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_GIC_IPCLKPORT_GICCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_MI_LD_ICC_CPUGIC_CLUSTER0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_SI_LD_IRI_GICCPU_CLUSTER0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERISGIC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_DDD_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_NOCP_IPCLKPORT_CLK,
+};
+
+PNAME(mout_peris_gic_user_p) = { "dout_tcxo_div3",
+ "dout_cmu_peris_gic" };
+PNAME(mout_peris_noc_user_p) = { "dout_tcxo_div3",
+ "dout_cmu_peris_noc" };
+PNAME(mout_peris_gic_p) = { "mout_peris_gic_user", "dout_tcxo_div3" };
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_GIC_USER, "mout_peris_gic_user",
+ mout_peris_gic_user_p, PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_NOC_USER, "mout_peris_noc_user",
+ mout_peris_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_GIC, "mout_peris_gic", mout_peris_gic_p,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC, 0, 1),
+};
+
+static const struct samsung_fixed_factor_clock peris_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_PERIS_OTP, "dout_peris_otp",
+ "dout_tcxo_div3", 1, 8, 0),
+ FFACTOR(CLK_DOUT_PERIS_DDD_CTRL, "dout_peris_ddd_ctrl",
+ "mout_peris_gic", 1, 4, 0),
+};
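+
+/*
+ * Both factors above are plain ratios of already-registered clocks:
+ * "dout_peris_otp" ends up at oscclk / 24 (TCXO divided by 3, then by
+ * 8), and the DDD control clock at a quarter of the selected GIC clock.
+ */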
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .fixed_factor_clks = peris_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(peris_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+ .clk_name = "noc",
+};
+
+static void __init exynos2200_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's a dependency for GIC and MCT */
+CLK_OF_DECLARE(exynos2200_cmu_peris, "samsung,exynos2200-cmu-peris",
+ exynos2200_cmu_peris_init);
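+
+/*
+ * CLK_OF_DECLARE() hooks of_clk_init(), which the arm64 time_init()
+ * path runs before timer_probe(), so the MCT finds its clocks ready
+ * when the clocksource initializes.
+ */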
+
+/* ---- CMU_CMGP ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CMGP (0x14e00000) */
+#define PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKALIVE_CMGP_NOC_USER 0x614
+#define PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER 0x620
+#define PLL_CON1_MUX_CLKALIVE_CMGP_PERI_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_CMGP_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0 0x1008
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1 0x100c
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL 0x1010
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI0 0x1014
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI1 0x1018
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI2 0x101c
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI3 0x1020
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI4 0x1024
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI5 0x1028
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI6 0x102c
+#define CLK_CON_DIV_DIV_CLK_CMGP_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0 0x1808
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1 0x180c
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL 0x1810
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI0 0x1814
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI1 0x1818
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI2 0x181c
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI3 0x1820
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI4 0x1824
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI5 0x1828
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI6 0x182c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_APBIF_GPIO_CMGP_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_CMU_CMGP_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_IPCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_D_TZPC_CMGP_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_IPCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_IPCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_IPCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_IPCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_NOC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_FREE_OSCCLK_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_I2C_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_NOC_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C0_IPCLKPORT_CLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C1_IPCLKPORT_CLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI0_IPCLKPORT_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI2_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI3_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI4_IPCLKPORT_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI5_IPCLKPORT_CLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI6_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_MS_CTRL_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_MI_LP_CMGP_IPCLKPORT_I_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_SI_LP_CMGPUFD_IPCLKPORT_I_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_IPCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_IPCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_MULTI_SLV_Q_CTRL_CMGP_IPCLKPORT_CLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2APM_IPCLKPORT_PCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CHUB_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CP_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2GNSS_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2PMU_AP_IPCLKPORT_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_IPCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_PCLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_IPCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_PCLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_IPCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_PCLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_IPCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_IPCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_PCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_IPCLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_PCLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_IPCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_PCLK 0x20e4
+
+static const unsigned long cmgp_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER,
+ PLL_CON1_MUX_CLKALIVE_CMGP_NOC_USER,
+ PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER,
+ PLL_CON1_MUX_CLKALIVE_CMGP_PERI_USER,
+ CLK_CON_MUX_MUX_CLK_CMGP_I2C,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI0,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI1,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI2,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI3,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI4,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI5,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI6,
+ CLK_CON_DIV_DIV_CLK_CMGP_I2C,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI0,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI1,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI2,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI3,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI4,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI5,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI6,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_APBIF_GPIO_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_CMU_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_D_TZPC_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI2_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI3_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI4_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI5_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI6_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_MI_LP_CMGP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_SI_LP_CMGPUFD_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_MULTI_SLV_Q_CTRL_CMGP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2PMU_AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_PCLK,
+};
+
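+/*
+ * Judging by the CLKALIVE-prefixed PLL_CON registers above, the CMGP
+ * user muxes take their inputs from CMU_ALIVE rather than the top-level
+ * CMU.
+ */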
+PNAME(mout_cmgp_clkalive_noc_user_p) = { "oscclk", "dout_alive_cmgp_noc" };
+PNAME(mout_cmgp_clkalive_peri_user_p) = { "oscclk", "dout_alive_cmgp_peri" };
+PNAME(mout_cmgp_i2c_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_i2c0_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_i2c1_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_ms_ctrl_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi0_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi1_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi2_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi3_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi4_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi5_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi6_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+
+static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMGP_CLKALIVE_NOC_USER, "mout_cmgp_clkalive_noc_user",
+ mout_cmgp_clkalive_noc_user_p, PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CMGP_CLKALIVE_PERI_USER, "mout_cmgp_clkalive_peri_user",
+ mout_cmgp_clkalive_peri_user_p,
+ PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER, 4, 1),
+ MUX(CLK_MOUT_CMGP_I2C, "mout_cmgp_i2c", mout_cmgp_i2c_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_I2C, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_I2C0, "mout_cmgp_spi_i2c0", mout_cmgp_spi_i2c0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_I2C1, "mout_cmgp_spi_i2c1", mout_cmgp_spi_i2c1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_MS_CTRL, "mout_cmgp_spi_ms_ctrl",
+ mout_cmgp_spi_ms_ctrl_p, CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL,
+ 0, 1),
+ MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI0, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI1, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI2, "mout_cmgp_usi2", mout_cmgp_usi2_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI2, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI3, "mout_cmgp_usi3", mout_cmgp_usi3_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI3, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI4, "mout_cmgp_usi4", mout_cmgp_usi4_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI4, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI5, "mout_cmgp_usi5", mout_cmgp_usi5_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI5, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI6, "mout_cmgp_usi6", mout_cmgp_usi6_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI6, 0, 1),
+};
+
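+/*
+ * DIV(id, name, parent, reg, shift, width): the divider field at 'shift'
+ * holds (divisor - 1), so a 4-bit field divides by 1..16.
+ */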
+static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMGP_I2C, "dout_cmgp_i2c", "mout_cmgp_i2c",
+ CLK_CON_DIV_DIV_CLK_CMGP_I2C, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_I2C0, "dout_cmgp_spi_i2c0", "mout_cmgp_spi_i2c0",
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_I2C1, "dout_cmgp_spi_i2c1", "mout_cmgp_spi_i2c1",
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_MS_CTRL, "dout_cmgp_spi_ms_ctrl",
+ "mout_cmgp_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL,
+ 0, 4),
+ DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI0, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI1, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI2, "dout_cmgp_usi2", "mout_cmgp_usi2",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI2, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI3, "dout_cmgp_usi3", "mout_cmgp_usi3",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI3, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI4, "dout_cmgp_usi4", "mout_cmgp_usi4",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI4, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI5, "dout_cmgp_usi5", "mout_cmgp_usi5",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI5, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI6, "dout_cmgp_usi6", "mout_cmgp_usi6",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI6, 0, 4),
+};
+
+static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
+ .mux_clks = cmgp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmgp_mux_clks),
+ .div_clks = cmgp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmgp_div_clks),
+ .nr_clk_ids = CLKS_NR_CMGP,
+ .clk_regs = cmgp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmgp_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI0 ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKAUD_HSI0_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKAUD_HSI0_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_HSI0_DPOSC_USER 0x624
+#define PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER 0x630
+#define PLL_CON1_MUX_CLKCMU_HSI0_NOC_USER 0x634
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER 0x640
+#define PLL_CON1_MUX_CLKCMU_HSI0_USB32DRD_USER 0x644
+#define CLK_CON_MUX_MUX_CLK_HSI0_NOC 0x1000
+#define CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK 0x1004
+#define CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD 0x1008
+#define CLK_CON_DIV_DIV_CLK_HSI0_EUSB 0x1800
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_AS_APB_EUSBPHY_HSI0_IPCLKPORT_PCLKM 0x2000
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_OSC_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_EUSB_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_NOC_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_ACEL_SI_D_HSI0_IPCLKPORT_I_CLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AST_SI_G_PPMU_HSI0_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_LD_AUDHSI0_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_P_HSI0_IPCLKPORT_I_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_SI_LD_HSI0AUD_IPCLKPORT_I_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_BLK_HSI0_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSMMU_D_HSI0_IPCLKPORT_CLK_S2 0x2040
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_URAM_IPCLKPORT_ACLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_APB_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_CTRL_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USB32DRD_REF_CLK_40 0x2054
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_CTRL_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_TCA_APB_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBLINK_ACLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBSUBCTL_APB_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_P0_HSI0_IPCLKPORT_ACLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_OTP_DESERIAL_DPLINK_HDCP_IPCLKPORT_I_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_FREE_OSCCLK_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_NOC_IPCLKPORT_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SPC_HSI0_IPCLKPORT_PCLK 0x2080
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKAUD_HSI0_NOC_USER,
+ PLL_CON1_MUX_CLKAUD_HSI0_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_DPOSC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_USB32DRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD,
+ CLK_CON_DIV_DIV_CLK_HSI0_EUSB,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_AS_APB_EUSBPHY_HSI0_IPCLKPORT_PCLKM,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_OSC_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_EUSB_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_ACEL_SI_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AST_SI_G_PPMU_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_LD_AUDHSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_SI_LD_HSI0AUD_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_BLK_HSI0_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSMMU_D_HSI0_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_URAM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_APB_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_CTRL_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USB32DRD_REF_CLK_40,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_CTRL_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_TCA_APB_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBLINK_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBSUBCTL_APB_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_P0_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_OTP_DESERIAL_DPLINK_HDCP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SPC_HSI0_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_clkcmu_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+PNAME(mout_clkcmu_hsi0_dposc_user_p) = { "oscclk", "dout_cmu_hsi0_dposc" };
+PNAME(mout_clkcmu_hsi0_noc_user_p) = { "oscclk", "dout_cmu_hsi0_noc" };
+PNAME(mout_clkcmu_hsi0_usb32drd_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usb32drd" };
+PNAME(mout_mux_clk_hsi0_noc_p) = { "mout_clkcmu_hsi0_noc_user" };
+PNAME(mout_mux_clk_hsi0_rtcclk_p) = { "rtcclk", "oscclk" };
+PNAME(mout_mux_clk_hsi0_usb32drd_p) = { "dout_tcxo_div4",
+ "mout_clkcmu_hsi0_usb32drd_user" };
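+/*
+ * Note: the PLL_CON registers above suggest MUX_CLK_HSI0_NOC also has a
+ * CLKAUD-sourced input; only the CMU path is modelled in
+ * mout_mux_clk_hsi0_noc_p here.
+ */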
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CLKCMU_HSI0_DPGTC_USER, "mout_clkcmu_hsi0_dpgtc_user",
+ mout_clkcmu_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_DPOSC_USER, "mout_clkcmu_hsi0_dposc_user",
+ mout_clkcmu_hsi0_dposc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_NOC_USER, "mout_clkcmu_hsi0_noc_user",
+ mout_clkcmu_hsi0_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_USB32DRD_USER,
+ "mout_clkcmu_hsi0_usb32drd_user", mout_clkcmu_hsi0_usb32drd_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_NOC, "mout_hsi0_noc", mout_mux_clk_hsi0_noc_p,
+ CLK_CON_MUX_MUX_CLK_HSI0_NOC, 0, 1),
+ MUX(CLK_MOUT_HSI0_RTCCLK, "mout_hsi0_rtcclk",
+ mout_mux_clk_hsi0_rtcclk_p, CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK, 0, 1),
+ MUX(CLK_MOUT_HSI0_USB32DRD, "mout_hsi0_usb32drd",
+ mout_mux_clk_hsi0_usb32drd_p, CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD,
+ 0, 1),
+};
+
+static const struct samsung_div_clock hsi0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DIV_CLK_HSI0_EUSB, "dout_div_clk_hsi0_eusb",
+ "mout_hsi0_noc", CLK_CON_DIV_DIV_CLK_HSI0_EUSB, 0, 2),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .div_clks = hsi0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi0_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC0_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04 0x1010
+#define CLK_CON_DIV_DIV_CLK_PERIC0_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04 0x1810
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_FREE_OSCCLK_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I2C_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_NOCP_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_USI04_IPCLKPORT_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_SLH_AXI_MI_P_PERIC0_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_IPCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_SCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_PCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_SCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_SCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_I2C_IPCLKPORT_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_NOCP_IPCLKPORT_CLK 0x2050
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04,
+ CLK_CON_DIV_DIV_CLK_PERIC0_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_USI04_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_SLH_AXI_MI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_NOCP_IPCLKPORT_CLK,
+};
+
+PNAME(mout_peric0_ip0_user_p) = { "oscclk", "dout_cmu_peric0_ip0" };
+PNAME(mout_peric0_ip1_user_p) = { "oscclk", "dout_cmu_peric0_ip1" };
+PNAME(mout_peric0_noc_user_p) = { "oscclk", "dout_cmu_peric0_noc" };
+PNAME(mout_peric0_i2c_p) = { "oscclk", "mout_peric0_ip0_user",
+			     "mout_peric0_ip1_user", "oscclk" };
+PNAME(mout_peric0_usi04_p) = { "oscclk", "mout_peric0_ip0_user",
+			       "mout_peric0_ip1_user", "oscclk" };
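+/*
+ * The peripheral muxes use 2-bit select fields; entries 0 and 3 of each
+ * parent list both map to oscclk, presumably so the unused encodings
+ * fall back to the oscillator.
+ */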
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_IP0_USER, "mout_peric0_ip0_user",
+ mout_peric0_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_IP1_USER, "mout_peric0_ip1_user",
+ mout_peric0_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_NOC_USER, "mout_peric0_noc_user",
+ mout_peric0_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_I2C, "mout_peric0_i2c", mout_peric0_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC0_USI04, "mout_peric0_usi04", mout_peric0_usi04_p,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04, 0, 2),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_I2C, "dout_peric0_i2c", "mout_peric0_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC0_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04, "dout_peric0_usi04", "mout_peric0_usi04",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04, 0, 7),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "noc",
+};
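+/*
+ * Consumers reference these clocks via the dt-bindings IDs, e.g. a
+ * hypothetical USI node:
+ *   clocks = <&cmu_peric0 CLK_DOUT_PERIC0_USI04>;
+ */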
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC1_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10 0x1020
+#define CLK_CON_DIV_DIV_CLK_PERIC1_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10 0x1820
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_IPCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_I2C_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_NOCP_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_FREE_OSCCLK_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_NOCP_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_SPI_I2C_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_SPI_I2C_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI09_IPCLKPORT_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI10_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_SPI_MS_CTRL_IPCLKPORT_CLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SLH_AXI_MI_P_PERIC1_IPCLKPORT_I_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SPI_MULTI_SLV_Q_CTRL_PERIC1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_IPCLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_IPCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_IPCLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_IPCLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_IPCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_IPCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_IPCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_IPCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x20a8
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10,
+ CLK_CON_DIV_DIV_CLK_PERIC1_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI09_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI10_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SLH_AXI_MI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SPI_MULTI_SLV_Q_CTRL_PERIC1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_peric1_ip0_user_p) = { "oscclk", "dout_cmu_peric1_ip0" };
+PNAME(mout_peric1_ip1_user_p) = { "oscclk", "dout_cmu_peric1_ip1" };
+PNAME(mout_peric1_noc_user_p) = { "oscclk", "dout_cmu_peric1_noc" };
+PNAME(mout_peric1_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_spi_ms_ctrl_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_uart_bt_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi07_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi07_spi_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi08_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi08_spi_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi09_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi10_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_IP0_USER, "mout_peric1_ip0_user",
+ mout_peric1_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_IP1_USER, "mout_peric1_ip1_user",
+ mout_peric1_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_NOC_USER, "mout_peric1_noc_user",
+ mout_peric1_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_I2C, "mout_peric1_i2c", mout_peric1_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_SPI_MS_CTRL, "mout_peric1_spi_ms_ctrl",
+ mout_peric1_spi_ms_ctrl_p, CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL,
+ 0, 2),
+ MUX(CLK_MOUT_PERIC1_UART_BT, "mout_peric1_uart_bt",
+ mout_peric1_uart_bt_p, CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI07, "mout_peric1_usi07", mout_peric1_usi07_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI07_SPI_I2C, "mout_peric1_usi07_spi_i2c",
+ mout_peric1_usi07_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI08, "mout_peric1_usi08", mout_peric1_usi08_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI08_SPI_I2C, "mout_peric1_usi08_spi_i2c",
+ mout_peric1_usi08_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI09, "mout_peric1_usi09", mout_peric1_usi09_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI10, "mout_peric1_usi10", mout_peric1_usi10_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10, 0, 2),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_I2C, "dout_peric1_i2c", "mout_peric1_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_SPI_MS_CTRL, "dout_peric1_spi_ms_ctrl",
+ "mout_peric1_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL,
+ 0, 7),
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt", CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07, "dout_peric1_usi07", "mout_peric1_usi07",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI07_SPI_I2C, "dout_peric1_usi07_spi_i2c",
+ "mout_peric1_usi07_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08, "dout_peric1_usi08", "mout_peric1_usi08",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI08_SPI_I2C, "dout_peric1_usi08_spi_i2c",
+ "mout_peric1_usi08_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09, "dout_peric1_usi09", "mout_peric1_usi09",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI10, "dout_peric1_usi10", "mout_peric1_usi10",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10, 0, 7),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_PERIC2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC2 (0x11c00000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC2_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC2_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC2_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC2_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI00 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI01 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI02 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI03 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI05 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI06 0x1028
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI11 0x102c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI00 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI01 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI02 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI03 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI05 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI06 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI11 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_IPCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_D_TZPC_PERIC2_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_GPIO_PERIC2_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_SCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_PCLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_SCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_SCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_SCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_SCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_SCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_PCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_SCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_SCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_SCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_PERIC2_CMU_PERIC2_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_PWM_IPCLKPORT_I_PCLK_S0 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_FREE_OSCCLK_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_I2C_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_NOCP_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_UART_DBG_IPCLKPORT_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_IPCLKPORT_CLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_SPI_I2C_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_SPI_I2C_IPCLKPORT_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI02_IPCLKPORT_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI03_IPCLKPORT_CLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI05_IPCLKPORT_CLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI06_IPCLKPORT_CLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI11_IPCLKPORT_CLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_SPI_MS_CTRL_IPCLKPORT_CLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_P_PERIC2_IPCLKPORT_I_CLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SPI_MULTI_SLV_Q_CTRL_PERIC2_IPCLKPORT_CLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SYSREG_PERIC2_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_IPCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_IPCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_PCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_IPCLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_IPCLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_PCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_IPCLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_PCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_IPCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_PCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_IPCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_PCLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_IPCLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_PCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_IPCLK 0x20e4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_PCLK 0x20e8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_IPCLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_PCLK 0x20f0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_IPCLK 0x20f4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_PCLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_IPCLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_PCLK 0x2100
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_IPCLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_IPCLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_PCLK 0x2110
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_I2C_IPCLKPORT_CLK 0x2114
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_NOCP_IPCLKPORT_CLK 0x2118
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_LP_CSISPERIC2_IPCLKPORT_I_CLK 0x211c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_XIU_P_PERIC2_IPCLKPORT_ACLK 0x2120
+
+static const unsigned long peric2_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC2_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI02,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI03,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI05,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI06,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI11,
+ CLK_CON_DIV_DIV_CLK_PERIC2_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI02,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI03,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI05,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI06,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI11,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_D_TZPC_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_GPIO_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_PERIC2_CMU_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI02_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI03_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI05_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI06_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI11_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_P_PERIC2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SPI_MULTI_SLV_Q_CTRL_PERIC2_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SYSREG_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_LP_CSISPERIC2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_XIU_P_PERIC2_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_peric2_ip0_user_p) = { "oscclk", "dout_cmu_peric2_ip0" };
+PNAME(mout_peric2_ip1_user_p) = { "oscclk", "dout_cmu_peric2_ip1" };
+PNAME(mout_peric2_noc_user_p) = { "oscclk", "dout_cmu_peric2_noc" };
+PNAME(mout_peric2_i2c_p) = { "oscclk", "mout_peric2_ip0_user",
+			     "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_spi_ms_ctrl_p) = { "oscclk", "mout_peric2_ip0_user",
+				     "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_uart_dbg_p) = { "oscclk", "mout_peric2_ip0_user",
+				  "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi00_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi00_spi_i2c_p) = { "oscclk", "mout_peric2_ip0_user",
+				       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi01_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi01_spi_i2c_p) = { "oscclk", "mout_peric2_ip0_user",
+				       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi02_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi03_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi05_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi06_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+PNAME(mout_peric2_usi11_p) = { "oscclk", "mout_peric2_ip0_user",
+			       "mout_peric2_ip1_user", "oscclk" };
+
+static const struct samsung_mux_clock peric2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC2_IP0_USER, "mout_peric2_ip0_user",
+ mout_peric2_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_IP1_USER, "mout_peric2_ip1_user",
+ mout_peric2_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_NOC_USER, "mout_peric2_noc_user",
+ mout_peric2_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_I2C, "mout_peric2_i2c", mout_peric2_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC2_SPI_MS_CTRL, "mout_peric2_spi_ms_ctrl",
+ mout_peric2_spi_ms_ctrl_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL, 0, 2),
+ MUX(CLK_MOUT_PERIC2_UART_DBG, "mout_peric2_uart_dbg",
+ mout_peric2_uart_dbg_p, CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI00, "mout_peric2_usi00", mout_peric2_usi00_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00, 0, 2),
+	MUX(CLK_MOUT_PERIC2_USI00_SPI_I2C, "mout_peric2_usi00_spi_i2c",
+	    mout_peric2_usi00_spi_i2c_p,
+	    CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI01, "mout_peric2_usi01", mout_peric2_usi01_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI01_SPI_I2C, "mout_peric2_usi01_spi_i2c",
+ mout_peric2_usi01_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI02, "mout_peric2_usi02", mout_peric2_usi02_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI02, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI03, "mout_peric2_usi03", mout_peric2_usi03_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI03, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI05, "mout_peric2_usi05", mout_peric2_usi05_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI05, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI06, "mout_peric2_usi06", mout_peric2_usi06_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI06, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI11, "mout_peric2_usi11", mout_peric2_usi11_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI11, 0, 2),
+};
+
+static const struct samsung_div_clock peric2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC2_I2C, "dout_peric2_i2c", "mout_peric2_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_SPI_MS_CTRL, "dout_peric2_spi_ms_ctrl",
+ "mout_peric2_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL,
+ 0, 7),
+ DIV(CLK_DOUT_PERIC2_UART_DBG, "dout_peric2_uart_dbg",
+ "mout_peric2_uart_dbg", CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI00, "dout_peric2_usi00", "mout_peric2_usi00",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI00_SPI_I2C, "dout_peric2_usi00_spi_i2c",
+ "mout_peric2_usi00_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI01, "dout_peric2_usi01", "mout_peric2_usi01",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI01_SPI_I2C, "dout_peric2_usi01_spi_i2c",
+ "mout_peric2_usi01_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI02, "dout_peric2_usi02", "mout_peric2_usi02",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI02, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI03, "dout_peric2_usi03", "mout_peric2_usi03",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI03, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI05, "dout_peric2_usi05", "mout_peric2_usi05",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI05, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI06, "dout_peric2_usi06", "mout_peric2_usi06",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI06, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI11, "dout_peric2_usi11", "mout_peric2_usi11",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI11, 0, 7),
+};
+
+static const struct samsung_cmu_info peric2_cmu_info __initconst = {
+ .mux_clks = peric2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric2_mux_clks),
+ .div_clks = peric2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric2_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC2,
+ .clk_regs = peric2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric2_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_UFS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_UFS (0x11000000) */
+#define PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_UFS_MMC_CARD_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_UFS_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_UFS_NOC_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_UFS_UFS_EMBD_USER 0x624
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_BLK_UFS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_D_TZPC_UFS_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_HSI1UFS_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_UFS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_LH_ACEL_SI_D_UFS_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2018
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_OTP_DESERIAL_UFS_EMBD_FMP_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_ACLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_MMC_CARD_IPCLKPORT_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_NOC_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_UFS_EMBD_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_NOC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AST_SI_G_PPMU_UFS_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AXI_MI_P_UFS_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SPC_UFS_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SYSMMU_UFS_IPCLKPORT_CLK_S2 0x2050
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SYSREG_UFS_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_CMU_UFS_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x2060
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_VGEN_LITE_UFS_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_D_UFS_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_P_UFS_IPCLKPORT_ACLK 0x2070
+
+static const unsigned long ufs_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_UFS_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_UFS_EMBD_USER,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_BLK_UFS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_D_TZPC_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_HSI1UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_LH_ACEL_SI_D_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_OTP_DESERIAL_UFS_EMBD_FMP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_MMC_CARD_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_UFS_EMBD_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AST_SI_G_PPMU_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AXI_MI_P_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SPC_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SYSMMU_UFS_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SYSREG_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_CMU_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_VGEN_LITE_UFS_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_D_UFS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_P_UFS_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_clkcmu_ufs_mmc_card_user_p) = { "oscclk",
+ "mout_cmu_ufs_mmc_card" };
+PNAME(mout_clkcmu_ufs_noc_user_p) = { "oscclk", "dout_cmu_ufs_noc" };
+PNAME(mout_clkcmu_ufs_ufs_embd_user_p) = { "oscclk",
+ "dout_cmu_ufs_ufs_embd" };
+
+static const struct samsung_mux_clock ufs_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_UFS_MMC_CARD_USER, "mout_ufs_mmc_card_user",
+ mout_clkcmu_ufs_mmc_card_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER, 4, 1),
+ MUX(CLK_MOUT_UFS_NOC_USER, "mout_ufs_noc_user",
+ mout_clkcmu_ufs_noc_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_UFS_UFS_EMBD_USER, "mout_ufs_ufs_embd_user",
+ mout_clkcmu_ufs_ufs_embd_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER, 4, 1),
+};
+
+static const struct samsung_cmu_info ufs_cmu_info __initconst = {
+ .mux_clks = ufs_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(ufs_mux_clks),
+ .nr_clk_ids = CLKS_NR_UFS,
+ .clk_regs = ufs_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(ufs_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_VTS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_VTS (0x15300000) */
+#define PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKALIVE_VTS_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER 0x610
+#define PLL_CON1_MUX_CLKALIVE_VTS_RCO_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_VTS_DMIC_USER 0x624
+#define CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1 0x1000
+#define CLK_CON_MUX_MUX_CLK_VTS_NOC 0x1004
+#define CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD 0x100c
+#define CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0 0x1800
+#define CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1 0x1804
+#define CLK_CON_DIV_DIV_CLK_VTS_CPU 0x1808
+#define CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF 0x1814
+#define CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2 0x1818
+#define CLK_CON_DIV_DIV_CLK_VTS_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF 0x1820
+#define CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE 0x1824
+#define CLK_CON_GAT_CLKVTS_AUD_DMIC0 0x2000
+#define CLK_CON_GAT_CLKVTS_AUD_DMIC1 0x2004
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_BAAW_VTS_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_MI_IP_VC2VTS_IPCLKPORT_I_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_SI_ID_VTS2VC_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_FREE_OSCCLK_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_DMIC_IF_IPCLKPORT_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_CORE_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_YAMIN_IPCLKPORT_CLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_ACLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_ASYNCINTERRUPT_VTS_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_DIV2_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_DIV2_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_DIV2_CLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_GPIO_VTS_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_ACLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_PCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_ACLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_ACLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_PCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_ACLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_PCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_ABOX_VTS_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_AP_VTS_IPCLKPORT_PCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_DNC_VTS_IPCLKPORT_PCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_NOC_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_YAMIN_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_NOC_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_BCLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_CCLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_PCLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_ACLK_CPU 0x2124
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_AUD_DIV2_CLK 0x2128
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK0 0x212c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK1 0x2130
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK2 0x2134
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SYSREG_VTS_IPCLKPORT_PCLK 0x213c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER1_IPCLKPORT_PCLK 0x2140
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER2_IPCLKPORT_PCLK 0x2144
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER_IPCLKPORT_PCLK 0x2148
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_VTS_CMU_VTS_IPCLKPORT_PCLK 0x2150
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_WDT_VTS_IPCLKPORT_PCLK 0x2154
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_XIU_DP0_VTS_IPCLKPORT_ACLK 0x2158
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_CLKIN 0x215c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_DBGCLK 0x2160
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_IWICCLK 0x2164
+
+static const unsigned long vts_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER,
+ PLL_CON1_MUX_CLKALIVE_VTS_NOC_USER,
+ PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER,
+ PLL_CON1_MUX_CLKALIVE_VTS_RCO_USER,
+ PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER,
+ PLL_CON1_MUX_CLKCMU_VTS_DMIC_USER,
+ CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1,
+ CLK_CON_MUX_MUX_CLK_VTS_NOC,
+ CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD,
+ CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0,
+ CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1,
+ CLK_CON_DIV_DIV_CLK_VTS_CPU,
+ CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF,
+ CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2,
+ CLK_CON_DIV_DIV_CLK_VTS_NOC,
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF,
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE,
+ CLK_CON_GAT_CLKVTS_AUD_DMIC0,
+ CLK_CON_GAT_CLKVTS_AUD_DMIC1,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_BAAW_VTS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_MI_IP_VC2VTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_SI_ID_VTS2VC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_DMIC_IF_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_CORE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_YAMIN_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_ASYNCINTERRUPT_VTS_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_GPIO_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_ABOX_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_AP_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_DNC_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_YAMIN_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_BCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_CCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_ACLK_CPU,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_AUD_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK0,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK1,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK2,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SYSREG_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_VTS_CMU_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_WDT_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_XIU_DP0_VTS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_CLKIN,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_DBGCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_IWICCLK,
+};
+
+PNAME(mout_clkalive_vts_noc_user_p) = { "oscclk" };
+PNAME(mout_clkalive_vts_rco_user_p) = { "oscclk" };
+PNAME(mout_clkcmu_vts_dmic_user_p) = { "oscclk", "dout_cmu_vts_dmic" };
+PNAME(mout_clkvts_aud_dmic1_p) = { "dout_clkvts_aud_dmic1",
+ "dout_clkvts_dmic_if_div2",
+ "dmic_clk0_in", "dmic_clk1_in",
+ "dmic_clk2_in", "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_clkvts_noc_p) = { "mout_clkalive_vts_noc_user",
+ "mout_clkalive_vts_rco_user" };
+PNAME(mout_clkvts_dmic_pad_p) = { "mout_clkalive_vts_rco_user",
+ "mout_clkcmu_vts_dmic_user" };
+
+static const struct samsung_mux_clock vts_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CLKALIVE_VTS_NOC_USER, "mout_clkalive_vts_noc_user",
+ mout_clkalive_vts_noc_user_p, PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKALIVE_VTS_RCO_USER, "mout_clkalive_vts_rco_user",
+ mout_clkalive_vts_rco_user_p, PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_VTS_DMIC_USER, "mout_clkcmu_vts_dmic_user",
+ mout_clkcmu_vts_dmic_user_p, PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKVTS_AUD_DMIC1, "mout_clkvts_aud_dmic1",
+ mout_clkvts_aud_dmic1_p, CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1, 0, 3),
+ MUX(CLK_MOUT_CLKVTS_NOC, "mout_clkvts_noc", mout_clkvts_noc_p,
+ CLK_CON_MUX_MUX_CLK_VTS_NOC, 0, 1),
+ MUX(CLK_MOUT_CLKVTS_DMIC_PAD, "mout_clkvts_dmic_pad",
+ mout_clkvts_dmic_pad_p, CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD, 0, 1),
+};
+
+static const struct samsung_div_clock vts_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLKVTS_AUD_DMIC0, "dout_clkvts_aud_dmic0",
+ "mout_clkvts_dmic_pad", CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0, 0, 5),
+ DIV(CLK_DOUT_CLKVTS_AUD_DMIC1, "dout_clkvts_aud_dmic1",
+ "dout_clkvts_aud_dmic0", CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1, 0, 4),
+ DIV(CLK_DOUT_CLKVTS_CPU, "dout_clkvts_cpu", "mout_clkvts_noc",
+ CLK_CON_DIV_DIV_CLK_VTS_CPU, 0, 3),
+ DIV(CLK_DOUT_CLKVTS_DMIC_IF, "dout_clkvts_dmic_if",
+ "dout_clkvts_aud_dmic0", CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF, 0, 7),
+ DIV(CLK_DOUT_CLKVTS_DMIC_IF_DIV2, "dout_clkvts_dmic_if_div2",
+ "dout_clkvts_dmic_if", CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2, 0, 4),
+ DIV(CLK_DOUT_CLKVTS_NOC, "dout_clkvts_noc", "dout_clkvts_cpu",
+ CLK_CON_DIV_DIV_CLK_VTS_NOC, 0, 3),
+ DIV(CLK_DOUT_CLKVTS_SERIAL_LIF, "dout_clkvts_serial_lif",
+ "mout_clkalive_vts_rco_user", CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF,
+ 0, 7),
+ DIV(CLK_DOUT_CLKVTS_SERIAL_LIF_CORE, "dout_clkvts_serial_lif_core",
+ "mout_clkalive_vts_rco_user",
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE, 0, 7),
+};
+
+static const struct samsung_fixed_rate_clock vts_fixed_clks[] __initconst = {
+ FRATE(0, "dmic_clk0_in", NULL, 0, 100000000),
+ FRATE(0, "dmic_clk1_in", NULL, 0, 100000000),
+ FRATE(0, "dmic_clk2_in", NULL, 0, 100000000),
+};
+
+static const struct samsung_cmu_info vts_cmu_info __initconst = {
+ .mux_clks = vts_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(vts_mux_clks),
+ .div_clks = vts_div_clks,
+ .nr_div_clks = ARRAY_SIZE(vts_div_clks),
+ .fixed_clks = vts_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(vts_fixed_clks),
+ .nr_clk_ids = CLKS_NR_VTS,
+ .clk_regs = vts_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(vts_clk_regs),
+ .clk_name = "dmic",
+};
+
+static int __init exynos2200_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos2200_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos2200-cmu-cmgp",
+ .data = &cmgp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric1",
+ .data = &peric1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric2",
+ .data = &peric2_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-ufs",
+ .data = &ufs_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-vts",
+ .data = &vts_cmu_info,
+ }, { }
+};
+
+static struct platform_driver exynos2200_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos2200-cmu",
+ .of_match_table = exynos2200_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos2200_cmu_probe,
+};
+
+static int __init exynos2200_cmu_init(void)
+{
+ return platform_driver_register(&exynos2200_cmu_driver);
+}
+core_initcall(exynos2200_cmu_init);
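
The probe above follows the usual pattern for ARM64 Exynos CMU drivers: of_device_get_match_data() selects the samsung_cmu_info for the matched compatible, and exynos_arm64_register_cmu() (declared in clk-exynos-arm64.h, included at the top of the file) performs the actual registration; the per-CMU clk_regs arrays enumerate the block's clock registers for framework bookkeeping such as initialization and suspend handling. A rough sketch of what that shared helper does — names, signatures, and error handling are simplified assumptions here, not the real implementation in clk-exynos-arm64.c:

/* Illustrative sketch only; the real helper differs in detail. */
static void exynos_arm64_register_cmu_sketch(struct device *dev,
					     struct device_node *np,
					     const struct samsung_cmu_info *cmu)
{
	void __iomem *reg_base;
	struct samsung_clk_provider *ctx;
	struct clk *bus_clk;

	/*
	 * Keep the CMU's parent bus clock (cmu->clk_name, e.g. "noc" or
	 * "dmic" above) enabled while the block's registers are set up.
	 */
	bus_clk = of_clk_get_by_name(np, cmu->clk_name);
	if (!IS_ERR(bus_clk))
		clk_prepare_enable(bus_clk);

	reg_base = of_iomap(np, 0);
	ctx = samsung_clk_init(dev, reg_base, cmu->nr_clk_ids);

	/* Register every mux/div/gate/... table from the info in one pass. */
	samsung_cmu_register_clocks(ctx, cmu);
	samsung_clk_of_add_provider(np, ctx);
}
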
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index cd4fec323a42..84564ec4c8ec 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -7,10 +7,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-
#include <dt-bindings/clock/exynos3250.h>
#include "clk.h"
@@ -260,7 +258,7 @@ static const struct samsung_mux_clock mux_clks[] __initconst = {
/* SRC_TOP0 */
MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
- MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p,SRC_TOP0, 24, 1),
+ MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p, SRC_TOP0, 24, 1),
MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_div_mpll_pre_p, SRC_TOP0, 20, 1),
MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_div_mpll_pre_p, SRC_TOP0, 16, 1),
MUX(CLK_MOUT_ACLK_266_1, "mout_aclk_266_1", mout_aclk_266_1_p, SRC_TOP0, 14, 1),
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 28945b6b0ee1..cc5c1644c41c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -5,13 +5,13 @@
* Author: Thomas Abraham <thomas.ab@samsung.com>
*
* Common Clock Framework support for all Exynos4 SoCs.
-*/
+ */
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
+static const struct samsung_cmu_info cmu_info_exynos4 __initconst = {
+ .mux_clks = exynos4_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks),
+ .div_clks = exynos4_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4_div_clks),
+ .gate_clks = exynos4_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks),
+ .fixed_factor_clks = exynos4_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks),
+ .fixed_clks = exynos4_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = {
+ .mux_clks = exynos4210_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks),
+ .div_clks = exynos4210_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks),
+ .gate_clks = exynos4210_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks),
+ .fixed_factor_clks = exynos4210_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks),
+ .fixed_clks = exynos4210_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks),
+ .cpu_clks = exynos4210_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = {
+ .mux_clks = exynos4x12_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks),
+ .div_clks = exynos4x12_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks),
+ .gate_clks = exynos4x12_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks),
+ .fixed_factor_clks = exynos4x12_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_plls));
}
- samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
- ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4_mux_clks,
- ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(ctx, exynos4_div_clks,
- ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(ctx, exynos4_gate_clks,
- ARRAY_SIZE(exynos4_gate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
- ARRAY_SIZE(exynos4_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
- ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4210_mux_clks,
- ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(ctx, exynos4210_div_clks,
- ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(ctx, exynos4210_gate_clks,
- ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4210_fixed_factor_clks,
- ARRAY_SIZE(exynos4210_fixed_factor_clks));
- samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
- ARRAY_SIZE(exynos4210_cpu_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210);
} else {
- samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
- ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(ctx, exynos4x12_div_clks,
- ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
- ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4x12_fixed_factor_clks,
- ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12);
if (soc == EXYNOS4412)
samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
ARRAY_SIZE(exynos4412_cpu_clks));
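
The hunk above collapses five open-coded per-type registration calls into a single samsung_cmu_register_clocks() call per samsung_cmu_info table. A plausible sketch of that dispatcher, using only the helper names visible in the removed lines (the real implementation in clk.c may differ in ordering and coverage):

void samsung_cmu_register_clocks(struct samsung_clk_provider *ctx,
				 const struct samsung_cmu_info *cmu)
{
	/* Each table is optional; NULL pointers are simply skipped. */
	if (cmu->fixed_clks)
		samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
						cmu->nr_fixed_clks);
	if (cmu->mux_clks)
		samsung_clk_register_mux(ctx, cmu->mux_clks,
					 cmu->nr_mux_clks);
	if (cmu->div_clks)
		samsung_clk_register_div(ctx, cmu->div_clks,
					 cmu->nr_div_clks);
	if (cmu->gate_clks)
		samsung_clk_register_gate(ctx, cmu->gate_clks,
					  cmu->nr_gate_clks);
	if (cmu->fixed_factor_clks)
		samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
						  cmu->nr_fixed_factor_clks);
	if (cmu->cpu_clks)
		samsung_clk_register_cpu(ctx, cmu->cpu_clks,
					 cmu->nr_cpu_clks);
}

Note that the exynos4412 CPU clocks stay as an explicit samsung_clk_register_cpu() call rather than moving into cmu_info_exynos4x12, because they are registered conditionally on the exact SoC revision.
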
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index a70c2b06a61a..fa915057e109 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -4,12 +4,12 @@
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
*
* Common Clock Framework support for Exynos4412 ISP module.
-*/
+ */
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 373129847301..03bbde76e3ce 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -5,6 +5,7 @@
// Common Clock Framework support for Exynos5 power-domain dependent clocks
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index e02e7c013f3d..e90d3a0848cb 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -5,11 +5,12 @@
* Author: Thomas Abraham <thomas.ab@samsung.com>
*
* Common Clock Framework support for Exynos5250 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 16da6ef5ca0c..0a5959823370 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -6,9 +6,6 @@
* Common Clock Framework support for Exynos5260 SoC.
*/
-#include <linux/of.h>
-#include <linux/of_address.h>
-
#include "clk-exynos5260.h"
#include "clk.h"
#include "clk-pll.h"
@@ -1458,7 +1455,7 @@ static const struct samsung_fixed_rate_clock fixed_rate_clks[] __initconst = {
FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
NULL, 0, 125000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
- "phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL,
+ "phyclk_mipi_dphy_4l_m_txbyte_clkhs", NULL,
0, 187500000),
FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
NULL, 0, 24000000),
@@ -1629,7 +1626,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
mout_isp1_media_400_p,
MUX_SEL_TOP_ISP10, 4, 1),
MUX(TOP_MOUT_ACLK_ISP1_400, "mout_aclk_isp1_400", mout_aclk_isp1_400_p,
- MUX_SEL_TOP_ISP10, 8 , 1),
+ MUX_SEL_TOP_ISP10, 8, 1),
MUX(TOP_MOUT_ISP1_MEDIA_266, "mout_isp1_media_266",
mout_isp1_media_266_p,
MUX_SEL_TOP_ISP10, 16, 1),
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 2654077211e7..baa9988c7bb7 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -4,13 +4,11 @@
* Author: Tarek Dakhran <t.dakhran@samsung.com>
*
* Common Clock Framework support for Exynos5410 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5410.h>
#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index c630135c686b..a9df4e6db82f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -5,11 +5,12 @@
* Chander Kashyap <k.chander@samsung.com>
*
* Common Clock Framework support for Exynos5420 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5420.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
@@ -295,8 +296,8 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
/* list of all parent clocks */
PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll",
"mout_sclk_mpll", "mout_sclk_spll"};
-PNAME(mout_cpu_p) = {"mout_apll" , "mout_mspll_cpu"};
-PNAME(mout_kfc_p) = {"mout_kpll" , "mout_mspll_kfc"};
+PNAME(mout_cpu_p) = {"mout_apll", "mout_mspll_cpu"};
+PNAME(mout_kfc_p) = {"mout_kpll", "mout_mspll_kfc"};
PNAME(mout_apll_p) = {"fin_pll", "fout_apll"};
PNAME(mout_bpll_p) = {"fin_pll", "fout_bpll"};
PNAME(mout_cpll_p) = {"fin_pll", "fout_cpll"};
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 609d31a7aa52..4b2a861e7d57 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -6,10 +6,8 @@
* Common Clock Framework support for Exynos5433 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 4a5d2a914bd6..fe0fa5bdbd4b 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -2,10 +2,9 @@
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
-*/
+ */
#include <linux/clk-provider.h>
-#include <linux/of.h>
#include "clk.h"
#include <dt-bindings/clock/exynos7-clk.h>
diff --git a/drivers/clk/samsung/clk-exynos7870.c b/drivers/clk/samsung/clk-exynos7870.c
new file mode 100644
index 000000000000..b3bcf3a1d0b7
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7870.c
@@ -0,0 +1,1829 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Kaustabh Chakraborty <kauschluss@disroot.org>
+ *
+ * Common Clock Framework support for Exynos7870.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos7870-cmu.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/*
+ * Register offsets for CMU_MIF (0x10460000)
+ */
+#define PLL_LOCKTIME_MIF_MEM_PLL 0x0000
+#define PLL_LOCKTIME_MIF_MEDIA_PLL 0x0020
+#define PLL_LOCKTIME_MIF_BUS_PLL 0x0040
+#define PLL_CON0_MIF_MEM_PLL 0x0100
+#define PLL_CON0_MIF_MEDIA_PLL 0x0120
+#define PLL_CON0_MIF_BUS_PLL 0x0140
+#define CLK_CON_GAT_MIF_MUX_MEM_PLL 0x0200
+#define CLK_CON_GAT_MIF_MUX_MEM_PLL_CON 0x0200
+#define CLK_CON_GAT_MIF_MUX_MEDIA_PLL 0x0204
+#define CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON 0x0204
+#define CLK_CON_GAT_MIF_MUX_BUS_PLL 0x0208
+#define CLK_CON_GAT_MIF_MUX_BUS_PLL_CON 0x0208
+#define CLK_CON_GAT_MIF_MUX_BUSD 0x0220
+#define CLK_CON_MUX_MIF_BUSD 0x0220
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA 0x0264
+#define CLK_CON_MUX_MIF_CMU_ISP_VRA 0x0264
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM 0x0268
+#define CLK_CON_MUX_MIF_CMU_ISP_CAM 0x0268
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP 0x026c
+#define CLK_CON_MUX_MIF_CMU_ISP_ISP 0x026c
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS 0x0270
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_BUS 0x0270
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK 0x0274
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK 0x0274
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK 0x0278
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK 0x0278
+#define CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL 0x027c
+#define CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL 0x027c
+#define CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC 0x0280
+#define CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC 0x0280
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS 0x0284
+#define CLK_CON_MUX_MIF_CMU_FSYS_BUS 0x0284
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0 0x0288
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC0 0x0288
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1 0x028c
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC1 0x028c
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2 0x0290
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC2 0x0290
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK 0x029c
+#define CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK 0x029c
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS 0x02a0
+#define CLK_CON_MUX_MIF_CMU_PERI_BUS 0x02a0
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1 0x02a4
+#define CLK_CON_MUX_MIF_CMU_PERI_UART1 0x02a4
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2 0x02a8
+#define CLK_CON_MUX_MIF_CMU_PERI_UART2 0x02a8
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0 0x02ac
+#define CLK_CON_MUX_MIF_CMU_PERI_UART0 0x02ac
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2 0x02b0
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI2 0x02b0
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1 0x02b4
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI1 0x02b4
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0 0x02b8
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI0 0x02b8
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3 0x02bc
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI3 0x02bc
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4 0x02c0
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI4 0x02c0
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0 0x02c4
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR0 0x02c4
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1 0x02c8
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR1 0x02c8
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2 0x02cc
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR2 0x02cc
+#define CLK_CON_DIV_MIF_BUSD 0x0420
+#define CLK_CON_DIV_MIF_APB 0x0424
+#define CLK_CON_DIV_MIF_HSI2C 0x0430
+#define CLK_CON_DIV_MIF_CMU_G3D_SWITCH 0x0460
+#define CLK_CON_DIV_MIF_CMU_ISP_VRA 0x0464
+#define CLK_CON_DIV_MIF_CMU_ISP_CAM 0x0468
+#define CLK_CON_DIV_MIF_CMU_ISP_ISP 0x046c
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_BUS 0x0470
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK 0x0474
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK 0x0478
+#define CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL 0x047c
+#define CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC 0x0480
+#define CLK_CON_DIV_MIF_CMU_FSYS_BUS 0x0484
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC0 0x0488
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC1 0x048c
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC2 0x0490
+#define CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK 0x049c
+#define CLK_CON_DIV_MIF_CMU_PERI_BUS 0x04a0
+#define CLK_CON_DIV_MIF_CMU_PERI_UART1 0x04a4
+#define CLK_CON_DIV_MIF_CMU_PERI_UART2 0x04a8
+#define CLK_CON_DIV_MIF_CMU_PERI_UART0 0x04ac
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI2 0x04b0
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI1 0x04b4
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI0 0x04b8
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI3 0x04bc
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI4 0x04c0
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR0 0x04c4
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR1 0x04c8
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR2 0x04cc
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS 0x080c
+#define CLK_CON_GAT_MIF_HSI2C_AP_PCLKS 0x0828
+#define CLK_CON_GAT_MIF_HSI2C_CP_PCLKS 0x0828
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0 0x0828
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1 0x0828
+#define CLK_CON_GAT_MIF_HSI2C_AP_PCLKM 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_CP_PCLKM 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_IPCLK 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_ITCLK 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1 0x0840
+#define CLK_CON_GAT_MIF_CMU_G3D_SWITCH 0x0860
+#define CLK_CON_GAT_MIF_CMU_ISP_VRA 0x0864
+#define CLK_CON_GAT_MIF_CMU_ISP_CAM 0x0868
+#define CLK_CON_GAT_MIF_CMU_ISP_ISP 0x086c
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_BUS 0x0870
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK 0x0874
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK 0x0878
+#define CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL 0x087c
+#define CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC 0x0880
+#define CLK_CON_GAT_MIF_CMU_FSYS_BUS 0x0884
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC0 0x0888
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC1 0x088c
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC2 0x0890
+#define CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK 0x089c
+#define CLK_CON_GAT_MIF_CMU_PERI_BUS 0x08a0
+#define CLK_CON_GAT_MIF_CMU_PERI_UART1 0x08a4
+#define CLK_CON_GAT_MIF_CMU_PERI_UART2 0x08a8
+#define CLK_CON_GAT_MIF_CMU_PERI_UART0 0x08ac
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI2 0x08b0
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI1 0x08b4
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI0 0x08b8
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI3 0x08bc
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI4 0x08c0
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR0 0x08c4
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR1 0x08c8
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR2 0x08cc
+
+static const unsigned long mif_clk_regs[] __initconst = {
+ PLL_LOCKTIME_MIF_MEM_PLL,
+ PLL_LOCKTIME_MIF_MEDIA_PLL,
+ PLL_LOCKTIME_MIF_BUS_PLL,
+ PLL_CON0_MIF_MEM_PLL,
+ PLL_CON0_MIF_MEDIA_PLL,
+ PLL_CON0_MIF_BUS_PLL,
+ CLK_CON_GAT_MIF_MUX_MEM_PLL,
+ CLK_CON_GAT_MIF_MUX_MEM_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_MEDIA_PLL,
+ CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_BUS_PLL,
+ CLK_CON_GAT_MIF_MUX_BUS_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_BUSD,
+ CLK_CON_MUX_MIF_BUSD,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA,
+ CLK_CON_MUX_MIF_CMU_ISP_VRA,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM,
+ CLK_CON_MUX_MIF_CMU_ISP_CAM,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP,
+ CLK_CON_MUX_MIF_CMU_ISP_ISP,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL,
+ CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC,
+ CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS,
+ CLK_CON_MUX_MIF_CMU_FSYS_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC0,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC1,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC2,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS,
+ CLK_CON_MUX_MIF_CMU_PERI_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1,
+ CLK_CON_MUX_MIF_CMU_PERI_UART1,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2,
+ CLK_CON_MUX_MIF_CMU_PERI_UART2,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0,
+ CLK_CON_MUX_MIF_CMU_PERI_UART0,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI2,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI1,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI0,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI3,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI4,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR2,
+ CLK_CON_DIV_MIF_BUSD,
+ CLK_CON_DIV_MIF_APB,
+ CLK_CON_DIV_MIF_HSI2C,
+ CLK_CON_DIV_MIF_CMU_G3D_SWITCH,
+ CLK_CON_DIV_MIF_CMU_ISP_VRA,
+ CLK_CON_DIV_MIF_CMU_ISP_CAM,
+ CLK_CON_DIV_MIF_CMU_ISP_ISP,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_DIV_MIF_CMU_FSYS_BUS,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC0,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC1,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC2,
+ CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_DIV_MIF_CMU_PERI_BUS,
+ CLK_CON_DIV_MIF_CMU_PERI_UART1,
+ CLK_CON_DIV_MIF_CMU_PERI_UART2,
+ CLK_CON_DIV_MIF_CMU_PERI_UART0,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI2,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI1,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI0,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI3,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI4,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR2,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS,
+ CLK_CON_GAT_MIF_HSI2C_AP_PCLKS,
+ CLK_CON_GAT_MIF_HSI2C_CP_PCLKS,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1,
+ CLK_CON_GAT_MIF_HSI2C_AP_PCLKM,
+ CLK_CON_GAT_MIF_HSI2C_CP_PCLKM,
+ CLK_CON_GAT_MIF_HSI2C_IPCLK,
+ CLK_CON_GAT_MIF_HSI2C_ITCLK,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1,
+ CLK_CON_GAT_MIF_CMU_G3D_SWITCH,
+ CLK_CON_GAT_MIF_CMU_ISP_VRA,
+ CLK_CON_GAT_MIF_CMU_ISP_CAM,
+ CLK_CON_GAT_MIF_CMU_ISP_ISP,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_GAT_MIF_CMU_FSYS_BUS,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC0,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC1,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC2,
+ CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_GAT_MIF_CMU_PERI_BUS,
+ CLK_CON_GAT_MIF_CMU_PERI_UART1,
+ CLK_CON_GAT_MIF_CMU_PERI_UART2,
+ CLK_CON_GAT_MIF_CMU_PERI_UART0,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI2,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI1,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI0,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI3,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI4,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR2,
+};
+
+static const struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initconst = {
+ FFACTOR(0, "ffac_mif_mux_bus_pll_div2", "gout_mif_mux_bus_pll_con", 1, 2,
+ 0),
+ FFACTOR(0, "ffac_mif_mux_media_pll_div2", "gout_mif_mux_media_pll_con",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_mif_mux_mem_pll_div2", "gout_mif_mux_mem_pll_con", 1, 2,
+ 0),
+};
+
+static const struct samsung_pll_clock mif_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_MIF_BUS_PLL, "fout_mif_bus_pll", "oscclk",
+ PLL_LOCKTIME_MIF_BUS_PLL, PLL_CON0_MIF_BUS_PLL, NULL),
+ PLL(pll_1417x, CLK_FOUT_MIF_MEDIA_PLL, "fout_mif_media_pll", "oscclk",
+ PLL_LOCKTIME_MIF_MEDIA_PLL, PLL_CON0_MIF_MEDIA_PLL, NULL),
+ PLL(pll_1417x, CLK_FOUT_MIF_MEM_PLL, "fout_mif_mem_pll", "oscclk",
+ PLL_LOCKTIME_MIF_MEM_PLL, PLL_CON0_MIF_MEM_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_MIF */
+PNAME(mout_mif_cmu_dispaud_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_dispaud_decon_eclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_dispaud_decon_vclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_usb20drd_refclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_cam_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_isp_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_sensor0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_sensor1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_sensor2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_vra_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_mfcmscl_mfc_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_mfcmscl_mscl_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_peri_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_spi0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi4_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi3_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_uart1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_uart2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_uart0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_busd_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "ffac_mif_mux_mem_pll_div2" };
+
+static const struct samsung_mux_clock mif_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_BUS, "mout_mif_cmu_dispaud_bus",
+ mout_mif_cmu_dispaud_bus_p, CLK_CON_MUX_MIF_CMU_DISPAUD_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "mout_mif_cmu_dispaud_decon_eclk",
+ mout_mif_cmu_dispaud_decon_eclk_p,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "mout_mif_cmu_dispaud_decon_vclk",
+ mout_mif_cmu_dispaud_decon_vclk_p,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_BUS, "mout_mif_cmu_fsys_bus",
+ mout_mif_cmu_fsys_bus_p, CLK_CON_MUX_MIF_CMU_FSYS_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC0, "mout_mif_cmu_fsys_mmc0",
+ mout_mif_cmu_fsys_mmc0_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC1, "mout_mif_cmu_fsys_mmc1",
+ mout_mif_cmu_fsys_mmc1_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC2, "mout_mif_cmu_fsys_mmc2",
+ mout_mif_cmu_fsys_mmc2_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "mout_mif_cmu_fsys_usb20drd_refclk",
+ mout_mif_cmu_fsys_usb20drd_refclk_p,
+ CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_CAM, "mout_mif_cmu_isp_cam",
+ mout_mif_cmu_isp_cam_p, CLK_CON_MUX_MIF_CMU_ISP_CAM, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_ISP, "mout_mif_cmu_isp_isp",
+ mout_mif_cmu_isp_isp_p, CLK_CON_MUX_MIF_CMU_ISP_ISP, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR0, "mout_mif_cmu_isp_sensor0",
+ mout_mif_cmu_isp_sensor0_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR1, "mout_mif_cmu_isp_sensor1",
+ mout_mif_cmu_isp_sensor1_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR2, "mout_mif_cmu_isp_sensor2",
+ mout_mif_cmu_isp_sensor2_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_VRA, "mout_mif_cmu_isp_vra",
+ mout_mif_cmu_isp_vra_p, CLK_CON_MUX_MIF_CMU_ISP_VRA, 12, 2),
+ MUX(CLK_MOUT_MIF_CMU_MFCMSCL_MFC, "mout_mif_cmu_mfcmscl_mfc",
+ mout_mif_cmu_mfcmscl_mfc_p, CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC, 12, 2),
+ MUX(CLK_MOUT_MIF_CMU_MFCMSCL_MSCL, "mout_mif_cmu_mfcmscl_mscl",
+ mout_mif_cmu_mfcmscl_mscl_p, CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL, 12,
+ 2),
+ MUX(CLK_MOUT_MIF_CMU_PERI_BUS, "mout_mif_cmu_peri_bus",
+ mout_mif_cmu_peri_bus_p, CLK_CON_MUX_MIF_CMU_PERI_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI0, "mout_mif_cmu_peri_spi0",
+ mout_mif_cmu_peri_spi0_p, CLK_CON_MUX_MIF_CMU_PERI_SPI0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI2, "mout_mif_cmu_peri_spi2",
+ mout_mif_cmu_peri_spi2_p, CLK_CON_MUX_MIF_CMU_PERI_SPI2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI1, "mout_mif_cmu_peri_spi1",
+ mout_mif_cmu_peri_spi1_p, CLK_CON_MUX_MIF_CMU_PERI_SPI1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI4, "mout_mif_cmu_peri_spi4",
+ mout_mif_cmu_peri_spi4_p, CLK_CON_MUX_MIF_CMU_PERI_SPI4, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI3, "mout_mif_cmu_peri_spi3",
+ mout_mif_cmu_peri_spi3_p, CLK_CON_MUX_MIF_CMU_PERI_SPI3, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART1, "mout_mif_cmu_peri_uart1",
+ mout_mif_cmu_peri_uart1_p, CLK_CON_MUX_MIF_CMU_PERI_UART1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART2, "mout_mif_cmu_peri_uart2",
+ mout_mif_cmu_peri_uart2_p, CLK_CON_MUX_MIF_CMU_PERI_UART2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART0, "mout_mif_cmu_peri_uart0",
+ mout_mif_cmu_peri_uart0_p, CLK_CON_MUX_MIF_CMU_PERI_UART0, 12, 1),
+ MUX(CLK_MOUT_MIF_BUSD, "mout_mif_busd", mout_mif_busd_p,
+ CLK_CON_MUX_MIF_BUSD, 12, 2),
+};
+
+static const struct samsung_div_clock mif_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_BUS, "dout_mif_cmu_dispaud_bus",
+ "gout_mif_mux_cmu_dispaud_bus", CLK_CON_DIV_MIF_CMU_DISPAUD_BUS, 0,
+ 4),
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "dout_mif_cmu_dispaud_decon_eclk",
+ "gout_mif_mux_cmu_dispaud_decon_eclk",
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "dout_mif_cmu_dispaud_decon_vclk",
+ "gout_mif_mux_cmu_dispaud_decon_vclk",
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_BUS, "dout_mif_cmu_fsys_bus",
+ "gout_mif_mux_cmu_fsys_bus", CLK_CON_DIV_MIF_CMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC0, "dout_mif_cmu_fsys_mmc0",
+ "gout_mif_mux_cmu_fsys_mmc0", CLK_CON_DIV_MIF_CMU_FSYS_MMC0, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC1, "dout_mif_cmu_fsys_mmc1",
+ "gout_mif_mux_cmu_fsys_mmc1", CLK_CON_DIV_MIF_CMU_FSYS_MMC1, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC2, "dout_mif_cmu_fsys_mmc2",
+ "gout_mif_mux_cmu_fsys_mmc2", CLK_CON_DIV_MIF_CMU_FSYS_MMC2, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "dout_mif_cmu_fsys_usb20drd_refclk",
+ "gout_mif_mux_cmu_fsys_usb20drd_refclk",
+ CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_G3D_SWITCH, "dout_mif_cmu_g3d_switch",
+ "ffac_mif_mux_bus_pll_div2", CLK_CON_DIV_MIF_CMU_G3D_SWITCH, 0, 2),
+ DIV(CLK_DOUT_MIF_CMU_ISP_CAM, "dout_mif_cmu_isp_cam",
+ "gout_mif_mux_cmu_isp_cam", CLK_CON_DIV_MIF_CMU_ISP_CAM, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_ISP_ISP, "dout_mif_cmu_isp_isp",
+ "gout_mif_mux_cmu_isp_isp", CLK_CON_DIV_MIF_CMU_ISP_ISP, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR0, "dout_mif_cmu_isp_sensor0",
+ "gout_mif_mux_cmu_isp_sensor0", CLK_CON_DIV_MIF_CMU_ISP_SENSOR0, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR1, "dout_mif_cmu_isp_sensor1",
+ "gout_mif_mux_cmu_isp_sensor1", CLK_CON_DIV_MIF_CMU_ISP_SENSOR1, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR2, "dout_mif_cmu_isp_sensor2",
+ "gout_mif_mux_cmu_isp_sensor2", CLK_CON_DIV_MIF_CMU_ISP_SENSOR2, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_VRA, "dout_mif_cmu_isp_vra",
+ "gout_mif_mux_cmu_isp_vra", CLK_CON_DIV_MIF_CMU_ISP_VRA, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_MFCMSCL_MFC, "dout_mif_cmu_mfcmscl_mfc",
+ "gout_mif_mux_cmu_mfcmscl_mfc", CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC, 0,
+ 4),
+ DIV(CLK_DOUT_MIF_CMU_MFCMSCL_MSCL, "dout_mif_cmu_mfcmscl_mscl",
+ "gout_mif_mux_cmu_mfcmscl_mscl", CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL,
+ 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_BUS, "dout_mif_cmu_peri_bus",
+ "gout_mif_mux_cmu_peri_bus", CLK_CON_DIV_MIF_CMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI0, "dout_mif_cmu_peri_spi0",
+ "gout_mif_mux_cmu_peri_spi0", CLK_CON_DIV_MIF_CMU_PERI_SPI0, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI2, "dout_mif_cmu_peri_spi2",
+ "gout_mif_mux_cmu_peri_spi2", CLK_CON_DIV_MIF_CMU_PERI_SPI2, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI1, "dout_mif_cmu_peri_spi1",
+ "gout_mif_mux_cmu_peri_spi1", CLK_CON_DIV_MIF_CMU_PERI_SPI1, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI4, "dout_mif_cmu_peri_spi4",
+ "gout_mif_mux_cmu_peri_spi4", CLK_CON_DIV_MIF_CMU_PERI_SPI4, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI3, "dout_mif_cmu_peri_spi3",
+ "gout_mif_mux_cmu_peri_spi3", CLK_CON_DIV_MIF_CMU_PERI_SPI3, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART1, "dout_mif_cmu_peri_uart1",
+ "gout_mif_mux_cmu_peri_uart1", CLK_CON_DIV_MIF_CMU_PERI_UART1, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART2, "dout_mif_cmu_peri_uart2",
+ "gout_mif_mux_cmu_peri_uart2", CLK_CON_DIV_MIF_CMU_PERI_UART2, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART0, "dout_mif_cmu_peri_uart0",
+ "gout_mif_mux_cmu_peri_uart0", CLK_CON_DIV_MIF_CMU_PERI_UART0, 0, 4),
+ DIV(CLK_DOUT_MIF_APB, "dout_mif_apb", "dout_mif_busd",
+ CLK_CON_DIV_MIF_APB, 0, 2),
+ DIV(CLK_DOUT_MIF_BUSD, "dout_mif_busd", "gout_mif_mux_busd",
+ CLK_CON_DIV_MIF_BUSD, 0, 4),
+ DIV(CLK_DOUT_MIF_HSI2C, "dout_mif_hsi2c", "ffac_mif_mux_media_pll_div2",
+ CLK_CON_DIV_MIF_HSI2C, 0, 4),
+};
+
+static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_BUS, "gout_mif_cmu_dispaud_bus",
+ "dout_mif_cmu_dispaud_bus", CLK_CON_GAT_MIF_CMU_DISPAUD_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "gout_mif_cmu_dispaud_decon_eclk",
+ "dout_mif_cmu_dispaud_decon_eclk",
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "gout_mif_cmu_dispaud_decon_vclk",
+ "dout_mif_cmu_dispaud_decon_vclk",
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_BUS, "gout_mif_cmu_fsys_bus",
+ "dout_mif_cmu_fsys_bus", CLK_CON_GAT_MIF_CMU_FSYS_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC0, "gout_mif_cmu_fsys_mmc0",
+ "dout_mif_cmu_fsys_mmc0", CLK_CON_GAT_MIF_CMU_FSYS_MMC0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC1, "gout_mif_cmu_fsys_mmc1",
+ "dout_mif_cmu_fsys_mmc1", CLK_CON_GAT_MIF_CMU_FSYS_MMC1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC2, "gout_mif_cmu_fsys_mmc2",
+ "dout_mif_cmu_fsys_mmc2", CLK_CON_GAT_MIF_CMU_FSYS_MMC2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "gout_mif_cmu_fsys_usb20drd_refclk",
+ "dout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_MIF_CMU_G3D_SWITCH, "gout_mif_cmu_g3d_switch",
+ "dout_mif_cmu_g3d_switch", CLK_CON_GAT_MIF_CMU_G3D_SWITCH, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_CAM, "gout_mif_cmu_isp_cam",
+ "dout_mif_cmu_isp_cam", CLK_CON_GAT_MIF_CMU_ISP_CAM, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_ISP, "gout_mif_cmu_isp_isp",
+ "dout_mif_cmu_isp_isp", CLK_CON_GAT_MIF_CMU_ISP_ISP, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR0, "gout_mif_cmu_isp_sensor0",
+ "dout_mif_cmu_isp_sensor0", CLK_CON_GAT_MIF_CMU_ISP_SENSOR0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR1, "gout_mif_cmu_isp_sensor1",
+ "dout_mif_cmu_isp_sensor1", CLK_CON_GAT_MIF_CMU_ISP_SENSOR1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR2, "gout_mif_cmu_isp_sensor2",
+ "dout_mif_cmu_isp_sensor2", CLK_CON_GAT_MIF_CMU_ISP_SENSOR2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_VRA, "gout_mif_cmu_isp_vra",
+ "dout_mif_cmu_isp_vra", CLK_CON_GAT_MIF_CMU_ISP_VRA, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_MFCMSCL_MFC, "gout_mif_cmu_mfcmscl_mfc",
+ "dout_mif_cmu_mfcmscl_mfc", CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_MFCMSCL_MSCL, "gout_mif_cmu_mfcmscl_mscl",
+ "dout_mif_cmu_mfcmscl_mscl", CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_BUS, "gout_mif_cmu_peri_bus",
+ "dout_mif_cmu_peri_bus", CLK_CON_GAT_MIF_CMU_PERI_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI0, "gout_mif_cmu_peri_spi0",
+ "dout_mif_cmu_peri_spi0", CLK_CON_GAT_MIF_CMU_PERI_SPI0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI2, "gout_mif_cmu_peri_spi2",
+ "dout_mif_cmu_peri_spi2", CLK_CON_GAT_MIF_CMU_PERI_SPI2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI1, "gout_mif_cmu_peri_spi1",
+ "dout_mif_cmu_peri_spi1", CLK_CON_GAT_MIF_CMU_PERI_SPI1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI4, "gout_mif_cmu_peri_spi4",
+ "dout_mif_cmu_peri_spi4", CLK_CON_GAT_MIF_CMU_PERI_SPI4, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI3, "gout_mif_cmu_peri_spi3",
+ "dout_mif_cmu_peri_spi3", CLK_CON_GAT_MIF_CMU_PERI_SPI3, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART1, "gout_mif_cmu_peri_uart1",
+ "dout_mif_cmu_peri_uart1", CLK_CON_GAT_MIF_CMU_PERI_UART1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART2, "gout_mif_cmu_peri_uart2",
+ "dout_mif_cmu_peri_uart2", CLK_CON_GAT_MIF_CMU_PERI_UART2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART0, "gout_mif_cmu_peri_uart0",
+ "dout_mif_cmu_peri_uart0", CLK_CON_GAT_MIF_CMU_PERI_UART0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_AP_PCLKM, "gout_mif_hsi2c_ap_pclkm",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_HSI2C_AP_PCLKM, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_AP_PCLKS, "gout_mif_hsi2c_ap_pclks",
+ "dout_mif_apb", CLK_CON_GAT_MIF_HSI2C_AP_PCLKS, 14, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_CP_PCLKM, "gout_mif_hsi2c_cp_pclkm",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_HSI2C_CP_PCLKM, 1,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_CP_PCLKS, "gout_mif_hsi2c_cp_pclks",
+ "dout_mif_apb", CLK_CON_GAT_MIF_HSI2C_CP_PCLKS, 15, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_IPCLK, "gout_mif_hsi2c_ipclk", "dout_mif_hsi2c",
+ CLK_CON_GAT_MIF_HSI2C_IPCLK, 2, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_ITCLK, "gout_mif_hsi2c_itclk", "dout_mif_hsi2c",
+ CLK_CON_GAT_MIF_HSI2C_ITCLK, 3, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C, "gout_mif_cp_pclk_hsi2c",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C, 6, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_0, "gout_mif_cp_pclk_hsi2c_bat_0",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0, 4,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_1, "gout_mif_cp_pclk_hsi2c_bat_1",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1, 5,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_OSC_SYS, "gout_mif_wrap_adc_if_osc_sys",
+ "oscclk", CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S0, "gout_mif_wrap_adc_if_pclk_s0",
+ "dout_mif_apb", CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0, 20,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S1, "gout_mif_wrap_adc_if_pclk_s1",
+ "dout_mif_apb", CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUS_PLL, "gout_mif_mux_bus_pll",
+ "gout_mif_mux_bus_pll_con", CLK_CON_GAT_MIF_MUX_BUS_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUS_PLL_CON, "gout_mif_mux_bus_pll_con",
+ "fout_mif_bus_pll", CLK_CON_GAT_MIF_MUX_BUS_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_BUS, "gout_mif_mux_cmu_dispaud_bus",
+ "mout_mif_cmu_dispaud_bus", CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_ECLK,
+ "gout_mif_mux_cmu_dispaud_decon_eclk",
+ "mout_mif_cmu_dispaud_decon_eclk",
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_VCLK,
+ "gout_mif_mux_cmu_dispaud_decon_vclk",
+ "mout_mif_cmu_dispaud_decon_vclk",
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_BUS, "gout_mif_mux_cmu_fsys_bus",
+ "mout_mif_cmu_fsys_bus", CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC0, "gout_mif_mux_cmu_fsys_mmc0",
+ "mout_mif_cmu_fsys_mmc0", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC1, "gout_mif_mux_cmu_fsys_mmc1",
+ "mout_mif_cmu_fsys_mmc1", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC2, "gout_mif_mux_cmu_fsys_mmc2",
+ "mout_mif_cmu_fsys_mmc2", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK,
+ "gout_mif_mux_cmu_fsys_usb20drd_refclk",
+ "mout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_CAM, "gout_mif_mux_cmu_isp_cam",
+ "mout_mif_cmu_isp_cam", CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_ISP, "gout_mif_mux_cmu_isp_isp",
+ "mout_mif_cmu_isp_isp", CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR0, "gout_mif_mux_cmu_isp_sensor0",
+ "mout_mif_cmu_isp_sensor0", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR1, "gout_mif_mux_cmu_isp_sensor1",
+ "mout_mif_cmu_isp_sensor1", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR2, "gout_mif_mux_cmu_isp_sensor2",
+ "mout_mif_cmu_isp_sensor2", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_VRA, "gout_mif_mux_cmu_isp_vra",
+ "mout_mif_cmu_isp_vra", CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MFC, "gout_mif_mux_cmu_mfcmscl_mfc",
+ "mout_mif_cmu_mfcmscl_mfc", CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MSCL, "gout_mif_mux_cmu_mfcmscl_mscl",
+ "mout_mif_cmu_mfcmscl_mscl", CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_BUS, "gout_mif_mux_cmu_peri_bus",
+ "mout_mif_cmu_peri_bus", CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI0, "gout_mif_mux_cmu_peri_spi0",
+ "mout_mif_cmu_peri_spi0", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI2, "gout_mif_mux_cmu_peri_spi2",
+ "mout_mif_cmu_peri_spi2", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI1, "gout_mif_mux_cmu_peri_spi1",
+ "mout_mif_cmu_peri_spi1", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI4, "gout_mif_mux_cmu_peri_spi4",
+ "mout_mif_cmu_peri_spi4", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI3, "gout_mif_mux_cmu_peri_spi3",
+ "mout_mif_cmu_peri_spi3", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART1, "gout_mif_mux_cmu_peri_uart1",
+ "mout_mif_cmu_peri_uart1", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART2, "gout_mif_mux_cmu_peri_uart2",
+ "mout_mif_cmu_peri_uart2", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART0, "gout_mif_mux_cmu_peri_uart0",
+ "mout_mif_cmu_peri_uart0", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUSD, "gout_mif_mux_busd", "mout_mif_busd",
+ CLK_CON_GAT_MIF_MUX_BUSD, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEDIA_PLL, "gout_mif_mux_media_pll",
+ "gout_mif_mux_media_pll_con", CLK_CON_GAT_MIF_MUX_MEDIA_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEDIA_PLL_CON, "gout_mif_mux_media_pll_con",
+ "fout_mif_media_pll", CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEM_PLL, "gout_mif_mux_mem_pll",
+ "gout_mif_mux_mem_pll_con", CLK_CON_GAT_MIF_MUX_MEM_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEM_PLL_CON, "gout_mif_mux_mem_pll_con",
+ "fout_mif_mem_pll", CLK_CON_GAT_MIF_MUX_MEM_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
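+/*
+ * Bundle all CMU_MIF tables for the common registration helper. The
+ * clk_regs list is also the set of registers the Samsung clock core
+ * saves and restores around system suspend, so it mirrors the offsets
+ * defined above.
+ */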
+static const struct samsung_cmu_info mif_cmu_info __initconst = {
+ .fixed_factor_clks = mif_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(mif_fixed_factor_clks),
+ .pll_clks = mif_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(mif_pll_clks),
+ .mux_clks = mif_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mif_mux_clks),
+ .div_clks = mif_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mif_div_clks),
+ .gate_clks = mif_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mif_gate_clks),
+ .clk_regs = mif_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mif_clk_regs),
+ .nr_clk_ids = MIF_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_DISPAUD (0x148d0000)
+ */
+#define PLL_LOCKTIME_DISPAUD_PLL 0x0000
+#define PLL_LOCKTIME_DISPAUD_AUD_PLL 0x00c0
+#define PLL_CON0_DISPAUD_PLL 0x0100
+#define PLL_CON0_DISPAUD_AUD_PLL 0x01c0
+#define CLK_CON_GAT_DISPAUD_MUX_PLL 0x0200
+#define CLK_CON_GAT_DISPAUD_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_DISPAUD_MUX_AUD_PLL 0x0204
+#define CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON 0x0204
+#define CLK_CON_GAT_DISPAUD_MUX_BUS_USER 0x0210
+#define CLK_CON_MUX_DISPAUD_BUS_USER 0x0210
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER 0x0214
+#define CLK_CON_MUX_DISPAUD_DECON_VCLK_USER 0x0214
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER 0x0218
+#define CLK_CON_MUX_DISPAUD_DECON_ECLK_USER 0x0218
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK 0x021c
+#define CLK_CON_MUX_DISPAUD_DECON_VCLK 0x021c
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK 0x0220
+#define CLK_CON_MUX_DISPAUD_DECON_ECLK 0x0220
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER 0x0224
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON 0x0224
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER 0x0228
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON 0x0228
+#define CLK_CON_GAT_DISPAUD_MUX_MI2S 0x022c
+#define CLK_CON_MUX_DISPAUD_MI2S 0x022c
+#define CLK_CON_DIV_DISPAUD_APB 0x0400
+#define CLK_CON_DIV_DISPAUD_DECON_VCLK 0x0404
+#define CLK_CON_DIV_DISPAUD_DECON_ECLK 0x0408
+#define CLK_CON_DIV_DISPAUD_MI2S 0x040c
+#define CLK_CON_DIV_DISPAUD_MIXER 0x0410
+#define CLK_CON_GAT_DISPAUD_BUS 0x0810
+#define CLK_CON_GAT_DISPAUD_BUS_DISP 0x0810
+#define CLK_CON_GAT_DISPAUD_BUS_PPMU 0x0810
+#define CLK_CON_GAT_DISPAUD_APB_AUD 0x0814
+#define CLK_CON_GAT_DISPAUD_APB_AUD_AMP 0x0814
+#define CLK_CON_GAT_DISPAUD_APB_DISP 0x0814
+#define CLK_CON_GAT_DISPAUD_DECON_VCLK 0x081c
+#define CLK_CON_GAT_DISPAUD_DECON_ECLK 0x0820
+#define CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI 0x082c
+#define CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI 0x082c
+#define CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK 0x0830
+#define CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S 0x0834
+#define CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN 0x0838
+#define CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK 0x083c
+#define CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN 0x0840
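+
+/*
+ * Several offsets above are deliberately aliased: the CMU appears to pack
+ * a mux/PLL select (bit 12) and a clock enable (bit 21) into one register,
+ * and each alias names the bitfield that a clock definition below drives.
+ */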
+
+static const unsigned long dispaud_clk_regs[] __initconst = {
+ PLL_LOCKTIME_DISPAUD_PLL,
+ PLL_LOCKTIME_DISPAUD_AUD_PLL,
+ PLL_CON0_DISPAUD_PLL,
+ PLL_CON0_DISPAUD_AUD_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_PLL_CON,
+ CLK_CON_GAT_DISPAUD_MUX_AUD_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON,
+ CLK_CON_GAT_DISPAUD_MUX_BUS_USER,
+ CLK_CON_MUX_DISPAUD_BUS_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER,
+ CLK_CON_MUX_DISPAUD_DECON_VCLK_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER,
+ CLK_CON_MUX_DISPAUD_DECON_ECLK_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK,
+ CLK_CON_MUX_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK,
+ CLK_CON_MUX_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON,
+ CLK_CON_GAT_DISPAUD_MUX_MI2S,
+ CLK_CON_MUX_DISPAUD_MI2S,
+ CLK_CON_DIV_DISPAUD_APB,
+ CLK_CON_DIV_DISPAUD_DECON_VCLK,
+ CLK_CON_DIV_DISPAUD_DECON_ECLK,
+ CLK_CON_DIV_DISPAUD_MI2S,
+ CLK_CON_DIV_DISPAUD_MIXER,
+ CLK_CON_GAT_DISPAUD_BUS,
+ CLK_CON_GAT_DISPAUD_BUS_DISP,
+ CLK_CON_GAT_DISPAUD_BUS_PPMU,
+ CLK_CON_GAT_DISPAUD_APB_AUD,
+ CLK_CON_GAT_DISPAUD_APB_AUD_AMP,
+ CLK_CON_GAT_DISPAUD_APB_DISP,
+ CLK_CON_GAT_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI,
+ CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI,
+ CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK,
+ CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S,
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN,
+ CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK,
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN,
+};
+
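+/*
+ * These fixed-rate clocks model inputs the CMU does not generate itself,
+ * namely the external audio codec clock and the MIPI D-PHY byte/escape
+ * clocks; the rates are nominal values rather than anything programmed
+ * here.
+ */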
+static const struct samsung_fixed_rate_clock dispaud_fixed_clks[] __initconst = {
+ FRATE(0, "frat_dispaud_audiocdclk0", NULL, 0, 100000000),
+ FRATE(0, "frat_dispaud_mixer_bclk_bt", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_bclk_cp", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_bclk_fm", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_sclk_ap", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mipiphy_rxclkesc0", NULL, 0, 188000000),
+ FRATE(0, "frat_dispaud_mipiphy_txbyteclkhs", NULL, 0, 188000000),
+};
+
+static const struct samsung_pll_clock dispaud_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_DISPAUD_AUD_PLL, "fout_dispaud_aud_pll",
+ "oscclk", PLL_LOCKTIME_DISPAUD_AUD_PLL, PLL_CON0_DISPAUD_AUD_PLL,
+ NULL),
+ PLL(pll_1417x, CLK_FOUT_DISPAUD_PLL, "fout_dispaud_pll", "oscclk",
+ PLL_LOCKTIME_DISPAUD_PLL, PLL_CON0_DISPAUD_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_DISPAUD */
+PNAME(mout_dispaud_bus_user_p) = { "oscclk", "gout_mif_cmu_dispaud_bus" };
+PNAME(mout_dispaud_decon_eclk_user_p) = { "oscclk",
+ "gout_mif_cmu_dispaud_decon_eclk" };
+PNAME(mout_dispaud_decon_vclk_user_p) = { "oscclk",
+ "gout_mif_cmu_dispaud_decon_vclk" };
+PNAME(mout_dispaud_decon_eclk_p) = { "gout_dispaud_mux_decon_eclk_user",
+ "gout_dispaud_mux_pll_con" };
+PNAME(mout_dispaud_decon_vclk_p) = { "gout_dispaud_mux_decon_vclk_user",
+ "gout_dispaud_mux_pll_con" };
+PNAME(mout_dispaud_mi2s_p) = { "gout_dispaud_mux_aud_pll_con",
+ "frat_dispaud_audiocdclk0" };
+
+static const struct samsung_mux_clock dispaud_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DISPAUD_BUS_USER, "mout_dispaud_bus_user",
+ mout_dispaud_bus_user_p, CLK_CON_MUX_DISPAUD_BUS_USER, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_ECLK_USER, "mout_dispaud_decon_eclk_user",
+ mout_dispaud_decon_eclk_user_p, CLK_CON_MUX_DISPAUD_DECON_ECLK_USER,
+ 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_VCLK_USER, "mout_dispaud_decon_vclk_user",
+ mout_dispaud_decon_vclk_user_p, CLK_CON_MUX_DISPAUD_DECON_VCLK_USER,
+ 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_ECLK, "mout_dispaud_decon_eclk",
+ mout_dispaud_decon_eclk_p, CLK_CON_MUX_DISPAUD_DECON_ECLK, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_VCLK, "mout_dispaud_decon_vclk",
+ mout_dispaud_decon_vclk_p, CLK_CON_MUX_DISPAUD_DECON_VCLK, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_MI2S, "mout_dispaud_mi2s", mout_dispaud_mi2s_p,
+ CLK_CON_MUX_DISPAUD_MI2S, 12, 1),
+};
+
+static const struct samsung_div_clock dispaud_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DISPAUD_APB, "dout_dispaud_apb",
+ "gout_dispaud_mux_bus_user", CLK_CON_DIV_DISPAUD_APB, 0, 2),
+ DIV(CLK_DOUT_DISPAUD_DECON_ECLK, "dout_dispaud_decon_eclk",
+ "gout_dispaud_mux_decon_eclk", CLK_CON_DIV_DISPAUD_DECON_ECLK, 0, 3),
+ DIV(CLK_DOUT_DISPAUD_DECON_VCLK, "dout_dispaud_decon_vclk",
+ "gout_dispaud_mux_decon_vclk", CLK_CON_DIV_DISPAUD_DECON_VCLK, 0, 3),
+ DIV(CLK_DOUT_DISPAUD_MI2S, "dout_dispaud_mi2s", "gout_dispaud_mux_mi2s",
+ CLK_CON_DIV_DISPAUD_MI2S, 0, 4),
+ DIV(CLK_DOUT_DISPAUD_MIXER, "dout_dispaud_mixer",
+ "gout_dispaud_mux_aud_pll_con", CLK_CON_DIV_DISPAUD_MIXER, 0, 4),
+};
+
+static const struct samsung_gate_clock dispaud_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DISPAUD_BUS, "gout_dispaud_bus",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_BUS_DISP, "gout_dispaud_bus_disp",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS_DISP, 2,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_BUS_PPMU, "gout_dispaud_bus_ppmu",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS_PPMU, 3,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_AUD, "gout_dispaud_apb_aud",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_AUD, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_AUD_AMP, "gout_dispaud_apb_aud_amp",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_AUD_AMP, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_DISP, "gout_dispaud_apb_disp",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_DISP, 1,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN,
+ "gout_dispaud_con_aud_i2s_bclk_bt_in",
+ "frat_dispaud_mixer_bclk_bt",
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN,
+ "gout_dispaud_con_aud_i2s_bclk_fm_in",
+ "frat_dispaud_mixer_bclk_fm",
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_DISPAUD_CON_CP2AUD_BCK, "gout_dispaud_con_cp2aud_bck",
+ "frat_dispaud_mixer_bclk_cp", CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK,
+ 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S,
+ "gout_dispaud_con_ext2aud_bck_gpio_i2s",
+ "frat_dispaud_mixer_sclk_ap",
+ CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_DECON_ECLK, "gout_dispaud_decon_eclk",
+ "dout_dispaud_decon_eclk", CLK_CON_GAT_DISPAUD_DECON_ECLK, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_DECON_VCLK, "gout_dispaud_decon_vclk",
+ "dout_dispaud_decon_vclk", CLK_CON_GAT_DISPAUD_DECON_VCLK, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MI2S_AMP_I2SCODCLKI,
+ "gout_dispaud_mi2s_amp_i2scodclki", "dout_dispaud_mi2s",
+ CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MI2S_AUD_I2SCODCLKI,
+ "gout_dispaud_mi2s_aud_i2scodclki", "dout_dispaud_mi2s",
+ CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MIXER_AUD_SYSCLK, "gout_dispaud_mixer_aud_sysclk",
+ "dout_dispaud_mixer", CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_AUD_PLL, "gout_dispaud_mux_aud_pll",
+ "gout_dispaud_mux_aud_pll_con", CLK_CON_GAT_DISPAUD_MUX_AUD_PLL,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_AUD_PLL_CON, "gout_dispaud_mux_aud_pll_con",
+ "fout_dispaud_aud_pll", CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_BUS_USER, "gout_dispaud_mux_bus_user",
+ "mout_dispaud_bus_user", CLK_CON_GAT_DISPAUD_MUX_BUS_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_ECLK_USER,
+ "gout_dispaud_mux_decon_eclk_user", "mout_dispaud_decon_eclk_user",
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_VCLK_USER,
+ "gout_dispaud_mux_decon_vclk_user", "mout_dispaud_decon_vclk_user",
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER,
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user",
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user_con",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON,
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user_con",
+ "frat_dispaud_mipiphy_rxclkesc0",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER,
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user",
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user_con",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON,
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user_con",
+ "frat_dispaud_mipiphy_txbyteclkhs",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_ECLK, "gout_dispaud_mux_decon_eclk",
+ "mout_dispaud_decon_eclk", CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_VCLK, "gout_dispaud_mux_decon_vclk",
+ "mout_dispaud_decon_vclk", CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MI2S, "gout_dispaud_mux_mi2s",
+ "mout_dispaud_mi2s", CLK_CON_GAT_DISPAUD_MUX_MI2S, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_PLL, "gout_dispaud_mux_pll",
+ "gout_dispaud_mux_pll_con", CLK_CON_GAT_DISPAUD_MUX_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_PLL_CON, "gout_dispaud_mux_pll_con",
+ "fout_dispaud_pll", CLK_CON_GAT_DISPAUD_MUX_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info dispaud_cmu_info __initconst = {
+ .fixed_clks = dispaud_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(dispaud_fixed_clks),
+ .pll_clks = dispaud_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(dispaud_pll_clks),
+ .mux_clks = dispaud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dispaud_mux_clks),
+ .div_clks = dispaud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dispaud_div_clks),
+ .gate_clks = dispaud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dispaud_gate_clks),
+ .clk_regs = dispaud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dispaud_clk_regs),
+ .nr_clk_ids = DISPAUD_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_FSYS (0x13730000)
+ */
+#define PLL_LOCKTIME_FSYS_USB_PLL 0x0000
+#define PLL_CON0_FSYS_USB_PLL 0x0100
+#define CLK_CON_GAT_FSYS_MUX_USB_PLL 0x0200
+#define CLK_CON_GAT_FSYS_MUX_USB_PLL_CON 0x0200
+#define CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER 0x0230
+#define CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON 0x0230
+#define CLK_CON_GAT_FSYS_BUSP3_HCLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC0_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC1_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC2_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0 0x0804
+#define CLK_CON_GAT_FSYS_PPMU_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_PPMU_PCLK 0x0804
+#define CLK_CON_GAT_FSYS_SROMC_HCLK 0x0804
+#define CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK 0x0828
+
+static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_FSYS_USB_PLL,
+ PLL_CON0_FSYS_USB_PLL,
+ CLK_CON_GAT_FSYS_MUX_USB_PLL,
+ CLK_CON_GAT_FSYS_MUX_USB_PLL_CON,
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER,
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON,
+ CLK_CON_GAT_FSYS_BUSP3_HCLK,
+ CLK_CON_GAT_FSYS_MMC0_ACLK,
+ CLK_CON_GAT_FSYS_MMC1_ACLK,
+ CLK_CON_GAT_FSYS_MMC2_ACLK,
+ CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0,
+ CLK_CON_GAT_FSYS_PPMU_ACLK,
+ CLK_CON_GAT_FSYS_PPMU_PCLK,
+ CLK_CON_GAT_FSYS_SROMC_HCLK,
+ CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK,
+ CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD,
+ CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL,
+ CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK,
+};
+
+static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
+ FRATE(0, "frat_fsys_usb20drd_phyclock", NULL, 0, 60000000),
+};
+
+static const struct samsung_pll_clock fsys_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_FSYS_USB_PLL, "fout_fsys_usb_pll", "oscclk",
+ PLL_LOCKTIME_FSYS_USB_PLL, PLL_CON0_FSYS_USB_PLL, NULL),
+};
+
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_BUSP3_HCLK, "gout_fsys_busp3_hclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_BUSP3_HCLK, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_ACLK, "gout_fsys_mmc0_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC0_ACLK, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_ACLK, "gout_fsys_mmc1_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC1_ACLK, 9,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC2_ACLK, "gout_fsys_mmc2_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC2_ACLK, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PDMA0_ACLK_PDMA0, "gout_fsys_pdma0_aclk_pdma0",
+ "gout_fsys_upsizer_bus1_aclk", CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0,
+ 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PPMU_ACLK, "gout_fsys_ppmu_aclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_PPMU_ACLK, 17,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PPMU_PCLK, "gout_fsys_ppmu_pclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_PPMU_PCLK, 18,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SROMC_HCLK, "gout_fsys_sromc_hclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_SROMC_HCLK, 6,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UPSIZER_BUS1_ACLK, "gout_fsys_upsizer_bus1_aclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_ACLK_HSDRD, "gout_fsys_usb20drd_aclk_hsdrd",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD, 20,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_HCLK_USB20_CTRL,
+ "gout_fsys_usb20drd_hclk_usb20_ctrl", "gout_fsys_busp3_hclk",
+ CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_HSDRD_REF_CLK,
+ "gout_fsys_usb20drd_hsdrd_ref_clk",
+ "gout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER,
+ "gout_fsys_mux_usb20drd_phyclock_user",
+ "gout_fsys_mux_usb20drd_phyclock_user_con",
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON,
+ "gout_fsys_mux_usb20drd_phyclock_user_con",
+ "frat_fsys_usb20drd_phyclock",
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB_PLL, "gout_fsys_mux_usb_pll",
+ "gout_fsys_mux_usb_pll_con", CLK_CON_GAT_FSYS_MUX_USB_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB_PLL_CON, "gout_fsys_mux_usb_pll_con",
+ "fout_fsys_usb_pll", CLK_CON_GAT_FSYS_MUX_USB_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
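+/*
+ * CMU_FSYS needs no local mux or divider tables: every leaf clock above
+ * traces back to a CMU_MIF output, the USB PLL, or the fixed USB PHY
+ * clock.
+ */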
+static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .fixed_clks = fsys_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fsys_fixed_clks),
+ .pll_clks = fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys_pll_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .nr_clk_ids = FSYS_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_G3D (0x11460000)
+ */
+#define PLL_LOCKTIME_G3D_PLL 0x0000
+#define PLL_CON0_G3D_PLL 0x0100
+#define CLK_CON_GAT_G3D_MUX_PLL 0x0200
+#define CLK_CON_GAT_G3D_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_G3D_MUX_SWITCH_USER 0x0204
+#define CLK_CON_MUX_G3D_SWITCH_USER 0x0204
+#define CLK_CON_GAT_G3D_MUX 0x0208
+#define CLK_CON_MUX_G3D 0x0208
+#define CLK_CON_DIV_G3D_BUS 0x0400
+#define CLK_CON_DIV_G3D_APB 0x0404
+#define CLK_CON_GAT_G3D_ASYNCS_D0_CLK 0x0804
+#define CLK_CON_GAT_G3D_ASYNC_PCLKM 0x0804
+#define CLK_CON_GAT_G3D_CLK 0x0804
+#define CLK_CON_GAT_G3D_PPMU_ACLK 0x0804
+#define CLK_CON_GAT_G3D_QE_ACLK 0x0804
+#define CLK_CON_GAT_G3D_PPMU_PCLK 0x0808
+#define CLK_CON_GAT_G3D_QE_PCLK 0x0808
+#define CLK_CON_GAT_G3D_SYSREG_PCLK 0x0808
+
+static const unsigned long g3d_clk_regs[] __initconst = {
+ PLL_LOCKTIME_G3D_PLL,
+ PLL_CON0_G3D_PLL,
+ CLK_CON_GAT_G3D_MUX_PLL,
+ CLK_CON_GAT_G3D_MUX_PLL_CON,
+ CLK_CON_GAT_G3D_MUX_SWITCH_USER,
+ CLK_CON_MUX_G3D_SWITCH_USER,
+ CLK_CON_GAT_G3D_MUX,
+ CLK_CON_MUX_G3D,
+ CLK_CON_DIV_G3D_BUS,
+ CLK_CON_DIV_G3D_APB,
+ CLK_CON_GAT_G3D_ASYNCS_D0_CLK,
+ CLK_CON_GAT_G3D_ASYNC_PCLKM,
+ CLK_CON_GAT_G3D_CLK,
+ CLK_CON_GAT_G3D_PPMU_ACLK,
+ CLK_CON_GAT_G3D_QE_ACLK,
+ CLK_CON_GAT_G3D_PPMU_PCLK,
+ CLK_CON_GAT_G3D_QE_PCLK,
+ CLK_CON_GAT_G3D_SYSREG_PCLK,
+};
+
+static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_G3D_PLL, PLL_CON0_G3D_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_G3D */
+PNAME(mout_g3d_switch_user_p) = { "oscclk", "gout_mif_cmu_g3d_switch" };
+PNAME(mout_g3d_p) = { "gout_g3d_mux_pll_con",
+ "gout_g3d_mux_switch_user" };
+
+static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_G3D_SWITCH_USER, "mout_g3d_switch_user",
+ mout_g3d_switch_user_p, CLK_CON_MUX_G3D_SWITCH_USER, 12, 1),
+ MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, CLK_CON_MUX_G3D, 12, 1),
+};
+
+static const struct samsung_div_clock g3d_div_clks[] __initconst = {
+ DIV(CLK_DOUT_G3D_APB, "dout_g3d_apb", "dout_g3d_bus",
+ CLK_CON_DIV_G3D_APB, 0, 3),
+ DIV(CLK_DOUT_G3D_BUS, "dout_g3d_bus", "gout_g3d_mux",
+ CLK_CON_DIV_G3D_BUS, 0, 3),
+};
+
+static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_G3D_ASYNCS_D0_CLK, "gout_g3d_asyncs_d0_clk",
+ "dout_g3d_bus", CLK_CON_GAT_G3D_ASYNCS_D0_CLK, 1, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_ASYNC_PCLKM, "gout_g3d_async_pclkm", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_ASYNC_PCLKM, 0, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_CLK, "gout_g3d_clk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_CLK, 6, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_PPMU_ACLK, "gout_g3d_ppmu_aclk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_PPMU_ACLK, 7, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_PPMU_PCLK, "gout_g3d_ppmu_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_PPMU_PCLK, 4, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_QE_ACLK, "gout_g3d_qe_aclk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_QE_ACLK, 8, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_QE_PCLK, "gout_g3d_qe_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_QE_PCLK, 5, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_SYSREG_PCLK, "gout_g3d_sysreg_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_SYSREG_PCLK, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX_SWITCH_USER, "gout_g3d_mux_switch_user",
+ "mout_g3d_switch_user", CLK_CON_GAT_G3D_MUX_SWITCH_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX, "gout_g3d_mux", "mout_g3d", CLK_CON_GAT_G3D_MUX,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX_PLL, "gout_g3d_mux_pll", "gout_g3d_mux_pll_con",
+ CLK_CON_GAT_G3D_MUX_PLL, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_MUX_PLL_CON, "gout_g3d_mux_pll_con", "fout_g3d_pll",
+ CLK_CON_GAT_G3D_MUX_PLL_CON, 12, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info g3d_cmu_info __initconst = {
+ .pll_clks = g3d_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
+ .mux_clks = g3d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
+ .div_clks = g3d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g3d_div_clks),
+ .gate_clks = g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
+ .clk_regs = g3d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+ .nr_clk_ids = G3D_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_ISP (0x144d0000)
+ */
+#define PLL_LOCKTIME_ISP_PLL 0x0000
+#define PLL_CON0_ISP_PLL 0x0100
+#define CLK_CON_GAT_ISP_MUX_PLL 0x0200
+#define CLK_CON_GAT_ISP_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_ISP_MUX_VRA_USER 0x0210
+#define CLK_CON_MUX_ISP_VRA_USER 0x0210
+#define CLK_CON_GAT_ISP_MUX_CAM_USER 0x0214
+#define CLK_CON_MUX_ISP_CAM_USER 0x0214
+#define CLK_CON_GAT_ISP_MUX_USER 0x0218
+#define CLK_CON_MUX_ISP_USER 0x0218
+#define CLK_CON_GAT_ISP_MUX_VRA 0x0220
+#define CLK_CON_MUX_ISP_VRA 0x0220
+#define CLK_CON_GAT_ISP_MUX_CAM 0x0224
+#define CLK_CON_MUX_ISP_CAM 0x0224
+#define CLK_CON_GAT_ISP_MUX_ISP 0x0228
+#define CLK_CON_MUX_ISP_ISP 0x0228
+#define CLK_CON_GAT_ISP_MUX_ISPD 0x022c
+#define CLK_CON_MUX_ISP_ISPD 0x022c
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER 0x0230
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON 0x0230
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER 0x0234
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON 0x0234
+#define CLK_CON_DIV_ISP_APB 0x0400
+#define CLK_CON_DIV_ISP_CAM_HALF 0x0404
+#define CLK_CON_GAT_ISP_VRA 0x0810
+#define CLK_CON_GAT_ISP_ISPD 0x0818
+#define CLK_CON_GAT_ISP_ISPD_PPMU 0x0818
+#define CLK_CON_GAT_ISP_CAM 0x081c
+#define CLK_CON_GAT_ISP_CAM_HALF 0x0820
+
+static const unsigned long isp_clk_regs[] __initconst = {
+ PLL_LOCKTIME_ISP_PLL,
+ PLL_CON0_ISP_PLL,
+ CLK_CON_GAT_ISP_MUX_PLL,
+ CLK_CON_GAT_ISP_MUX_PLL_CON,
+ CLK_CON_GAT_ISP_MUX_VRA_USER,
+ CLK_CON_MUX_ISP_VRA_USER,
+ CLK_CON_GAT_ISP_MUX_CAM_USER,
+ CLK_CON_MUX_ISP_CAM_USER,
+ CLK_CON_GAT_ISP_MUX_USER,
+ CLK_CON_MUX_ISP_USER,
+ CLK_CON_GAT_ISP_MUX_VRA,
+ CLK_CON_MUX_ISP_VRA,
+ CLK_CON_GAT_ISP_MUX_CAM,
+ CLK_CON_MUX_ISP_CAM,
+ CLK_CON_GAT_ISP_MUX_ISP,
+ CLK_CON_MUX_ISP_ISP,
+ CLK_CON_GAT_ISP_MUX_ISPD,
+ CLK_CON_MUX_ISP_ISPD,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON,
+ CLK_CON_DIV_ISP_APB,
+ CLK_CON_DIV_ISP_CAM_HALF,
+ CLK_CON_GAT_ISP_VRA,
+ CLK_CON_GAT_ISP_ISPD,
+ CLK_CON_GAT_ISP_ISPD_PPMU,
+ CLK_CON_GAT_ISP_CAM,
+ CLK_CON_GAT_ISP_CAM_HALF,
+};
+
+static const struct samsung_fixed_rate_clock isp_fixed_clks[] __initconst = {
+ FRATE(0, "frat_isp_rxbyteclkhs0_sensor0", NULL, 0, 188000000),
+ FRATE(0, "frat_isp_rxbyteclkhs0_sensor1", NULL, 0, 188000000),
+};
+
+static const struct samsung_pll_clock isp_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk",
+ PLL_LOCKTIME_ISP_PLL, PLL_CON0_ISP_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_ISP */
+PNAME(mout_isp_cam_user_p) = { "oscclk", "gout_mif_cmu_isp_cam" };
+PNAME(mout_isp_user_p) = { "oscclk", "gout_mif_cmu_isp_isp" };
+PNAME(mout_isp_vra_user_p) = { "oscclk", "gout_mif_cmu_isp_vra" };
+PNAME(mout_isp_cam_p) = { "gout_isp_mux_cam_user",
+ "gout_isp_mux_pll_con" };
+PNAME(mout_isp_isp_p) = { "gout_isp_mux_user", "gout_isp_mux_pll_con" };
+PNAME(mout_isp_ispd_p) = { "gout_isp_mux_vra", "gout_isp_mux_cam" };
+PNAME(mout_isp_vra_p) = { "gout_isp_mux_vra_user",
+ "gout_isp_mux_pll_con" };
+
+static const struct samsung_mux_clock isp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_ISP_CAM_USER, "mout_isp_cam_user", mout_isp_cam_user_p,
+ CLK_CON_MUX_ISP_CAM_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_USER, "mout_isp_user", mout_isp_user_p,
+ CLK_CON_MUX_ISP_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_VRA_USER, "mout_isp_vra_user", mout_isp_vra_user_p,
+ CLK_CON_MUX_ISP_VRA_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_CAM, "mout_isp_cam", mout_isp_cam_p,
+ CLK_CON_MUX_ISP_CAM, 12, 1),
+ MUX(CLK_MOUT_ISP_ISP, "mout_isp_isp", mout_isp_isp_p,
+ CLK_CON_MUX_ISP_ISP, 12, 1),
+ MUX(CLK_MOUT_ISP_ISPD, "mout_isp_ispd", mout_isp_ispd_p,
+ CLK_CON_MUX_ISP_ISPD, 12, 1),
+ MUX(CLK_MOUT_ISP_VRA, "mout_isp_vra", mout_isp_vra_p,
+ CLK_CON_MUX_ISP_VRA, 12, 1),
+};
+
+static const struct samsung_div_clock isp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_ISP_APB, "dout_isp_apb", "gout_isp_mux_vra",
+ CLK_CON_DIV_ISP_APB, 0, 2),
+ DIV(CLK_DOUT_ISP_CAM_HALF, "dout_isp_cam_half", "gout_isp_mux_cam",
+ CLK_CON_DIV_ISP_CAM_HALF, 0, 2),
+};
+
+static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_ISP_CAM, "gout_isp_cam", "gout_isp_mux_cam",
+ CLK_CON_GAT_ISP_CAM, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_CAM_HALF, "gout_isp_cam_half", "dout_isp_cam_half",
+ CLK_CON_GAT_ISP_CAM_HALF, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_ISPD, "gout_isp_ispd", "gout_isp_mux_ispd",
+ CLK_CON_GAT_ISP_ISPD, 0, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_ISPD_PPMU, "gout_isp_ispd_ppmu", "gout_isp_mux_ispd",
+ CLK_CON_GAT_ISP_ISPD_PPMU, 1, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_VRA, "gout_isp_vra", "gout_isp_mux_vra",
+ CLK_CON_GAT_ISP_VRA, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_CAM_USER, "gout_isp_mux_cam_user",
+ "mout_isp_cam_user", CLK_CON_GAT_ISP_MUX_CAM_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_USER, "gout_isp_mux_user", "mout_isp_user",
+ CLK_CON_GAT_ISP_MUX_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_VRA_USER, "gout_isp_mux_vra_user",
+ "mout_isp_vra_user", CLK_CON_GAT_ISP_MUX_VRA_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER,
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user",
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user_con",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON,
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user_con",
+ "frat_isp_rxbyteclkhs0_sensor1",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER,
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user",
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user_con",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON,
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user_con",
+ "frat_isp_rxbyteclkhs0_sensor0",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_CAM, "gout_isp_mux_cam", "mout_isp_cam",
+ CLK_CON_GAT_ISP_MUX_CAM, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_ISP, "gout_isp_mux_isp", "mout_isp_isp",
+ CLK_CON_GAT_ISP_MUX_ISP, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_ISPD, "gout_isp_mux_ispd", "mout_isp_ispd",
+ CLK_CON_GAT_ISP_MUX_ISPD, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_VRA, "gout_isp_mux_vra", "mout_isp_vra",
+ CLK_CON_GAT_ISP_MUX_VRA, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_PLL, "gout_isp_mux_pll", "gout_isp_mux_pll_con",
+ CLK_CON_GAT_ISP_MUX_PLL, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_PLL_CON, "gout_isp_mux_pll_con", "fout_isp_pll",
+ CLK_CON_GAT_ISP_MUX_PLL_CON, 12, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info isp_cmu_info __initconst = {
+ .fixed_clks = isp_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(isp_fixed_clks),
+ .pll_clks = isp_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(isp_pll_clks),
+ .mux_clks = isp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(isp_mux_clks),
+ .div_clks = isp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(isp_div_clks),
+ .gate_clks = isp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(isp_gate_clks),
+ .clk_regs = isp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+ .nr_clk_ids = ISP_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_MFCMSCL (0x12cb0000)
+ */
+#define CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER 0x0200
+#define CLK_CON_MUX_MFCMSCL_MSCL_USER 0x0200
+#define CLK_CON_GAT_MFCMSCL_MUX_MFC_USER 0x0204
+#define CLK_CON_MUX_MFCMSCL_MFC_USER 0x0204
+#define CLK_CON_DIV_MFCMSCL_APB 0x0400
+#define CLK_CON_GAT_MFCMSCL_MSCL 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_BI 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_D 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_JPEG 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_POLY 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_PPMU 0x0804
+#define CLK_CON_GAT_MFCMSCL_MFC 0x0808
+
+static const unsigned long mfcmscl_clk_regs[] __initconst = {
+ CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER,
+ CLK_CON_MUX_MFCMSCL_MSCL_USER,
+ CLK_CON_GAT_MFCMSCL_MUX_MFC_USER,
+ CLK_CON_MUX_MFCMSCL_MFC_USER,
+ CLK_CON_DIV_MFCMSCL_APB,
+ CLK_CON_GAT_MFCMSCL_MSCL,
+ CLK_CON_GAT_MFCMSCL_MSCL_BI,
+ CLK_CON_GAT_MFCMSCL_MSCL_D,
+ CLK_CON_GAT_MFCMSCL_MSCL_JPEG,
+ CLK_CON_GAT_MFCMSCL_MSCL_POLY,
+ CLK_CON_GAT_MFCMSCL_MSCL_PPMU,
+ CLK_CON_GAT_MFCMSCL_MFC,
+};
+
+/* List of parent clocks for muxes in CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_user_p) = { "oscclk", "gout_mif_cmu_mfcmscl_mfc" };
+PNAME(mout_mfcmscl_mscl_user_p) = { "oscclk", "gout_mif_cmu_mfcmscl_mscl" };
+
+static const struct samsung_mux_clock mfcmscl_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFCMSCL_MFC_USER, "mout_mfcmscl_mfc_user",
+ mout_mfcmscl_mfc_user_p, CLK_CON_MUX_MFCMSCL_MFC_USER, 12, 1),
+ MUX(CLK_MOUT_MFCMSCL_MSCL_USER, "mout_mfcmscl_mscl_user",
+ mout_mfcmscl_mscl_user_p, CLK_CON_MUX_MFCMSCL_MSCL_USER, 12, 1),
+};
+
+static const struct samsung_div_clock mfcmscl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFCMSCL_APB, "dout_mfcmscl_apb",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_DIV_MFCMSCL_APB, 0, 2),
+};
+
+static const struct samsung_gate_clock mfcmscl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MFCMSCL_MFC, "gout_mfcmscl_mfc",
+ "gout_mfcmscl_mux_mfc_user", CLK_CON_GAT_MFCMSCL_MFC, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL, "gout_mfcmscl_mscl",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_BI, "gout_mfcmscl_mscl_bi",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_BI, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_D, "gout_mfcmscl_mscl_d",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL_D, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_JPEG, "gout_mfcmscl_mscl_jpeg",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_JPEG, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_POLY, "gout_mfcmscl_mscl_poly",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_POLY, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_PPMU, "gout_mfcmscl_mscl_ppmu",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL_PPMU, 5,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MUX_MFC_USER, "gout_mfcmscl_mux_mfc_user",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_MFCMSCL_MUX_MFC_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MUX_MSCL_USER, "gout_mfcmscl_mux_mscl_user",
+ "mout_mfcmscl_mscl_user", CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
+ .mux_clks = mfcmscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfcmscl_mux_clks),
+ .div_clks = mfcmscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfcmscl_div_clks),
+ .gate_clks = mfcmscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfcmscl_gate_clks),
+ .clk_regs = mfcmscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfcmscl_clk_regs),
+ .nr_clk_ids = MFCMSCL_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_PERI (0x101f0000)
+ */
+#define CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CPUCL0_CLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CPUCL1_CLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CLK 0x0800
+#define CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO2_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO5_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO6_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO7_PCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C4_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C6_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C3_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C5_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C2_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C1_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C0_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C4_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C5_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C6_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C7_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C8_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C3_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C2_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C1_PCLK 0x0810
+#define CLK_CON_GAT_PERI_MCT_PCLK 0x0810
+#define CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0 0x0810
+#define CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SFRIF_TMU_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI2_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI4_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI3_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART2_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART1_EXT_UCLK 0x0830
+#define CLK_CON_GAT_PERI_UART2_EXT_UCLK 0x0834
+#define CLK_CON_GAT_PERI_UART0_EXT_UCLK 0x0838
+#define CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK 0x083c
+#define CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK 0x0840
+#define CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK 0x0844
+#define CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK 0x0848
+#define CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK 0x084c
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK,
+ CLK_CON_GAT_PERI_TMU_CPUCL0_CLK,
+ CLK_CON_GAT_PERI_TMU_CPUCL1_CLK,
+ CLK_CON_GAT_PERI_TMU_CLK,
+ CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK,
+ CLK_CON_GAT_PERI_GPIO2_PCLK,
+ CLK_CON_GAT_PERI_GPIO5_PCLK,
+ CLK_CON_GAT_PERI_GPIO6_PCLK,
+ CLK_CON_GAT_PERI_GPIO7_PCLK,
+ CLK_CON_GAT_PERI_HSI2C4_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C6_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C3_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C5_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C2_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C1_IPCLK,
+ CLK_CON_GAT_PERI_I2C0_PCLK,
+ CLK_CON_GAT_PERI_I2C4_PCLK,
+ CLK_CON_GAT_PERI_I2C5_PCLK,
+ CLK_CON_GAT_PERI_I2C6_PCLK,
+ CLK_CON_GAT_PERI_I2C7_PCLK,
+ CLK_CON_GAT_PERI_I2C8_PCLK,
+ CLK_CON_GAT_PERI_I2C3_PCLK,
+ CLK_CON_GAT_PERI_I2C2_PCLK,
+ CLK_CON_GAT_PERI_I2C1_PCLK,
+ CLK_CON_GAT_PERI_MCT_PCLK,
+ CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0,
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK,
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK,
+ CLK_CON_GAT_PERI_SFRIF_TMU_PCLK,
+ CLK_CON_GAT_PERI_SPI0_PCLK,
+ CLK_CON_GAT_PERI_SPI2_PCLK,
+ CLK_CON_GAT_PERI_SPI1_PCLK,
+ CLK_CON_GAT_PERI_SPI4_PCLK,
+ CLK_CON_GAT_PERI_SPI3_PCLK,
+ CLK_CON_GAT_PERI_UART1_PCLK,
+ CLK_CON_GAT_PERI_UART2_PCLK,
+ CLK_CON_GAT_PERI_UART0_PCLK,
+ CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK,
+ CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK,
+ CLK_CON_GAT_PERI_UART1_EXT_UCLK,
+ CLK_CON_GAT_PERI_UART2_EXT_UCLK,
+ CLK_CON_GAT_PERI_UART0_EXT_UCLK,
+ CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK,
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_BUSP1_PERIC0_HCLK, "gout_peri_busp1_peric0_hclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO2_PCLK, "gout_peri_gpio2_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO2_PCLK, 7,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO5_PCLK, "gout_peri_gpio5_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO5_PCLK, 8,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO6_PCLK, "gout_peri_gpio6_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO6_PCLK, 9,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO7_PCLK, "gout_peri_gpio7_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO7_PCLK, 10,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C4_IPCLK, "gout_peri_hsi2c4_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C4_IPCLK, 14,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C6_IPCLK, "gout_peri_hsi2c6_ipclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_HSI2C6_IPCLK, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C3_IPCLK, "gout_peri_hsi2c3_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C3_IPCLK, 13,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C5_IPCLK, "gout_peri_hsi2c5_ipclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_HSI2C5_IPCLK, 15,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C2_IPCLK, "gout_peri_hsi2c2_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C2_IPCLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C1_IPCLK, "gout_peri_hsi2c1_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C1_IPCLK, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C0_PCLK, "gout_peri_i2c0_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C0_PCLK, 21,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C4_PCLK, "gout_peri_i2c4_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C4_PCLK, 17,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C5_PCLK, "gout_peri_i2c5_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C5_PCLK, 18,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C6_PCLK, "gout_peri_i2c6_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C6_PCLK, 19,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C7_PCLK, "gout_peri_i2c7_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C7_PCLK, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C8_PCLK, "gout_peri_i2c8_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C8_PCLK, 25,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C3_PCLK, "gout_peri_i2c3_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C3_PCLK, 20,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C2_PCLK, "gout_peri_i2c2_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C2_PCLK, 22,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C1_PCLK, "gout_peri_i2c1_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C1_PCLK, 23,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_MCT_PCLK, "gout_peri_mct_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_MCT_PCLK, 26,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_PWM_MOTOR_OSCCLK, "gout_peri_pwm_motor_oscclk",
+ "oscclk", CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_PWM_MOTOR_PCLK_S0, "gout_peri_pwm_motor_pclk_s0",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0, 29,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_CPUCL0_PCLK,
+ "gout_peri_sfrif_tmu_cpucl0_pclk", "gout_mif_cmu_peri_bus",
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_CPUCL1_PCLK,
+ "gout_peri_sfrif_tmu_cpucl1_pclk", "gout_mif_cmu_peri_bus",
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_PCLK, "gout_peri_sfrif_tmu_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SFRIF_TMU_PCLK, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "gout_peri_spi0_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI0_PCLK, 6,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SPI_EXT_CLK, "gout_peri_spi0_spi_ext_clk",
+ "gout_mif_cmu_peri_spi0", CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI2_PCLK, "gout_peri_spi2_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI2_PCLK, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI2_SPI_EXT_CLK, "gout_peri_spi2_spi_ext_clk",
+ "gout_mif_cmu_peri_spi2", CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI1_PCLK, "gout_peri_spi1_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI1_PCLK, 5,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI1_SPI_EXT_CLK, "gout_peri_spi1_spi_ext_clk",
+ "gout_mif_cmu_peri_spi1", CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI4_PCLK, "gout_peri_spi4_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI4_PCLK, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI4_SPI_EXT_CLK, "gout_peri_spi4_spi_ext_clk",
+ "gout_mif_cmu_peri_spi4", CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI3_PCLK, "gout_peri_spi3_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI3_PCLK, 7,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI3_SPI_EXT_CLK, "gout_peri_spi3_spi_ext_clk",
+ "gout_mif_cmu_peri_spi3", CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CPUCL0_CLK, "gout_peri_tmu_cpucl0_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CPUCL0_CLK, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CPUCL1_CLK, "gout_peri_tmu_cpucl1_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CPUCL1_CLK, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CLK, "gout_peri_tmu_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CLK, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART1_EXT_UCLK, "gout_peri_uart1_ext_uclk",
+ "gout_mif_cmu_peri_uart1", CLK_CON_GAT_PERI_UART1_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "gout_peri_uart1_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART1_PCLK, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART2_EXT_UCLK, "gout_peri_uart2_ext_uclk",
+ "gout_mif_cmu_peri_uart2", CLK_CON_GAT_PERI_UART2_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "gout_peri_uart2_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART2_PCLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART0_EXT_UCLK, "gout_peri_uart0_ext_uclk",
+ "gout_mif_cmu_peri_uart0", CLK_CON_GAT_PERI_UART0_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART0_PCLK, "gout_peri_uart0_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART0_PCLK, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_WDT_CPUCL0_PCLK, "gout_peri_wdt_cpucl0_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK, 13,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_WDT_CPUCL1_PCLK, "gout_peri_wdt_cpucl1_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK, 14,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .nr_clk_ids = PERI_NR_CLK,
+};
+
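+/*
+ * Single probe path for all seven CMUs: the samsung_cmu_info matched via
+ * OF data is handed to the common arm64 helper, which maps the register
+ * block and registers the tables declared above.
+ */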
+static int __init exynos7870_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos7870_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos7870-cmu-mif",
+ .data = &mif_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-dispaud",
+ .data = &dispaud_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-g3d",
+ .data = &g3d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-isp",
+ .data = &isp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-mfcmscl",
+ .data = &mfcmscl_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-peri",
+ .data = &peri_cmu_info,
+ }, {
+ },
+};
+
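+/*
+ * probe is __init, so the driver struct is marked __refdata to avoid a
+ * section mismatch warning; suppress_bind_attrs keeps userspace from
+ * rebinding the device after init memory has been freed.
+ */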
+static struct platform_driver exynos7870_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos7870-cmu",
+ .of_match_table = exynos7870_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos7870_cmu_probe,
+};
+
+static int __init exynos7870_cmu_init(void)
+{
+ return platform_driver_register(&exynos7870_cmu_driver);
+}
+core_initcall(exynos7870_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index fc42251731ed..ba7cf79bc300 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -6,8 +6,8 @@
  * Common Clock Framework support for Exynos7885 SoC.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index e00e213b1201..56f27697c76b 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -6,8 +6,8 @@
  * Common Clock Framework support for Exynos850 SoC.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
@@ -1360,7 +1360,7 @@ static const unsigned long cpucl1_clk_regs[] __initconst = {
 	CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
 };
 
-/* List of parent clocks for Muxes in CMU_CPUCL0 */
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
 PNAME(mout_pll_cpucl1_p)	= { "oscclk", "fout_cpucl1_pll" };
 PNAME(mout_cpucl1_switch_user_p)	= { "oscclk", "dout_cpucl1_switch" };
 PNAME(mout_cpucl1_dbg_user_p)	= { "oscclk", "dout_cpucl1_dbg" };
diff --git a/drivers/clk/samsung/clk-exynos8895.c b/drivers/clk/samsung/clk-exynos8895.c
new file mode 100644
index 000000000000..e6980a8f026f
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos8895.c
@@ -0,0 +1,2803 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Common Clock Framework support for Exynos8895 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos8895.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_VPU_BUS + 1)
+#define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK + 1)
+#define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_USI03_I_SCLK_USI + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK + 1)
+#define CLKS_NR_PERIS (CLK_GOUT_PERIS_XIU_P_PERIS_ACLK + 1)
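+/*
+ * Hypothetical example of the rule above: if the bindings header ended
+ * with "#define CLK_GOUT_CMU_VPU_BUS 145" (made-up value), CLKS_NR_TOP
+ * would evaluate to 146, keeping every ID in that header addressable.
+ */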
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x15a80000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_LOCKTIME_PLL_SHARED2 0x0008
+#define PLL_LOCKTIME_PLL_SHARED3 0x000c
+#define PLL_LOCKTIME_PLL_SHARED4 0x0010
+#define PLL_CON0_MUX_CP2AP_MIF_CLK_USER 0x0100
+#define PLL_CON2_MUX_CP2AP_MIF_CLK_USER 0x0108
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define PLL_CON0_PLL_SHARED2 0x0160
+#define PLL_CON0_PLL_SHARED3 0x0180
+#define PLL_CON0_PLL_SHARED4 0x01a0
+#define CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_VRA 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_DBG_BUS 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_IVA_BUS 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_BUS 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11 0x10e8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12 0x10ec
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13 0x10f0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10f4
+#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS 0x10f8
+#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD 0x10fc
+#define CLK_CON_MUX_MUX_CLKCMU_VPU_BUS 0x1100
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x1104
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x1108
+#define CLK_CON_DIV_CLKCMU_ABOX_CPUABOX 0x1800
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUSC_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C 0x1810
+#define CLK_CON_DIV_CLKCMU_CAM_BUS 0x1814
+#define CLK_CON_DIV_CLKCMU_CAM_TPU0 0x1818
+#define CLK_CON_DIV_CLKCMU_CAM_TPU1 0x181c
+#define CLK_CON_DIV_CLKCMU_CAM_VRA 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1828
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x182c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1830
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_DBG_BUS 0x1840
+#define CLK_CON_DIV_CLKCMU_DCAM_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_DCAM_IMGD 0x1848
+#define CLK_CON_DIV_CLKCMU_DPU_BUS 0x184c
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_FSYS0_BUS 0x1854
+#define CLK_CON_DIV_CLKCMU_FSYS0_DPGTC 0x1858
+#define CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD 0x185c
+#define CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD 0x1860
+#define CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30 0x1864
+#define CLK_CON_DIV_CLKCMU_FSYS1_BUS 0x1868
+#define CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD 0x186c
+#define CLK_CON_DIV_CLKCMU_FSYS1_PCIE 0x1870
+#define CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD 0x1874
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1878
+#define CLK_CON_DIV_CLKCMU_G2D_JPEG 0x187c
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1880
+#define CLK_CON_DIV_CLKCMU_HPM 0x1884
+#define CLK_CON_DIV_CLKCMU_IMEM_BUS 0x1888
+#define CLK_CON_DIV_CLKCMU_ISPHQ_BUS 0x188c
+#define CLK_CON_DIV_CLKCMU_ISPLP_BUS 0x1890
+#define CLK_CON_DIV_CLKCMU_IVA_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_MFC_BUS 0x1898
+#define CLK_CON_DIV_CLKCMU_MODEM_SHARED0 0x189c
+#define CLK_CON_DIV_CLKCMU_MODEM_SHARED1 0x18a0
+#define CLK_CON_DIV_CLKCMU_OTP 0x18a4
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18a8
+#define CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG 0x18ac
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI00 0x18b0
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI01 0x18b4
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI02 0x18b8
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI03 0x18bc
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_UART_BT 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI04 0x18d4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI05 0x18d8
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI06 0x18dc
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI07 0x18e0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI08 0x18e4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI09 0x18e8
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI10 0x18ec
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI11 0x18f0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI12 0x18f4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI13 0x18f8
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18fc
+#define CLK_CON_DIV_CLKCMU_SRDZ_BUS 0x1900
+#define CLK_CON_DIV_CLKCMU_SRDZ_IMGD 0x1904
+#define CLK_CON_DIV_CLKCMU_VPU_BUS 0x1908
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x190c
+#define CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2 0x1910
+#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV2 0x1914
+#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV4 0x1918
+#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV2 0x191c
+#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV4 0x1920
+#define CLK_CON_DIV_DIV_PLL_SHARED2_DIV2 0x1924
+#define CLK_CON_DIV_DIV_PLL_SHARED3_DIV2 0x1928
+#define CLK_CON_DIV_DIV_PLL_SHARED4_DIV2 0x192c
+#define CLK_CON_GAT_CLKCMU_DROOPDETECTOR 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_VRA 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_DBG_BUS 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_IVA_BUS 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01 0x20b8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08 0x20e8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09 0x20ec
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10 0x20f0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11 0x20f4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12 0x20f8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13 0x20fc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x2100
+#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS 0x2104
+#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD 0x2108
+#define CLK_CON_GAT_GATE_CLKCMU_VPU_BUS 0x210c
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_MUX_CP2AP_MIF_CLK_USER,
+ PLL_CON2_MUX_CP2AP_MIF_CLK_USER,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_VRA,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_IVA_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD,
+ CLK_CON_MUX_MUX_CLKCMU_VPU_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUSC_BUS,
+ CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_DIV_CLKCMU_CAM_BUS,
+ CLK_CON_DIV_CLKCMU_CAM_TPU0,
+ CLK_CON_DIV_CLKCMU_CAM_TPU1,
+ CLK_CON_DIV_CLKCMU_CAM_VRA,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_DCAM_BUS,
+ CLK_CON_DIV_CLKCMU_DCAM_IMGD,
+ CLK_CON_DIV_CLKCMU_DPU_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_DIV_CLKCMU_FSYS1_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_FSYS1_PCIE,
+ CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_JPEG,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_IMEM_BUS,
+ CLK_CON_DIV_CLKCMU_ISPHQ_BUS,
+ CLK_CON_DIV_CLKCMU_ISPLP_BUS,
+ CLK_CON_DIV_CLKCMU_IVA_BUS,
+ CLK_CON_DIV_CLKCMU_MFC_BUS,
+ CLK_CON_DIV_CLKCMU_MODEM_SHARED0,
+ CLK_CON_DIV_CLKCMU_MODEM_SHARED1,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI00,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI01,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI02,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI03,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_DIV_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI04,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI05,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI06,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI07,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI08,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI09,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI10,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI11,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI12,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI13,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SRDZ_BUS,
+ CLK_CON_DIV_CLKCMU_SRDZ_IMGD,
+ CLK_CON_DIV_CLKCMU_VPU_BUS,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
+ CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED3_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_GAT_CLKCMU_DROOPDETECTOR,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_VRA,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_IVA_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MFC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0,
+ CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD,
+ CLK_CON_GAT_GATE_CLKCMU_VPU_BUS,
+};
+
+static const struct samsung_pll_rate_table pll_shared0_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 2132000000U, 328, 4, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared1_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 1865500000U, 287, 4, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared2_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 800000000U, 400, 13, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared3_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 630000000U, 315, 13, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared4_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 667333333U, 154, 6, 0),
+};
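+
+/*
+ * Assumed pll35xx rate relation behind the tables above:
+ * fout = (fin * mdiv) / (pdiv << sdiv); e.g. for pll_shared0,
+ * 26 MHz * 328 / (4 << 0) = 2132 MHz, matching the table entry.
+ */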
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_1051x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0,
+ pll_shared0_rate_table),
+ PLL(pll_1051x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1,
+ pll_shared1_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON0_PLL_SHARED2,
+ pll_shared2_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON0_PLL_SHARED3,
+ pll_shared3_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON0_PLL_SHARED4,
+ pll_shared4_rate_table),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_cp2ap_mif_clk_user_p) = { "oscclk" };
+PNAME(mout_cmu_abox_cpuabox_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_bus1_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_busc_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_busc_busphsi2c_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2",
+ "oscclk" };
+PNAME(mout_cmu_cam_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_tpu0_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_tpu1_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_vra_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_dbg_bus_p) = { "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4" };
+PNAME(mout_cmu_dcam_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dcam_imgd_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_shared0_div2",
+ "fout_shared3_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_droopdetector_p) = { "oscclk", "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_fsys0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_mmc_embd_p) = { "oscclk", "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_fsys0_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_usbdrd30_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_fsys1_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_fsys1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_fsys1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_fsys1_ufs_card_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_g2d_g2d_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_jpeg_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_hpm_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_imem_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_isphq_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_isplp_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_iva_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_mfc_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "mout_cp2ap_mif_clk_user",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi00_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi01_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi02_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi03_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_speedy2_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_peric1_spi_cam0_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_spi_cam1_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_uart_bt_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi04_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi05_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi06_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi07_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi08_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi09_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi10_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi11_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi12_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi13_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_srdz_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_srdz_imgd_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_vpu_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON0_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates remove _UID _BLK _IPCLKPORT and _RSTNSYNC
+ */
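+
+/*
+ * Worked example of the mangling above (derived from the rules, not extra
+ * mappings): CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS yields CLK_MOUT_CMU_PERIS_BUS
+ * and "mout_cmu_peris_bus"; CLK_CON_DIV_CLKCMU_PERIS_BUS yields
+ * CLK_DOUT_CMU_PERIS_BUS and "dout_cmu_peris_bus".
+ */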
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_CP2AP_MIF_CLK_USER, "mout_cp2ap_mif_clk_user",
+ mout_cp2ap_mif_clk_user_p, PLL_CON0_MUX_CP2AP_MIF_CLK_USER, 4, 1),
+ MUX(CLK_MOUT_CMU_ABOX_CPUABOX, "mout_cmu_abox_cpuabox",
+ mout_cmu_abox_cpuabox_p, CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus", mout_cmu_apm_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus", mout_cmu_bus1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUSC_BUS, "mout_cmu_busc_bus", mout_cmu_busc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_BUSC_BUSPHSI2C, "mout_cmu_busc_busphsi2c",
+ mout_cmu_busc_busphsi2c_p, CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_BUS, "mout_cmu_cam_bus", mout_cmu_cam_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_TPU0, "mout_cmu_cam_tpu0", mout_cmu_cam_tpu0_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_TPU1, "mout_cmu_cam_tpu1", mout_cmu_cam_tpu1_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_VRA, "mout_cmu_cam_vra", mout_cmu_cam_vra_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_VRA, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0", mout_cmu_cis_clk0_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1", mout_cmu_cis_clk1_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2", mout_cmu_cis_clk2_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3", mout_cmu_cis_clk3_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_DBG_BUS, "mout_cmu_dbg_bus", mout_cmu_dbg_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DBG_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DCAM_BUS, "mout_cmu_dcam_bus", mout_cmu_dcam_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DCAM_IMGD, "mout_cmu_dcam_imgd", mout_cmu_dcam_imgd_p,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD, 0, 2),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus", mout_cmu_dpu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DROOPDETECTOR, "mout_cmu_droopdetector",
+ mout_cmu_droopdetector_p, CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus", mout_cmu_dsp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_BUS, "mout_fsys0_bus", mout_fsys0_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_DPGTC, "mout_fsys0_dpgtc", mout_fsys0_dpgtc_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_MMC_EMBD, "mout_fsys0_mmc_embd",
+ mout_fsys0_mmc_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_FSYS0_UFS_EMBD, "mout_fsys0_ufs_embd",
+ mout_fsys0_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_USBDRD30, "mout_fsys0_usbdrd30",
+ mout_fsys0_usbdrd30_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS1_BUS, "mout_cmu_fsys1_bus", mout_cmu_fsys1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_FSYS1_MMC_CARD, "mout_cmu_fsys1_mmc_card",
+ mout_cmu_fsys1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_FSYS1_PCIE, "mout_cmu_fsys1_pcie",
+ mout_cmu_fsys1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_FSYS1_UFS_CARD, "mout_cmu_fsys1_ufs_card",
+ mout_cmu_fsys1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d", mout_cmu_g2d_g2d_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_JPEG, "mout_cmu_g2d_jpeg", mout_cmu_g2d_jpeg_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG, 0, 2),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm", mout_cmu_hpm_p,
+ CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_IMEM_BUS, "mout_cmu_imem_bus", mout_cmu_imem_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_ISPHQ_BUS, "mout_cmu_isphq_bus", mout_cmu_isphq_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_ISPLP_BUS, "mout_cmu_isplp_bus", mout_cmu_isplp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_IVA_BUS, "mout_cmu_iva_bus", mout_cmu_iva_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IVA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC_BUS, "mout_cmu_mfc_bus", mout_cmu_mfc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_UART_DBG, "mout_cmu_peric0_uart_dbg",
+ mout_cmu_peric0_uart_dbg_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI00, "mout_cmu_peric0_usi00",
+ mout_cmu_peric0_usi00_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI01, "mout_cmu_peric0_usi01",
+ mout_cmu_peric0_usi01_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI02, "mout_cmu_peric0_usi02",
+ mout_cmu_peric0_usi02_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI03, "mout_cmu_peric0_usi03",
+ mout_cmu_peric0_usi03_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_SPEEDY2, "mout_cmu_peric1_speedy2",
+ mout_cmu_peric1_speedy2_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM0, "mout_cmu_peric1_spi_cam0",
+ mout_cmu_peric1_spi_cam0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM1, "mout_cmu_peric1_spi_cam1",
+ mout_cmu_peric1_spi_cam1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_UART_BT, "mout_cmu_peric1_uart_bt",
+ mout_cmu_peric1_uart_bt_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI04, "mout_cmu_peric1_usi04",
+ mout_cmu_peric1_usi04_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI05, "mout_cmu_peric1_usi05",
+ mout_cmu_peric1_usi05_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI06, "mout_cmu_peric1_usi06",
+ mout_cmu_peric1_usi06_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI07, "mout_cmu_peric1_usi07",
+ mout_cmu_peric1_usi07_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI08, "mout_cmu_peric1_usi08",
+ mout_cmu_peric1_usi08_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI09, "mout_cmu_peric1_usi09",
+ mout_cmu_peric1_usi09_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI10, "mout_cmu_peric1_usi10",
+ mout_cmu_peric1_usi10_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI11, "mout_cmu_peric1_usi11",
+ mout_cmu_peric1_usi11_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI12, "mout_cmu_peric1_usi12",
+ mout_cmu_peric1_usi12_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI13, "mout_cmu_peric1_usi13",
+ mout_cmu_peric1_usi13_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus", mout_cmu_peris_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SRDZ_BUS, "mout_cmu_srdz_bus", mout_cmu_srdz_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_SRDZ_IMGD, "mout_cmu_srdz_imgd", mout_cmu_srdz_imgd_p,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD, 0, 2),
+ MUX(CLK_MOUT_CMU_VPU_BUS, "mout_cmu_vpu_bus", mout_cmu_vpu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_VPU_BUS, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMU_ABOX_CPUABOX, "dout_cmu_abox_cpuabox",
+ "gout_cmu_abox_cpuabox", CLK_CON_DIV_CLKCMU_ABOX_CPUABOX, 0, 3),
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUSC_BUS, "dout_cmu_busc_bus",
+ "gout_cmu_busc_bus", CLK_CON_DIV_CLKCMU_BUSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUSC_BUSPHSI2C, "dout_cmu_busc_busphsi2c",
+ "gout_cmu_busc_busphsi2c", CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_BUS, "dout_cmu_cam_bus", "gout_cmu_cam_bus",
+ CLK_CON_DIV_CLKCMU_CAM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_TPU0, "dout_cmu_cam_tpu0", "gout_cmu_cam_tpu0",
+ CLK_CON_DIV_CLKCMU_CAM_TPU0, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_TPU1, "dout_cmu_cam_tpu1", "gout_cmu_cam_tpu1",
+ CLK_CON_DIV_CLKCMU_CAM_TPU1, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_VRA, "dout_cmu_cam_vra", "gout_cmu_cam_vra",
+ CLK_CON_DIV_CLKCMU_CAM_VRA, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_DBG_BUS, "dout_cmu_dbg_bus", "gout_cmu_dbg_bus",
+ CLK_CON_DIV_CLKCMU_DBG_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DCAM_BUS, "dout_cmu_dcam_bus", "gout_cmu_dcam_bus",
+ CLK_CON_DIV_CLKCMU_DCAM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DCAM_IMGD, "dout_cmu_dcam_imgd", "gout_cmu_dcam_imgd",
+ CLK_CON_DIV_CLKCMU_DCAM_IMGD, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU_BUS, "dout_cmu_dpu_bus", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_CLKCMU_DPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS0_BUS, "dout_cmu_fsys0_bus", "gout_cmu_fsys0_bus",
+ CLK_CON_DIV_CLKCMU_FSYS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS0_DPGTC, "dout_cmu_fsys0_dpgtc",
+ "gout_cmu_fsys0_dpgtc", CLK_CON_DIV_CLKCMU_FSYS0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS0_MMC_EMBD, "dout_cmu_fsys0_mmc_embd",
+ "gout_cmu_fsys0_mmc_embd", CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS0_UFS_EMBD, "dout_cmu_fsys0_ufs_embd",
+ "gout_cmu_fsys0_ufs_embd", CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS0_USBDRD30, "dout_cmu_fsys0_usbdrd30",
+ "gout_cmu_fsys0_usbdrd30", CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS1_BUS, "dout_cmu_fsys1_bus", "gout_cmu_fsys1_bus",
+ CLK_CON_DIV_CLKCMU_FSYS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS1_MMC_CARD, "dout_cmu_fsys1_mmc_card",
+ "gout_cmu_fsys1_mmc_card", CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS1_UFS_CARD, "dout_cmu_fsys1_ufs_card",
+ "gout_cmu_fsys1_ufs_card", CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_JPEG, "dout_cmu_g2d_jpeg", "gout_cmu_g2d_jpeg",
+ CLK_CON_DIV_CLKCMU_G2D_JPEG, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_IMEM_BUS, "dout_cmu_imem_bus", "gout_cmu_imem_bus",
+ CLK_CON_DIV_CLKCMU_IMEM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ISPHQ_BUS, "dout_cmu_isphq_bus", "gout_cmu_isphq_bus",
+ CLK_CON_DIV_CLKCMU_ISPHQ_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ISPLP_BUS, "dout_cmu_isplp_bus", "gout_cmu_isplp_bus",
+ CLK_CON_DIV_CLKCMU_ISPLP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_IVA_BUS, "dout_cmu_iva_bus", "gout_cmu_iva_bus",
+ CLK_CON_DIV_CLKCMU_IVA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC_BUS, "dout_cmu_mfc_bus", "gout_cmu_mfc_bus",
+ CLK_CON_DIV_CLKCMU_MFC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MODEM_SHARED0, "dout_cmu_modem_shared0",
+ "gout_cmu_modem_shared0", CLK_CON_DIV_CLKCMU_MODEM_SHARED0, 0, 3),
+ DIV(CLK_DOUT_CMU_MODEM_SHARED1, "dout_cmu_modem_shared1",
+ "gout_cmu_modem_shared1", CLK_CON_DIV_CLKCMU_MODEM_SHARED1, 0, 3),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus",
+ "gout_cmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_UART_DBG, "dout_cmu_peric0_uart_dbg",
+ "gout_cmu_peric0_uart_dbg", CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI00, "dout_cmu_peric0_usi00",
+ "gout_cmu_peric0_usi00", CLK_CON_DIV_CLKCMU_PERIC0_USI00, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI01, "dout_cmu_peric0_usi01",
+ "gout_cmu_peric0_usi01", CLK_CON_DIV_CLKCMU_PERIC0_USI01, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI02, "dout_cmu_peric0_usi02",
+ "gout_cmu_peric0_usi02", CLK_CON_DIV_CLKCMU_PERIC0_USI02, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI03, "dout_cmu_peric0_usi03",
+ "gout_cmu_peric0_usi03", CLK_CON_DIV_CLKCMU_PERIC0_USI03, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus",
+ "gout_cmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPEEDY2, "dout_cmu_peric1_speedy2",
+ "gout_cmu_peric1_speedy2", CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM0, "dout_cmu_peric1_spi_cam0",
+ "gout_cmu_peric1_spi_cam0", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM1, "dout_cmu_peric1_spi_cam1",
+ "gout_cmu_peric1_spi_cam1", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_UART_BT, "dout_cmu_peric1_uart_bt",
+ "gout_cmu_peric1_uart_bt", CLK_CON_DIV_CLKCMU_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI04, "dout_cmu_peric1_usi04",
+ "gout_cmu_peric1_usi04", CLK_CON_DIV_CLKCMU_PERIC1_USI04, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI05, "dout_cmu_peric1_usi05",
+ "gout_cmu_peric1_usi05", CLK_CON_DIV_CLKCMU_PERIC1_USI05, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI06, "dout_cmu_peric1_usi06",
+ "gout_cmu_peric1_usi06", CLK_CON_DIV_CLKCMU_PERIC1_USI06, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI07, "dout_cmu_peric1_usi07",
+ "gout_cmu_peric1_usi07", CLK_CON_DIV_CLKCMU_PERIC1_USI07, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI08, "dout_cmu_peric1_usi08",
+ "gout_cmu_peric1_usi08", CLK_CON_DIV_CLKCMU_PERIC1_USI08, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI09, "dout_cmu_peric1_usi09",
+ "gout_cmu_peric1_usi09", CLK_CON_DIV_CLKCMU_PERIC1_USI09, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI10, "dout_cmu_peric1_usi10",
+ "gout_cmu_peric1_usi10", CLK_CON_DIV_CLKCMU_PERIC1_USI10, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI11, "dout_cmu_peric1_usi11",
+ "gout_cmu_peric1_usi11", CLK_CON_DIV_CLKCMU_PERIC1_USI11, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI12, "dout_cmu_peric1_usi12",
+ "gout_cmu_peric1_usi12", CLK_CON_DIV_CLKCMU_PERIC1_USI12, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI13, "dout_cmu_peric1_usi13",
+ "gout_cmu_peric1_usi13", CLK_CON_DIV_CLKCMU_PERIC1_USI13, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SRDZ_BUS, "dout_cmu_srdz_bus", "gout_cmu_srdz_bus",
+ CLK_CON_DIV_CLKCMU_SRDZ_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SRDZ_IMGD, "dout_cmu_srdz_imgd", "gout_cmu_srdz_imgd",
+ CLK_CON_DIV_CLKCMU_SRDZ_IMGD, 0, 4),
+ DIV(CLK_DOUT_CMU_VPU_BUS, "dout_cmu_vpu_bus", "gout_cmu_vpu_bus",
+ CLK_CON_DIV_CLKCMU_VPU_BUS, 0, 4),
+};
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2",
+ "mout_pll_shared0", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4",
+ "mout_pll_shared0", 1, 4, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2",
+ "mout_pll_shared1", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4",
+ "mout_pll_shared1", 1, 4, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2",
+ "mout_pll_shared2", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED3_DIV2, "dout_cmu_shared3_div2",
+ "mout_pll_shared3", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2",
+ "mout_pll_shared4", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_FSYS1_PCIE, "dout_cmu_fsys1_pcie",
+ "gout_cmu_fsys1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2, "dout_cmu_cp2ap_mif_clk_div2",
+ "mout_cp2ap_mif_clk_user", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_CMU_OTP, "dout_cmu_cmu_otp", "oscclk", 1, 8, 0),
+};
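+
+/*
+ * Worked example: FFACTOR(id, name, parent, mult, div, flags) scales its
+ * parent by mult/div, so with pll_shared0 locked at 2132 MHz the factors
+ * above give 1066 MHz on "dout_cmu_shared0_div2" and 533 MHz on
+ * "dout_cmu_shared0_div4".
+ */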
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_DROOPDETECTOR, "gout_cmu_droopdetector",
+ "mout_cmu_droopdetector", CLK_CON_GAT_CLKCMU_DROOPDETECTOR,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_SWITCH, "gout_cmu_mif_switch",
+ "mout_cmu_mif_switch", CLK_CON_GAT_CLKCMU_MIF_SWITCH, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ABOX_CPUABOX, "gout_cmu_abox_cpuabox",
+ "mout_cmu_abox_cpuabox", CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUSC_BUS, "gout_cmu_busc_bus", "mout_cmu_busc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUSC_BUSPHSI2C, "gout_cmu_busc_busphsi2c",
+ "mout_cmu_busc_busphsi2c", CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CAM_BUS, "gout_cmu_cam_bus", "mout_cmu_cam_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_TPU0, "gout_cmu_cam_tpu0", "mout_cmu_cam_tpu0",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_TPU1, "gout_cmu_cam_tpu1", "mout_cmu_cam_tpu1",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_VRA, "gout_cmu_cam_vra", "mout_cmu_cam_vra",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_VRA, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DBG_BUS, "gout_cmu_dbg_bus", "mout_cmu_dbg_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DBG_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DCAM_BUS, "gout_cmu_dcam_bus", "mout_cmu_dcam_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DCAM_IMGD, "gout_cmu_dcam_imgd",
+ "mout_cmu_dcam_imgd", CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_BUS, "gout_cmu_fsys0_bus", "mout_fsys0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_DPGTC, "gout_cmu_fsys0_dpgtc",
+ "mout_fsys0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_MMC_EMBD, "gout_cmu_fsys0_mmc_embd",
+ "mout_fsys0_mmc_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_UFS_EMBD, "gout_cmu_fsys0_ufs_embd",
+ "mout_fsys0_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_USBDRD30, "gout_cmu_fsys0_usbdrd30",
+ "mout_fsys0_usbdrd30", CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_BUS, "gout_cmu_fsys1_bus",
+ "mout_cmu_fsys1_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_MMC_CARD, "gout_cmu_fsys1_mmc_card",
+ "mout_cmu_fsys1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_PCIE, "gout_cmu_fsys1_pcie",
+ "mout_cmu_fsys1_pcie", CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_UFS_CARD, "gout_cmu_fsys1_ufs_card",
+ "mout_cmu_fsys1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_JPEG, "gout_cmu_g2d_jpeg", "mout_cmu_g2d_jpeg",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IMEM_BUS, "gout_cmu_imem_bus", "mout_cmu_imem_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ISPHQ_BUS, "gout_cmu_isphq_bus",
+ "mout_cmu_isphq_bus", CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ISPLP_BUS, "gout_cmu_isplp_bus",
+ "mout_cmu_isplp_bus", CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IVA_BUS, "gout_cmu_iva_bus", "mout_cmu_iva_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IVA_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC_BUS, "gout_cmu_mfc_bus", "mout_cmu_mfc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MFC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MODEM_SHARED0, "gout_cmu_modem_shared0",
+ "dout_cmu_shared0_div2", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MODEM_SHARED1, "gout_cmu_modem_shared1",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_UART_DBG, "gout_cmu_peric0_uart_dbg",
+ "mout_cmu_peric0_uart_dbg",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI00, "gout_cmu_peric0_usi00",
+ "mout_cmu_peric0_usi00", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI01, "gout_cmu_peric0_usi01",
+ "mout_cmu_peric0_usi01", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI02, "gout_cmu_peric0_usi02",
+ "mout_cmu_peric0_usi02", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI03, "gout_cmu_peric0_usi03",
+ "mout_cmu_peric0_usi03", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPEEDY2, "gout_cmu_peric1_speedy2",
+ "mout_cmu_peric1_speedy2", CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM0, "gout_cmu_peric1_spi_cam0",
+ "mout_cmu_peric1_spi_cam0",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM1, "gout_cmu_peric1_spi_cam1",
+ "mout_cmu_peric1_spi_cam1",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_UART_BT, "gout_cmu_peric1_uart_bt",
+ "mout_cmu_peric1_uart_bt", CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI04, "gout_cmu_peric1_usi04",
+ "mout_cmu_peric1_usi04", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI05, "gout_cmu_peric1_usi05",
+ "mout_cmu_peric1_usi05", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI06, "gout_cmu_peric1_usi06",
+ "mout_cmu_peric1_usi06", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI07, "gout_cmu_peric1_usi07",
+ "mout_cmu_peric1_usi07", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI08, "gout_cmu_peric1_usi08",
+ "mout_cmu_peric1_usi08", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI09, "gout_cmu_peric1_usi09",
+ "mout_cmu_peric1_usi09", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI10, "gout_cmu_peric1_usi10",
+ "mout_cmu_peric1_usi10", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI11, "gout_cmu_peric1_usi11",
+ "mout_cmu_peric1_usi11", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI12, "gout_cmu_peric1_usi12",
+ "mout_cmu_peric1_usi12", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI13, "gout_cmu_peric1_usi13",
+ "mout_cmu_peric1_usi13", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_SRDZ_BUS, "gout_cmu_srdz_bus", "mout_cmu_srdz_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_SRDZ_IMGD, "gout_cmu_srdz_imgd",
+ "mout_cmu_srdz_imgd", CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VPU_BUS, "gout_cmu_vpu_bus", "mout_cmu_vpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VPU_BUS, 21, 0, 0),
+};
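+
+/*
+ * In the GATE() entries above, bit 21 is the gate enable bit, which is the
+ * common layout of the CLK_CON_GAT_* registers on this generation of Exynos
+ * CMUs; the two trailing zeros are the common clk framework flags and the
+ * gate-specific flags.
+ */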
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos8895_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos8895_cmu_top, "samsung,exynos8895-cmu-top",
+ exynos8895_cmu_top_init);
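+
+/*
+ * A minimal sketch of the devicetree node this matches against; the register
+ * base, window size and oscclk phandle below are illustrative assumptions,
+ * not values taken from this patch:
+ *
+ *	cmu_top: clock-controller@15a80000 {
+ *		compatible = "samsung,exynos8895-cmu-top";
+ *		reg = <0x15a80000 0x8000>;
+ *		#clock-cells = <1>;
+ *		clocks = <&oscclk>;
+ *		clock-names = "oscclk";
+ *	};
+ */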
+
+/* ---- CMU_PERIS ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10010000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER 0x0108
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK 0x2090
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK,
+};
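+
+/*
+ * The clk_regs array is the register dump list the Samsung clock core saves
+ * on suspend and restores on resume; that is why the PLL_CON2_* offsets are
+ * listed here even though no clock definition below references them.
+ */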
+
+/* List of parent clocks for Muxes in CMU_PERIS */
+PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_cmu_peris_bus" };
+PNAME(mout_peris_gic_p) = { "oscclk", "mout_peris_bus_user" };
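+
+/*
+ * For these muxes, parent index 0 is "oscclk": selecting it bypasses the
+ * clock delivered from CMU_TOP and falls back to the crystal oscillator,
+ * the usual convention in these Exynos CMU drivers.
+ */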
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user",
+ mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_GIC, "mout_peris_gic",
+	    mout_peris_gic_p, CLK_CON_MUX_MUX_CLK_PERIS_GIC, 0, 1),
+};
+
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIS_CMU_PERIS_PCLK, "gout_peris_cmu_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM,
+ "gout_peris_ad_axi_p_peris_aclkm", "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS,
+ "gout_peris_ad_axi_p_peris_aclks", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK,
+ "gout_peris_axi2apb_perisp0_aclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK,
+ "gout_peris_axi2apb_perisp1_aclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_BUSIF_TMU_PCLK, "gout_peris_busif_tmu_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK,
+ 21, 0, 0),
+	/* The GIC (interrupt controller) clock must always be running */
+ GATE(CLK_GOUT_PERIS_GIC_CLK, "gout_peris_gic_clk",
+ "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK,
+ "gout_peris_lhm_axi_p_peris_i_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_MCT_PCLK, "gout_peris_mct_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK, "gout_peris_otp_con_bira_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_PCLK, "gout_peris_otp_con_top_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_PMU_PERIS_PCLK, "gout_peris_pmu_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK,
+ "gout_peris_rstnsync_clk_peris_busp_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK,
+ "gout_peris_rstnsync_clk_peris_gic_clk", "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_SYSREG_PERIS_PCLK, "gout_peris_sysreg_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC00_PCLK, "gout_peris_tzpc00_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC01_PCLK, "gout_peris_tzpc01_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC02_PCLK, "gout_peris_tzpc02_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC03_PCLK, "gout_peris_tzpc03_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC04_PCLK, "gout_peris_tzpc04_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC05_PCLK, "gout_peris_tzpc05_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC06_PCLK, "gout_peris_tzpc06_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC07_PCLK, "gout_peris_tzpc07_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC08_PCLK, "gout_peris_tzpc08_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC09_PCLK, "gout_peris_tzpc09_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC10_PCLK, "gout_peris_tzpc10_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC11_PCLK, "gout_peris_tzpc11_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC12_PCLK, "gout_peris_tzpc12_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC13_PCLK, "gout_peris_tzpc13_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC14_PCLK, "gout_peris_tzpc14_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC15_PCLK, "gout_peris_tzpc15_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK, "gout_peris_wdt_cluster0_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK, "gout_peris_wdt_cluster1_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_XIU_P_PERIS_ACLK, "gout_peris_xiu_p_peris_aclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
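+
+/*
+ * Flag usage above: CLK_IS_CRITICAL gates are enabled at registration and
+ * never turned off, while CLK_IGNORE_UNUSED gates are merely skipped by
+ * clk_disable_unused() at boot and may still be gated by consumers later.
+ */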
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+ .clk_name = "bus",
+};
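+
+/*
+ * .clk_name = "bus" refers to an entry in the device node's "clock-names":
+ * the parent bus clock coming from CMU_TOP, which the ARM64 CMU helper
+ * looks up and keeps enabled while this CMU's registers are accessed.
+ */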
+
+static void __init exynos8895_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's needed for the MCT timer */
+CLK_OF_DECLARE(exynos8895_cmu_peris, "samsung,exynos8895-cmu-peris",
+ exynos8895_cmu_peris_init);
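+
+/*
+ * A minimal sketch of a matching devicetree node, using the 0x10010000 base
+ * noted above; the window size and phandles are illustrative assumptions:
+ *
+ *	cmu_peris: clock-controller@10010000 {
+ *		compatible = "samsung,exynos8895-cmu-peris";
+ *		reg = <0x10010000 0x8000>;
+ *		#clock-cells = <1>;
+ *		clocks = <&oscclk>, <&cmu_top CLK_GOUT_CMU_PERIS_BUS>;
+ *		clock-names = "oscclk", "bus";
+ *	};
+ */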
+
+/* ---- CMU_FSYS0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS0 (0x11000000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0188
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN 0x2054
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK 0x209c
+
+static const unsigned long fsys0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS0 */
+PNAME(mout_fsys0_bus_user_p) = { "oscclk", "dout_cmu_fsys0_bus" };
+PNAME(mout_fsys0_dpgtc_user_p) = { "oscclk", "dout_cmu_fsys0_dpgtc" };
+PNAME(mout_fsys0_mmc_embd_user_p) = { "oscclk",
+ "dout_cmu_fsys0_mmc_embd" };
+PNAME(mout_fsys0_ufs_embd_user_p) = { "oscclk",
+ "dout_cmu_fsys0_ufs_embd" };
+PNAME(mout_fsys0_usbdrd30_user_p) = { "oscclk",
+ "dout_cmu_fsys0_usbdrd30" };
+
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS0_BUS_USER, "mout_fsys0_bus_user",
+ mout_fsys0_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS0_DPGTC_USER, "mout_fsys0_dpgtc_user",
+ mout_fsys0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ 4, 1),
+ MUX_F(CLK_MOUT_FSYS0_MMC_EMBD_USER, "mout_fsys0_mmc_embd_user",
+ mout_fsys0_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_FSYS0_UFS_EMBD_USER, "mout_fsys0_ufs_embd_user",
+ mout_fsys0_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS0_USBDRD30_USER, "mout_fsys0_usbdrd30_user",
+ mout_fsys0_usbdrd30_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ 4, 1),
+};
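+
+/*
+ * The CLK_SET_RATE_PARENT flag on mout_fsys0_mmc_embd_user, paired with the
+ * same flag on the SDCLKIN gate below, forms a rate-propagation path: a
+ * clk_set_rate() call from the MMC host driver travels up through this mux
+ * to the divider in CMU_TOP.
+ */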
+
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK,
+ "gout_fsys0_fsys0_cmu_fsys0_pclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK, "gout_fsys0_ahbbr_fsys0_hclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK,
+ "gout_fsys0_axi2ahb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK,
+ "gout_fsys0_axi2ahb_usb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK,
+ "gout_fsys0_axi2apb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK, "gout_fsys0_btm_fsys0_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK, "gout_fsys0_btm_fsys0_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK,
+ "gout_fsys0_dp_link_i_gtc_ext_clk", "mout_fsys0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_DP_LINK_I_PCLK, "gout_fsys0_dp_link_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_ETR_MIU_I_ACLK, "gout_fsys0_etr_miu_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_ETR_MIU_I_PCLK, "gout_fsys0_etr_miu_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK, "gout_fsys0_gpio_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK,
+ "gout_fsys0_lhm_axi_d_usbtv_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK,
+ "gout_fsys0_lhm_axi_g_etr_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK,
+ "gout_fsys0_lhm_axi_p_fsys0_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK,
+ "gout_fsys0_lhs_acel_d_fsys0_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK, "gout_fsys0_mmc_embd_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN, "gout_fsys0_mmc_embd_sdclkin",
+ "mout_fsys0_mmc_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS0_PMU_FSYS0_PCLK, "gout_fsys0_pmu_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BCM_FSYS0_ACLK, "gout_fsys0_bcm_fsys0_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BCM_FSYS0_PCLK, "gout_fsys0_bcm_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK,
+ "gout_fsys0_rstnsync_clk_fsys0_bus_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK, "gout_fsys0_sysreg_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK, "gout_fsys0_ufs_embd_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO,
+ "gout_fsys0_ufs_embd_i_clk_unipro", "mout_fsys0_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK,
+ "gout_fsys0_ufs_embd_i_fmp_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK,
+ "gout_fsys0_usbtv_i_usb30drd_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK,
+ "gout_fsys0_usbtv_i_usb30drd_ref_clk", "mout_fsys0_usbdrd30_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK,
+ "gout_fsys0_usbtv_i_usb30drd_suspend_clk",
+ "mout_fsys0_usbdrd30_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_ahb_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_core_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_xiu_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK,
+ "gout_fsys0_us_d_fsys0_usb_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK, "gout_fsys0_xiu_d_fsys0_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK,
+ "gout_fsys0_xiu_d_fsys0_usb_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK,
+ "gout_fsys0_xiu_p_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .nr_clk_ids = CLKS_NR_FSYS0,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+ .clk_name = "bus",
+};
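+
+/*
+ * Unlike CMU_TOP and CMU_PERIS, CMU_FSYS0 has no CLK_OF_DECLARE() entry; it
+ * is presumably registered through the platform driver match table later in
+ * the file, as is usual for non-early CMUs in the Exynos ARM64 clock drivers.
+ */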
+
+/* ---- CMU_FSYS1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS1 (0x11400000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0168
+#define CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN 0x2000
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM 0x2004
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2038
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0 0x203c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1 0x2040
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0 0x2048
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK 0x20ac
+
+static const unsigned long fsys1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS1 */
+PNAME(mout_fsys1_bus_user_p) = { "oscclk", "dout_cmu_fsys1_bus" };
+PNAME(mout_fsys1_mmc_card_user_p) = { "oscclk",
+ "dout_cmu_fsys1_mmc_card" };
+PNAME(mout_fsys1_pcie_user_p) = { "oscclk", "dout_cmu_fsys1_pcie" };
+PNAME(mout_fsys1_ufs_card_user_p) = { "oscclk",
+ "dout_cmu_fsys1_ufs_card" };
+
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS1_BUS_USER, "mout_fsys1_bus_user",
+ mout_fsys1_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_FSYS1_MMC_CARD_USER, "mout_fsys1_mmc_card_user",
+ mout_fsys1_mmc_card_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_FSYS1_PCIE_USER, "mout_fsys1_pcie_user",
+ mout_fsys1_pcie_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS1_UFS_CARD_USER, "mout_fsys1_ufs_card_user",
+ mout_fsys1_ufs_card_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN,
+ "gout_clk_blk_fsys1_pcie_phy_ref_clk_in", "mout_fsys1_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM, "gout_fsys1_adm_ahb_sss_hclkm",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK, "gout_fsys1_ahbbr_fsys1_hclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK,
+ "gout_fsys1_axi2ahb_fsys1_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK,
+ "gout_fsys1_axi2apb_fsys1p0_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK,
+ "gout_fsys1_axi2apb_fsys1p1_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK, "gout_fsys1_btm_fsys1_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK, "gout_fsys1_btm_fsys1_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK,
+ "gout_fsys1_fsys1_cmu_fsys1_pclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK, "gout_fsys1_gpio_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK,
+ "gout_fsys1_lhm_axi_p_fsys1_i_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK,
+ "gout_fsys1_lhs_acel_d_fsys1_i_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_I_ACLK, "gout_fsys1_mmc_card_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN, "gout_fsys1_mmc_card_sdclkin",
+ "mout_fsys1_mmc_card_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0, "gout_fsys1_pcie_dbi_aclk_0",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1, "gout_fsys1_pcie_dbi_aclk_1",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ "gout_fsys1_pcie_ieee1500_wrapper_for_pcie_phy_lc_x2_inst_0_i_scl_apb_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0,
+ "gout_fsys1_pcie_mstr_aclk_0", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1,
+ "gout_fsys1_pcie_mstr_aclk_1", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ "gout_fsys1_pcie_pcie_sub_ctrl_inst_0_i_driver_apb_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ "gout_fsys1_pcie_pcie_sub_ctrl_inst_1_i_driver_apb_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ "gout_fsys1_pcie_pipe2_digital_x2_wrap_inst_0_i_apb_pclk_scl",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0, "gout_fsys1_pcie_slv_aclk_0",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1, "gout_fsys1_pcie_slv_aclk_1",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PMU_FSYS1_PCLK, "gout_fsys1_pmu_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BCM_FSYS1_ACLK, "gout_fsys1_bcm_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BCM_FSYS1_PCLK, "gout_fsys1_bcm_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK,
+ "gout_fsys1_rstnsync_clk_fsys1_bus_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RTIC_I_ACLK, "gout_fsys1_rtic_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RTIC_I_PCLK, "gout_fsys1_rtic_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SSS_I_ACLK, "gout_fsys1_sss_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SSS_I_PCLK, "gout_fsys1_sss_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK, "gout_fsys1_sysreg_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK, "gout_fsys1_toe_wifi0_i_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK, "gout_fsys1_toe_wifi1_i_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_ACLK, "gout_fsys1_ufs_card_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO,
+ "gout_fsys1_ufs_card_i_clk_unipro", "mout_fsys1_ufs_card_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK,
+ "gout_fsys1_ufs_card_i_fmp_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK, "gout_fsys1_xiu_d_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK, "gout_fsys1_xiu_p_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .nr_clk_ids = CLKS_NR_FSYS1,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER 0x0188
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER 0x01a0
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER 0x01a8
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI 0x205c
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_user_p) = { "oscclk",
+ "dout_cmu_peric0_uart_dbg" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi00" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi01" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi02" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi03" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG_USER, "mout_peric0_uart_dbg_user",
+ mout_peric0_uart_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USER, "mout_peric0_usi00_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USER, "mout_peric0_usi01_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USER, "mout_peric0_usi02_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USER, "mout_peric0_usi03_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK,
+	    "gout_peric0_peric0_cmu_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK,
+ "gout_peric0_axi2apb_peric0_aclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK, "gout_peric0_gpio_peric0_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK,
+ "gout_peric0_lhm_axi_p_peric0_i_clk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_PMU_PERIC0_PCLK, "gout_peric0_pmu_peric0_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PWM_I_PCLK_S0, "gout_peric0_pwm_i_pclk_s0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK,
+ "gout_peric0_rstnsync_clk_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK, "gout_peric0_speedy2_tsp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK,
+ "gout_peric0_sysreg_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK,
+ "gout_peric0_uart_dbg_ext_uclk", "mout_peric0_uart_dbg_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_PCLK, "gout_peric0_uart_dbg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_I_PCLK, "gout_peric0_usi00_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_I_SCLK_USI, "gout_peric0_usi00_i_sclk_usi",
+ "mout_peric0_usi00_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_I_PCLK, "gout_peric0_usi01_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_I_SCLK_USI, "gout_peric0_usi01_i_sclk_usi",
+ "mout_peric0_usi01_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_I_PCLK, "gout_peric0_usi02_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_I_SCLK_USI, "gout_peric0_usi02_i_sclk_usi",
+ "mout_peric0_usi02_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_I_PCLK, "gout_peric0_usi03_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_I_SCLK_USI, "gout_peric0_usi03_i_sclk_usi",
+ "mout_peric0_usi03_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+};
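+
+/*
+ * Each USI/UART instance above has two gates: I_PCLK (the APB register
+ * interface) fed from the bus mux, and I_SCLK_USI/EXT_UCLK (the serial bit
+ * clock) fed from its own *_user mux, so baud-rate changes never disturb
+ * the shared bus clock.
+ */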
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0188
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER 0x01a0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER 0x01a8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER 0x01c0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER 0x01c8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER 0x01e0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER 0x01e8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER 0x0200
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER 0x0208
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER 0x0220
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER 0x0228
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER 0x0240
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER 0x0248
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER 0x0260
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER 0x0268
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER 0x0280
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER 0x0288
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER 0x02a0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER 0x02a8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER 0x02c0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER 0x02c8
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x20d4
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
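+
+/*
+ * These offsets are listed in .clk_regs below so that, as in the other
+ * Samsung CMU drivers, the register contents can be saved and restored
+ * across suspend/resume.
+ */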
+
+/* List of parent clocks for muxes in CMU_PERIC1 */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_speedy2_user_p) = { "oscclk",
+ "dout_cmu_peric1_speedy2" };
+PNAME(mout_peric1_spi_cam0_user_p) = { "oscclk",
+ "dout_cmu_peric1_spi_cam0" };
+PNAME(mout_peric1_spi_cam1_user_p) = { "oscclk",
+ "dout_cmu_peric1_spi_cam1" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk",
+ "dout_cmu_peric1_uart_bt" };
+PNAME(mout_peric1_usi04_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi04" };
+PNAME(mout_peric1_usi05_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi05" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi06" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi07" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi08" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi09" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi10" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi11" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi12" };
+PNAME(mout_peric1_usi13_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi13" };
+
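+/*
+ * Each "_USER" mux selects between the external "oscclk" crystal and the
+ * matching "dout_cmu_peric1_*" clock generated in CMU_TOP, so the block
+ * can presumably fall back to the slow oscillator when the TOP-provided
+ * clock is not needed.
+ */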
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPEEDY2_USER, "mout_peric1_speedy2_user",
+ mout_peric1_speedy2_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPI_CAM0_USER, "mout_peric1_spi_cam0_user",
+ mout_peric1_spi_cam0_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPI_CAM1_USER, "mout_peric1_spi_cam1_user",
+ mout_peric1_spi_cam1_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI04_USER, "mout_peric1_usi04_user",
+ mout_peric1_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI05_USER, "mout_peric1_usi05_user",
+ mout_peric1_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USER, "mout_peric1_usi06_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USER, "mout_peric1_usi07_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USER, "mout_peric1_usi08_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USER, "mout_peric1_usi09_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USER, "mout_peric1_usi10_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USER, "mout_peric1_usi11_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USER, "mout_peric1_usi12_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI13_USER, "mout_peric1_usi13_user",
+ mout_peric1_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER,
+ 4, 1),
+};
+
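+/*
+ * Every gate in this block uses bit 21 as its enable bit. The CMU's own
+ * PCLK, the AXI2APB bridges and the LHM_AXI port are CLK_IS_CRITICAL so
+ * the register interconnect is never gated; GPIO and the XIU switch are
+ * CLK_IGNORE_UNUSED so they are not disabled as unused at late init.
+ */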
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK,
+ "gout_peric1_peric1_cmu_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK,
+ "gout_peric1_rstnsync_clk_peric1_speedy2_clk",
+ "mout_peric1_speedy2_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK,
+ "gout_peric1_axi2apb_peric1p0_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK,
+ "gout_peric1_axi2apb_peric1p1_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK,
+ "gout_peric1_axi2apb_peric1p2_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK,
+ "gout_peric1_gpio_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK, "gout_peric1_hsi2c_cam0_ipclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK,
+ "gout_peric1_hsi2c_cam1_ipclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK,
+ "gout_peric1_hsi2c_cam2_ipclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK, "gout_peric1_hsi2c_cam3_ipclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK,
+ "gout_peric1_lhm_axi_p_peric1_i_clk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_PMU_PERIC1_PCLK, "gout_peric1_pmu_peric1_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK,
+ "gout_peric1_rstnsync_clk_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK, "gout_peric1_speedy2_ddi1_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK,
+ "gout_peric1_speedy2_ddi1_sclk", "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK, "gout_peric1_speedy2_ddi2_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK,
+ "gout_peric1_speedy2_ddi2_sclk", "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK, "gout_peric1_speedy2_ddi_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK, "gout_peric1_speedy2_ddi_sclk",
+ "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK, "gout_peric1_speedy2_tsp1_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK, "gout_peric1_speedy2_tsp2_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM0_PCLK, "gout_peric1_spi_cam0_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK,
+ "gout_peric1_spi_cam0_spi_ext_clk", "mout_peric1_spi_cam0_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM1_PCLK, "gout_peric1_spi_cam1_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK,
+ "gout_peric1_spi_cam1_spi_ext_clk", "mout_peric1_spi_cam1_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK,
+ "gout_peric1_sysreg_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_EXT_UCLK, "gout_peric1_uart_bt_ext_uclk",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_PCLK, "gout_peric1_uart_bt_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI04_I_PCLK, "gout_peric1_usi04_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI04_I_SCLK_USI, "gout_peric1_usi04_i_sclk_usi",
+ "mout_peric1_usi04_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI05_I_PCLK, "gout_peric1_usi05_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI05_I_SCLK_USI, "gout_peric1_usi05_i_sclk_usi",
+ "mout_peric1_usi05_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_I_PCLK, "gout_peric1_usi06_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_I_SCLK_USI, "gout_peric1_usi06_i_sclk_usi",
+ "mout_peric1_usi06_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_I_PCLK, "gout_peric1_usi07_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_I_SCLK_USI, "gout_peric1_usi07_i_sclk_usi",
+ "mout_peric1_usi07_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_I_PCLK, "gout_peric1_usi08_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_I_SCLK_USI, "gout_peric1_usi08_i_sclk_usi",
+ "mout_peric1_usi08_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_I_PCLK, "gout_peric1_usi09_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_I_SCLK_USI, "gout_peric1_usi09_i_sclk_usi",
+ "mout_peric1_usi09_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_I_PCLK, "gout_peric1_usi10_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_I_SCLK_USI, "gout_peric1_usi10_i_sclk_usi",
+ "mout_peric1_usi10_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_I_PCLK, "gout_peric1_usi11_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_I_SCLK_USI, "gout_peric1_usi11_i_sclk_usi",
+ "mout_peric1_usi11_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_I_PCLK, "gout_peric1_usi12_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_I_SCLK_USI, "gout_peric1_usi12_i_sclk_usi",
+ "mout_peric1_usi12_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI13_I_PCLK, "gout_peric1_usi13_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI13_I_SCLK_USI, "gout_peric1_usi13_i_sclk_usi",
+ "mout_peric1_usi13_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK,
+ "gout_peric1_xiu_p_peric1_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
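+/*
+ * .clk_name names this CMU's bus clock as listed in the devicetree node;
+ * the common arm64 registration code looks it up and keeps it enabled so
+ * that the CMU registers stay accessible.
+ */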
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
+static int __init exynos8895_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
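+
+ /*
+ * The common arm64 CMU helper maps the registers, enables the CMU
+ * bus clock named by @info and registers every listed clock; it
+ * returns void, hence the unconditional success below.
+ */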
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos8895_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos8895-cmu-fsys0",
+ .data = &fsys0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-fsys1",
+ .data = &fsys1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-peric1",
+ .data = &peric1_cmu_info,
+ },
+ { }
+};
+
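+/*
+ * The driver structure is __refdata because .probe is __init: binding can
+ * only happen during boot (the driver registers at core_initcall so the
+ * clocks exist before their consumers probe), and suppress_bind_attrs
+ * blocks manual bind/unbind once init memory has been freed.
+ */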
+static struct platform_driver exynos8895_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos8895-cmu",
+ .of_match_table = exynos8895_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos8895_cmu_probe,
+};
+
+static int __init exynos8895_cmu_init(void)
+{
+ return platform_driver_register(&exynos8895_cmu_driver);
+}
+core_initcall(exynos8895_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
new file mode 100644
index 000000000000..6277dd557fab
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -0,0 +1,2717 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Common Clock Framework support for Exynos990.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos990.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+#include "clk-pll.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_DOUT_CMU_CLK_CMUREF + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PCLK + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_ACLK + 1)
+#define CLKS_NR_PERIS (CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x1a330000) */
+#define PLL_LOCKTIME_PLL_G3D 0x0000
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_CON0_PLL_G3D 0x0100
+#define PLL_CON3_PLL_G3D 0x010c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_CLKCMU_DPU_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DNS_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_ALT 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_IPP_BUS 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_ITP_BUS 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUS1_SSS 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x181c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK4 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK5 0x1828
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x182c
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_CPUCL2_BUSP 0x1840
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1844
+#define CLK_CON_DIV_CLKCMU_CSIS_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x184c
+#define CLK_CON_DIV_CLKCMU_DNC_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_DNC_BUSM 0x1854
+#define CLK_CON_DIV_CLKCMU_DNS_BUS 0x1858
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x185c
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1860
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_HPM 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_BUS 0x1870
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1874
+#define CLK_CON_DIV_CLKCMU_HSI0_USB31DRD 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_BUS 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_HSI2_PCIE 0x1898
+#define CLK_CON_DIV_CLKCMU_IPP_BUS 0x189c
+#define CLK_CON_DIV_CLKCMU_ITP_BUS 0x18a0
+#define CLK_CON_DIV_CLKCMU_MCSC_BUS 0x18a4
+#define CLK_CON_DIV_CLKCMU_MCSC_GDC 0x18a8
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x18ac
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x18b0
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x18b4
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x18b8
+#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x18bc
+#define CLK_CON_DIV_CLKCMU_OTP 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18d4
+#define CLK_CON_DIV_CLKCMU_SSP_BUS 0x18d8
+#define CLK_CON_DIV_CLKCMU_TNR_BUS 0x18dc
+#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x18f0
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1900
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x1904
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x1908
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x190c
+#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x1910
+#define CLK_CON_DIV_PLL_SHARED4_DIV3 0x1914
+#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x1918
+#define CLK_CON_GAT_CLKCMU_G3D_BUS 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_DNS_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_IPP_BUS 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_ITP_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_BUS 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_TNR_BUS 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_VRA_BUS 0x20e8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_G3D,
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_PLL_G3D,
+ PLL_CON3_PLL_G3D,
+ PLL_CON0_PLL_MMC,
+ PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_CLKCMU_DPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_BUS0_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_SSS,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_CSIS_BUS,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_DNC_BUS,
+ CLK_CON_DIV_CLKCMU_DNC_BUSM,
+ CLK_CON_DIV_CLKCMU_DNS_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_HSI0_BUS,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_DIV_CLKCMU_HSI1_BUS,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_HSI2_BUS,
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE,
+ CLK_CON_DIV_CLKCMU_IPP_BUS,
+ CLK_CON_DIV_CLKCMU_ITP_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_GDC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_NPU_BUS,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SSP_BUS,
+ CLK_CON_DIV_CLKCMU_TNR_BUS,
+ CLK_CON_DIV_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV3,
+ CLK_CON_DIV_PLL_SHARED4_DIV4,
+ CLK_CON_GAT_CLKCMU_G3D_BUS,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM,
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS,
+};
+
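+/*
+ * No PLL rate tables are supplied (the trailing NULL), so these PLLs are
+ * presumably left at the rates programmed by the bootloader; the
+ * PLL_CON0 muxes below can still bypass them to "oscclk".
+ */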
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_0717x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_0732x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_0718x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
+};
+
+/* Parent clock list for CMU_TOP muxes */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_dpu",
+ "dout_cmu_dpu_alt" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_bus0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_sss_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk4_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk5_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cmu_boost_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "fout_shared3_pll", "oscclk" };
+PNAME(mout_cmu_cpucl0_dbg_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "oscclk" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl2_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_csis_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+PNAME(mout_cmu_csis_ois_mcu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dnc_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dnc_busm_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4" };
+PNAME(mout_cmu_dns_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dpu_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dpu_alt_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g2d_g2d_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_mscl_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4",
+ "oscclk" };
+PNAME(mout_cmu_hpm_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usb31drd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usbdp_debug_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "fout_mmc_pll", "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_mmc_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_hsi1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_ufs_card_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi1_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi2_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_ipp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_itp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_gdc_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cmu_boost_cpu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mfc0_mfc0_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mfc0_wfd_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_npu_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_ssp_bus_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_tnr_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+PNAME(mout_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_clk_cmuref" };
+PNAME(mout_cmu_clk_cmuref_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON0_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_DIV_PLL with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates, remove _UID, _BLK, _IPCLKPORT, _I and _RSTNSYNC.
+ */
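+
+/*
+ * For example, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS (0x1078) becomes
+ * CLK_MOUT_CMU_HSI0_BUS / "mout_cmu_hsi0_bus", and
+ * CLK_CON_DIV_PLL_SHARED0_DIV2 (0x18f4) becomes
+ * CLK_DOUT_CMU_SHARED0_DIV2 / "dout_cmu_shared0_div2".
+ */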
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
+ PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus",
+ mout_cmu_dpu_bus_p, CLK_CON_MUX_CLKCMU_DPU_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
+ mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
+ mout_cmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus",
+ mout_cmu_bus0_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus",
+ mout_cmu_bus1_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_SSS, "mout_cmu_bus1_sss",
+ mout_cmu_bus1_sss_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0",
+ mout_cmu_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1",
+ mout_cmu_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2",
+ mout_cmu_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3",
+ mout_cmu_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK4, "mout_cmu_cis_clk4",
+ mout_cmu_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK5, "mout_cmu_cis_clk5",
+ mout_cmu_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_CMU_BOOST, "mout_cmu_cmu_boost",
+ mout_cmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_cmu_core_bus",
+ mout_cmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_BUS, "mout_cmu_cpucl0_dbg_bus",
+ mout_cmu_cpucl0_dbg_bus_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL2_BUSP, "mout_cmu_cpucl2_busp",
+ mout_cmu_cpucl2_busp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_BUS, "mout_cmu_csis_bus",
+ mout_cmu_csis_bus_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_OIS_MCU, "mout_cmu_csis_ois_mcu",
+ mout_cmu_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_DNC_BUS, "mout_cmu_dnc_bus",
+ mout_cmu_dnc_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DNC_BUSM, "mout_cmu_dnc_busm",
+ mout_cmu_dnc_busm_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM, 0, 2),
+ MUX(CLK_MOUT_CMU_DNS_BUS, "mout_cmu_dns_bus",
+ mout_cmu_dns_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DPU, "mout_cmu_dpu",
+ mout_cmu_dpu_p, CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 1),
+ MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
+ mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
+ mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
+ mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
+ mout_cmu_hsi0_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI0_DPGTC, "mout_cmu_hsi0_dpgtc",
+ mout_cmu_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USB31DRD, "mout_cmu_hsi0_usb31drd",
+ mout_cmu_hsi0_usb31drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
+ mout_cmu_hsi0_usbdp_debug_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
+ mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
+ mout_cmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_PCIE, "mout_cmu_hsi1_pcie",
+ mout_cmu_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_CARD, "mout_cmu_hsi1_ufs_card",
+ mout_cmu_hsi1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
+ mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
+ mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
+ mout_cmu_hsi2_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_IPP_BUS, "mout_cmu_ipp_bus",
+ mout_cmu_ipp_bus_p, CLK_CON_MUX_MUX_CLKCMU_IPP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_ITP_BUS, "mout_cmu_itp_bus",
+ mout_cmu_itp_bus_p, CLK_CON_MUX_MUX_CLKCMU_ITP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_BUS, "mout_cmu_mcsc_bus",
+ mout_cmu_mcsc_bus_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_GDC, "mout_cmu_mcsc_gdc",
+ mout_cmu_mcsc_gdc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC, 0, 3),
+ MUX(CLK_MOUT_CMU_CMU_BOOST_CPU, "mout_cmu_cmu_boost_cpu",
+ mout_cmu_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_MFC0, "mout_cmu_mfc0_mfc0",
+ mout_cmu_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_WFD, "mout_cmu_mfc0_wfd",
+ mout_cmu_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_BUSP, "mout_cmu_mif_busp",
+ mout_cmu_mif_busp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_NPU_BUS, "mout_cmu_npu_bus",
+ mout_cmu_npu_bus_p, CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_IP, "mout_cmu_peric0_ip",
+ mout_cmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_IP, "mout_cmu_peric1_ip",
+ mout_cmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus",
+ mout_cmu_peris_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SSP_BUS, "mout_cmu_ssp_bus",
+ mout_cmu_ssp_bus_p, CLK_CON_MUX_MUX_CLKCMU_SSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_TNR_BUS, "mout_cmu_tnr_bus",
+ mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
+ mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CMUREF, "mout_cmu_cmuref",
+ mout_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_CLK_CMUREF, "mout_cmu_clk_cmuref",
+ mout_cmu_clk_cmuref_p, CLK_CON_MUX_MUX_CLK_CMU_CMUREF, 0, 2),
+};
+
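+/*
+ * The trailing DIV() arguments are the bit shift and field width of the
+ * divider field; the divider value is presumably the usual Samsung
+ * "divide by value + 1", so e.g. the 9-bit MMC_CARD field below allows
+ * ratios up to /512.
+ */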
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* SHARED0 region */
+ DIV(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV3, "dout_cmu_shared0_div3", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4", "dout_cmu_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+
+ /* SHARED1 region */
+ DIV(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV3, "dout_cmu_shared1_div3", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4", "dout_cmu_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
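+ /*
+ * The SHARED0/1 DIV4 outputs divide the DIV2 output again rather
+ * than the PLL directly, so a 1-bit divider field gives an overall
+ * divide-by-four.
+ */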
+
+ /* SHARED2 region */
+ DIV(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2", "mout_pll_shared2",
+ CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+
+ /* SHARED4 region */
+ DIV(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV3, "dout_cmu_shared4_div3", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV4, "dout_cmu_shared4_div4", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 2),
+ DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
+ CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+ CLK_CON_DIV_CLKCMU_BUS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_SSS, "dout_cmu_bus1_sss", "gout_cmu_bus1_sss",
+ CLK_CON_DIV_CLKCMU_BUS1_SSS, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK4, "dout_cmu_cis_clk4", "gout_cmu_cis_clk4",
+ CLK_CON_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK5, "dout_cmu_cis_clk5", "gout_cmu_cis_clk5",
+ CLK_CON_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost", "mout_cmu_cmu_boost",
+ CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_dbg_bus",
+ "gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL2_BUSP, "dout_cmu_cpucl2_busp",
+ "gout_cmu_cpucl2_busp", CLK_CON_DIV_CLKCMU_CPUCL2_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL2_SWITCH, "dout_cmu_cpucl2_switch",
+ "gout_cmu_cpucl2_switch", CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CSIS_BUS, "dout_cmu_csis_bus", "gout_cmu_csis_bus",
+ CLK_CON_DIV_CLKCMU_CSIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "gout_cmu_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUS, "dout_cmu_dnc_bus", "gout_cmu_dnc_bus",
+ CLK_CON_DIV_CLKCMU_DNC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUSM, "dout_cmu_dnc_busm", "gout_cmu_dnc_busm",
+ CLK_CON_DIV_CLKCMU_DNC_BUSM, 0, 4),
+ DIV(CLK_DOUT_CMU_DNS_BUS, "dout_cmu_dns_bus", "gout_cmu_dns_bus",
+ CLK_CON_DIV_CLKCMU_DNS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_MSCL, "dout_cmu_g2d_mscl", "gout_cmu_g2d_mscl",
+ CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_HSI0_BUS, "dout_cmu_hsi0_bus", "gout_cmu_hsi0_bus",
+ CLK_CON_DIV_CLKCMU_HSI0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc", "gout_cmu_hsi0_dpgtc",
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+ "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+ CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
+ "gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
+ "gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_EMBD, "dout_cmu_hsi1_ufs_embd",
+ "gout_cmu_hsi1_ufs_embd", CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+ CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+ CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+ CLK_CON_DIV_CLKCMU_ITP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_BUS, "dout_cmu_mcsc_bus", "gout_cmu_mcsc_bus",
+ CLK_CON_DIV_CLKCMU_MCSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_GDC, "dout_cmu_mcsc_gdc", "gout_cmu_mcsc_gdc",
+ CLK_CON_DIV_CLKCMU_MCSC_GDC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0", "gout_cmu_mfc0_mfc0",
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd", "gout_cmu_mfc0_wfd",
+ CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_cmu_mif_busp", "gout_cmu_mif_busp",
+ CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_NPU_BUS, "dout_cmu_npu_bus", "gout_cmu_npu_bus",
+ CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus", "gout_cmu_peric0_bus",
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP, "dout_cmu_peric0_ip", "gout_cmu_peric0_ip",
+ CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus", "gout_cmu_peric1_bus",
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP, "dout_cmu_peric1_ip", "gout_cmu_peric1_ip",
+ CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_BUS, "dout_cmu_ssp_bus", "gout_cmu_ssp_bus",
+ CLK_CON_DIV_CLKCMU_SSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TNR_BUS, "dout_cmu_tnr_bus", "gout_cmu_tnr_bus",
+ CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
+ CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DPU_ALT, "dout_cmu_dpu_alt", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_CLK_CMUREF, "dout_cmu_clk_cmuref", "mout_cmu_clk_cmuref",
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF, 0, 2),
+};
+
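+/*
+ * FFACTOR(id, name, parent, mult, div, flags) describes a fixed ratio
+ * with no backing register: rate = parent_rate * mult / div. dout_cmu_otp,
+ * for example, always runs at oscclk / 8.
+ */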
+static const struct samsung_fixed_factor_clock top_ffactor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "gout_cmu_hsi1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie",
+ "gout_cmu_hsi2_pcie", 1, 8, 0),
+};
+
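+/*
+ * GATE() entries are (id, name, parent, offset, bit, flags, gate_flags);
+ * every gate in this driver uses enable bit 21 of its CLK_CON_GAT
+ * register. The CLK_IGNORE_UNUSED gates feed bus fabrics: they are skipped
+ * by clk_disable_unused() at boot so that unclaimed interconnects keep
+ * running.
+ */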
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_AUD_CPU, "gout_cmu_aud_cpu", "mout_cmu_aud_cpu",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS0_BUS, "gout_cmu_bus0_bus", "mout_cmu_bus0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_SSS, "gout_cmu_bus1_sss", "mout_cmu_bus1_sss",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK4, "gout_cmu_cis_clk4", "mout_cmu_cis_clk4",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK5, "gout_cmu_cis_clk5", "mout_cmu_cis_clk5",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_DBG_BUS, "gout_cmu_cpucl0_dbg_bus",
+ "mout_cmu_cpucl0_dbg_bus", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_BUSP, "gout_cmu_cpucl2_busp",
+ "mout_cmu_cpucl2_busp", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_SWITCH, "gout_cmu_cpucl2_switch",
+ "mout_cmu_cpucl2_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CSIS_BUS, "gout_cmu_csis_bus", "mout_cmu_csis_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CSIS_OIS_MCU, "gout_cmu_csis_ois_mcu",
+ "mout_cmu_csis_ois_mcu", CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUS, "gout_cmu_dnc_bus", "mout_cmu_dnc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUSM, "gout_cmu_dnc_busm", "mout_cmu_dnc_busm",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNS_BUS, "gout_cmu_dns_bus", "mout_cmu_dns_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU, "gout_cmu_dpu", "mout_cmu_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_alt",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_MSCL, "gout_cmu_g2d_mscl", "mout_cmu_g2d_mscl",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_BUS, "gout_cmu_hsi0_bus",
+ "mout_cmu_hsi0_bus", CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_DPGTC, "gout_cmu_hsi0_dpgtc",
+ "mout_cmu_hsi0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USB31DRD, "gout_cmu_hsi0_usb31drd",
+ "mout_cmu_hsi0_usb31drd", CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USBDP_DEBUG, "gout_cmu_hsi0_usbdp_debug",
+ "mout_cmu_hsi0_usbdp_debug", CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_BUS, "gout_cmu_hsi1_bus", "mout_cmu_hsi1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_MMC_CARD, "gout_cmu_hsi1_mmc_card",
+ "mout_cmu_hsi1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_PCIE, "gout_cmu_hsi1_pcie",
+ "mout_cmu_hsi1_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_CARD, "gout_cmu_hsi1_ufs_card",
+ "mout_cmu_hsi1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_EMBD, "gout_cmu_hsi1_ufs_embd",
+ "mout_cmu_hsi1_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_BUS, "gout_cmu_hsi2_bus", "mout_cmu_hsi2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_PCIE, "gout_cmu_hsi2_pcie",
+ "mout_cmu_hsi2_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IPP_BUS, "gout_cmu_ipp_bus", "mout_cmu_ipp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ITP_BUS, "gout_cmu_itp_bus", "mout_cmu_itp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_BUS, "gout_cmu_mcsc_bus", "mout_cmu_mcsc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_GDC, "gout_cmu_mcsc_gdc", "mout_cmu_mcsc_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_MFC0, "gout_cmu_mfc0_mfc0",
+ "mout_cmu_mfc0_mfc0", CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_WFD, "gout_cmu_mfc0_wfd", "mout_cmu_mfc0_wfd",
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BUSP, "gout_cmu_mif_busp", "mout_cmu_mif_busp",
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_NPU_BUS, "gout_cmu_npu_bus", "mout_cmu_npu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_IP, "gout_cmu_peric0_ip",
+ "mout_cmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_IP, "gout_cmu_peric1_ip",
+ "mout_cmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_SSP_BUS, "gout_cmu_ssp_bus", "mout_cmu_ssp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TNR_BUS, "gout_cmu_tnr_bus", "mout_cmu_tnr_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VRA_BUS, "gout_cmu_vra_bus", "mout_cmu_vra_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS, 21, 0, 0),
+};
+
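+/*
+ * samsung_cmu_info ties the tables above together for the common
+ * registration code; clk_regs lists every register the driver touches so
+ * that the clk core can save and restore them across suspend/resume.
+ */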
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_ffactor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_ffactor_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos990_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos990_cmu_top, "samsung,exynos990-cmu-top",
+ exynos990_cmu_top_init);
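+
+/*
+ * CLK_OF_DECLARE() hooks the init function into of_clk_init(), which runs
+ * before any platform device probes; leaf CMUs without such ordering
+ * constraints are typically registered through a platform driver instead.
+ */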
+
+/* ---- CMU_HSI0 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER 0x0630
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x2038
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x2048
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+};
+
+/* Parent clock list for CMU_HSI0 muxes */
+PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
+PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", "dout_cmu_hsi0_usb31drd" };
+PNAME(mout_hsi0_usbdp_debug_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usbdp_debug" };
+PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+
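+/*
+ * The "_user" muxes are the leaf CMU's entry points: bit 4 of each
+ * PLL_CON0 register selects between oscclk (value 0) and the divided
+ * clock handed down from CMU_TOP (value 1), per the parent order above.
+ */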
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_BUS_USER, "mout_hsi0_bus_user",
+ mout_hsi0_bus_user_p, PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD_USER, "mout_hsi0_usb31drd_user",
+ mout_hsi0_usb31drd_user_p, PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USBDP_DEBUG_USER, "mout_hsi0_usbdp_debug_user",
+ mout_hsi0_usbdp_debug_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_DPGTC_USER, "mout_hsi0_dpgtc_user",
+ mout_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+};
+
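+/*
+ * CLK_IS_CRITICAL makes the framework hold a permanent enable reference,
+ * which is stronger than CLK_IGNORE_UNUSED (only skipped by
+ * clk_disable_unused()). It is applied to the LHM/LHS link-handler clocks
+ * below, where gating would stall bus traffic into the block.
+ */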
+static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK,
+ "gout_hsi0_dp_link_dp_gtc_clk", "mout_hsi0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_PCLK,
+ "gout_hsi0_dp_link_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK,
+ "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK,
+ "gout_hsi0_lhm_axi_p_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK,
+ "gout_hsi0_ppmu_hsi0_bus1_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK,
+ "gout_hsi0_ppmu_hsi0_bus1_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK,
+ "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2,
+ "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK,
+ "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL,
+ "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY,
+ "gout_hsi0_usb31drd_bus_clk_early",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40,
+ "gout_hsi0_usb31drd_usb31drd_ref_clk_40",
+ "mout_hsi0_usb31drd_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL,
+ "gout_hsi0_usb31drd_usbdpphy_ref_soc_pll",
+ "mout_hsi0_usbdp_debug_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB,
+ "gout_hsi0_usb31drd_ipclkport_i_usbdpphy_scl_apb_pclk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK,
+ "gout_hsi0_usb31drd_usbpcs_apb_clk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK,
+ "gout_hsi0_vgen_lite_ipclkport_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CMU_HSI0_PCLK,
+ "gout_hsi0_cmu_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_XIU_D_HSI0_ACLK,
+ "gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK,
+ "gout_hsi0_lhs_acel_d_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+};
+
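+/*
+ * Unlike CMU_TOP, the leaf CMUs set .clk_name: the arm64 registration
+ * helper looks up a clock of that name ("bus") in the CMU's DT node and
+ * enables it so the unit's registers stay accessible.
+ */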
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .gate_clks = hsi0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b4
+#define CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x20e4
+
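+/*
+ * As with the other CMUs, everything in peric0_clk_regs is saved and
+ * restored across suspend, including the PLL_CON1 registers that no clock
+ * below otherwise references.
+ */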
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+};
+
+/* Parent clock list for CMU_PERIC0 muxes */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi04_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi05_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi13_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi14_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi15_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+
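+/*
+ * All per-channel parent lists above are identical: each UART/USI channel
+ * selects between oscclk and the shared dout_cmu_peric0_ip line; only the
+ * bus mux takes dout_cmu_peric0_bus instead.
+ */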
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG, "mout_peric0_uart_dbg",
+ mout_peric0_uart_dbg_p, PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USI_USER, "mout_peric0_usi00_usi_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI_USER, "mout_peric0_usi01_usi_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI_USER, "mout_peric0_usi02_usi_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI_USER, "mout_peric0_usi03_usi_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI_USER, "mout_peric0_usi04_usi_user",
+ mout_peric0_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI_USER, "mout_peric0_usi05_usi_user",
+ mout_peric0_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI13_USI_USER, "mout_peric0_usi13_usi_user",
+ mout_peric0_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI14_USI_USER, "mout_peric0_usi14_usi_user",
+ mout_peric0_usi14_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI15_USI_USER, "mout_peric0_usi15_usi_user",
+ mout_peric0_usi15_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI_I2C_USER, "mout_peric0_usi_i2c_user",
+ mout_peric0_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ 4, 1),
+};
+
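+/*
+ * Each channel's function clock thus follows the chain
+ * mout_peric0_*_user -> dout_peric0_* -> RSTNSYNC gate (below), giving
+ * three independent controls: parent selection, rate and gating.
+ */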
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_UART_DBG, "dout_peric0_uart_dbg",
+ "mout_peric0_uart_dbg",
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI13_USI, "dout_peric0_usi13_usi",
+ "mout_peric0_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI14_USI, "dout_peric0_usi14_usi",
+ "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI15_USI, "dout_peric0_usi15_usi",
+ "mout_peric0_usi15_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ 0, 4),
+};
+
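+/*
+ * PERIC0_TOP0/TOP1 expose an IPCLK (function clock) and a PCLK (APB clock)
+ * per port: the IPCLK gates below are parented to the per-channel
+ * dividers, while every PCLK sits directly on the bus mux.
+ */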
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_CMU_PCLK, "gout_peric0_cmu_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_OSCCLK_CLK, "gout_peric0_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_D_TZPC_PCLK, "gout_peric0_d_tzpc_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PCLK, "gout_peric0_gpio_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_CLK, "gout_peric0_lhm_axi_p_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_10, "gout_peric0_top0_ipclk_10",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_11, "gout_peric0_top0_ipclk_11",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_12, "gout_peric0_top0_ipclk_12",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_13, "gout_peric0_top0_ipclk_13",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_14, "gout_peric0_top0_ipclk_14",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_15, "gout_peric0_top0_ipclk_15",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_4, "gout_peric0_top0_ipclk_4",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_5, "gout_peric0_top0_ipclk_5",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_6, "gout_peric0_top0_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_7, "gout_peric0_top0_ipclk_7",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_8, "gout_peric0_top0_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_9, "gout_peric0_top0_ipclk_9",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_10, "gout_peric0_top0_pclk_10",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_11, "gout_peric0_top0_pclk_11",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_12, "gout_peric0_top0_pclk_12",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_13, "gout_peric0_top0_pclk_13",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_14, "gout_peric0_top0_pclk_14",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_15, "gout_peric0_top0_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_4, "gout_peric0_top0_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_5, "gout_peric0_top0_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_6, "gout_peric0_top0_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_7, "gout_peric0_top0_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_8, "gout_peric0_top0_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_9, "gout_peric0_top0_pclk_9",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_0, "gout_peric0_top1_ipclk_0",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_3, "gout_peric0_top1_ipclk_3",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_4, "gout_peric0_top1_ipclk_4",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_5, "gout_peric0_top1_ipclk_5",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_6, "gout_peric0_top1_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_7, "gout_peric0_top1_ipclk_7",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_8, "gout_peric0_top1_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_0, "gout_peric0_top1_pclk_0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_15, "gout_peric0_top1_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_3, "gout_peric0_top1_pclk_3",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_4, "gout_peric0_top1_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_5, "gout_peric0_top1_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_6, "gout_peric0_top1_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_7, "gout_peric0_top1_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_8, "gout_peric0_top1_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_BUSP_CLK, "gout_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_CLK, "gout_peric0_uart_dbg_clk",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_USI_CLK, "gout_peric0_usi00_usi_clk",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_USI_CLK, "gout_peric0_usi01_usi_clk",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_USI_CLK, "gout_peric0_usi02_usi_clk",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_USI_CLK, "gout_peric0_usi03_usi_clk",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI04_USI_CLK, "gout_peric0_usi04_usi_clk",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI05_USI_CLK, "gout_peric0_usi05_usi_clk",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI13_USI_CLK, "gout_peric0_usi13_usi_clk",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI14_USI_CLK, "gout_peric0_usi14_usi_clk",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI15_USI_CLK, "gout_peric0_usi15_usi_clk",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI_I2C_CLK, "gout_peric0_usi_i2c_clk",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PCLK, "gout_peric0_sysreg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c4
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK 0x2100
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK 0x210c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK 0x2110
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK 0x2114
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK 0x2118
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x211c
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+/* Parent clock list for CMU_PERIC1 muxes */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi18_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi16_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi17_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USI_USER, "mout_peric1_usi06_usi_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USI_USER, "mout_peric1_usi07_usi_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USI_USER, "mout_peric1_usi08_usi_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USI_USER, "mout_peric1_usi09_usi_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI_USER, "mout_peric1_usi10_usi_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI_USER, "mout_peric1_usi11_usi_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI_USER, "mout_peric1_usi12_usi_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI18_USI_USER, "mout_peric1_usi18_usi_user",
+ mout_peric1_usi18_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI_USER, "mout_peric1_usi16_usi_user",
+ mout_peric1_usi16_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI_USER, "mout_peric1_usi17_usi_user",
+ mout_peric1_usi17_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI_I2C_USER, "mout_peric1_usi_i2c_user",
+ mout_peric1_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI06_USI, "dout_peric1_usi06_usi",
+ "mout_peric1_usi06_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi",
+ "mout_peric1_usi07_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi",
+ "mout_peric1_usi08_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI18_USI, "dout_peric1_usi18_usi",
+ "mout_peric1_usi18_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ 0, 4),
+};
+
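+/*
+ * All gates below use the ENABLE bit at position 21 of their
+ * CLK_CON_GAT_* register, the common layout across this CMU.
+ */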
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_CMU_PCLK, "gout_peric1_cmu_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_CLK, "gout_peric1_uart_bt_clk",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_USI_CLK, "gout_peric1_usi12_usi_clk",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI18_USI_CLK, "gout_peric1_usi18_usi_clk",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_D_TZPC_PCLK, "gout_peric1_d_tzpc_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PCLK, "gout_peric1_gpio_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK, "gout_peric1_lhm_axi_p_csis_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CLK, "gout_peric1_lhm_axi_p_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_10, "gout_peric1_top0_ipclk_10",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_11, "gout_peric1_top0_ipclk_11",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_12, "gout_peric1_top0_ipclk_12",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_13, "gout_peric1_top0_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_14, "gout_peric1_top0_ipclk_14",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_15, "gout_peric1_top0_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_4, "gout_peric1_top0_ipclk_4",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_10, "gout_peric1_top0_pclk_10",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_11, "gout_peric1_top0_pclk_11",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_12, "gout_peric1_top0_pclk_12",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_13, "gout_peric1_top0_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_14, "gout_peric1_top0_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_15, "gout_peric1_top0_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_4, "gout_peric1_top0_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_0, "gout_peric1_top1_ipclk_0",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_1, "gout_peric1_top1_ipclk_1",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_10, "gout_peric1_top1_ipclk_10",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_12, "gout_peric1_top1_ipclk_12",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_13, "gout_peric1_top1_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_14, "gout_peric1_top1_ipclk_14",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_15, "gout_peric1_top1_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_2, "gout_peric1_top1_ipclk_2",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_3, "gout_peric1_top1_ipclk_3",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_4, "gout_peric1_top1_ipclk_4",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_5, "gout_peric1_top1_ipclk_5",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_6, "gout_peric1_top1_ipclk_6",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_7, "gout_peric1_top1_ipclk_7",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_9, "gout_peric1_top1_ipclk_9",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_0, "gout_peric1_top1_pclk_0",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_1, "gout_peric1_top1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_10, "gout_peric1_top1_pclk_10",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_12, "gout_peric1_top1_pclk_12",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_13, "gout_peric1_top1_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_14, "gout_peric1_top1_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_15, "gout_peric1_top1_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_2, "gout_peric1_top1_pclk_2",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_3, "gout_peric1_top1_pclk_3",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_4, "gout_peric1_top1_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_5, "gout_peric1_top1_pclk_5",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_6, "gout_peric1_top1_pclk_6",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_7, "gout_peric1_top1_pclk_7",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_9, "gout_peric1_top1_pclk_9",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_BUSP_CLK, "gout_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_OSCCLK_CLK, "gout_peric1_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_USI_CLK, "gout_peric1_usi06_usi_clk",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_USI_CLK, "gout_peric1_usi07_usi_clk",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_USI_CLK, "gout_peric1_usi08_usi_clk",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_USI_CLK, "gout_peric1_usi09_usi_clk",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_USI_CLK, "gout_peric1_usi10_usi_clk",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_USI_CLK, "gout_peric1_usi11_usi_clk",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_USI_CLK, "gout_peric1_usi16_usi_clk",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_USI_CLK, "gout_peric1_usi17_usi_clk",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI_I2C_CLK, "gout_peric1_usi_i2c_clk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PCLK, "gout_peric1_sysreg_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_PCLK, "gout_peric1_usi16_i3c_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_SCLK, "gout_peric1_usi16_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_PCLK, "gout_peric1_usi17_i3c_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_SCLK, "gout_peric1_usi17_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_ACLK, "gout_peric1_xiu_p_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIS ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIS_BUS_USER 0x0604
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2008
+#define QCH_CON_D_TZPC_PERIS_QCH 0x3004
+#define QCH_CON_GIC_QCH 0x3008
+#define QCH_CON_LHM_AXI_P_PERIS_QCH 0x300c
+#define QCH_CON_MCT_QCH 0x3010
+#define QCH_CON_OTP_CON_BIRA_QCH 0x3014
+#define QCH_CON_OTP_CON_TOP_QCH 0x301c
+#define QCH_CON_PERIS_CMU_PERIS_QCH 0x3020
+#define QCH_CON_SYSREG_PERIS_QCH 0x3024
+#define QCH_CON_TMU_SUB_QCH 0x3028
+#define QCH_CON_TMU_TOP_QCH 0x302c
+#define QCH_CON_WDT_CLUSTER0_QCH 0x3030
+#define QCH_CON_WDT_CLUSTER2_QCH 0x3034
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_BUS_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ QCH_CON_D_TZPC_PERIS_QCH,
+ QCH_CON_GIC_QCH,
+ QCH_CON_LHM_AXI_P_PERIS_QCH,
+ QCH_CON_MCT_QCH,
+ QCH_CON_OTP_CON_BIRA_QCH,
+ QCH_CON_OTP_CON_TOP_QCH,
+ QCH_CON_PERIS_CMU_PERIS_QCH,
+ QCH_CON_SYSREG_PERIS_QCH,
+ QCH_CON_TMU_SUB_QCH,
+ QCH_CON_TMU_TOP_QCH,
+ QCH_CON_WDT_CLUSTER0_QCH,
+ QCH_CON_WDT_CLUSTER2_QCH,
+};
+
+/* Parent clock list for CMU_PERIS muxes */
+PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_cmu_peris_bus" };
+PNAME(mout_peris_clk_peris_gic_p) = { "oscclk", "mout_peris_bus_user" };
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user",
+ mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIS_CLK_PERIS_GIC, "mout_peris_clk_peris_gic",
+ mout_peris_clk_peris_gic_p, CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIS_SYSREG_PERIS_PCLK,
+ "gout_peris_sysreg_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER2_PCLK,
+ "gout_peris_wdt_cluster2_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK,
+ "gout_peris_wdt_cluster0_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_CLK_PERIS_PERIS_CMU_PERIS_PCLK,
+ "clk_peris_peris_cmu_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_BUSP_CLK,
+ "gout_peris_clk_peris_busp_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_OSCCLK_CLK,
+ "gout_peris_clk_peris_oscclk_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_GIC_CLK,
+ "gout_peris_clk_peris_gic_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM,
+ "gout_peris_ad_axi_p_peris_aclkm", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK,
+ "gout_peris_otp_con_bira_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_GIC_CLK,
+ "gout_peris_gic_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_LHM_AXI_P_PERIS_CLK,
+ "gout_peris_lhm_axi_p_peris_clk", "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_MCT_PCLK,
+ "gout_peris_mct_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_PCLK,
+ "gout_peris_otp_con_top_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_D_TZPC_PERIS_PCLK,
+ "gout_peris_d_tzpc_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TMU_TOP_PCLK,
+ "gout_peris_tmu_top_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_OSCCLK,
+ "gout_peris_otp_con_bira_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK,
+ "gout_peris_otp_con_top_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+};
+
+static void __init exynos990_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's a dependency for the MCT. */
+CLK_OF_DECLARE(exynos990_cmu_peris, "samsung,exynos990-cmu-peris",
+ exynos990_cmu_peris_init);
+
+/* ----- platform_driver ----- */
+
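+/*
+ * CMU_PERIS is registered early via CLK_OF_DECLARE above; the CMUs
+ * matched below have no boot-time consumers and are registered from
+ * the platform driver's probe instead.
+ */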
+static int __init exynos990_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos990_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos990-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric1",
+ .data = &peric1_cmu_info,
+ },
+ { },
+};
+
+static struct platform_driver exynos990_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos990-cmu",
+ .of_match_table = exynos990_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos990_cmu_probe,
+};
+
+static int __init exynos990_cmu_init(void)
+{
+ return platform_driver_register(&exynos990_cmu_driver);
+}
+
+core_initcall(exynos990_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index 5971e680e566..e4d7c7b96aa8 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for ExynosAuto V9 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index f60f0a0c598d..572b6ace14ac 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for ExynosAuto v920 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -18,7 +18,15 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1)
+#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1)
+#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
+#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
+#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
+#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1)
+#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1)
+#define CLKS_NR_HSI2 (CLK_DOUT_HSI2_ETHERNET_PTP + 1)
/* ---- CMU_TOP ------------------------------------------------------------ */
@@ -974,6 +982,8 @@ static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initcon
"mout_shared5_pll", 1, 3, 0),
FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4",
"mout_shared5_pll", 1, 4, 0),
+ FFACTOR(DOUT_TCXO_DIV2, "dout_tcxo_div2",
+ "oscclk", 1, 2, 0),
};
static const struct samsung_cmu_info top_cmu_info __initconst = {
@@ -999,6 +1009,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np)
CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
exynosautov920_cmu_top_init);
+/* ---- CMU_CPUCL0 --------------------------------------------------------- */
+
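+/*
+ * The three CPU cluster CMUs (CPUCL0/1/2) are nearly identical: each
+ * has one pll_531x CPU PLL plus cluster/switch user muxes and bus
+ * dividers; CPUCL0 additionally exposes the debug (DBG) clock tree.
+ */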
+/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" };
+PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user",
+ "mout_cpucl0_switch_user"};
+PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user"};
+
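+/*
+ * CPU PLL rate table, shared by all three clusters. Each rate follows
+ * the usual Samsung PLL formula fout = fin * M / (P << S), e.g.
+ * 38.4 MHz * 250 / (4 << 0) = 2400 MHz.
+ */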
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0),
+ PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0),
+ PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0),
+ PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0),
+ PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0),
+ PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0),
+ PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0),
+ PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0),
+ PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0),
+ PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1),
+ PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2),
+};
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ /* CMU_CPUCL0_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "cpucl0",
+};
+
+static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0",
+ exynosautov920_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" };
+PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user",
+ "mout_cpucl1_switch_user"};
+PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user"};
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ /* CMU_CPUCL1_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "cpucl1",
+};
+
+static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1",
+ exynosautov920_cmu_cpucl1_init);
+
+/* ---- CMU_CPUCL2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */
+#define PLL_LOCKTIME_PLL_CPUCL2 0x0000
+#define PLL_CON0_PLL_CPUCL2 0x0100
+#define PLL_CON1_PLL_CPUCL2 0x0104
+#define PLL_CON3_PLL_CPUCL2 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c
+
+static const unsigned long cpucl2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL2,
+ PLL_CON0_PLL_CPUCL2,
+ PLL_CON1_PLL_CPUCL2,
+ PLL_CON3_PLL_CPUCL2,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL2 */
+PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" };
+PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" };
+PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" };
+PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user",
+ "mout_cpucl2_switch_user"};
+PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2",
+ "mout_cpucl2_switch_user"};
+
+static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = {
+ /* CMU_CPUCL2_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p,
+ PLL_CON0_PLL_CPUCL2, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl2_cmu_info __initconst = {
+ .pll_clks = cpucl2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks),
+ .mux_clks = cpucl2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks),
+ .div_clks = cpucl2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL2,
+ .clk_regs = cpucl2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs),
+ .clk_name = "cpucl2",
+};
+
+static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info);
+}
+
+/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2",
+ exynosautov920_cmu_cpucl2_init);
+
/* ---- CMU_PERIC0 --------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
@@ -1139,6 +1482,345 @@ static const struct samsung_cmu_info peric0_cmu_info __initconst = {
.clk_name = "noc",
};
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10C00000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER 0x610
+#define CLK_CON_MUX_MUX_CLK_PERIC1_I3C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C 0x1028
+#define CLK_CON_DIV_DIV_CLK_PERIC1_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x1828
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I3C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_ip_user_p) = { "oscclk", "dout_clkcmu_peric1_ip" };
+PNAME(mout_peric1_noc_user_p) = { "oscclk", "dout_clkcmu_peric1_noc" };
+PNAME(mout_peric1_usi_p) = { "oscclk", "mout_peric1_ip_user" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_IP_USER, "mout_peric1_ip_user",
+ mout_peric1_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_NOC_USER, "mout_peric1_noc_user",
+ mout_peric1_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, 4, 1),
+ /* USI09 ~ USI17 */
+ MUX(CLK_MOUT_PERIC1_USI09_USI, "mout_peric1_usi09_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI, "mout_peric1_usi10_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI, "mout_peric1_usi11_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI, "mout_peric1_usi12_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI13_USI, "mout_peric1_usi13_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI14_USI, "mout_peric1_usi14_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI15_USI, "mout_peric1_usi15_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI, "mout_peric1_usi16_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI, "mout_peric1_usi17_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC1_USI_I2C, "mout_peric1_usi_i2c",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, 0, 1),
+ /* USI_I3C */
+ MUX(CLK_MOUT_PERIC1_I3C, "mout_peric1_i3c",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ /* USI09 ~ USI17 */
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI13_USI, "dout_peric1_usi13_usi",
+ "mout_peric1_usi13_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI14_USI, "dout_peric1_usi14_usi",
+ "mout_peric1_usi14_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI15_USI, "dout_peric1_usi15_usi",
+ "mout_peric1_usi15_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, 0, 4),
+ /* USI_I3C */
+ DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c",
+ "mout_peric1_i3c", CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_MISC --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_MISC (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_MISC_NOC_USER 0x600
+#define CLK_CON_MUX_MUX_CLK_MISC_GIC 0x1000
+#define CLK_CON_DIV_CLKCMU_OTP 0x1800
+#define CLK_CON_DIV_DIV_CLK_MISC_NOCP 0x1804
+#define CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2 0x1808
+
+static const unsigned long misc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MISC_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_MISC_GIC,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_DIV_CLK_MISC_NOCP,
+ CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2,
+};
+
+/* List of parent clocks for Muxes in CMU_MISC */
+PNAME(mout_misc_noc_user_p) = { "oscclk", "dout_clkcmu_misc_noc" };
+PNAME(mout_misc_gic_p) = { "dout_misc_nocp", "oscclk" };
+
+static const struct samsung_mux_clock misc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MISC_NOC_USER, "mout_misc_noc_user",
+ mout_misc_noc_user_p, PLL_CON0_MUX_CLKCMU_MISC_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_MISC_GIC, "mout_misc_gic",
+ mout_misc_gic_p, CLK_CON_MUX_MUX_CLK_MISC_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock misc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MISC_NOCP, "dout_misc_nocp",
+ "mout_misc_noc_user", CLK_CON_DIV_DIV_CLK_MISC_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_fixed_factor_clock misc_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_MISC_OTP, "dout_misc_otp",
+ "oscclk", 1, 10, 0),
+ FFACTOR(CLK_DOUT_MISC_OSC_DIV2, "dout_misc_osc_div2",
+ "oscclk", 1, 2, 0),
+};
+
+static const struct samsung_cmu_info misc_cmu_info __initconst = {
+ .mux_clks = misc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(misc_mux_clks),
+ .div_clks = misc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(misc_div_clks),
+ .fixed_factor_clks = misc_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(misc_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_MISC,
+ .clk_regs = misc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI0 (0x16000000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER 0x600
+#define CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB 0x1800
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI0 */
+PNAME(mout_hsi0_noc_user_p) = { "oscclk", "dout_clkcmu_hsi0_noc" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_NOC_USER, "mout_hsi0_noc_user",
+ mout_hsi0_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock hsi0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI0_PCIE_APB, "dout_hsi0_pcie_apb",
+ "mout_hsi0_noc_user", CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB,
+ 0, 4),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .div_clks = hsi0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi0_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI1 (0x16400000) */
+#define PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER 0x610
+#define PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER 0x620
+#define CLK_CON_MUX_MUX_CLK_HSI1_USBDRD 0x1000
+
+static const unsigned long hsi1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI1_USBDRD,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI1 */
+PNAME(mout_hsi1_mmc_card_user_p) = { "oscclk", "dout_clkcmu_hsi1_mmc_card" };
+PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
+
+static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI1_MMC_CARD_USER, "mout_hsi1_mmc_card_user",
+ mout_hsi1_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_NOC_USER, "mout_hsi1_noc_user",
+ mout_hsi1_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_USBDRD_USER, "mout_hsi1_usbdrd_user",
+ mout_hsi1_usbdrd_user_p, PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_USBDRD, "mout_hsi1_usbdrd",
+ mout_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLK_HSI1_USBDRD, 4, 1),
+};
+
+static const struct samsung_cmu_info hsi1_cmu_info __initconst = {
+ .mux_clks = hsi1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi1_mux_clks),
+ .nr_clk_ids = CLKS_NR_HSI1,
+ .clk_regs = hsi1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi1_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI2 (0x16b00000) */
+#define PLL_LOCKTIME_PLL_ETH 0x0
+#define PLL_CON3_PLL_ETH 0x10c
+#define PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER 0x610
+#define PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x630
+#define CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET 0x1000
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET 0x1800
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP 0x1804
+
+static const unsigned long hsi2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_ETH,
+ PLL_CON3_PLL_ETH,
+ PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+};
+
+static const struct samsung_pll_clock hsi2_pll_clks[] __initconst = {
+ /* CMU_HSI2_PLL */
+ PLL(pll_531x, FOUT_PLL_ETH, "fout_pll_eth", "oscclk",
+ PLL_LOCKTIME_PLL_ETH, PLL_CON3_PLL_ETH, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_HSI2 */
+PNAME(mout_clkcmu_hsi2_noc_ufs_user_p) = { "oscclk", "dout_clkcmu_hsi2_noc_ufs" };
+PNAME(mout_clkcmu_hsi2_ufs_embd_user_p) = { "oscclk", "dout_clkcmu_hsi2_ufs_embd" };
+PNAME(mout_hsi2_ethernet_p) = { "fout_pll_eth", "mout_clkcmu_hsi2_ethernet_user" };
+PNAME(mout_clkcmu_hsi2_ethernet_user_p) = { "oscclk", "dout_clkcmu_hsi2_ethernet" };
+
+static const struct samsung_mux_clock hsi2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI2_NOC_UFS_USER, "mout_clkcmu_hsi2_noc_ufs_user",
+ mout_clkcmu_hsi2_noc_ufs_user_p, PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_UFS_EMBD_USER, "mout_clkcmu_hsi2_ufs_embd_user",
+ mout_clkcmu_hsi2_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET, "mout_hsi2_ethernet",
+ mout_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET, 0, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET_USER, "mout_clkcmu_hsi2_ethernet_user",
+ mout_clkcmu_hsi2_ethernet_user_p, PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER, 4, 1),
+};
+
+static const struct samsung_div_clock hsi2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI2_ETHERNET, "dout_hsi2_ethernet",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ 0, 4),
+ DIV(CLK_DOUT_HSI2_ETHERNET_PTP, "dout_hsi2_ethernet_ptp",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+ 0, 4),
+};
+
+static const struct samsung_cmu_info hsi2_cmu_info __initconst = {
+ .pll_clks = hsi2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(hsi2_pll_clks),
+ .mux_clks = hsi2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi2_mux_clks),
+ .div_clks = hsi2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi2_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI2,
+ .clk_regs = hsi2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi2_clk_regs),
+ .clk_name = "noc",
+};
+
static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
@@ -1154,6 +1836,21 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = {
{
.compatible = "samsung,exynosautov920-cmu-peric0",
.data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-peric1",
+ .data = &peric1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-misc",
+ .data = &misc_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi1",
+ .data = &hsi1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi2",
+ .data = &hsi2_cmu_info,
},
{ }
};
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 6f984cfcd33c..4124d65e3d18 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -8,10 +8,10 @@
* Common Clock Framework support for FSD SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -82,6 +82,15 @@
#define GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU 0x205c
#define GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK 0x2060
+/* NOTE: Must be equal to the last clock ID plus one */
+#define CLKS_NR_CMU (GAT_CMU_FSYS0_SHARED0DIV4 + 1)
+#define CLKS_NR_PERIC (PERIC_DOUT_RGMII_CLK + 1)
+#define CLKS_NR_FSYS0 (FSYS0_DOUT_FSYS0_PERIBUS_GRP + 1)
+#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1)
+#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1)
+#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1)
+#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_PCLK + 1)
+
static const unsigned long cmu_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED0,
PLL_LOCKTIME_PLL_SHARED1,
@@ -300,7 +309,7 @@ static const struct samsung_cmu_info cmu_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(cmu_div_clks),
.gate_clks = cmu_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cmu_gate_clks),
- .nr_clk_ids = CMU_NR_CLK,
+ .nr_clk_ids = CLKS_NR_CMU,
.clk_regs = cmu_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cmu_clk_regs),
};
@@ -665,7 +674,7 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(peric_gate_clks),
.fixed_clks = peric_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(peric_fixed_clks),
- .nr_clk_ids = PERIC_NR_CLK,
+ .nr_clk_ids = CLKS_NR_PERIC,
.clk_regs = peric_clk_regs,
.nr_clk_regs = ARRAY_SIZE(peric_clk_regs),
.clk_name = "dout_cmu_pll_shared0_div4",
@@ -964,7 +973,7 @@ static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
.fixed_clks = fsys0_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(fsys0_fixed_clks),
- .nr_clk_ids = FSYS0_NR_CLK,
+ .nr_clk_ids = CLKS_NR_FSYS0,
.clk_regs = fsys0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
.clk_name = "dout_cmu_fsys0_shared1div4",
@@ -1136,7 +1145,7 @@ static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
.fixed_clks = fsys1_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(fsys1_fixed_clks),
- .nr_clk_ids = FSYS1_NR_CLK,
+ .nr_clk_ids = CLKS_NR_FSYS1,
.clk_regs = fsys1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
.clk_name = "dout_cmu_fsys1_shared0div4",
@@ -1413,7 +1422,7 @@ static const struct samsung_cmu_info imem_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(imem_div_clks),
.gate_clks = imem_gate_clks,
.nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
- .nr_clk_ids = IMEM_NR_CLK,
+ .nr_clk_ids = CLKS_NR_IMEM,
.clk_regs = imem_clk_regs,
.nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
};
@@ -1538,7 +1547,7 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(mfc_div_clks),
.gate_clks = mfc_gate_clks,
.nr_gate_clks = ARRAY_SIZE(mfc_gate_clks),
- .nr_clk_ids = MFC_NR_CLK,
+ .nr_clk_ids = CLKS_NR_MFC,
.clk_regs = mfc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
};
@@ -1637,7 +1646,7 @@ static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst
};
static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = {
- PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll",
+ PLL(pll_142xx, CAM_CSI_PLL, "fout_pll_cam_csi", "fin_pll",
PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table),
};
@@ -1673,51 +1682,51 @@ static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = {
GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_0_IPCLKPORT_I_PCLK, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_1_IPCLKPORT_I_PCLK, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_2_IPCLKPORT_I_PCLK, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_3_IPCLKPORT_I_PCLK, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_0_IPCLKPORT_I_PCLK, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_1_IPCLKPORT_I_PCLK, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_2_IPCLKPORT_I_PCLK, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_3_IPCLKPORT_I_PCLK, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_0_IPCLKPORT_I_PCLK, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_1_IPCLKPORT_I_PCLK, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_2_IPCLKPORT_I_PCLK, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_3_IPCLKPORT_I_PCLK, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d",
"dout_cam_csi_busd",
@@ -1742,7 +1751,7 @@ static const struct samsung_cmu_info cam_csi_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(cam_csi_div_clks),
.gate_clks = cam_csi_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cam_csi_gate_clks),
- .nr_clk_ids = CAM_CSI_NR_CLK,
+ .nr_clk_ids = CLKS_NR_CAM_CSI,
.clk_regs = cam_csi_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam_csi_clk_regs),
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 85098c61c15e..70b26db9b95a 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for GS101.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
EARLY_WAKEUP_DPU_DEST,
EARLY_WAKEUP_CSIS_DEST,
EARLY_WAKEUP_SW_TRIG_APM,
- EARLY_WAKEUP_SW_TRIG_APM_SET,
- EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
EARLY_WAKEUP_SW_TRIG_CLUSTER0,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
EARLY_WAKEUP_SW_TRIG_DPU,
- EARLY_WAKEUP_SW_TRIG_DPU_SET,
- EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
EARLY_WAKEUP_SW_TRIG_CSIS,
- EARLY_WAKEUP_SW_TRIG_CSIS_SET,
- EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
@@ -1162,7 +1154,7 @@ static const struct samsung_div_clock cmu_top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
DIV(CLK_DOUT_CMU_G3AA_G3AA, "dout_cmu_g3aa_g3aa", "gout_cmu_g3aa_g3aa",
CLK_CON_DIV_CLKCMU_G3AA_G3AA, 0, 4),
- DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
+ DIV(CLK_DOUT_CMU_G3D_BUSD, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
CLK_CON_DIV_CLKCMU_G3D_BUSD, 0, 4),
DIV(CLK_DOUT_CMU_G3D_GLB, "dout_cmu_g3d_glb", "gout_cmu_g3d_glb",
CLK_CON_DIV_CLKCMU_G3D_GLB, 0, 4),
@@ -2137,7 +2129,7 @@ PNAME(mout_hsi0_usbdpdbg_user_p) = { "oscclk",
"dout_cmu_hsi0_usbdpdbg" };
PNAME(mout_hsi0_bus_p) = { "mout_hsi0_bus_user",
"mout_hsi0_alt_user" };
-PNAME(mout_hsi0_usb20_ref_p) = { "fout_usb_pll",
+PNAME(mout_hsi0_usb20_ref_p) = { "mout_pll_usb",
"mout_hsi0_tcxo_user" };
PNAME(mout_hsi0_usb31drd_p) = { "fout_usb_pll",
"mout_hsi0_usb31drd_user",
@@ -2775,11 +2767,11 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK,
"gout_hsi2_qe_ufs_embd_hsi2_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK,
"gout_hsi2_qe_ufs_embd_hsi2_pclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK,
"gout_hsi2_clk_hsi2_bus_clk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK,
@@ -2806,7 +2798,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_SYSREG_HSI2_PCLK,
"gout_hsi2_sysreg_hsi2_pclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK,
"gout_hsi2_uasc_pcie_gen4a_dbi_1_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK,
@@ -2842,7 +2834,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_ACLK,
"gout_hsi2_ufs_embd_i_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO,
"gout_hsi2_ufs_embd_i_clk_unipro", "mout_hsi2_ufs_embd_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
@@ -2850,7 +2842,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK,
"gout_hsi2_ufs_embd_i_fmp_clk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
/* TODO: should have a driver for this */
GATE(CLK_GOUT_HSI2_XIU_D_HSI2_ACLK,
"gout_hsi2_xiu_d_hsi2_aclk", "mout_hsi2_bus_user",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index cca3e630922c..7bea7be1d7e4 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -49,21 +49,26 @@ static const struct samsung_pll_rate_table *samsung_get_pll_settings(
return NULL;
}
-static long samsung_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int samsung_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static bool pll_early_timeout = true;
@@ -206,6 +211,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
*/
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR (270)
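+/* PLL142xx/PLL1017x program a shorter lock time of 150 * PDIV cycles */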
+#define PLL142XX_LOCK_FACTOR (150)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
@@ -272,7 +278,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ if (pll->type == pll_142xx || pll->type == pll_1017x)
+ writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
+ pll->lock_reg);
+ else
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
@@ -293,7 +303,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll35xx_clk_ops = {
.recalc_rate = samsung_pll35xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll35xx_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -406,7 +416,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll36xx_clk_ops = {
.recalc_rate = samsung_pll36xx_recalc_rate,
.set_rate = samsung_pll36xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -430,7 +440,10 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
-/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+/*
+ * PLL1418x, PLL0717x and PLL0718x are similar
+ * to PLL0822x, except that MDIV is one bit smaller
+ */
#define PLL1418X_MDIV_MASK (0x1FF)
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
@@ -441,10 +454,14 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- if (pll->type != pll_1418x)
+
+ if (pll->type != pll_1418x &&
+ pll->type != pll_0717x &&
+ pll->type != pll_0718x)
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
else
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
+
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
@@ -502,7 +519,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0822x_clk_ops = {
.recalc_rate = samsung_pll0822x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll0822x_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -600,7 +617,7 @@ static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0831x_clk_ops = {
.recalc_rate = samsung_pll0831x_recalc_rate,
.set_rate = samsung_pll0831x_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -723,7 +740,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll45xx_clk_ops = {
.recalc_rate = samsung_pll45xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll45xx_set_rate,
};
@@ -868,7 +885,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll46xx_set_rate,
};
@@ -1081,7 +1098,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2550xx_clk_ops = {
.recalc_rate = samsung_pll2550xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2550xx_set_rate,
};
@@ -1173,7 +1190,7 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650x_clk_ops = {
.recalc_rate = samsung_pll2650x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2650x_set_rate,
};
@@ -1265,7 +1282,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650xx_clk_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
.set_rate = samsung_pll2650xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
};
static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
@@ -1313,6 +1330,125 @@ static const struct clk_ops samsung_pll531x_clk_ops = {
.recalc_rate = samsung_pll531x_recalc_rate,
};
+/*
+ * PLL1031x Clock Type
+ */
+#define PLL1031X_LOCK_FACTOR (500)
+
+#define PLL1031X_MDIV_MASK (0x3ff)
+#define PLL1031X_PDIV_MASK (0x3f)
+#define PLL1031X_SDIV_MASK (0x7)
+#define PLL1031X_MDIV_SHIFT (16)
+#define PLL1031X_PDIV_SHIFT (8)
+#define PLL1031X_SDIV_SHIFT (0)
+
+#define PLL1031X_KDIV_MASK (0xffff)
+#define PLL1031X_KDIV_SHIFT (0)
+#define PLL1031X_MFR_MASK (0x3f)
+#define PLL1031X_MRR_MASK (0x1f)
+#define PLL1031X_MFR_SHIFT (16)
+#define PLL1031X_MRR_SHIFT (24)
+
+static unsigned long samsung_pll1031x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con3 = readl_relaxed(pll->con_reg + 0xc);
+ mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL1031X_SDIV_SHIFT) & PLL1031X_SDIV_MASK;
+ kdiv = (pll_con3 & PLL1031X_KDIV_MASK);
+
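+ /*
+ * MDIV is scaled by 2^16 (PLL1031X_MDIV_SHIFT) so the 16-bit
+ * fractional KDIV can be added before the division; the final
+ * shift removes the scale again.
+ */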
+ fvco *= (mdiv << PLL1031X_MDIV_SHIFT) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= PLL1031X_MDIV_SHIFT;
+
+ return (unsigned long)fvco;
+}
+
+static bool samsung_pll1031x_mpk_change(u32 pll_con0, u32 pll_con3,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ old_kdiv = (pll_con3 >> PLL1031X_KDIV_SHIFT) & PLL1031X_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv ||
+ old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll1031x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ con0 = readl_relaxed(pll->con_reg);
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+
+ if (!samsung_pll1031x_mpk_change(con0, con3, rate)) {
+ /* If only the S value changes, update just the S divider */
+ con0 &= ~(PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL1031X_SDIV_SHIFT;
+ writel_relaxed(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ writel_relaxed(rate->pdiv * PLL1031X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Set PLL M, P, and S values. */
+ con0 &= ~((PLL1031X_MDIV_MASK << PLL1031X_MDIV_SHIFT) |
+ (PLL1031X_PDIV_MASK << PLL1031X_PDIV_SHIFT) |
+ (PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT));
+
+ con0 |= (rate->mdiv << PLL1031X_MDIV_SHIFT) |
+ (rate->pdiv << PLL1031X_PDIV_SHIFT) |
+ (rate->sdiv << PLL1031X_SDIV_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+ con3 &= ~((PLL1031X_KDIV_MASK << PLL1031X_KDIV_SHIFT) |
+ (PLL1031X_MFR_MASK << PLL1031X_MFR_SHIFT) |
+ (PLL1031X_MRR_MASK << PLL1031X_MRR_SHIFT));
+ con3 |= (rate->kdiv << PLL1031X_KDIV_SHIFT) |
+ (rate->mfr << PLL1031X_MFR_SHIFT) |
+ (rate->mrr << PLL1031X_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ writel_relaxed(con0, pll->con_reg);
+ writel_relaxed(con3, pll->con_reg + 0xc);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+}
+
+static const struct clk_ops samsung_pll1031x_clk_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+ .determine_rate = samsung_pll_determine_rate,
+ .set_rate = samsung_pll1031x_set_rate,
+};
+
+static const struct clk_ops samsung_pll1031x_clk_min_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+};
+
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1361,6 +1497,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1451x:
case pll_1452x:
case pll_142xx:
+ case pll_1017x:
pll->enable_offs = PLL35XX_ENABLE_SHIFT;
pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
@@ -1370,11 +1507,16 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
break;
case pll_1417x:
case pll_1418x:
+ case pll_1051x:
+ case pll_1052x:
case pll_0818x:
case pll_0822x:
case pll_0516x:
case pll_0517x:
case pll_0518x:
+ case pll_0717x:
+ case pll_0718x:
+ case pll_0732x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
if (!pll->rate_table)
@@ -1448,8 +1590,15 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
init.ops = &samsung_pll2650xx_clk_ops;
break;
case pll_531x:
+ case pll_4311:
init.ops = &samsung_pll531x_clk_ops;
break;
+ case pll_1031x:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll1031x_clk_min_ops;
+ else
+ init.ops = &samsung_pll1031x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 3481941ba07a..6c8bb7f26da5 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -43,6 +43,14 @@ enum samsung_pll_type {
pll_0517x,
pll_0518x,
pll_531x,
+ pll_1051x,
+ pll_1052x,
+ pll_0717x,
+ pll_0718x,
+ pll_0732x,
+ pll_4311,
+ pll_1017x,
+ pll_1031x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index d27a1f73f077..397a057af5d1 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -3,12 +3,11 @@
* Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
*
* Common Clock Framework support for all S3C64xx SoCs.
-*/
+ */
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/clk/samsung.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index b31c00ea331f..b1fd8fac3a4c 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -8,11 +8,12 @@
* Author: Padmavathi Venna <padma.v@samsung.com>
*
* Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
-*/
+ */
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include <linux/init.h>
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index cd85342e4ddb..9a4217cc1908 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index afa5760ed3a1..dbc9925ca8f4 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -6,13 +6,13 @@
*
* This file includes utility functions to register clocks to common
* clock framework for Samsung platforms.
-*/
+ */
#include <linux/slab.h>
#include <linux/clkdev.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
@@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
if (!ctx)
panic("could not allocate clock provider context.\n");
+ ctx->clk_data.num = nr_clks;
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev;
ctx->reg_base = base;
- ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index fb06caa71f0a..18660c1ac6f0 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -11,6 +11,7 @@
#define __SAMSUNG_CLK_H
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include "clk-pll.h"
#include "clk-cpu.h"
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index e0173324f3c5..d45193c210b4 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -49,7 +49,7 @@ static struct __prci_wrpll_data sifive_fu540_prci_gemgxlpll_data = {
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index f31cd30fc395..c605a899d97d 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -55,7 +55,7 @@ static struct __prci_wrpll_data sifive_fu740_prci_cltxpll_data = {
static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index caba0400f8a2..4d1cc7adb2b3 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -183,9 +183,8 @@ unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
struct __prci_wrpll_data *pwd = pc->pwd;
@@ -193,9 +192,11 @@ long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
memcpy(&c, &pwd->c, sizeof(c));
- wrpll_configure_for_rate(&c, rate, *parent_rate);
+ wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate);
- return wrpll_calc_output_rate(&c, *parent_rate);
+ req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate);
+
+ return 0;
}
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index 91658a88af4e..d74b2bddd08a 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -291,8 +291,8 @@ void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
/* Linux clock framework integration */
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int sifive_clk_is_enabled(struct clk_hw *hw);
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index b028f25c658a..62eed964c3d0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -35,7 +35,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 1d82737befd3..a88c212bda12 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long mdiv;
- unsigned long refdiv;
- unsigned long reg;
+ u32 mdiv;
+ u32 refdiv;
+ u32 reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 9dcc1b2d2cc0..03a96139a576 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
- unsigned long bypass;
+ u32 bypass;
reg = readl(socfpgaclk->hw.reg);
bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
index 8b1367e3a95e..e14e802f28bf 100644
--- a/drivers/clk/sophgo/Kconfig
+++ b/drivers/clk/sophgo/Kconfig
@@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE
This clock IP depends on SG2042 Clock Generator because it uses
clock from Clock Generator IP as input.
This driver provides Gate function for RP.
+
+config CLK_SOPHGO_SG2044
+ tristate "Sophgo SG2044 clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller on the Sophgo SG2044
+ SoC. This controller requires multiple PLL clocks as input.
+ It provides PLL clocks and common clock functions for the
+ various IPs on the SoC.
+
+config CLK_SOPHGO_SG2044_PLL
+ tristate "Sophgo SG2044 PLL clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP_MMIO
+ help
+ This driver supports the PLL clock controller on the Sophgo
+ SG2044 SoC. This controller requires a 25 MHz oscillator as input
+ and provides the PLL clocks on the SoC.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
index 53506845a044..26b2fd121582 100644
--- a/drivers/clk/sophgo/Makefile
+++ b/drivers/clk/sophgo/Makefile
@@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
index e0c4dc347579..a4116ac1adcb 100644
--- a/drivers/clk/sophgo/clk-cv1800.c
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev)
static const struct of_device_id cv1800_clk_ids[] = {
{ .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc },
{ .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc },
{ .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
{ }
};
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c
index b186e64d4813..c2b58faf0938 100644
--- a/drivers/clk/sophgo/clk-cv18xx-ip.c
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.c
@@ -45,10 +45,12 @@ static unsigned long gate_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return *parent_rate;
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +65,7 @@ const struct clk_ops cv1800_clk_gate_ops = {
.is_enabled = gate_is_enabled,
.recalc_rate = gate_recalc_rate,
- .round_rate = gate_round_rate,
+ .determine_rate = gate_determine_rate,
.set_rate = gate_set_rate,
};
diff --git a/drivers/clk/sophgo/clk-sg2042-clkgen.c b/drivers/clk/sophgo/clk-sg2042-clkgen.c
index a334963e83ce..683661b71787 100644
--- a/drivers/clk/sophgo/clk-sg2042-clkgen.c
+++ b/drivers/clk/sophgo/clk-sg2042-clkgen.c
@@ -176,9 +176,8 @@ static unsigned long sg2042_clk_divider_recalc_rate(struct clk_hw *hw,
return ret_rate;
}
-static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int sg2042_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw);
unsigned long ret_rate;
@@ -192,15 +191,17 @@ static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= clk_div_mask(divider->width);
}
- ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ ret_rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
} else {
- ret_rate = divider_round_rate(hw, rate, prate, NULL,
+ ret_rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, NULL,
divider->width, divider->div_flags);
}
pr_debug("--> %s: divider_round_rate: val = %ld\n",
clk_hw_get_name(hw), ret_rate);
- return ret_rate;
+ req->rate = ret_rate;
+
+ return 0;
}
static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
@@ -258,13 +259,13 @@ static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
static const struct clk_ops sg2042_clk_divider_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
.set_rate = sg2042_clk_divider_set_rate,
};
static const struct clk_ops sg2042_clk_divider_ro_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
};
/*
@@ -968,7 +969,7 @@ static int sg2042_mux_notifier_cb(struct notifier_block *nb,
/*
* "1" is the array index of the second parent input source of
* mux. For SG2042, it's fpll for all mux clocks.
- * "0" is the array index of the frist parent input source of
+ * "0" is the array index of the first parent input source of
* mux, For SG2042, it's mpll.
* FIXME, any good idea to avoid magic number?
*/
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index ff9deeef509b..110b6ee06fe4 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -153,9 +153,9 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value,
sg2042_pll_ctrl_decode(reg_value, &ctrl_table);
- numerator = parent_rate * ctrl_table.fbdiv;
+ numerator = (u64)parent_rate * ctrl_table.fbdiv;
denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2;
- do_div(numerator, denominator);
+ numerator = div64_u64(numerator, denominator);
return numerator;
}
@@ -212,7 +212,7 @@ static int sg2042_pll_get_postdiv_1_2(unsigned long rate,
tmp0 *= fbdiv;
/* ((prate/REFDIV) x FBDIV)/rate and result save to tmp0 */
- do_div(tmp0, rate);
+ tmp0 = div64_ul(tmp0, rate);
/* tmp0 is POSTDIV1*POSTDIV2, now we calculate div1 and div2 value */
if (tmp0 <= 7) {
@@ -346,37 +346,30 @@ static unsigned long sg2042_clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long sg2042_clk_pll_round_rate(struct clk_hw *hw,
- unsigned long req_rate,
- unsigned long *prate)
+static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_pll_ctrl pctrl_table;
unsigned int value;
long proper_rate;
int ret;
- ret = sg2042_get_pll_ctl_setting(&pctrl_table, req_rate, *prate);
+ ret = sg2042_get_pll_ctl_setting(&pctrl_table,
+ min(req->rate, req->max_rate),
+ req->best_parent_rate);
if (ret) {
proper_rate = 0;
goto out;
}
value = sg2042_pll_ctrl_encode(&pctrl_table);
- proper_rate = (long)sg2042_pll_recalc_rate(value, *prate);
+ proper_rate = (long)sg2042_pll_recalc_rate(value, req->best_parent_rate);
out:
- pr_debug("--> %s: pll_round_rate: val = %ld\n",
+ pr_debug("--> %s: pll_determine_rate: val = %ld\n",
clk_hw_get_name(hw), proper_rate);
- return proper_rate;
-}
+ req->rate = proper_rate;
-static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- req->rate = sg2042_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
- &req->best_parent_rate);
- pr_debug("--> %s: pll_determine_rate: val = %ld\n",
- clk_hw_get_name(hw), req->rate);
return 0;
}
@@ -417,14 +410,13 @@ out:
static const struct clk_ops sg2042_clk_pll_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
.determine_rate = sg2042_clk_pll_determine_rate,
.set_rate = sg2042_clk_pll_set_rate,
};
static const struct clk_ops sg2042_clk_pll_ro_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
+ .determine_rate = sg2042_clk_pll_determine_rate,
};
/*
diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c
new file mode 100644
index 000000000000..94c0f519ba6d
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044-pll.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 PLL clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+/* Low Control part */
+#define PLL_VCOSEL_MASK GENMASK(17, 16)
+
+/* High Control part */
+#define PLL_FBDIV_MASK GENMASK(11, 0)
+#define PLL_REFDIV_MASK GENMASK(17, 12)
+#define PLL_POSTDIV1_MASK GENMASK(20, 18)
+#define PLL_POSTDIV2_MASK GENMASK(23, 21)
+
+#define PLL_CALIBRATE_EN BIT(24)
+#define PLL_CALIBRATE_MASK GENMASK(29, 27)
+#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2)
+#define PLL_UPDATE_EN BIT(30)
+
+#define PLL_HIGH_CTRL_MASK \
+ (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \
+ PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \
+ PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \
+ PLL_UPDATE_EN)
+
+#define PLL_HIGH_CTRL_OFFSET 4
+
+#define PLL_VCOSEL_1G6 0x2
+#define PLL_VCOSEL_2G4 0x3
+
+#define PLL_LIMIT_FOUTVCO 0
+#define PLL_LIMIT_FOUT 1
+#define PLL_LIMIT_REFDIV 2
+#define PLL_LIMIT_FBDIV 3
+#define PLL_LIMIT_POSTDIV1 4
+#define PLL_LIMIT_POSTDIV2 5
+
+#define for_each_pll_limit_range(_var, _limit) \
+ for (_var = (_limit)->min; _var <= (_limit)->max; _var++)
+
+struct sg2044_pll_limit {
+ u64 min;
+ u64 max;
+};
+
+struct sg2044_pll_internal {
+ u32 ctrl_offset;
+ u32 status_offset;
+ u32 enable_offset;
+
+ u8 status_lock_bit;
+ u8 status_updating_bit;
+ u8 enable_bit;
+
+ const struct sg2044_pll_limit *limits;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_pll {
+ struct sg2044_clk_common common;
+ struct sg2044_pll_internal pll;
+ unsigned int syscon_offset;
+};
+
+struct sg2044_pll_desc_data {
+ struct sg2044_clk_common * const *pll;
+ u16 num_pll;
+};
+
+#define SG2044_SYSCON_PLL_OFFSET 0x98
+
+struct sg2044_pll_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline bool sg2044_clk_fit_limit(u64 value,
+ const struct sg2044_pll_limit *limit)
+{
+ return value >= limit->min && value <= limit->max;
+}
+
+static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_pll, common);
+}
+
+static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv)
+{
+ u64 numerator = parent_rate * fbdiv;
+
+ return div64_ul(numerator, refdiv);
+}
+
+static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv,
+ unsigned long postdiv1,
+ unsigned long postdiv2)
+{
+ u64 numerator, denominator;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1);
+
+ return div64_u64(numerator, denominator);
+}
+
+static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET,
+ &value);
+ if (ret < 0)
+ return 0;
+
+ return sg2044_pll_calc_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+}
+
+static bool pll_is_better_rate(unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(target, now) < abs_diff(target, best);
+}
+
+static int sg2042_pll_compute_postdiv(const struct sg2044_pll_limit *limits,
+ unsigned long target,
+ unsigned long parent_rate,
+ unsigned int refdiv,
+ unsigned int fbdiv,
+ unsigned int *postdiv1,
+ unsigned int *postdiv2)
+{
+ unsigned int div1, div2;
+ unsigned long tmp, best_rate = 0;
+ unsigned int best_div1 = 0, best_div2 = 0;
+
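+ /*
+ * Scan every POSTDIV1/POSTDIV2 pair and keep the candidate rate
+ * closest to the target without exceeding it.
+ */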
+ for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) {
+ for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) {
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ div1, div2);
+
+ if (tmp > target)
+ continue;
+
+ if (pll_is_better_rate(target, tmp, best_rate)) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate = tmp;
+
+ if (tmp == target)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *postdiv1 = best_div1;
+ *postdiv2 = best_div2;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits,
+ unsigned long req_rate,
+ unsigned long parent_rate,
+ unsigned int *value)
+{
+ unsigned int refdiv, fbdiv, postdiv1, postdiv2;
+ unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2;
+ unsigned long tmp, best_rate = 0;
+ int ret;
+
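+ /*
+ * Exhaustive search: for each FBDIV/REFDIV pair whose VCO rate fits
+ * the FOUTVCO limit, pick the best post-dividers and keep the
+ * combination closest to the requested rate.
+ */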
+ for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) {
+ for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) {
+ u64 vco = sg2044_pll_calc_vco_rate(parent_rate,
+ refdiv, fbdiv);
+ if (!sg2044_clk_fit_limit(vco, &limits[PLL_LIMIT_FOUTVCO]))
+ continue;
+
+ ret = sg2042_pll_compute_postdiv(limits,
+ req_rate, parent_rate,
+ refdiv, fbdiv,
+ &postdiv1, &postdiv2);
+ if (ret)
+ continue;
+
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ postdiv1, postdiv2);
+
+ if (pll_is_better_rate(req_rate, tmp, best_rate)) {
+ best_refdiv = refdiv;
+ best_fbdiv = fbdiv;
+ best_postdiv1 = postdiv1;
+ best_postdiv2 = postdiv2;
+ best_rate = tmp;
+
+ if (tmp == req_rate)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) |
+ FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) |
+ FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) |
+ FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 target;
+ int ret;
+
+ target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min,
+ pll->pll.limits[PLL_LIMIT_FOUT].max);
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, target,
+ req->best_parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ req->rate = sg2044_pll_calc_rate(req->best_parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+
+ return 0;
+}
+
+static int sg2044_pll_poll_update(struct sg2044_pll *pll)
+{
+ int ret;
+ unsigned int value;
+
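+ /* Wait for the lock bit to assert, then for the updating bit to clear. */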
+ ret = regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (value & BIT(pll->pll.status_lock_bit)),
+ 1, 100000);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (!(value & BIT(pll->pll.status_updating_bit))),
+ 1, 100000);
+}
+
+static int sg2044_pll_enable(struct sg2044_pll *pll, bool en)
+{
+ if (en) {
+ if (sg2044_pll_poll_update(pll) < 0)
+ pr_warn("%s: fail to lock pll\n", clk_hw_get_name(&pll->common.hw));
+
+ return regmap_set_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+ }
+
+ return regmap_clear_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+}
+
+static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate)
+{
+ unsigned int sel;
+
+ if (rate < U64_C(2400000000))
+ sel = PLL_VCOSEL_1G6;
+ else
+ sel = PLL_VCOSEL_2G4;
+
+ return regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset,
+ PLL_VCOSEL_MASK,
+ FIELD_PREP(PLL_VCOSEL_MASK, sel));
+}
+
+static int sg2044_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 vco;
+ int ret;
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, rate,
+ parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ vco = sg2044_pll_calc_vco_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value));
+
+ value |= PLL_CALIBRATE_EN;
+ value |= PLL_CALIBRATE_DEFAULT;
+ value |= PLL_UPDATE_EN;
+
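+ /* guard() from <linux/cleanup.h> releases the lock on every return path */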
+ guard(spinlock_irqsave)(pll->common.lock);
+
+ ret = sg2044_pll_enable(pll, false);
+ if (ret)
+ return ret;
+
+ sg2044_pll_update_vcosel(pll, vco);
+
+ regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset +
+ PLL_HIGH_CTRL_OFFSET,
+ PLL_HIGH_CTRL_MASK, value);
+
+ sg2044_pll_enable(pll, true);
+
+ return ret;
+}
+
+static const struct clk_ops sg2044_pll_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+ .determine_rate = sg2044_pll_determine_rate,
+ .set_rate = sg2044_pll_set_rate,
+};
+
+static const struct clk_ops sg2044_pll_ro_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+};
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ro_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct sg2044_pll_limit pll_limits[] = {
+ [PLL_LIMIT_FOUTVCO] = {
+ .min = U64_C(1600000000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_FOUT] = {
+ .min = U64_C(25000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_REFDIV] = {
+ .min = U64_C(1),
+ .max = U64_C(63),
+ },
+ [PLL_LIMIT_FBDIV] = {
+ .min = U64_C(8),
+ .max = U64_C(1066),
+ },
+ [PLL_LIMIT_POSTDIV1] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+ [PLL_LIMIT_POSTDIV2] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+};
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x58, 0x00, 22, 6,
+ 0x04, 6, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x60, 0x00, 23, 7,
+ 0x04, 7, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x20, 0x08, 16, 0,
+ 0x0c, 0, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x68, 0x00, 24, 8,
+ 0x04, 8, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x70, 0x00, 25, 9,
+ 0x04, 9, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x78, 0x00, 26, 10,
+ 0x04, 10, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x80, 0x00, 27, 11,
+ 0x04, 11, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x88, 0x00, 28, 12,
+ 0x04, 12, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x90, 0x00, 29, 13,
+ 0x04, 13, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL,
+ 0x98, 0x00, 30, 14,
+ 0x04, 14, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL,
+ 0xa0, 0x00, 31, 15,
+ 0x04, 15, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x28, 0x00, 16, 0,
+ 0x04, 0, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x30, 0x00, 17, 1,
+ 0x04, 1, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x38, 0x00, 18, 2,
+ 0x04, 2, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x40, 0x00, 19, 3,
+ 0x04, 3, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x48, 0x00, 20, 4,
+ 0x04, 4, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x50, 0x00, 21, 5,
+ 0x04, 5, pll_limits);
+
+static struct sg2044_clk_common * const sg2044_pll_commons[] = {
+ &clk_fpll0.common,
+ &clk_fpll1.common,
+ &clk_fpll2.common,
+ &clk_dpll0.common,
+ &clk_dpll1.common,
+ &clk_dpll2.common,
+ &clk_dpll3.common,
+ &clk_dpll4.common,
+ &clk_dpll5.common,
+ &clk_dpll6.common,
+ &clk_dpll7.common,
+ &clk_mpll0.common,
+ &clk_mpll1.common,
+ &clk_mpll2.common,
+ &clk_mpll3.common,
+ &clk_mpll4.common,
+ &clk_mpll5.common,
+};
+
+static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap,
+ struct sg2044_pll_ctrl *ctrl,
+ const struct sg2044_pll_desc_data *desc)
+{
+ int ret, i;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_pll; i++) {
+ struct sg2044_clk_common *common = desc->pll[i];
+ struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw);
+
+ common->lock = &ctrl->lock;
+ common->regmap = regmap;
+ pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_pll_ctrl *ctrl;
+ const struct sg2044_pll_desc_data *desc;
+ struct regmap *regmap;
+
+ regmap = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+				     "failed to get the regmap for PLL\n");
+
+ desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data;
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = desc->num_pll;
+
+ return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc);
+}
+
+static const struct sg2044_pll_desc_data sg2044_pll_desc_data = {
+ .pll = sg2044_pll_commons,
+ .num_pll = ARRAY_SIZE(sg2044_pll_commons),
+};
+
+static const struct platform_device_id sg2044_pll_match[] = {
+ { .name = "sg2044-pll",
+ .driver_data = (unsigned long)&sg2044_pll_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sg2044_pll_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_pll_probe,
+ .driver = {
+ .name = "sg2044-pll",
+ },
+ .id_table = sg2044_pll_match,
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 PLL clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c
new file mode 100644
index 000000000000..f67f99c926b6
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-clk.h>
+
+#define DIV_ASSERT BIT(0)
+#define DIV_FACTOR_REG_SOURCE BIT(3)
+#define DIV_BRANCH_EN BIT(4)
+
+#define DIV_ASSERT_TIME 2
+
+struct sg2044_div_internal {
+ u32 offset;
+ u32 initval;
+ u8 shift;
+ u8 width;
+ u16 flags;
+};
+
+struct sg2044_mux_internal {
+ const u32 *table;
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_gate_internal {
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ void __iomem *base;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_div {
+ struct sg2044_clk_common common;
+ struct sg2044_div_internal div;
+};
+
+struct sg2044_mux {
+ struct sg2044_clk_common common;
+ struct sg2044_mux_internal mux;
+ struct notifier_block nb;
+ u8 saved_parent;
+};
+
+struct sg2044_gate {
+ struct sg2044_clk_common common;
+ struct sg2044_gate_internal gate;
+};
+
+struct sg2044_clk_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+struct sg2044_clk_desc_data {
+ struct sg2044_clk_common * const *pll;
+ struct sg2044_clk_common * const *div;
+ struct sg2044_clk_common * const *mux;
+ struct sg2044_clk_common * const *gate;
+ u16 num_pll;
+ u16 num_div;
+ u16 num_mux;
+ u16 num_gate;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_div, common);
+}
+
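+/*
+ * A divider only honours the factor programmed in its register once
+ * DIV_FACTOR_REG_SOURCE is set; until then the hardware uses a fixed
+ * default factor, mirrored by @initval (0 is treated as 1).
+ */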
+static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div)
+{
+ if ((reg & DIV_FACTOR_REG_SOURCE))
+ return (reg >> div->shift) & clk_div_mask(div->width);
+
+ return div->initval == 0 ? 1 : div->initval;
+}
+
+static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ unsigned long parent_rate)
+{
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_recalc_rate(&common->hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_recalc_rate(&div->common, &div->div,
+ parent_rate);
+}
+
+static int _sg2044_div_determine_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ struct clk_rate_request *req)
+{
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_ro_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags);
+}
+
+static int sg2044_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_determine_rate(&div->common, &div->div, req);
+}
+
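+/*
+ * Update a divider factor under reset: clearing DIV_ASSERT holds the
+ * divider in reset, the new factor is written together with
+ * DIV_FACTOR_REG_SOURCE so the register value takes effect, and setting
+ * DIV_ASSERT again releases the divider.
+ */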
+static void sg2044_div_set_reg_div(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ u32 value)
+{
+ void __iomem *addr = common->base + div->offset;
+ u32 reg;
+
+ reg = readl(addr);
+
+ /* assert */
+ reg &= ~DIV_ASSERT;
+ writel(reg, addr);
+
+ /* set value */
+ reg = readl(addr);
+ reg &= ~(clk_div_mask(div->width) << div->shift);
+ reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE;
+ writel(reg, addr);
+
+ /* de-assert */
+ reg |= DIV_ASSERT;
+ writel(reg, addr);
+}
+
+static int sg2044_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ u32 value;
+
+ value = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ sg2044_div_set_reg_div(&div->common, &div->div, value);
+
+ return 0;
+}
+
+static int sg2044_div_enable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value |= DIV_BRANCH_EN;
+ writel(value, addr);
+
+ return 0;
+}
+
+static void sg2044_div_disable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value &= ~DIV_BRANCH_EN;
+ writel(value, addr);
+}
+
+static int sg2044_div_is_enabled(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN;
+}
+
+static const struct clk_ops sg2044_gateable_div_ops = {
+ .enable = sg2044_div_enable,
+ .disable = sg2044_div_disable,
+ .is_enabled = sg2044_div_is_enabled,
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ro_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+};
+
+static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_mux, common);
+}
+
+static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb)
+{
+ return container_of(nb, struct sg2044_mux, nb);
+}
+
+static const u32 sg2044_mux_table[] = {0, 1};
+
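+/*
+ * Before a parent rate change, park the mux on parent 0 (the "fixed"
+ * branch fed from fpll0) so the downstream clock keeps running while the
+ * main PLL relocks, then restore the saved parent once the change is done.
+ */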
+static int sg2044_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct sg2044_mux *mux = nb_to_sg2044_mux(nb);
+ const struct clk_ops *ops = &clk_mux_ops;
+ struct clk_notifier_data *ndata = data;
+ struct clk_hw *hw = __clk_get_hw(ndata->clk);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->saved_parent = ops->get_parent(hw);
+ if (mux->saved_parent)
+ ret = ops->set_parent(hw, 0);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ops->set_parent(hw, mux->saved_parent);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_gate, common);
+}
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_gateable_div_ops,\
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ro_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\
+ }, \
+ }
+
+#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \
+ _mux_offset, _mux_shift, \
+ _mux_table, _mux_flags) \
+ struct sg2044_mux _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &clk_mux_ops, (_flags)),\
+ .mux = { \
+ .table = (_mux_table), \
+ .offset = (_mux_offset), \
+ .shift = (_mux_shift), \
+ .flags = (_mux_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \
+ _gate_offset, _gate_shift, _gate_flags) \
+ struct sg2044_gate _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &clk_gate_ops, (_flags)),\
+ .gate = { \
+ .offset = (_gate_offset), \
+ .shift = (_gate_shift), \
+ .flags = (_gate_flags), \
+ }, \
+ }
+
+static const struct clk_parent_data clk_fpll0_parent[] = {
+ { .fw_name = "fpll0" },
+};
+
+static const struct clk_parent_data clk_fpll1_parent[] = {
+ { .fw_name = "fpll1" },
+};
+
+static const struct clk_parent_data clk_fpll2_parent[] = {
+ { .fw_name = "fpll2" },
+};
+
+static const struct clk_parent_data clk_dpll0_parent[] = {
+ { .fw_name = "dpll0" },
+};
+
+static const struct clk_parent_data clk_dpll1_parent[] = {
+ { .fw_name = "dpll1" },
+};
+
+static const struct clk_parent_data clk_dpll2_parent[] = {
+ { .fw_name = "dpll2" },
+};
+
+static const struct clk_parent_data clk_dpll3_parent[] = {
+ { .fw_name = "dpll3" },
+};
+
+static const struct clk_parent_data clk_dpll4_parent[] = {
+ { .fw_name = "dpll4" },
+};
+
+static const struct clk_parent_data clk_dpll5_parent[] = {
+ { .fw_name = "dpll5" },
+};
+
+static const struct clk_parent_data clk_dpll6_parent[] = {
+ { .fw_name = "dpll6" },
+};
+
+static const struct clk_parent_data clk_dpll7_parent[] = {
+ { .fw_name = "dpll7" },
+};
+
+static const struct clk_parent_data clk_mpll0_parent[] = {
+ { .fw_name = "mpll0" },
+};
+
+static const struct clk_parent_data clk_mpll1_parent[] = {
+ { .fw_name = "mpll1" },
+};
+
+static const struct clk_parent_data clk_mpll2_parent[] = {
+ { .fw_name = "mpll2" },
+};
+
+static const struct clk_parent_data clk_mpll3_parent[] = {
+ { .fw_name = "mpll3" },
+};
+
+static const struct clk_parent_data clk_mpll4_parent[] = {
+ { .fw_name = "mpll4" },
+};
+
+static const struct clk_parent_data clk_mpll5_parent[] = {
+ { .fw_name = "mpll5" },
+};
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x044, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main,
+ clk_mpll0_parent, 0,
+ 0x040, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x050, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main,
+ clk_mpll1_parent, 0,
+ 0x04c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x058, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main,
+ clk_mpll2_parent, 0,
+ 0x054, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x070, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main,
+ clk_mpll3_parent, 0,
+ 0x06c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed,
+ clk_fpll0_parent, 0,
+ 0x078, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main,
+ clk_mpll4_parent, 0,
+ 0x074, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed,
+ clk_fpll0_parent, 0,
+ 0x080, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 3);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main,
+ clk_mpll5_parent, 0,
+ 0x07c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed,
+ clk_fpll0_parent, 0,
+ 0x088, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main,
+ clk_fpll1_parent, 0,
+ 0x084, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
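+/*
+ * The DDR dividers (and the DDR muxes below) are read-only: the memory
+ * clock tree is presumably set up by firmware and is only reported here.
+ */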
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed,
+ clk_fpll0_parent, 0,
+ 0x124, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main,
+ clk_dpll0_parent, 0,
+ 0x120, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed,
+ clk_fpll0_parent, 0,
+ 0x12c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main,
+ clk_dpll1_parent, 0,
+ 0x128, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed,
+ clk_fpll0_parent, 0,
+ 0x134, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main,
+ clk_dpll2_parent, 0,
+ 0x130, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed,
+ clk_fpll0_parent, 0,
+ 0x13c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main,
+ clk_dpll3_parent, 0,
+ 0x138, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed,
+ clk_fpll0_parent, 0,
+ 0x144, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main,
+ clk_dpll4_parent, 0,
+ 0x140, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed,
+ clk_fpll0_parent, 0,
+ 0x14c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main,
+ clk_dpll5_parent, 0,
+ 0x148, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed,
+ clk_fpll0_parent, 0,
+ 0x154, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main,
+ clk_dpll6_parent, 0,
+ 0x150, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed,
+ clk_fpll0_parent, 0,
+ 0x15c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main,
+ clk_dpll7_parent, 0,
+ 0x158, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
+ clk_fpll0_parent, 0,
+ 0x048, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static const struct clk_hw *clk_div_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0,
+ clk_fpll0_parent, 0,
+ 0x118, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 20);
+
+static const struct clk_hw *clk_div_top_axi0_parent[] = {
+ &clk_div_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi,
+ clk_fpll0_parent, 0,
+ 0x11c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 8);
+
+static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = {
+ &clk_div_top_axi_hsperi.common.hw,
+};
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0,
+ clk_div_top_50m_parent, 0,
+ 0x0d0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1,
+ clk_div_top_50m_parent, 0,
+ 0x0d4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2,
+ clk_div_top_50m_parent, 0,
+ 0x0d8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3,
+ clk_div_top_50m_parent, 0,
+ 0x0dc, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4,
+ clk_div_top_50m_parent, 0,
+ 0x0e0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5,
+ clk_div_top_50m_parent, 0,
+ 0x0e4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6,
+ clk_div_top_50m_parent, 0,
+ 0x0e8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7,
+ clk_div_top_50m_parent, 0,
+ 0x0ec, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy,
+ clk_fpll0_parent, 0,
+ 0x064, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy,
+ clk_fpll2_parent, 0,
+ 0x068, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy,
+ clk_fpll0_parent, 0,
+ 0x05c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy,
+ clk_fpll0_parent, 0,
+ 0x060, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g,
+ clk_fpll1_parent, 0,
+ 0x160, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m,
+ clk_fpll0_parent, 0,
+ 0x0cc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 4);
+
+static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db,
+ clk_div_top_axi0_parent, 0,
+ 0x0f8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd,
+ clk_fpll0_parent, 0,
+ 0x110, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x114, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc,
+ clk_fpll0_parent, 0,
+ 0x108, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x10c, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse,
+ clk_fpll0_parent, 0,
+ 0x0f4, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0,
+ clk_fpll0_parent, 0,
+ 0x0fc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 16);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0,
+ clk_fpll0_parent, 0,
+ 0x100, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0,
+ clk_fpll0_parent, 0,
+ 0x104, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka,
+ clk_fpll0_parent, 0,
+ 0x0f0, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
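+/*
+ * Each subsystem mux below selects between a "fixed" divider branch fed
+ * from fpll0 and a "main" branch fed from a tunable MPLL/DPLL; the rate
+ * notifier above uses the fixed branch as a safe parent during changes.
+ */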
+static const struct clk_parent_data clk_mux_ddr0_parents[] = {
+ { .hw = &clk_div_ddr0_fixed.common.hw },
+ { .hw = &clk_div_ddr0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0,
+ clk_mux_ddr0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr1_parents[] = {
+ { .hw = &clk_div_ddr1_fixed.common.hw },
+ { .hw = &clk_div_ddr1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1,
+ clk_mux_ddr1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr2_parents[] = {
+ { .hw = &clk_div_ddr2_fixed.common.hw },
+ { .hw = &clk_div_ddr2_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2,
+ clk_mux_ddr2_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr3_parents[] = {
+ { .hw = &clk_div_ddr3_fixed.common.hw },
+ { .hw = &clk_div_ddr3_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3,
+ clk_mux_ddr3_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr4_parents[] = {
+ { .hw = &clk_div_ddr4_fixed.common.hw },
+ { .hw = &clk_div_ddr4_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4,
+ clk_mux_ddr4_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr5_parents[] = {
+ { .hw = &clk_div_ddr5_fixed.common.hw },
+ { .hw = &clk_div_ddr5_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5,
+ clk_mux_ddr5_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr6_parents[] = {
+ { .hw = &clk_div_ddr6_fixed.common.hw },
+ { .hw = &clk_div_ddr6_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6,
+ clk_mux_ddr6_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr7_parents[] = {
+ { .hw = &clk_div_ddr7_fixed.common.hw },
+ { .hw = &clk_div_ddr7_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7,
+ clk_mux_ddr7_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_noc_sys_parents[] = {
+ { .hw = &clk_div_noc_sys_fixed.common.hw },
+ { .hw = &clk_div_noc_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys,
+ clk_mux_noc_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 3, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_tpu_sys_parents[] = {
+ { .hw = &clk_div_tpu_sys_fixed.common.hw },
+ { .hw = &clk_div_tpu_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys,
+ clk_mux_tpu_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 2, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_rp_sys_parents[] = {
+ { .hw = &clk_div_rp_sys_fixed.common.hw },
+ { .hw = &clk_div_rp_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys,
+ clk_mux_rp_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 1, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_ap_sys_parents[] = {
+ { .hw = &clk_div_ap_sys_fixed.common.hw },
+ { .hw = &clk_div_ap_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys,
+ clk_mux_ap_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 0, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src0_parents[] = {
+ { .hw = &clk_div_vc_src0_fixed.common.hw },
+ { .hw = &clk_div_vc_src0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0,
+ clk_mux_vc_src0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 4, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src1_parents[] = {
+ { .hw = &clk_div_vc_src1_fixed.common.hw },
+ { .hw = &clk_div_vc_src1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1,
+ clk_mux_vc_src1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 5, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_cxp_mac_parents[] = {
+ { .hw = &clk_div_cxp_mac_fixed.common.hw },
+ { .hw = &clk_div_cxp_mac_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac,
+ clk_mux_cxp_mac_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 6, sg2044_mux_table, 0);
+
+static const struct clk_hw *clk_gate_ap_sys_parent[] = {
+ &clk_mux_ap_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys,
+ clk_gate_ap_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 0, 0);
+
+static const struct clk_hw *clk_gate_rp_sys_parent[] = {
+ &clk_mux_rp_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys,
+ clk_gate_rp_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 2, 0);
+
+static const struct clk_hw *clk_gate_tpu_sys_parent[] = {
+ &clk_mux_tpu_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys,
+ clk_gate_tpu_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 3, 0);
+
+static const struct clk_hw *clk_gate_noc_sys_parent[] = {
+ &clk_mux_noc_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys,
+ clk_gate_noc_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 8, 0);
+
+static const struct clk_hw *clk_gate_vc_src0_parent[] = {
+ &clk_mux_vc_src0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0,
+ clk_gate_vc_src0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 9, 0);
+
+static const struct clk_hw *clk_gate_vc_src1_parent[] = {
+ &clk_mux_vc_src1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1,
+ clk_gate_vc_src1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr0_parent[] = {
+ &clk_mux_ddr0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0,
+ clk_gate_ddr0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 7, 0);
+
+static const struct clk_hw *clk_gate_ddr1_parent[] = {
+ &clk_mux_ddr1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1,
+ clk_gate_ddr1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 8, 0);
+
+static const struct clk_hw *clk_gate_ddr2_parent[] = {
+ &clk_mux_ddr2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2,
+ clk_gate_ddr2_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 9, 0);
+
+static const struct clk_hw *clk_gate_ddr3_parent[] = {
+ &clk_mux_ddr3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3,
+ clk_gate_ddr3_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr4_parent[] = {
+ &clk_mux_ddr4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4,
+ clk_gate_ddr4_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 11, 0);
+
+static const struct clk_hw *clk_gate_ddr5_parent[] = {
+ &clk_mux_ddr5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5,
+ clk_gate_ddr5_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 12, 0);
+
+static const struct clk_hw *clk_gate_ddr6_parent[] = {
+ &clk_mux_ddr6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6,
+ clk_gate_ddr6_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 13, 0);
+
+static const struct clk_hw *clk_gate_ddr7_parent[] = {
+ &clk_mux_ddr7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7,
+ clk_gate_ddr7_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 14, 0);
+
+static const struct clk_hw *clk_gate_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m,
+ clk_gate_top_50m_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 1, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx,
+ clk_gate_sc_rx_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 12, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1,
+ clk_gate_sc_rx_x0y1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 13, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 5, 0);
+
+static const struct clk_hw *clk_gate_mailbox_intc_parent[] = {
+ &clk_gate_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 16, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 19, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 7, 0);
+
+static const struct clk_hw *clk_gate_timer0_parent[] = {
+ &clk_div_timer0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0,
+ clk_gate_timer0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 8, 0);
+
+static const struct clk_hw *clk_gate_timer1_parent[] = {
+ &clk_div_timer1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1,
+ clk_gate_timer1_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 9, 0);
+
+static const struct clk_hw *clk_gate_timer2_parent[] = {
+ &clk_div_timer2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2,
+ clk_gate_timer2_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 10, 0);
+
+static const struct clk_hw *clk_gate_timer3_parent[] = {
+ &clk_div_timer3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3,
+ clk_gate_timer3_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 11, 0);
+
+static const struct clk_hw *clk_gate_timer4_parent[] = {
+ &clk_div_timer4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4,
+ clk_gate_timer4_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 12, 0);
+
+static const struct clk_hw *clk_gate_timer5_parent[] = {
+ &clk_div_timer5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5,
+ clk_gate_timer5_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 13, 0);
+
+static const struct clk_hw *clk_gate_timer6_parent[] = {
+ &clk_div_timer6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6,
+ clk_gate_timer6_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 14, 0);
+
+static const struct clk_hw *clk_gate_timer7_parent[] = {
+ &clk_div_timer7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7,
+ clk_gate_timer7_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 15, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 15, 0);
+
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = {
+ &clk_mux_cxp_mac.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac,
+ clk_gate_cxp_mac_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 14, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = {
+ &clk_div_cxp_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy,
+ clk_gate_cxp_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 6, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = {
+ &clk_div_cxp_test_eth_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy,
+ clk_gate_cxp_test_eth_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 7, 0);
+
+static const struct clk_hw *clk_gate_pcie_1g_parent[] = {
+ &clk_div_pcie_1g.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g,
+ clk_gate_pcie_1g_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 15, 0);
+
+static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = {
+ &clk_div_c2c0_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy,
+ clk_gate_c2c0_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 4, 0);
+
+static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = {
+ &clk_div_c2c1_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy,
+ clk_gate_c2c1_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 5, 0);
+
+static const struct clk_hw *clk_gate_uart_500m_parent[] = {
+ &clk_div_uart_500m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m,
+ clk_gate_uart_500m_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 1, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 2, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 5, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 3, 0);
+
+static const struct clk_hw *clk_gate_gpio_db_parent[] = {
+ &clk_div_gpio_db.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db,
+ clk_gate_gpio_db_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 19, 0);
+
+static const struct clk_hw *clk_gate_sd_parent[] = {
+ &clk_div_sd.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd,
+ clk_gate_sd_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 3, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 2, 0);
+
+static const struct clk_hw *clk_gate_sd_100k_parent[] = {
+ &clk_div_sd_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k,
+ clk_gate_sd_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 4, 0);
+
+static const struct clk_hw *clk_gate_emmc_parent[] = {
+ &clk_div_emmc.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc,
+ clk_gate_emmc_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 0, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 31, 0);
+
+static const struct clk_hw *clk_gate_emmc_100k_parent[] = {
+ &clk_div_emmc_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k,
+ clk_gate_emmc_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 1, 0);
+
+static const struct clk_hw *clk_gate_efuse_parent[] = {
+ &clk_div_efuse.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse,
+ clk_gate_efuse_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 0, 0);
+
+static const struct clk_hw *clk_gate_tx_eth0_parent[] = {
+ &clk_div_tx_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0,
+ clk_gate_tx_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 27, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 28, 0);
+
+static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = {
+ &clk_div_ptp_ref_i_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0,
+ clk_gate_ptp_ref_i_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 29, 0);
+
+static const struct clk_hw *clk_gate_ref_eth0_parent[] = {
+ &clk_div_ref_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0,
+ clk_gate_ref_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 30, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 26, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 25, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 24, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 4, 0);
+
+static const struct clk_hw *clk_gate_pka_parent[] = {
+ &clk_div_pka.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka,
+ clk_gate_pka_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 16, 0);
+
+static struct sg2044_clk_common * const sg2044_div_commons[] = {
+ &clk_div_ap_sys_fixed.common,
+ &clk_div_ap_sys_main.common,
+ &clk_div_rp_sys_fixed.common,
+ &clk_div_rp_sys_main.common,
+ &clk_div_tpu_sys_fixed.common,
+ &clk_div_tpu_sys_main.common,
+ &clk_div_noc_sys_fixed.common,
+ &clk_div_noc_sys_main.common,
+ &clk_div_vc_src0_fixed.common,
+ &clk_div_vc_src0_main.common,
+ &clk_div_vc_src1_fixed.common,
+ &clk_div_vc_src1_main.common,
+ &clk_div_cxp_mac_fixed.common,
+ &clk_div_cxp_mac_main.common,
+ &clk_div_ddr0_fixed.common,
+ &clk_div_ddr0_main.common,
+ &clk_div_ddr1_fixed.common,
+ &clk_div_ddr1_main.common,
+ &clk_div_ddr2_fixed.common,
+ &clk_div_ddr2_main.common,
+ &clk_div_ddr3_fixed.common,
+ &clk_div_ddr3_main.common,
+ &clk_div_ddr4_fixed.common,
+ &clk_div_ddr4_main.common,
+ &clk_div_ddr5_fixed.common,
+ &clk_div_ddr5_main.common,
+ &clk_div_ddr6_fixed.common,
+ &clk_div_ddr6_main.common,
+ &clk_div_ddr7_fixed.common,
+ &clk_div_ddr7_main.common,
+ &clk_div_top_50m.common,
+ &clk_div_top_axi0.common,
+ &clk_div_top_axi_hsperi.common,
+ &clk_div_timer0.common,
+ &clk_div_timer1.common,
+ &clk_div_timer2.common,
+ &clk_div_timer3.common,
+ &clk_div_timer4.common,
+ &clk_div_timer5.common,
+ &clk_div_timer6.common,
+ &clk_div_timer7.common,
+ &clk_div_cxp_test_phy.common,
+ &clk_div_cxp_test_eth_phy.common,
+ &clk_div_c2c0_test_phy.common,
+ &clk_div_c2c1_test_phy.common,
+ &clk_div_pcie_1g.common,
+ &clk_div_uart_500m.common,
+ &clk_div_gpio_db.common,
+ &clk_div_sd.common,
+ &clk_div_sd_100k.common,
+ &clk_div_emmc.common,
+ &clk_div_emmc_100k.common,
+ &clk_div_efuse.common,
+ &clk_div_tx_eth0.common,
+ &clk_div_ptp_ref_i_eth0.common,
+ &clk_div_ref_eth0.common,
+ &clk_div_pka.common,
+};
+
+static struct sg2044_clk_common * const sg2044_mux_commons[] = {
+ &clk_mux_ddr0.common,
+ &clk_mux_ddr1.common,
+ &clk_mux_ddr2.common,
+ &clk_mux_ddr3.common,
+ &clk_mux_ddr4.common,
+ &clk_mux_ddr5.common,
+ &clk_mux_ddr6.common,
+ &clk_mux_ddr7.common,
+ &clk_mux_noc_sys.common,
+ &clk_mux_tpu_sys.common,
+ &clk_mux_rp_sys.common,
+ &clk_mux_ap_sys.common,
+ &clk_mux_vc_src0.common,
+ &clk_mux_vc_src1.common,
+ &clk_mux_cxp_mac.common,
+};
+
+static struct sg2044_clk_common * const sg2044_gate_commons[] = {
+ &clk_gate_ap_sys.common,
+ &clk_gate_rp_sys.common,
+ &clk_gate_tpu_sys.common,
+ &clk_gate_noc_sys.common,
+ &clk_gate_vc_src0.common,
+ &clk_gate_vc_src1.common,
+ &clk_gate_ddr0.common,
+ &clk_gate_ddr1.common,
+ &clk_gate_ddr2.common,
+ &clk_gate_ddr3.common,
+ &clk_gate_ddr4.common,
+ &clk_gate_ddr5.common,
+ &clk_gate_ddr6.common,
+ &clk_gate_ddr7.common,
+ &clk_gate_top_50m.common,
+ &clk_gate_sc_rx.common,
+ &clk_gate_sc_rx_x0y1.common,
+ &clk_gate_top_axi0.common,
+ &clk_gate_intc0.common,
+ &clk_gate_intc1.common,
+ &clk_gate_intc2.common,
+ &clk_gate_intc3.common,
+ &clk_gate_mailbox0.common,
+ &clk_gate_mailbox1.common,
+ &clk_gate_mailbox2.common,
+ &clk_gate_mailbox3.common,
+ &clk_gate_top_axi_hsperi.common,
+ &clk_gate_apb_timer.common,
+ &clk_gate_timer0.common,
+ &clk_gate_timer1.common,
+ &clk_gate_timer2.common,
+ &clk_gate_timer3.common,
+ &clk_gate_timer4.common,
+ &clk_gate_timer5.common,
+ &clk_gate_timer6.common,
+ &clk_gate_timer7.common,
+ &clk_gate_cxp_cfg.common,
+ &clk_gate_cxp_mac.common,
+ &clk_gate_cxp_test_phy.common,
+ &clk_gate_cxp_test_eth_phy.common,
+ &clk_gate_pcie_1g.common,
+ &clk_gate_c2c0_test_phy.common,
+ &clk_gate_c2c1_test_phy.common,
+ &clk_gate_uart_500m.common,
+ &clk_gate_apb_uart.common,
+ &clk_gate_apb_spi.common,
+ &clk_gate_ahb_spifmc.common,
+ &clk_gate_apb_i2c.common,
+ &clk_gate_axi_dbg_i2c.common,
+ &clk_gate_gpio_db.common,
+ &clk_gate_apb_gpio_intr.common,
+ &clk_gate_apb_gpio.common,
+ &clk_gate_sd.common,
+ &clk_gate_axi_sd.common,
+ &clk_gate_sd_100k.common,
+ &clk_gate_emmc.common,
+ &clk_gate_axi_emmc.common,
+ &clk_gate_emmc_100k.common,
+ &clk_gate_efuse.common,
+ &clk_gate_apb_efuse.common,
+ &clk_gate_sysdma_axi.common,
+ &clk_gate_tx_eth0.common,
+ &clk_gate_axi_eth0.common,
+ &clk_gate_ptp_ref_i_eth0.common,
+ &clk_gate_ref_eth0.common,
+ &clk_gate_apb_rtc.common,
+ &clk_gate_apb_pwm.common,
+ &clk_gate_apb_wdt.common,
+ &clk_gate_axi_sram.common,
+ &clk_gate_ahb_rom.common,
+ &clk_gate_pka.common,
+};
+
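+/*
+ * Gate definitions reference their parents through the statically defined
+ * clk_hw structs, but a mux is registered through a generic helper that
+ * allocates its own clk_hw; rewrite each parent pointer to the hw that was
+ * actually registered under the same clock id.
+ */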
+static void sg2044_clk_fix_init_parent(struct clk_hw **pdata,
+ const struct clk_init_data *init,
+ struct clk_hw_onecell_data *data)
+{
+ u8 i;
+ const struct clk_hw *hw;
+ const struct sg2044_clk_common *common;
+
+ for (i = 0; i < init->num_parents; i++) {
+ hw = init->parent_hws[i];
+ common = hw_to_sg2044_clk_common(hw);
+
+		WARN(!data->hws[common->id], "clk %u is not registered\n",
+ common->id);
+ pdata[i] = data->hws[common->id];
+ }
+}
+
+static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct sg2044_clk_ctrl *ctrl,
+ const struct sg2044_clk_desc_data *desc)
+{
+ int ret, i;
+ struct clk_hw *hw;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_div; i++) {
+ struct sg2044_clk_common *common = desc->div[i];
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ for (i = 0; i < desc->num_mux; i++) {
+ struct sg2044_clk_common *common = desc->mux[i];
+ struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ init->name,
+ init->parent_data,
+ init->num_parents,
+ init->flags,
+ reg + mux->mux.offset,
+ mux->mux.shift,
+ 1,
+ mux->mux.flags,
+ mux->mux.table,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) {
+ mux->nb.notifier_call = sg2044_mux_notifier_cb;
+ ret = devm_clk_notifier_register(dev, hw->clk,
+ &mux->nb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "%s: failed to register notifier\n",
+ clk_hw_get_name(hw));
+ }
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ for (i = 0; i < desc->num_gate; i++) {
+ struct sg2044_clk_common *common = desc->gate[i];
+ struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+ struct clk_hw *parent_hws[1] = { };
+
+ sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data);
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_gate_parent_hw(dev, init->name,
+ parent_hws[0],
+ init->flags,
+ reg + gate->gate.offset,
+ gate->gate.shift,
+ gate->gate.flags,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_clk_ctrl *ctrl;
+ const struct sg2044_clk_desc_data *desc;
+ void __iomem *reg;
+ u32 num_clks;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ num_clks = desc->num_div + desc->num_gate + desc->num_mux;
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = num_clks;
+
+ return sg2044_clk_init_ctrl(dev, reg, ctrl, desc);
+}
+
+static const struct sg2044_clk_desc_data sg2044_clk_desc_data = {
+ .div = sg2044_div_commons,
+ .mux = sg2044_mux_commons,
+ .gate = sg2044_gate_commons,
+ .num_div = ARRAY_SIZE(sg2044_div_commons),
+ .num_mux = ARRAY_SIZE(sg2044_mux_commons),
+ .num_gate = ARRAY_SIZE(sg2044_gate_commons),
+};
+
+static const struct of_device_id sg2044_clk_match[] = {
+ { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_clk_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_clk_probe,
+ .driver = {
+ .name = "sg2044-clk",
+ .of_match_table = sg2044_clk_match,
+ },
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
new file mode 100644
index 000000000000..3854f6ae6d0e
--- /dev/null
+++ b/drivers/clk/spacemit/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SPACEMIT_CCU
+ tristate "Clock support for SpacemiT SoCs"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select AUXILIARY_BUS
+ select MFD_SYSCON
+ help
+	  Say Y or M to enable clock controller unit support for SpacemiT SoCs.
+
+if SPACEMIT_CCU
+
+config SPACEMIT_K1_CCU
+ tristate "Support for SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+	  Support for the clock controller unit in the SpacemiT K1 SoC.
+
+endif
diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile
new file mode 100644
index 000000000000..5ec6da61db98
--- /dev/null
+++ b/drivers/clk/spacemit/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPACEMIT_K1_CCU) += spacemit-ccu-k1.o
+spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o
+spacemit-ccu-k1-y += ccu-k1.o
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
new file mode 100644
index 000000000000..f5a9fe6ba185
--- /dev/null
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/spacemit/k1-syscon.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+#include "ccu_mix.h"
+#include "ccu_ddn.h"
+
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+struct spacemit_ccu_data {
+ const char *reset_name;
+ struct clk_hw **hws;
+ size_t num;
+};
+
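+/*
+ * @reset_name, when set, presumably names an auxiliary device exposing the
+ * block's reset lines; the IDA below hands out unique auxiliary device ids.
+ */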
+static DEFINE_IDA(auxiliary_ids);
+
+/* APBS clocks start. The APBS region contains all PLL clocks and nothing else. */
+
+/*
+ * PLL{1,2} must run at fixed frequencies to provide clocks at the correct
+ * rates for peripherals.
+ */
+static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = {
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+};
+
+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+};
+
+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = {
+ CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab),
+ CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000),
+ CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab),
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+ CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab),
+};
+
+CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK,
+ CLK_SET_RATE_GATE);
+
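+/*
+ * pllX_dN clocks are gateable fixed-factor taps of pllX: each SWCR2 bit
+ * gates one tap, and the last two macro arguments are presumably the
+ * divider and multiplier of the fixed ratio.
+ */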
+CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_FLAGS_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1,
+ CLK_IS_CRITICAL);
+CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1);
+CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1);
+CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1);
+
+/* APBS clocks end */
+
+/* MPMU clocks start */
+CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0);
+
+CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1);
+
+CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1);
+
+CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3);
+CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1);
+
+CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1);
+CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1);
+CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1);
+
+CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2);
+
+CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
+
+CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
+
+CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 2, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 2, 0);
+
+CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
+
+CCU_FACTOR_DEFINE(i2s_153p6, CCU_PARENT_HW(pll1_d8_307p2), 2, 1);
+
+static const struct clk_parent_data i2s_153p6_base_parents[] = {
+ CCU_PARENT_HW(i2s_153p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DEFINE(i2s_153p6_base, i2s_153p6_base_parents, MPMU_FCCR, 29, 1, 0);
+
+static const struct clk_parent_data i2s_sysclk_src_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(i2s_153p6_base)
+};
+CCU_MUX_GATE_DEFINE(i2s_sysclk_src, i2s_sysclk_src_parents, MPMU_ISCCR, 30, 1, BIT(31), 0);
+
+CCU_DDN_DEFINE(i2s_sysclk, i2s_sysclk_src, MPMU_ISCCR, 0, 15, 15, 12, 1, 0);
+
+CCU_FACTOR_DEFINE(i2s_bclk_factor, CCU_PARENT_HW(i2s_sysclk), 2, 1);
+/*
+ * The divider of i2s_bclk always implies an additional 1/2 factor, which
+ * is modelled by i2s_bclk_factor.
+ */
+CCU_DIV_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_bclk_factor), MPMU_ISCCR, 27, 2, BIT(29), 0);
+
+static const struct clk_parent_data apb_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0);
+
+CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0);
+
+CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, 0x1, 0);
+/* MPMU clocks end */
+
+/* APBC clocks start */
+static const struct clk_parent_data uart_clk_parents[] = {
+ CCU_PARENT_HW(pll1_m3d128_57p6),
+ CCU_PARENT_HW(slow_uart1_14p74),
+ CCU_PARENT_HW(slow_uart2_48),
+};
+CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+};
+CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data ssp_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+};
+CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST,
+ BIT(7) | BIT(1), 0);
+
+static const struct clk_parent_data twsi_parents[] = {
+ CCU_PARENT_HW(pll1_d78_31p5),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d40_61p44),
+};
+CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0);
+/*
+ * APBC_TWSI8_CLK_RST has a quirk: reads always return zero. Combine the
+ * functional and bus bits into a single gate to avoid sharing the
+ * write-only register between different clocks.
+ */
+CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_NAME(vctcxo_3m),
+ CCU_PARENT_NAME(vctcxo_1m),
+};
+CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0);
+
+CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
+
+/*
+ * When i2s_bclk is selected as the parent clock of the SSPA, the
+ * hardware requires bit 3 to be set.
+ */
+CCU_GATE_DEFINE(sspa0_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA0_CLK_RST, BIT(3), 0);
+CCU_GATE_DEFINE(sspa1_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA1_CLK_RST, BIT(3), 0);
+
+static const struct clk_parent_data sspa0_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(sspa0_i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa0_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data sspa1_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(sspa1_i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa1_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data can_parents[] = {
+ CCU_PARENT_HW(pll3_20),
+ CCU_PARENT_HW(pll3_40),
+ CCU_PARENT_HW(pll3_80),
+};
+CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0);
+/* Placeholder to work around the quirk of the register */
+CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1);
+
+CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(0), 0);
+/* APBC clocks end */
+
+/* APMU clocks start */
+static const struct clk_parent_data pmua_aclk_parents[] = {
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0);
+
+static const struct clk_parent_data cci550_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c0_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c0_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c0_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c1_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c1_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c1_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data jpg_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3,
+ BIT(1), 0);
+
+static const struct clk_parent_data ccic2phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+static const struct clk_parent_data ccic3phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0);
+
+static const struct clk_parent_data csi_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15),
+ 16, 3, BIT(4), 0);
+
+static const struct clk_parent_data camm_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_NAME(vctcxo_24m),
+};
+CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(28), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(6), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(3), 0);
+
+static const struct clk_parent_data isp_cpp_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1,
+ BIT(28), 0);
+static const struct clk_parent_data isp_bus_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23),
+ 21, 2, BIT(17), 0);
+static const struct clk_parent_data isp_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2,
+ BIT(1), 0);
+
+static const struct clk_parent_data dpumclk_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0);
+
+static const struct clk_parent_data dpuesc_parents[] = {
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d32_76p8),
+};
+CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0);
+
+static const struct clk_parent_data dpubit_parents[] = {
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31),
+ 20, 3, BIT(16), 0);
+
+static const struct clk_parent_data dpupx_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0);
+
+CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1,
+ BIT(5), 0);
+
+static const struct clk_parent_data dpu_spi_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3,
+ BIT(7), 12, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0);
+
+static const struct clk_parent_data v2d_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d4_614p4),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2,
+ BIT(8), 0);
+
+static const struct clk_parent_data ccic_4x_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3,
+ BIT(15), 23, 2, BIT(4), 0);
+
+static const struct clk_parent_data ccic1phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0);
+static const struct clk_parent_data sdh01_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+static const struct clk_parent_data sdh2_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+
+CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0);
+CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0);
+
+static const struct clk_parent_data qspi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d13_189),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3,
+ BIT(4), 0);
+CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0);
+
+static const struct clk_parent_data aes_parents[] = {
+ CCU_PARENT_HW(pll1_d12_204p8),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0);
+
+static const struct clk_parent_data vpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3,
+ BIT(3), 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3,
+ BIT(4), 0);
+
+static const struct clk_parent_data emmc_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d3_819p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11),
+ 6, 2, BIT(4), 0);
+CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12,
+ 3, BIT(15), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ CCU_PARENT_HW(pll1_aud_245p7),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15),
+ 7, 3, BIT(12), 0);
+
+static const struct clk_parent_data hdmi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5,
+ 3, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0);
+CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0);
+CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0);
+CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0);
+CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0);
+CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0);
+CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0);
+
+CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0);
+CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0);
+
+CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0);
+/* APMU clocks end */
+
+static struct clk_hw *k1_ccu_pll_hws[] = {
+ [CLK_PLL1] = &pll1.common.hw,
+ [CLK_PLL2] = &pll2.common.hw,
+ [CLK_PLL3] = &pll3.common.hw,
+ [CLK_PLL1_D2] = &pll1_d2.common.hw,
+ [CLK_PLL1_D3] = &pll1_d3.common.hw,
+ [CLK_PLL1_D4] = &pll1_d4.common.hw,
+ [CLK_PLL1_D5] = &pll1_d5.common.hw,
+ [CLK_PLL1_D6] = &pll1_d6.common.hw,
+ [CLK_PLL1_D7] = &pll1_d7.common.hw,
+ [CLK_PLL1_D8] = &pll1_d8.common.hw,
+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw,
+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw,
+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw,
+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw,
+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw,
+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw,
+ [CLK_PLL2_D1] = &pll2_d1.common.hw,
+ [CLK_PLL2_D2] = &pll2_d2.common.hw,
+ [CLK_PLL2_D3] = &pll2_d3.common.hw,
+ [CLK_PLL2_D4] = &pll2_d4.common.hw,
+ [CLK_PLL2_D5] = &pll2_d5.common.hw,
+ [CLK_PLL2_D6] = &pll2_d6.common.hw,
+ [CLK_PLL2_D7] = &pll2_d7.common.hw,
+ [CLK_PLL2_D8] = &pll2_d8.common.hw,
+ [CLK_PLL3_D1] = &pll3_d1.common.hw,
+ [CLK_PLL3_D2] = &pll3_d2.common.hw,
+ [CLK_PLL3_D3] = &pll3_d3.common.hw,
+ [CLK_PLL3_D4] = &pll3_d4.common.hw,
+ [CLK_PLL3_D5] = &pll3_d5.common.hw,
+ [CLK_PLL3_D6] = &pll3_d6.common.hw,
+ [CLK_PLL3_D7] = &pll3_d7.common.hw,
+ [CLK_PLL3_D8] = &pll3_d8.common.hw,
+ [CLK_PLL3_80] = &pll3_80.common.hw,
+ [CLK_PLL3_40] = &pll3_40.common.hw,
+ [CLK_PLL3_20] = &pll3_20.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_pll_data = {
+ /* The PLL CCU implements no resets */
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
+};
+
+static struct clk_hw *k1_ccu_mpmu_hws[] = {
+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw,
+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw,
+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw,
+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw,
+ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw,
+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw,
+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw,
+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw,
+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw,
+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw,
+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw,
+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw,
+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw,
+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw,
+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw,
+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw,
+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw,
+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw,
+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw,
+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw,
+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw,
+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw,
+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw,
+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw,
+ [CLK_SLOW_UART] = &slow_uart.common.hw,
+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw,
+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw,
+ [CLK_WDT] = &wdt_clk.common.hw,
+ [CLK_RIPC] = &ripc_clk.common.hw,
+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw,
+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+ [CLK_I2S_153P6] = &i2s_153p6.common.hw,
+ [CLK_I2S_153P6_BASE] = &i2s_153p6_base.common.hw,
+ [CLK_I2S_SYSCLK_SRC] = &i2s_sysclk_src.common.hw,
+ [CLK_I2S_BCLK_FACTOR] = &i2s_bclk_factor.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
+ .reset_name = "mpmu-reset",
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+};
+
+static struct clk_hw *k1_ccu_apbc_hws[] = {
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_UART7] = &uart7_clk.common.hw,
+ [CLK_UART8] = &uart8_clk.common.hw,
+ [CLK_UART9] = &uart9_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_PWM6] = &pwm6_clk.common.hw,
+ [CLK_PWM7] = &pwm7_clk.common.hw,
+ [CLK_PWM8] = &pwm8_clk.common.hw,
+ [CLK_PWM9] = &pwm9_clk.common.hw,
+ [CLK_PWM10] = &pwm10_clk.common.hw,
+ [CLK_PWM11] = &pwm11_clk.common.hw,
+ [CLK_PWM12] = &pwm12_clk.common.hw,
+ [CLK_PWM13] = &pwm13_clk.common.hw,
+ [CLK_PWM14] = &pwm14_clk.common.hw,
+ [CLK_PWM15] = &pwm15_clk.common.hw,
+ [CLK_PWM16] = &pwm16_clk.common.hw,
+ [CLK_PWM17] = &pwm17_clk.common.hw,
+ [CLK_PWM18] = &pwm18_clk.common.hw,
+ [CLK_PWM19] = &pwm19_clk.common.hw,
+ [CLK_SSP3] = &ssp3_clk.common.hw,
+ [CLK_RTC] = &rtc_clk.common.hw,
+ [CLK_TWSI0] = &twsi0_clk.common.hw,
+ [CLK_TWSI1] = &twsi1_clk.common.hw,
+ [CLK_TWSI2] = &twsi2_clk.common.hw,
+ [CLK_TWSI4] = &twsi4_clk.common.hw,
+ [CLK_TWSI5] = &twsi5_clk.common.hw,
+ [CLK_TWSI6] = &twsi6_clk.common.hw,
+ [CLK_TWSI7] = &twsi7_clk.common.hw,
+ [CLK_TWSI8] = &twsi8_clk.common.hw,
+ [CLK_TIMERS1] = &timers1_clk.common.hw,
+ [CLK_TIMERS2] = &timers2_clk.common.hw,
+ [CLK_AIB] = &aib_clk.common.hw,
+ [CLK_ONEWIRE] = &onewire_clk.common.hw,
+ [CLK_SSPA0] = &sspa0_clk.common.hw,
+ [CLK_SSPA1] = &sspa1_clk.common.hw,
+ [CLK_DRO] = &dro_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_TSEN] = &tsen_clk.common.hw,
+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw,
+ [CLK_CAN0] = &can0_clk.common.hw,
+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw,
+ [CLK_UART0_BUS] = &uart0_bus_clk.common.hw,
+ [CLK_UART2_BUS] = &uart2_bus_clk.common.hw,
+ [CLK_UART3_BUS] = &uart3_bus_clk.common.hw,
+ [CLK_UART4_BUS] = &uart4_bus_clk.common.hw,
+ [CLK_UART5_BUS] = &uart5_bus_clk.common.hw,
+ [CLK_UART6_BUS] = &uart6_bus_clk.common.hw,
+ [CLK_UART7_BUS] = &uart7_bus_clk.common.hw,
+ [CLK_UART8_BUS] = &uart8_bus_clk.common.hw,
+ [CLK_UART9_BUS] = &uart9_bus_clk.common.hw,
+ [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw,
+ [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw,
+ [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw,
+ [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw,
+ [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw,
+ [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw,
+ [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw,
+ [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw,
+ [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw,
+ [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw,
+ [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw,
+ [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw,
+ [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw,
+ [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw,
+ [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw,
+ [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw,
+ [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw,
+ [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw,
+ [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw,
+ [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw,
+ [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw,
+ [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw,
+ [CLK_RTC_BUS] = &rtc_bus_clk.common.hw,
+ [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw,
+ [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw,
+ [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw,
+ [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw,
+ [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw,
+ [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw,
+ [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw,
+ [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw,
+ [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw,
+ [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw,
+ [CLK_AIB_BUS] = &aib_bus_clk.common.hw,
+ [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw,
+ [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw,
+ [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
+ [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
+ [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+ [CLK_SSPA0_I2S_BCLK] = &sspa0_i2s_bclk.common.hw,
+ [CLK_SSPA1_I2S_BCLK] = &sspa1_i2s_bclk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc_data = {
+ .reset_name = "apbc-reset",
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+};
+
+static struct clk_hw *k1_ccu_apmu_hws[] = {
+ [CLK_CCI550] = &cci550_clk.common.hw,
+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw,
+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw,
+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw,
+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw,
+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw,
+ [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw,
+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw,
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw,
+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw,
+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw,
+ [CLK_SDH0] = &sdh0_clk.common.hw,
+ [CLK_SDH1] = &sdh1_clk.common.hw,
+ [CLK_SDH2] = &sdh2_clk.common.hw,
+ [CLK_USB_P1] = &usb_p1_aclk.common.hw,
+ [CLK_USB_AXI] = &usb_axi_clk.common.hw,
+ [CLK_USB30] = &usb30_clk.common.hw,
+ [CLK_QSPI] = &qspi_clk.common.hw,
+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw,
+ [CLK_DMA] = &dma_clk.common.hw,
+ [CLK_AES] = &aes_clk.common.hw,
+ [CLK_VPU] = &vpu_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_EMMC] = &emmc_clk.common.hw,
+ [CLK_EMMC_X] = &emmc_x_clk.common.hw,
+ [CLK_AUDIO] = &audio_clk.common.hw,
+ [CLK_HDMI] = &hdmi_mclk.common.hw,
+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw,
+ [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw,
+ [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw,
+ [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw,
+ [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw,
+ [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw,
+ [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw,
+ [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw,
+ [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw,
+ [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw,
+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw,
+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw,
+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw,
+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw,
+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_CAMM0] = &camm0_clk.common.hw,
+ [CLK_CAMM1] = &camm1_clk.common.hw,
+ [CLK_CAMM2] = &camm2_clk.common.hw,
+ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw,
+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw,
+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw,
+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw,
+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw,
+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw,
+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw,
+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw,
+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw,
+ [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw,
+ [CLK_V2D] = &v2d_clk.common.hw,
+ [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apmu_data = {
+ .reset_name = "apmu-reset",
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu_data = {
+ .reset_name = "rcpu-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu2_data = {
+ .reset_name = "rcpu2-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc2_data = {
+ .reset_name = "apbc2-reset",
+};
+
+static int spacemit_ccu_register(struct device *dev,
+ struct regmap *regmap,
+ struct regmap *lock_regmap,
+ const struct spacemit_ccu_data *data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int i, ret;
+
+ /* Nothing to do if the CCU does not implement any clocks */
+ if (!data->hws)
+ return 0;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num; i++) {
+ struct clk_hw *hw = data->hws[i];
+ struct ccu_common *common;
+ const char *name;
+
+ if (!hw) {
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ name = hw->init->name;
+
+ common = hw_to_ccu_common(hw);
+ common->regmap = regmap;
+ common->lock_regmap = lock_regmap;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Cannot register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+
+ clk_data->hws[i] = hw;
+ }
+
+ clk_data->num = data->num;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
+
+ return ret;
+}
+
+static void spacemit_cadev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&auxiliary_ids, adev->id);
+ kfree(to_spacemit_ccu_adev(adev));
+}
+
+static void spacemit_adev_unregister(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int spacemit_ccu_reset_register(struct device *dev,
+ struct regmap *regmap,
+ const char *reset_name)
+{
+ struct spacemit_ccu_adev *cadev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ /* Nothing to do if the CCU does not implement a reset controller */
+ if (!reset_name)
+ return 0;
+
+ cadev = kzalloc(sizeof(*cadev), GFP_KERNEL);
+ if (!cadev)
+ return -ENOMEM;
+
+ cadev->regmap = regmap;
+
+ adev = &cadev->adev;
+ adev->name = reset_name;
+ adev->dev.parent = dev;
+ adev->dev.release = spacemit_cadev_release;
+ adev->dev.of_node = dev->of_node;
+ ret = ida_alloc(&auxiliary_ids, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_cadev;
+ adev->id = ret;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_free_aux_id;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, spacemit_adev_unregister, adev);
+
+err_free_aux_id:
+ ida_free(&auxiliary_ids, adev->id);
+err_free_cadev:
+ kfree(cadev);
+
+ return ret;
+}
+
+static int k1_ccu_probe(struct platform_device *pdev)
+{
+ struct regmap *base_regmap, *lock_regmap = NULL;
+ const struct spacemit_ccu_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base_regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(base_regmap))
+ return dev_err_probe(dev, PTR_ERR(base_regmap),
+ "failed to get regmap\n");
+
+	/*
+	 * The lock status bits of the PLLs live in the MPMU region, while the
+	 * PLLs themselves are in the APBS region, so a reference to the MPMU
+	 * syscon is required to check the PLL status.
+	 */
+ if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) {
+ struct device_node *mpmu = of_parse_phandle(dev->of_node,
+ "spacemit,mpmu", 0);
+ if (!mpmu)
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot parse MPMU region\n");
+
+ lock_regmap = device_node_to_regmap(mpmu);
+ of_node_put(mpmu);
+
+ if (IS_ERR(lock_regmap))
+ return dev_err_probe(dev, PTR_ERR(lock_regmap),
+ "failed to get lock regmap\n");
+ }
+
+ data = of_device_get_match_data(dev);
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clocks\n");
+
+ ret = spacemit_ccu_reset_register(dev, base_regmap, data->reset_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register resets\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_k1_ccu_match[] = {
+ {
+ .compatible = "spacemit,k1-pll",
+ .data = &k1_ccu_pll_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-mpmu",
+ .data = &k1_ccu_mpmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc",
+ .data = &k1_ccu_apbc_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apmu",
+ .data = &k1_ccu_apmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-rcpu",
+ .data = &k1_ccu_rcpu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-rcpu2",
+ .data = &k1_ccu_rcpu2_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc2",
+ .data = &k1_ccu_apbc2_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
+
+static struct platform_driver k1_ccu_driver = {
+ .driver = {
+ .name = "spacemit,k1-ccu",
+ .of_match_table = of_k1_ccu_match,
+ },
+ .probe = k1_ccu_probe,
+};
+module_platform_driver(k1_ccu_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 CCU driver");
+MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h
new file mode 100644
index 000000000000..da72f3836e0b
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_COMMON_H_
+#define _CCU_COMMON_H_
+
+#include <linux/regmap.h>
+
+struct ccu_common {
+ struct regmap *regmap;
+ struct regmap *lock_regmap;
+
+ union {
+ /* For DDN and MIX */
+ struct {
+ u32 reg_ctrl;
+ u32 reg_fc;
+ u32 mask_fc;
+ };
+
+ /* For PLL */
+ struct {
+ u32 reg_swcr1;
+ u32 reg_swcr3;
+ };
+ };
+
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
+#define ccu_read(c, reg) \
+ ({ \
+ u32 tmp; \
+ regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \
+ tmp; \
+ })
+#define ccu_update(c, reg, mask, val) \
+ regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val)
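+
+/*
+ * Usage sketch, mirroring the gate ops in ccu_mix.c: gate bits are set
+ * with
+ *
+ *	ccu_update(common, ctrl, gate->mask, gate->mask);
+ *
+ * and tested with
+ *
+ *	ccu_read(common, ctrl) & gate->mask
+ */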
+
+#endif /* _CCU_COMMON_H_ */
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
new file mode 100644
index 000000000000..5b16e273bee5
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * DDN stands for "Divider Denominator Numerator"; it is an M/N clock with
+ * a constant x2 factor. This clock hardware follows the equation below,
+ *
+ * numerator Fin
+ * 2 * ------------- = -------
+ * denominator Fout
+ *
+ * Thus, Fout could be calculated with,
+ *
+ * Fin denominator
+ * Fout = ----- * -------------
+ * 2 numerator
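+ *
+ * A worked example with illustrative register values (not taken from any
+ * table in this driver): a 153.6 MHz parent, numerator = 125 and
+ * denominator = 24 give
+ *
+ *          153.6 MHz    24
+ *   Fout = --------- * ----- = 14.7456 MHz
+ *              2       125
+ *
+ * which is roughly the 14.74 MHz slow UART rate used in ccu-k1.c.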
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_ddn.h"
+
+static unsigned long ccu_ddn_calc_rate(unsigned long prate, unsigned long num,
+ unsigned long den, unsigned int pre_div)
+{
+ return prate * den / pre_div / num;
+}
+
+static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
+ unsigned long rate, unsigned long prate,
+ unsigned long *num, unsigned long *den)
+{
+ rational_best_approximation(rate, prate / ddn->pre_div,
+ ddn->den_mask >> ddn->den_shift,
+ ddn->num_mask >> ddn->num_shift,
+ den, num);
+ return ccu_ddn_calc_rate(prate, *num, *den, ddn->pre_div);
+}
+
+static int ccu_ddn_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ req->rate = ccu_ddn_calc_best_rate(ddn, req->rate,
+ req->best_parent_rate, &num, &den);
+
+ return 0;
+}
+
+static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned int val, num, den;
+
+ val = ccu_read(&ddn->common, ctrl);
+
+ num = (val & ddn->num_mask) >> ddn->num_shift;
+ den = (val & ddn->den_mask) >> ddn->den_shift;
+
+ return ccu_ddn_calc_rate(prate, num, den, ddn->pre_div);
+}
+
+static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den);
+
+ ccu_update(&ddn->common, ctrl,
+ ddn->num_mask | ddn->den_mask,
+ (num << ddn->num_shift) | (den << ddn->den_shift));
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_ddn_ops = {
+ .recalc_rate = ccu_ddn_recalc_rate,
+ .determine_rate = ccu_ddn_determine_rate,
+ .set_rate = ccu_ddn_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
new file mode 100644
index 000000000000..4838414a8e8d
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_DDN_H_
+#define _CCU_DDN_H_
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_ddn {
+ struct ccu_common common;
+ unsigned int num_mask;
+ unsigned int num_shift;
+ unsigned int den_mask;
+ unsigned int den_shift;
+ unsigned int pre_div;
+};
+
+#define CCU_DDN_INIT(_name, _parent, _flags) \
+ CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
+
+#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
+ _den_shift, _den_width, _pre_div, _flags) \
+static struct ccu_ddn _name = { \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \
+ }, \
+ .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
+ .num_shift = _num_shift, \
+ .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
+ .den_shift = _den_shift, \
+ .pre_div = _pre_div, \
+}
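+
+/*
+ * Example from ccu-k1.c: a 13-bit numerator at bit 16, a 13-bit
+ * denominator at bit 0 and a /2 pre-divider:
+ *
+ *	CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR,
+ *		       16, 13, 0, 13, 2, 0);
+ */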
+
+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_ddn, common);
+}
+
+extern const struct clk_ops spacemit_ccu_ddn_ops;
+
+#endif
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
new file mode 100644
index 000000000000..7b7990875372
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * The MIX clock type is a combination of a mux, a factor or divider, and a gate.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_mix.h"
+
+#define MIX_FC_TIMEOUT_US 10000
+#define MIX_FC_DELAY_US 5
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ ccu_update(&mix->common, ctrl, mix->gate.mask, 0);
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ ccu_update(&mix->common, ctrl, gate->mask, gate->mask);
+
+ return 0;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask;
+}
+
+static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ return parent_rate * mix->factor.mul / mix->factor.div;
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_div_config *div = &mix->div;
+ unsigned long val;
+
+ val = ccu_read(&mix->common, ctrl) >> div->shift;
+ val &= (1 << div->width) - 1;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width);
+}
+
+/*
+ * Some clocks require an "FC" (frequency change) bit to be set after
+ * changing their rate or reparenting. The hardware clears this bit
+ * automatically once the operation completes; the driver polls for up to
+ * MIX_FC_TIMEOUT_US for that to happen.
+ */
+static int ccu_mix_trigger_fc(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+ unsigned int val;
+
+	/* Nothing to do if the clock does not implement the FC bit */
+	if (!common->reg_fc)
+		return 0;
+
+ ccu_update(common, fc, common->mask_fc, common->mask_fc);
+
+ return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc,
+ val, !(val & common->mask_fc),
+ MIX_FC_DELAY_US,
+ MIX_FC_TIMEOUT_US);
+}
+
+static int ccu_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_factor_recalc_rate(hw, req->best_parent_rate);
+
+ return 0;
+}
+
+static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static unsigned long
+ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_hw **best_parent,
+ unsigned long *best_parent_rate,
+ u32 *div_val)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ unsigned int parent_num = clk_hw_get_num_parents(hw);
+ struct ccu_div_config *div = &mix->div;
+ u32 div_max = 1 << div->width;
+ unsigned long best_rate = 0;
+
+ for (int i = 0; i < parent_num; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate;
+
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ for (int j = 1; j <= div_max; j++) {
+ unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j);
+
+ if (abs(tmp - rate) < abs(best_rate - rate)) {
+ best_rate = tmp;
+
+ if (div_val)
+ *div_val = j - 1;
+
+ if (best_parent) {
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+ }
+ }
+
+ return best_rate;
+}
+
+static int ccu_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_mix_calc_best_rate(hw, req->rate,
+ &req->best_parent_hw,
+ &req->best_parent_rate,
+ NULL);
+ return 0;
+}
+
+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_common *common = &mix->common;
+ struct ccu_div_config *div = &mix->div;
+ u32 current_div, target_div, mask;
+
+ ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div);
+
+ current_div = ccu_read(common, ctrl) >> div->shift;
+ current_div &= (1 << div->width) - 1;
+
+ if (current_div == target_div)
+ return 0;
+
+ mask = GENMASK(div->width + div->shift - 1, div->shift);
+
+ ccu_update(common, ctrl, mask, target_div << div->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u8 parent;
+
+ parent = ccu_read(&mix->common, ctrl) >> mux->shift;
+ parent &= (1 << mux->width) - 1;
+
+ return parent;
+}
+
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u32 mask;
+
+ mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+
+ ccu_update(&mix->common, ctrl, mask, index << mux->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+const struct clk_ops spacemit_ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
+
+const struct clk_ops spacemit_ccu_factor_ops = {
+ .determine_rate = ccu_factor_determine_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_factor_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_factor_determine_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_ops = {
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
new file mode 100644
index 000000000000..54d40cd39b27
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_MIX_H_
+#define _CCU_MIX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_gate_config - Gate configuration
+ *
+ * @mask: Mask to enable the gate. Some clocks may have more than one bit
+ * set in this field.
+ */
+struct ccu_gate_config {
+ u32 mask;
+};
+
+struct ccu_factor_config {
+ u32 div;
+ u32 mul;
+};
+
+struct ccu_mux_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_div_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_mix {
+ struct ccu_factor_config factor;
+ struct ccu_gate_config gate;
+ struct ccu_div_config div;
+ struct ccu_mux_config mux;
+ struct ccu_common common;
+};
+
+#define CCU_GATE_INIT(_mask) { .mask = _mask }
+#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul }
+#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width }
+#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width }
+
+#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw }
+#define CCU_PARENT_NAME(_name) { .fw_name = #_name }
+
+#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \
+ .hw.init = &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = #_name, \
+ .parent_data = (const struct clk_parent_data[]) \
+ { _parent }, \
+ .num_parents = 1, \
+ .ops = &_ops, \
+ }
+
+#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags)
+
+#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \
+static struct ccu_mix _name = { \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \
+ } \
+}
+
+#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+ CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, 0)
+
+#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _muxshift, _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
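+/*
+ * The "SPLIT_FC" variant is for clocks whose frequency-change bit lives in
+ * a register separate from the control register. The plain "FC" variants
+ * below reuse the control register for the FC handshake.
+ */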
+#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \
+ _mshift, _mwidth, _mask_fc, _muxshift, \
+ _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_fc, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\
+ _mask_fc, _muxshift, _muxwidth, _mask_gate, \
+ _flags) \
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\
+ _mwidth, _mask_fc, _muxshift, _muxwidth, \
+ _mask_gate, _flags)
+
+#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _mask_fc, _muxshift, _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \
+ _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags) \
+ }, \
+}
+
+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mix, common);
+}
+
+extern const struct clk_ops spacemit_ccu_gate_ops;
+extern const struct clk_ops spacemit_ccu_factor_ops;
+extern const struct clk_ops spacemit_ccu_mux_ops;
+extern const struct clk_ops spacemit_ccu_div_ops;
+extern const struct clk_ops spacemit_ccu_factor_gate_ops;
+extern const struct clk_ops spacemit_ccu_div_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
new file mode 100644
index 000000000000..d92f0dae65a4
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/regmap.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+
+#define PLL_TIMEOUT_US 3000
+#define PLL_DELAY_US 5
+
+#define PLL_SWCR3_EN ((u32)BIT(31))
+#define PLL_SWCR3_MASK GENMASK(30, 0)
+
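+/* Pick the rate-table entry numerically closest to the requested rate. */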
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll,
+ unsigned long rate)
+{
+ struct ccu_pll_config *config = &pll->config;
+ const struct ccu_pll_rate_tbl *best_entry;
+ unsigned long best_delta = ULONG_MAX;
+ int i;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+ unsigned long delta = abs_diff(entry->rate, rate);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_entry = entry;
+ }
+ }
+
+ return best_entry;
+}
+
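+/*
+ * Find the table entry matching the currently-programmed register values,
+ * ignoring the enable bit of SWCR3. Returns NULL when the hardware is in
+ * a configuration the table does not describe.
+ */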
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll)
+{
+ struct ccu_pll_config *config = &pll->config;
+ u32 swcr1, swcr3;
+ int i;
+
+ swcr1 = ccu_read(&pll->common, swcr1);
+ swcr3 = ccu_read(&pll->common, swcr3);
+ swcr3 &= PLL_SWCR3_MASK;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+
+ if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry)
+{
+ struct ccu_common *common = &pll->common;
+
+ regmap_write(common->regmap, common->reg_swcr1, entry->swcr1);
+ ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return ccu_read(common, swcr3) & PLL_SWCR3_EN;
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ struct ccu_common *common = &pll->common;
+ unsigned int tmp;
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN);
+
+ /* check lock status */
+ return regmap_read_poll_timeout_atomic(common->lock_regmap,
+ pll->config.reg_lock,
+ tmp,
+ tmp & pll->config.mask_lock,
+ PLL_DELAY_US, PLL_TIMEOUT_US);
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, 0);
+}
+
+/*
+ * PLLs must be gated before changing rate, which is ensured by
+ * flag CLK_SET_RATE_GATE.
+ */
+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_best_rate(pll, rate);
+ ccu_pll_update_param(pll, entry);
+
+ return 0;
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_matched_entry(pll);
+
+ WARN_ON_ONCE(!entry);
+
+ return entry ? entry->rate : 0;
+}
+
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ req->rate = ccu_pll_lookup_best_rate(pll, req->rate)->rate;
+
+ return 0;
+}
+
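+/*
+ * If the bootloader left the PLL in a configuration that is absent from
+ * the rate table, force the first table entry so that recalc_rate()
+ * always finds a match.
+ */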
+static int ccu_pll_init(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ if (ccu_pll_lookup_matched_entry(pll))
+ return 0;
+
+ ccu_pll_disable(hw);
+ ccu_pll_update_param(pll, &pll->config.rate_tbl[0]);
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_pll_ops = {
+ .init = ccu_pll_init,
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .set_rate = ccu_pll_set_rate,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .determine_rate = ccu_pll_determine_rate,
+ .is_enabled = ccu_pll_is_enabled,
+};
diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
new file mode 100644
index 000000000000..0592f4c3068c
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_PLL_H_
+#define _CCU_PLL_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register
+ * configuration.
+ *
+ * @rate: PLL rate
+ * @swcr1: Register value of PLLx_SW1_CTRL (PLLx_SWCR1).
+ * @swcr3: Register value of the lowest 31 bits of PLLx_SW3_CTRL
+ *         (PLLx_SWCR3). The highest bit is for enabling the PLL and is
+ *         not contained in this field.
+ */
+struct ccu_pll_rate_tbl {
+ unsigned long rate;
+ u32 swcr1;
+ u32 swcr3;
+};
+
+struct ccu_pll_config {
+ const struct ccu_pll_rate_tbl *rate_tbl;
+ u32 tbl_num;
+ u32 reg_lock;
+ u32 mask_lock;
+};
+
+#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \
+ { \
+ .rate = _rate, \
+ .swcr1 = _swcr1, \
+ .swcr3 = _swcr3, \
+ }
+
+struct ccu_pll {
+ struct ccu_common common;
+ struct ccu_pll_config config;
+};
+
+#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \
+ { \
+ .rate_tbl = _table, \
+ .tbl_num = ARRAY_SIZE(_table), \
+ .reg_lock = (_reg_lock), \
+ .mask_lock = (_mask_lock), \
+ }
+
+#define CCU_PLL_HWINIT(_name, _flags) \
+ (&(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &spacemit_ccu_pll_ops, \
+ .parent_data = &(struct clk_parent_data) { .index = 0 }, \
+ .num_parents = 1, \
+ .flags = _flags, \
+ })
+
+#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \
+ _mask_lock, _flags) \
+static struct ccu_pll _name = { \
+ .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \
+ .common = { \
+ .reg_swcr1 = _reg_swcr1, \
+ .reg_swcr3 = _reg_swcr3, \
+ .hw.init = CCU_PLL_HWINIT(_name, _flags) \
+ } \
+}
+
+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_pll, common);
+}
+
+extern const struct clk_ops spacemit_ccu_pll_ops;
+
+#endif
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index 637938e804f8..d0d063147af8 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -49,14 +49,16 @@ static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
(rtbl[index].yscale * eq)) * 10000;
}
-static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_aux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_aux *aux = to_clk_aux(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
- aux->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ aux_calc_rate, aux->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
@@ -127,7 +129,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
- .round_rate = clk_aux_round_rate,
+ .determine_rate = clk_aux_determine_rate,
.set_rate = clk_aux_set_rate,
};
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 2380df293a2c..150f051d28e0 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -52,14 +52,16 @@ static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
- frac->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ frac_calc_rate, frac->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
@@ -115,7 +117,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 4ef747c2abbb..cf9659dc9073 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -39,14 +39,16 @@ static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_gpt_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gpt *gpt = to_clk_gpt(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
- gpt->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ gpt_calc_rate, gpt->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
@@ -104,7 +106,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
- .round_rate = clk_gpt_round_rate,
+ .determine_rate = clk_gpt_determine_rate,
.set_rate = clk_gpt_set_rate,
};
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 348eeab0a906..723a6eb67754 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -110,12 +110,15 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int unused;
- return clk_pll_round_rate_index(hw, drate, prate, &unused);
+ req->rate = clk_pll_round_rate_index(hw, req->rate,
+ &req->best_parent_rate, &unused);
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
@@ -164,7 +167,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -176,14 +179,16 @@ static inline unsigned long vco_calc_rate(struct clk_hw *hw,
return pll_calc_rate(vco->rtbl, prate, index, NULL);
}
-static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_vco *vco = to_clk_vco(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
- vco->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ vco_calc_rate, vco->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
@@ -265,7 +270,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
- .round_rate = clk_vco_round_rate,
+ .determine_rate = clk_vco_determine_rate,
.set_rate = clk_vco_set_rate,
};
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 361d344bfaf0..fdfb26c67188 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -199,7 +199,7 @@ static struct frac_rate_tbl amba_synth_rtbl[] = {
* We can program this synthesizer to make cpu run on different clock
* frequencies.
* Following table provides configuration values to let cpu run on 200,
- * 250, 332, 400 or 500 MHz considering different possibilites of input
+ * 250, 332, 400 or 500 MHz considering different possibilities of input
* (vco1div2) clock.
*
* --------------------------------------------------------------------
diff --git a/drivers/clk/sprd/div.c b/drivers/clk/sprd/div.c
index 936782c24127..013423881968 100644
--- a/drivers/clk/sprd/div.c
+++ b/drivers/clk/sprd/div.c
@@ -9,13 +9,16 @@
#include "div.h"
-static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sprd_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sprd_div *cd = hw_to_sprd_div(hw);
- return divider_round_rate(&cd->common.hw, rate, parent_rate, NULL,
- cd->div.width, 0);
+ req->rate = divider_round_rate(&cd->common.hw, req->rate,
+ &req->best_parent_rate,
+ NULL, cd->div.width, 0);
+
+ return 0;
}
unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common,
@@ -75,7 +78,7 @@ static int sprd_div_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops sprd_div_ops = {
.recalc_rate = sprd_div_recalc_rate,
- .round_rate = sprd_div_round_rate,
+ .determine_rate = sprd_div_determine_rate,
.set_rate = sprd_div_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_div_ops);
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index e738dafa4fe9..775519eb1cb6 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -26,7 +26,7 @@ struct sprd_gate {
* CLK_GATE_BIG_ENDIAN BIT(2)
* so we define new flags from BIT(3)
*/
-#define SPRD_GATE_NON_AON BIT(3) /* not alway powered on, check before read */
+#define SPRD_GATE_NON_AON BIT(3) /* not always powered on, check before read */
#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
_sc_offset, _enable_mask, _flags, \
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 13a322b2535a..bc6610d5fcb7 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -254,16 +254,16 @@ static int sprd_pll_clk_prepare(struct clk_hw *hw)
return 0;
}
-static long sprd_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sprd_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
const struct clk_ops sprd_pll_ops = {
.prepare = sprd_pll_clk_prepare,
.recalc_rate = sprd_pll_recalc_rate,
- .round_rate = sprd_pll_round_rate,
+ .determine_rate = sprd_pll_determine_rate,
.set_rate = sprd_pll_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_pll_ops);
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
index 9384ecc6c741..f763d83de9ee 100644
--- a/drivers/clk/sprd/ums512-clk.c
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -1550,7 +1550,7 @@ static struct sprd_clk_desc ums512_aon_gate_desc = {
/* audcp apb gates */
/* Audcp apb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_wdg_eb, "audcp-wdg-eb",
@@ -1592,7 +1592,7 @@ static const struct sprd_clk_desc ums512_audcpapb_gate_desc = {
/* audcp ahb gates */
/* Audcp aphb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_iis0_eb, "audcp-iis0-eb",
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 5292208c4dd8..e8e7626c76db 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -303,16 +303,6 @@ static const struct clkgen_data clkgen_video = {
.mode = 1,
};
-static const struct clkgen_clk_out clkgen_stih407_a0_clk_out[] = {
- /* This clk needs to be on so that memory interface is accessible */
- { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
-};
-
-static const struct clkgen_data clkgen_stih407_a0 = {
- .outputs = clkgen_stih407_a0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_a0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_a0_clk_out[] = {
/* Those clks need to be on so that memory interface is accessible */
{ .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
@@ -324,51 +314,6 @@ static const struct clkgen_data clkgen_stih410_a0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih410_a0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_c0_clk_out[] = {
- { .name = "clk-icn-gpu", },
- { .name = "clk-fdma", },
- { .name = "clk-nand", },
- { .name = "clk-hva", },
- { .name = "clk-proc-stfe", },
- { .name = "clk-proc-tp", },
- { .name = "clk-rx-icn-dmu", },
- { .name = "clk-rx-icn-hva", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
- { .name = "clk-mmc-0", },
- { .name = "clk-mmc-1", },
- { .name = "clk-jpegdec", },
- /* This clk needs to be on to keep A9 running */
- { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
- { .name = "clk-ic-bdisp-0", },
- { .name = "clk-ic-bdisp-1", },
- { .name = "clk-pp-dmu", },
- { .name = "clk-vid-dmu", },
- { .name = "clk-dss-lpc", },
- { .name = "clk-st231-aud-0", },
- { .name = "clk-st231-gp-1", },
- { .name = "clk-st231-dmu", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
- { .name = "clk-tx-icn-disp-1", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
- { .name = "clk-stfe-frc2", },
- { .name = "clk-eth-phy", },
- { .name = "clk-eth-ref-phyclk", },
- { .name = "clk-flash-promip", },
- { .name = "clk-main-disp", },
- { .name = "clk-aux-disp", },
- { .name = "clk-compo-dvp", },
-};
-
-static const struct clkgen_data clkgen_stih407_c0 = {
- .outputs = clkgen_stih407_c0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_c0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_c0_clk_out[] = {
{ .name = "clk-icn-gpu", },
{ .name = "clk-fdma", },
@@ -482,19 +427,6 @@ static const struct clkgen_data clkgen_stih418_c0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih418_c0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_d0_clk_out[] = {
- { .name = "clk-pcm-0", },
- { .name = "clk-pcm-1", },
- { .name = "clk-pcm-2", },
- { .name = "clk-spdiff", },
-};
-
-static const struct clkgen_data clkgen_stih407_d0 = {
- .flags = CLK_SET_RATE_PARENT,
- .outputs = clkgen_stih407_d0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_d0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_d0_clk_out[] = {
{ .name = "clk-pcm-0", },
{ .name = "clk-pcm-1", },
@@ -597,18 +529,10 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_video,
},
{
- .compatible = "st,flexgen-stih407-a0",
- .data = &clkgen_stih407_a0,
- },
- {
.compatible = "st,flexgen-stih410-a0",
.data = &clkgen_stih410_a0,
},
{
- .compatible = "st,flexgen-stih407-c0",
- .data = &clkgen_stih407_c0,
- },
- {
.compatible = "st,flexgen-stih410-c0",
.data = &clkgen_stih410_c0,
},
@@ -617,10 +541,6 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_stih418_c0,
},
{
- .compatible = "st,flexgen-stih407-d0",
- .data = &clkgen_stih407_d0,
- },
- {
.compatible = "st,flexgen-stih410-d0",
.data = &clkgen_stih410_d0,
},
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 40df1db102a7..e06e7e5cc1a5 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -375,22 +375,21 @@ static int clk_fs660c32_vco_get_params(unsigned long input,
return 0;
}
-static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- if (clk_fs660c32_vco_get_params(*prate, rate, &params))
- return rate;
+ if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate, &params))
+ return 0;
- clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+ clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate);
pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.ndiv);
+ req->rate, (unsigned int)params.ndiv);
- return rate;
+ return 0;
}
static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +435,7 @@ static const struct clk_ops st_quadfs_pll_c32_ops = {
.disable = quadfs_pll_disable,
.is_enabled = quadfs_pll_is_enabled,
.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
- .round_rate = quadfs_pll_fs660c32_round_rate,
+ .determine_rate = quadfs_pll_fs660c32_determine_rate,
.set_rate = quadfs_pll_fs660c32_set_rate,
};
@@ -814,19 +813,21 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int quadfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+ req->rate = quadfs_find_best_rate(hw, req->rate,
+ req->best_parent_rate, &params);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
- (unsigned int)params.pe, (unsigned int)params.nsdiv);
+ req->rate, (unsigned int)params.sdiv,
+ (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
- return rate;
+ return 0;
}
@@ -873,7 +874,7 @@ static const struct clk_ops st_quadfs_ops = {
.enable = quadfs_fsynth_enable,
.disable = quadfs_fsynth_disable,
.is_enabled = quadfs_fsynth_is_enabled,
- .round_rate = quadfs_round_rate,
+ .determine_rate = quadfs_determine_rate,
.set_rate = quadfs_set_rate,
.recalc_rate = quadfs_recalc_rate,
};
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index b36e4d803636..c258ff87a171 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -395,25 +395,28 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll3200c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll3200c32_get_params(*prate, rate, &params))
- clk_pll3200c32_get_rate(*prate, &params, &rate);
+ if (!clk_pll3200c32_get_params(req->best_parent_rate, req->rate, &params))
+ clk_pll3200c32_get_rate(req->best_parent_rate, &params,
+ &req->rate);
else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
@@ -549,25 +552,28 @@ static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll4600c28_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
- clk_pll4600c28_get_rate(*prate, &params, &rate);
+ if (!clk_pll4600c28_get_params(req->best_parent_rate, req->rate, &params)) {
+ clk_pll4600c28_get_rate(req->best_parent_rate, &params,
+ &req->rate);
} else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
@@ -628,7 +634,7 @@ static const struct clk_ops stm_pll3200c32_a9_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll3200c32,
- .round_rate = round_rate_stm_pll3200c32,
+ .determine_rate = stm_pll3200c32_determine_rate,
.set_rate = set_rate_stm_pll3200c32,
};
@@ -637,7 +643,7 @@ static const struct clk_ops stm_pll4600c28_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll4600c28,
- .round_rate = round_rate_stm_pll4600c28,
+ .determine_rate = stm_pll4600c28_determine_rate,
.set_rate = set_rate_stm_pll4600c28,
};
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index 1fcf4e62f347..7de23f6749aa 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -84,17 +84,6 @@ static const struct jh71x0_clk_data jh7100_audclk_data[] = {
JH7100_AUDCLK_AUDIO_12288),
};
-static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7100_AUDCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7100_audclk_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -106,6 +95,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7100_AUDCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -146,7 +136,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return ret;
}
- return devm_of_clk_add_hw_provider(priv->dev, jh7100_audclk_get, priv);
+ return devm_of_clk_add_hw_provider(priv->dev, jh71x0_clk_get, priv);
}
static const struct of_device_id jh7100_audclk_match[] = {
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
index 418efdad719b..6f67587f4335 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-aon.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -54,17 +54,6 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
};
-static struct clk_hw *jh7110_aonclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_AONCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_aoncrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -78,6 +67,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_AONCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -127,7 +117,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_aonclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index 8c4c3a958a9f..f3fa069db193 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -75,17 +75,6 @@ static inline int jh7110_isp_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rsts);
}
-static struct clk_hw *jh7110_ispclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_ISPCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_ispcrg_suspend(struct device *dev)
{
@@ -126,6 +115,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_ISPCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -186,7 +176,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_ispclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-pll.c b/drivers/clk/starfive/clk-starfive-jh7110-pll.c
index 3598390e8fd0..56dc58a04f8a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-pll.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-pll.c
@@ -453,7 +453,7 @@ static struct clk_hw *jh7110_pll_get(struct of_phandle_args *clkspec, void *data
return ERR_PTR(-EINVAL);
}
-static int jh7110_pll_probe(struct platform_device *pdev)
+static int __init jh7110_pll_probe(struct platform_device *pdev)
{
struct jh7110_pll_priv *priv;
unsigned int idx;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-stg.c b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
index dafcb7190592..2a5ad0e07d1d 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-stg.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
@@ -75,17 +75,6 @@ static const struct jh71x0_clk_data jh7110_stgclk_data[] = {
JH71X0_GATE(JH7110_STGCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7110_STGCLK_STG_AXIAHB),
};
-static struct clk_hw *jh7110_stgclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_STGCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_stgcrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -98,6 +87,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_STGCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -145,7 +135,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_stgclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 17325f17696f..52833d4241c5 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -323,17 +323,6 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH7110_SYSCLK_OSC),
};
-static struct clk_hw *jh7110_sysclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_SYSCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static void jh7110_reset_unregister_adev(void *_adev)
{
struct auxiliary_device *adev = _adev;
@@ -387,7 +376,7 @@ EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
/*
* This clock notifier is called when the rate of PLL0 clock is to be changed.
- * The cpu_root clock should save the curent parent clock and switch its parent
+ * The cpu_root clock should save the current parent clock and switch its parent
* clock to osc before PLL0 rate will be changed. Then switch its parent clock
* back after the PLL0 rate is completed.
*/
@@ -425,6 +414,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_SYSCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -526,7 +516,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_sysclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 04eeed199087..bad20d5d794a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -80,17 +80,6 @@ static int jh7110_vout_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rst);
}
-static struct clk_hw *jh7110_voutclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_VOUTCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_voutcrg_suspend(struct device *dev)
{
@@ -131,6 +120,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_VOUTCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -193,7 +183,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_voutclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.c b/drivers/clk/starfive/clk-starfive-jh71x0.c
index aebc99264a0b..80e9157347eb 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.c
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.c
@@ -325,3 +325,15 @@ const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
return &jh71x0_clk_inv_ops;
}
EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
+
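+/*
+ * Shared of_clk provider callback: bounds-check the clockspec cell against
+ * the per-instance clock count instead of a per-SoC constant.
+ */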
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < priv->num_reg)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(jh71x0_clk_get);
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index e3f441393e48..9d5dec1d5cd1 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -117,9 +117,11 @@ struct jh71x0_clk_priv {
struct clk *original_clk;
struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
- struct jh71x0_clk reg[];
+ unsigned int num_reg;
+ struct jh71x0_clk reg[] __counted_by(num_reg);
};
const struct clk_ops *starfive_jh71x0_clk_ops(u32 max);
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data);
#endif
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index dca409d52652..5dbd75cde657 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -4,7 +4,7 @@
menuconfig COMMON_CLK_STM32MP
bool "Clock support for common STM32MP clocks"
depends on ARCH_STM32 || COMPILE_TEST
- default y
+ default ARCH_STM32
select RESET_CONTROLLER
help
Support for STM32MP SoC family clocks.
@@ -14,21 +14,28 @@ if COMMON_CLK_STM32MP
config COMMON_CLK_STM32MP135
bool "Clock driver for stm32mp13x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp13x SoC family clocks.
config COMMON_CLK_STM32MP157
bool "Clock driver for stm32mp15x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp15x SoC family clocks.
+config COMMON_CLK_STM32MP215
+ bool "Clock driver for stm32mp21x clocks"
+ depends on ARM || ARM64 || COMPILE_TEST
+ default ARCH_STM32
+ help
+ Support for stm32mp21x SoC family clocks.
+
config COMMON_CLK_STM32MP257
bool "Clock driver for stm32mp25x clocks"
depends on ARM64 || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp25x SoC family clocks.
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 0a627164fcce..e04727b59449 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP215) += clk-stm32mp21.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 933e3cde0795..72825b9c36a4 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -351,14 +351,14 @@ static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_stm32_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
const struct stm32_div_cfg *divider;
if (div->div_id == NO_STM32_DIV)
- return rate;
+ return 0;
divider = &div->clock_data->dividers[div->div_id];
@@ -369,14 +369,22 @@ static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(div->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
- rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
@@ -392,7 +400,7 @@ static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
const struct clk_ops clk_stm32_divider_ops = {
.recalc_rate = clk_stm32_divider_recalc_rate,
- .round_rate = clk_stm32_divider_round_rate,
+ .determine_rate = clk_stm32_divider_determine_rate,
.set_rate = clk_stm32_divider_set_rate,
};
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 5fcc4c77c11f..2d9ccd96ec98 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -970,12 +970,15 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return mult;
}
-static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int timer_ker_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long factor = __bestmult(hw, rate, *parent_rate);
+ unsigned long factor = __bestmult(hw, req->rate,
+ req->best_parent_rate);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1026,7 +1029,7 @@ static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
- .round_rate = timer_ker_round_rate,
+ .determine_rate = timer_ker_determine_rate,
.set_rate = timer_ker_set_rate,
};
@@ -2041,7 +2044,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
- /* Particulary Kernel Clocks (no mux or no gate) */
+ /* Particularly Kernel Clocks (no mux or no gate) */
MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
diff --git a/drivers/clk/stm32/clk-stm32mp21.c b/drivers/clk/stm32/clk-stm32mp21.c
new file mode 100644
index 000000000000..c8a37b716bd5
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp21.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+#include "stm32mp21_rcc.h"
+
+#include <dt-bindings/clock/st,stm32mp21-rcc.h>
+#include <dt-bindings/reset/st,stm32mp21-rcc.h>
+
+/* Max clock binding value */
+#define STM32MP21_LAST_CLK CK_SCMI_KER_ETR
+
+/* Clock security definition */
+#define SECF_NONE -1
+
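+/*
+ * RIF access control layout: one security bit per resource, packed
+ * RCC_REG_SIZE to a SECCFGR word, plus a CIDCFGR/SEMCR pair per resource
+ * at an 0x8-byte stride.
+ */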
+#define RCC_REG_SIZE 32
+#define RCC_SECCFGR(x) (((x) / RCC_REG_SIZE) * 0x4 + RCC_SECCFGR0)
+#define RCC_CIDCFGR(x) ((x) * 0x8 + RCC_R0CIDCFGR)
+#define RCC_SEMCR(x) ((x) * 0x8 + RCC_R0SEMCR)
+#define RCC_CID1 1
+
+/* Register: RIFSC_CIDCFGR */
+#define RCC_CIDCFGR_CFEN BIT(0)
+#define RCC_CIDCFGR_SEM_EN BIT(1)
+#define RCC_CIDCFGR_SEMWLC1_EN BIT(17)
+#define RCC_CIDCFGR_SCID_MASK GENMASK(6, 4)
+
+/* Register: RIFSC_SEMCR */
+#define RCC_SEMCR_SEMCID_MASK GENMASK(6, 4)
+
+#define MP21_RIF_RCC_MCO1 108
+#define MP21_RIF_RCC_MCO2 109
+
+#define SEC_RIFSC_FLAG BIT(31)
+#define SEC_RIFSC(_id) ((_id) | SEC_RIFSC_FLAG)
+
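+/* Input clocks consumed by this driver, used as clk_parent_data.index values. */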
+enum {
+ HSE,
+ HSI,
+ MSI,
+ LSE,
+ LSI,
+ HSE_DIV2,
+ ICN_HS_MCU,
+ ICN_LS_MCU,
+ ICN_SDMMC,
+ ICN_DDR,
+ ICN_DISPLAY,
+ ICN_HSL,
+ ICN_NIC,
+ FLEXGEN_07,
+ FLEXGEN_08,
+ FLEXGEN_09,
+ FLEXGEN_10,
+ FLEXGEN_11,
+ FLEXGEN_12,
+ FLEXGEN_13,
+ FLEXGEN_14,
+ FLEXGEN_16,
+ FLEXGEN_17,
+ FLEXGEN_18,
+ FLEXGEN_19,
+ FLEXGEN_20,
+ FLEXGEN_21,
+ FLEXGEN_22,
+ FLEXGEN_23,
+ FLEXGEN_24,
+ FLEXGEN_25,
+ FLEXGEN_26,
+ FLEXGEN_27,
+ FLEXGEN_29,
+ FLEXGEN_30,
+ FLEXGEN_31,
+ FLEXGEN_33,
+ FLEXGEN_36,
+ FLEXGEN_37,
+ FLEXGEN_38,
+ FLEXGEN_39,
+ FLEXGEN_40,
+ FLEXGEN_41,
+ FLEXGEN_42,
+ FLEXGEN_43,
+ FLEXGEN_44,
+ FLEXGEN_45,
+ FLEXGEN_46,
+ FLEXGEN_47,
+ FLEXGEN_48,
+ FLEXGEN_50,
+ FLEXGEN_51,
+ FLEXGEN_52,
+ FLEXGEN_53,
+ FLEXGEN_54,
+ FLEXGEN_55,
+ FLEXGEN_56,
+ FLEXGEN_57,
+ FLEXGEN_58,
+ FLEXGEN_61,
+ FLEXGEN_62,
+ FLEXGEN_63,
+ ICN_APB1,
+ ICN_APB2,
+ ICN_APB3,
+ ICN_APB4,
+ ICN_APB5,
+ ICN_APBDBG,
+ TIMG1,
+ TIMG2,
+};
+
+static const struct clk_parent_data adc1_src[] = {
+ { .index = FLEXGEN_46 },
+ { .index = ICN_LS_MCU },
+};
+
+static const struct clk_parent_data adc2_src[] = {
+ { .index = FLEXGEN_47 },
+ { .index = ICN_LS_MCU },
+ { .index = FLEXGEN_46 },
+};
+
+static const struct clk_parent_data usb2phy1_src[] = {
+ { .index = FLEXGEN_57 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb2phy2_src[] = {
+ { .index = FLEXGEN_58 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data dts_src[] = {
+ { .index = HSI },
+ { .index = HSE },
+ { .index = MSI },
+};
+
+static const struct clk_parent_data mco1_src[] = {
+ { .index = FLEXGEN_61 },
+};
+
+static const struct clk_parent_data mco2_src[] = {
+ { .index = FLEXGEN_62 },
+};
+
+enum enum_mux_cfg {
+ MUX_ADC1,
+ MUX_ADC2,
+ MUX_DTS,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_USB2PHY1,
+ MUX_USB2PHY2,
+ MUX_NB
+};
+
+#define MUX_CFG(id, _offset, _shift, _width) \
+ [id] = { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }
+
+static const struct stm32_mux_cfg stm32mp21_muxes[MUX_NB] = {
+ MUX_CFG(MUX_ADC1, RCC_ADC1CFGR, 12, 1),
+ MUX_CFG(MUX_ADC2, RCC_ADC2CFGR, 12, 2),
+ MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2),
+ MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1),
+ MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1),
+ MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1),
+ MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1),
+};
+
+enum enum_gate_cfg {
+ GATE_ADC1,
+ GATE_ADC2,
+ GATE_CRC,
+ GATE_CRYP1,
+ GATE_CRYP2,
+ GATE_CSI,
+ GATE_DCMIPP,
+ GATE_DCMIPSSI,
+ GATE_DDRPERFM,
+ GATE_DTS,
+ GATE_ETH1,
+ GATE_ETH1MAC,
+ GATE_ETH1RX,
+ GATE_ETH1STP,
+ GATE_ETH1TX,
+ GATE_ETH2,
+ GATE_ETH2MAC,
+ GATE_ETH2RX,
+ GATE_ETH2STP,
+ GATE_ETH2TX,
+ GATE_FDCAN,
+ GATE_HASH1,
+ GATE_HASH2,
+ GATE_HDP,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_I2C3,
+ GATE_I3C1,
+ GATE_I3C2,
+ GATE_I3C3,
+ GATE_IWDG1,
+ GATE_IWDG2,
+ GATE_IWDG3,
+ GATE_IWDG4,
+ GATE_LPTIM1,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_LPUART1,
+ GATE_LTDC,
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_MDF1,
+ GATE_OTG,
+ GATE_PKA,
+ GATE_RNG1,
+ GATE_RNG2,
+ GATE_SAES,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_SAI3,
+ GATE_SAI4,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_SDMMC3,
+ GATE_SERC,
+ GATE_SPDIFRX,
+ GATE_SPI1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_SPI6,
+ GATE_TIM1,
+ GATE_TIM10,
+ GATE_TIM11,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_TIM2,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_TIM8,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_USART3,
+ GATE_USART6,
+ GATE_USB2PHY1,
+ GATE_USB2PHY2,
+ GATE_USBH,
+ GATE_VREF,
+ GATE_WWDG1,
+ GATE_NB
+};
+
+#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \
+ [id] = { \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_offset_clr), \
+ }
+
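+/* All MP21 gates use plain read-modify-write enable bits (set_clr == 0). */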
+static const struct stm32_gate_cfg stm32mp21_gates[GATE_NB] = {
+ GATE_CFG(GATE_ADC1, RCC_ADC1CFGR, 1, 0),
+ GATE_CFG(GATE_ADC2, RCC_ADC2CFGR, 1, 0),
+ GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0),
+ GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0),
+ GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0),
+ GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPSSI, RCC_DCMIPSSICFGR, 1, 0),
+ GATE_CFG(GATE_DDRPERFM, RCC_DDRPERFMCFGR, 1, 0),
+ GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0),
+ GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0),
+ GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0),
+ GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0),
+ GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0),
+ GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0),
+ GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0),
+ GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0),
+ GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0),
+ GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0),
+ GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0),
+ GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0),
+ GATE_CFG(GATE_HASH1, RCC_HASH1CFGR, 1, 0),
+ GATE_CFG(GATE_HASH2, RCC_HASH2CFGR, 1, 0),
+ GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0),
+ GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0),
+ GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0),
+ GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0),
+ GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0),
+ GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0),
+ GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0),
+ GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0),
+ GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0),
+ GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0),
+ GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0),
+ GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0),
+ GATE_CFG(GATE_OTG, RCC_OTGCFGR, 1, 0),
+ GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0),
+ GATE_CFG(GATE_RNG1, RCC_RNG1CFGR, 1, 0),
+ GATE_CFG(GATE_RNG2, RCC_RNG2CFGR, 1, 0),
+ GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0),
+ GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0),
+ GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0),
+ GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0),
+ GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0),
+ GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0),
+ GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0),
+ GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0),
+ GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0),
+ GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0),
+ GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0),
+ GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0),
+ GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0),
+ GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0),
+ GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0),
+ GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0),
+ GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0),
+ GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0),
+ GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0),
+ GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0),
+ GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0),
+ GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0),
+ GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0),
+ GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0),
+ GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0),
+ GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0),
+ GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0),
+ GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0),
+ GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0),
+ GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0),
+ GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0),
+ GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0),
+ GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0),
+ GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0),
+ GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0),
+ GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0),
+ GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0),
+};
+
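+/*
+ * Build a clk_init_data with a single parent identified by index
+ * (clk_parent_data.index) rather than by name.
+ */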
+#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .index = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/* ADC */
+static struct clk_stm32_gate ck_icn_p_adc1 = {
+ .gate_id = GATE_ADC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc1 = {
+ .gate_id = GATE_ADC1,
+ .mux_id = MUX_ADC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc1", adc1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_adc2 = {
+ .gate_id = GATE_ADC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc2 = {
+ .gate_id = GATE_ADC2,
+ .mux_id = MUX_ADC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc2", adc2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* CSI-HOST */
+static struct clk_stm32_gate ck_icn_p_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csitxesc = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-PHY */
+static struct clk_stm32_gate ck_ker_csiphy = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMIPP */
+static struct clk_stm32_gate ck_icn_p_dcmipp = {
+ .gate_id = GATE_DCMIPP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_dcmipssi = {
+ .gate_id = GATE_DCMIPSSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipssi", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DDRPERFM */
+static struct clk_stm32_gate ck_icn_p_ddrperfm = {
+ .gate_id = GATE_DDRPERFM,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ddrperfm", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* CRC */
+static struct clk_stm32_gate ck_icn_p_crc = {
+ .gate_id = GATE_CRC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CRYP */
+static struct clk_stm32_gate ck_icn_p_cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_cryp2 = {
+ .gate_id = GATE_CRYP2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DBG & TRACE */
+/* Trace and debug clocks are managed by SCMI */
+
+/* LTDC */
+static struct clk_stm32_gate ck_icn_p_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* DTS */
+static struct clk_stm32_composite ck_ker_dts = {
+ .gate_id = GATE_DTS,
+ .mux_id = MUX_DTS,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* ETHERNET */
+static struct clk_stm32_gate ck_icn_p_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1ptp = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2ptp = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* FDCAN */
+static struct clk_stm32_gate ck_icn_p_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0),
+};
+
+/* HASH */
+static struct clk_stm32_gate ck_icn_p_hash1 = {
+ .gate_id = GATE_HASH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_hash2 = {
+ .gate_id = GATE_HASH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* HDP */
+static struct clk_stm32_gate ck_icn_p_hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* I2C */
+static struct clk_stm32_gate ck_icn_p_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_38, &clk_stm32_gate_ops, 0),
+};
+
+/* I3C */
+static struct clk_stm32_gate ck_icn_p_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_36, &clk_stm32_gate_ops, 0),
+};
+
+/* IWDG */
+static struct clk_stm32_gate ck_icn_p_iwdg1 = {
+ .gate_id = GATE_IWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg2 = {
+ .gate_id = GATE_IWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg3 = {
+ .gate_id = GATE_IWDG3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg4 = {
+ .gate_id = GATE_IWDG4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* LPTIM */
+static struct clk_stm32_gate ck_icn_p_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_42, &clk_stm32_gate_ops, 0),
+};
+
+/* LPUART */
+static struct clk_stm32_gate ck_icn_p_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0),
+};
+
+/* MCO1 & MCO2 */
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* MDF */
+static struct clk_stm32_gate ck_icn_p_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+/* OTG */
+static struct clk_stm32_gate ck_icn_m_otg = {
+ .gate_id = GATE_OTG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_otg", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* PKA */
+static struct clk_stm32_gate ck_icn_p_pka = {
+ .gate_id = GATE_PKA,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pka", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* RNG */
+static struct clk_stm32_gate ck_icn_p_rng1 = {
+ .gate_id = GATE_RNG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_rng2 = {
+ .gate_id = GATE_RNG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAES */
+static struct clk_stm32_gate ck_icn_p_saes = {
+ .gate_id = GATE_SAES,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_saes", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAI */
+static struct clk_stm32_gate ck_icn_p_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_22, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_23, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_24, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* SDMMC */
+static struct clk_stm32_gate ck_icn_m_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0),
+};
+
+/* SERC */
+static struct clk_stm32_gate ck_icn_p_serc = {
+ .gate_id = GATE_SERC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_serc", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* SPDIF */
+static struct clk_stm32_gate ck_icn_p_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+/* SPI */
+static struct clk_stm32_gate ck_icn_p_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_11, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_37, &clk_stm32_gate_ops, 0),
+};
+
+/* Timers */
+static struct clk_stm32_gate ck_icn_p_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+/* UART/USART */
+static struct clk_stm32_gate ck_icn_p_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_19, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_20, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY1 */
+static struct clk_stm32_composite ck_ker_usb2phy1 = {
+ .gate_id = GATE_USB2PHY1,
+ .mux_id = MUX_USB2PHY1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USBH */
+static struct clk_stm32_gate ck_icn_m_usbhehci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhehci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_usbhohci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhohci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY2 */
+static struct clk_stm32_composite ck_ker_usb2phy2_en = {
+ .gate_id = GATE_USB2PHY2,
+ .mux_id = MUX_USB2PHY2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* VREF */
+static struct clk_stm32_gate ck_icn_p_vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* WWDG */
+static struct clk_stm32_gate ck_icn_p_wwdg1 = {
+ .gate_id = GATE_WWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
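+/*
+ * RIF access check for an RCC-local resource: access is refused when
+ * the resource is flagged secure, granted when CID filtering is
+ * disabled, and otherwise requires either a static CID match on CID1
+ * or, in semaphore mode, CID1 being pass-listed and currently holding
+ * the semaphore.
+ */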
+static int stm32_rcc_get_access(void __iomem *base, u32 index)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int bit, cid;
+
+ bit = index % RCC_REG_SIZE;
+
+ seccfgr = readl(base + RCC_SECCFGR(index));
+ if (seccfgr & BIT(bit))
+ return -EACCES;
+
+ cidcfgr = readl(base + RCC_CIDCFGR(index));
+ if (!(cidcfgr & RCC_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & RCC_CIDCFGR_SEM_EN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(RCC_CIDCFGR_SCID_MASK, cidcfgr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & RCC_CIDCFGR_SEMWLC1_EN))
+ return -EACCES;
+
+ semcr = readl(base + RCC_SEMCR(index));
+
+ cid = FIELD_GET(RCC_SEMCR_SEMCID_MASK, semcr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
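+/*
+ * Clocks tagged with SEC_RIFSC() belong to RIFSC-controlled
+ * peripherals and are checked through the firewall framework; the
+ * remaining ones are RCC-local resources checked directly against the
+ * RIF registers above.
+ */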
+static int stm32mp21_check_security(struct device_node *np, void __iomem *base,
+ const struct clock_config *cfg)
+{
+ int ret = 0;
+
+ if (cfg->sec_id != SECF_NONE) {
+ struct stm32_firewall firewall;
+ u32 index = (u32)cfg->sec_id;
+
+ if (index & SEC_RIFSC_FLAG) {
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+ ret = stm32_firewall_grant_access_by_id(&firewall, index & ~SEC_RIFSC_FLAG);
+ } else {
+ ret = stm32_rcc_get_access(base, cfg->sec_id & ~SEC_RIFSC_FLAG);
+ }
+ }
+
+ return ret;
+}
+
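+/*
+ * Map the binding clock IDs onto the clock definitions above. Each
+ * entry carries the security descriptor (RIFSC peripheral ID or
+ * RCC-local resource) used by stm32mp21_check_security() to skip
+ * clocks this context may not access.
+ */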
+static const struct clock_config stm32mp21_clock_cfg[] = {
+ STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_BUS_ADC1, ck_icn_p_adc1, SEC_RIFSC(58)),
+ STM32_GATE_CFG(CK_BUS_ADC2, ck_icn_p_adc2, SEC_RIFSC(59)),
+ STM32_GATE_CFG(CK_BUS_CRC, ck_icn_p_crc, SEC_RIFSC(109)),
+ STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_BUS_HASH1, ck_icn_p_hash1, SEC_RIFSC(96)),
+ STM32_GATE_CFG(CK_BUS_HASH2, ck_icn_p_hash2, SEC_RIFSC(97)),
+ STM32_GATE_CFG(CK_BUS_RNG1, ck_icn_p_rng1, SEC_RIFSC(92)),
+ STM32_GATE_CFG(CK_BUS_RNG2, ck_icn_p_rng2, SEC_RIFSC(93)),
+ STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SEC_RIFSC(98)),
+ STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SEC_RIFSC(99)),
+ STM32_GATE_CFG(CK_BUS_SAES, ck_icn_p_saes, SEC_RIFSC(95)),
+ STM32_GATE_CFG(CK_BUS_PKA, ck_icn_p_pka, SEC_RIFSC(94)),
+ STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_BUS_USBHOHCI, ck_icn_m_usbhohci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_USBHEHCI, ck_icn_m_usbhehci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_OTG, ck_icn_m_otg, SEC_RIFSC(66)),
+ STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_BUS_IWDG1, ck_icn_p_iwdg1, SEC_RIFSC(100)),
+ STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SEC_RIFSC(101)),
+ STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SEC_RIFSC(102)),
+ STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SEC_RIFSC(103)),
+ STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SEC_RIFSC(104)),
+ STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SEC_RIFSC(106)),
+ STM32_GATE_CFG(CK_BUS_SERC, ck_icn_p_serc, SEC_RIFSC(110)),
+ STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SEC_RIFSC(57)),
+ STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SEC_RIFSC(80)),
+ STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SEC_RIFSC(87)),
+ STM32_GATE_CFG(CK_BUS_DCMIPSSI, ck_icn_p_dcmipssi, SEC_RIFSC(88)),
+ STM32_GATE_CFG(CK_BUS_DDRPERFM, ck_icn_p_ddrperfm, SEC_RIFSC(67)),
+ STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SEC_RIFSC(61)),
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, MP21_RIF_RCC_MCO1),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, MP21_RIF_RCC_MCO2),
+ STM32_COMPOSITE_CFG(CK_KER_ADC1, ck_ker_adc1, SEC_RIFSC(58)),
+ STM32_COMPOSITE_CFG(CK_KER_ADC2, ck_ker_adc2, SEC_RIFSC(59)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SEC_RIFSC(63)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SEC_RIFSC(66)),
+ STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SEC_RIFSC(107)),
+ STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SEC_RIFSC(80)),
+};
+
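+/*
+ * Reset lines live in the same peripheral CFGR registers as the
+ * gates, usually on bit 0; _set_clr marks lines driven through
+ * dedicated set/clear registers, as for the IWDG kernel resets below.
+ */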
+#define RESET_MP21(id, _offset, _bit_idx, _set_clr) \
+ [id] = &(struct stm32_reset_cfg){ \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_set_clr), \
+ }
+
+static const struct stm32_reset_cfg *stm32mp21_reset_cfg[] = {
+ RESET_MP21(TIM1_R, RCC_TIM1CFGR, 0, 0),
+ RESET_MP21(TIM2_R, RCC_TIM2CFGR, 0, 0),
+ RESET_MP21(TIM3_R, RCC_TIM3CFGR, 0, 0),
+ RESET_MP21(TIM4_R, RCC_TIM4CFGR, 0, 0),
+ RESET_MP21(TIM5_R, RCC_TIM5CFGR, 0, 0),
+ RESET_MP21(TIM6_R, RCC_TIM6CFGR, 0, 0),
+ RESET_MP21(TIM7_R, RCC_TIM7CFGR, 0, 0),
+ RESET_MP21(TIM8_R, RCC_TIM8CFGR, 0, 0),
+ RESET_MP21(TIM10_R, RCC_TIM10CFGR, 0, 0),
+ RESET_MP21(TIM11_R, RCC_TIM11CFGR, 0, 0),
+ RESET_MP21(TIM12_R, RCC_TIM12CFGR, 0, 0),
+ RESET_MP21(TIM13_R, RCC_TIM13CFGR, 0, 0),
+ RESET_MP21(TIM14_R, RCC_TIM14CFGR, 0, 0),
+ RESET_MP21(TIM15_R, RCC_TIM15CFGR, 0, 0),
+ RESET_MP21(TIM16_R, RCC_TIM16CFGR, 0, 0),
+ RESET_MP21(TIM17_R, RCC_TIM17CFGR, 0, 0),
+ RESET_MP21(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0),
+ RESET_MP21(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0),
+ RESET_MP21(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0),
+ RESET_MP21(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0),
+ RESET_MP21(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0),
+ RESET_MP21(SPI1_R, RCC_SPI1CFGR, 0, 0),
+ RESET_MP21(SPI2_R, RCC_SPI2CFGR, 0, 0),
+ RESET_MP21(SPI3_R, RCC_SPI3CFGR, 0, 0),
+ RESET_MP21(SPI4_R, RCC_SPI4CFGR, 0, 0),
+ RESET_MP21(SPI5_R, RCC_SPI5CFGR, 0, 0),
+ RESET_MP21(SPI6_R, RCC_SPI6CFGR, 0, 0),
+ RESET_MP21(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0),
+ RESET_MP21(USART1_R, RCC_USART1CFGR, 0, 0),
+ RESET_MP21(USART2_R, RCC_USART2CFGR, 0, 0),
+ RESET_MP21(USART3_R, RCC_USART3CFGR, 0, 0),
+ RESET_MP21(UART4_R, RCC_UART4CFGR, 0, 0),
+ RESET_MP21(UART5_R, RCC_UART5CFGR, 0, 0),
+ RESET_MP21(USART6_R, RCC_USART6CFGR, 0, 0),
+ RESET_MP21(UART7_R, RCC_UART7CFGR, 0, 0),
+ RESET_MP21(LPUART1_R, RCC_LPUART1CFGR, 0, 0),
+ RESET_MP21(I2C1_R, RCC_I2C1CFGR, 0, 0),
+ RESET_MP21(I2C2_R, RCC_I2C2CFGR, 0, 0),
+ RESET_MP21(I2C3_R, RCC_I2C3CFGR, 0, 0),
+ RESET_MP21(SAI1_R, RCC_SAI1CFGR, 0, 0),
+ RESET_MP21(SAI2_R, RCC_SAI2CFGR, 0, 0),
+ RESET_MP21(SAI3_R, RCC_SAI3CFGR, 0, 0),
+ RESET_MP21(SAI4_R, RCC_SAI4CFGR, 0, 0),
+ RESET_MP21(MDF1_R, RCC_MDF1CFGR, 0, 0),
+ RESET_MP21(FDCAN_R, RCC_FDCANCFGR, 0, 0),
+ RESET_MP21(HDP_R, RCC_HDPCFGR, 0, 0),
+ RESET_MP21(ADC1_R, RCC_ADC1CFGR, 0, 0),
+ RESET_MP21(ADC2_R, RCC_ADC2CFGR, 0, 0),
+ RESET_MP21(ETH1_R, RCC_ETH1CFGR, 0, 0),
+ RESET_MP21(ETH2_R, RCC_ETH2CFGR, 0, 0),
+ RESET_MP21(OTG_R, RCC_OTGCFGR, 0, 0),
+ RESET_MP21(USBH_R, RCC_USBHCFGR, 0, 0),
+ RESET_MP21(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0),
+ RESET_MP21(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0),
+ RESET_MP21(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0),
+ RESET_MP21(SDMMC1DLL_R, RCC_SDMMC1CFGR, 16, 0),
+ RESET_MP21(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0),
+ RESET_MP21(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0),
+ RESET_MP21(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0),
+ RESET_MP21(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0),
+ RESET_MP21(LTDC_R, RCC_LTDCCFGR, 0, 0),
+ RESET_MP21(CSI_R, RCC_CSICFGR, 0, 0),
+ RESET_MP21(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0),
+ RESET_MP21(DCMIPSSI_R, RCC_DCMIPSSICFGR, 0, 0),
+ RESET_MP21(WWDG1_R, RCC_WWDG1CFGR, 0, 0),
+ RESET_MP21(VREF_R, RCC_VREFCFGR, 0, 0),
+ RESET_MP21(DTS_R, RCC_DTSCFGR, 0, 0),
+ RESET_MP21(CRC_R, RCC_CRCCFGR, 0, 0),
+ RESET_MP21(SERC_R, RCC_SERCCFGR, 0, 0),
+ RESET_MP21(I3C1_R, RCC_I3C1CFGR, 0, 0),
+ RESET_MP21(I3C2_R, RCC_I3C2CFGR, 0, 0),
+ RESET_MP21(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1),
+ RESET_MP21(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1),
+ RESET_MP21(RNG1_R, RCC_RNG1CFGR, 0, 0),
+ RESET_MP21(RNG2_R, RCC_RNG2CFGR, 0, 0),
+ RESET_MP21(PKA_R, RCC_PKACFGR, 0, 0),
+ RESET_MP21(SAES_R, RCC_SAESCFGR, 0, 0),
+ RESET_MP21(HASH1_R, RCC_HASH1CFGR, 0, 0),
+ RESET_MP21(HASH2_R, RCC_HASH2CFGR, 0, 0),
+ RESET_MP21(CRYP1_R, RCC_CRYP1CFGR, 0, 0),
+ RESET_MP21(CRYP2_R, RCC_CRYP2CFGR, 0, 0),
+};
+
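+/*
+ * Per-gate enable counters: several clocks above share one hardware
+ * gate (the CSI and ETH kernel clocks, for instance), and the common
+ * STM32 clock code uses these counters to refcount them.
+ */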
+static u16 stm32mp21_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp21_clock_data = {
+ .gate_cpt = stm32mp21_cpt_gate,
+ .gates = stm32mp21_gates,
+ .muxes = stm32mp21_muxes,
+};
+
+static struct clk_stm32_reset_data stm32mp21_reset_data = {
+ .reset_lines = stm32mp21_reset_cfg,
+ .nr_lines = ARRAY_SIZE(stm32mp21_reset_cfg),
+};
+
+static const struct stm32_rcc_match_data stm32mp21_data = {
+ .tab_clocks = stm32mp21_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp21_clock_cfg),
+ .maxbinding = STM32MP21_LAST_CLK,
+ .clock_data = &stm32mp21_clock_data,
+ .reset_data = &stm32mp21_reset_data,
+ .check_security = &stm32mp21_check_security,
+};
+
+static const struct of_device_id stm32mp21_match_data[] = {
+ { .compatible = "st,stm32mp21-rcc", .data = &stm32mp21_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp21_match_data);
+
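+/* Map the RCC register bank and hand it to the common STM32 RCC init */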
+static int stm32mp21_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ return stm32_rcc_init(dev, stm32mp21_match_data, base);
+}
+
+static struct platform_driver stm32mp21_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp21_rcc",
+ .of_match_table = stm32mp21_match_data,
+ },
+ .probe = stm32mp21_rcc_clocks_probe,
+};
+
+static int __init stm32mp21_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp21_rcc_clocks_driver);
+}
+
+core_initcall(stm32mp21_clocks_init);
+
diff --git a/drivers/clk/stm32/stm32mp21_rcc.h b/drivers/clk/stm32/stm32mp21_rcc.h
new file mode 100644
index 000000000000..df3ea921ffba
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp21_rcc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef STM32MP21_RCC_H
+#define STM32MP21_RCC_H
+
+#define RCC_SECCFGR0 0x0
+#define RCC_SECCFGR1 0x4
+#define RCC_SECCFGR2 0x8
+#define RCC_SECCFGR3 0xC
+#define RCC_PRIVCFGR0 0x10
+#define RCC_PRIVCFGR1 0x14
+#define RCC_PRIVCFGR2 0x18
+#define RCC_PRIVCFGR3 0x1C
+#define RCC_RCFGLOCKR0 0x20
+#define RCC_RCFGLOCKR1 0x24
+#define RCC_RCFGLOCKR2 0x28
+#define RCC_RCFGLOCKR3 0x2C
+#define RCC_R0CIDCFGR 0x30
+#define RCC_R0SEMCR 0x34
+#define RCC_R1CIDCFGR 0x38
+#define RCC_R1SEMCR 0x3C
+#define RCC_R2CIDCFGR 0x40
+#define RCC_R2SEMCR 0x44
+#define RCC_R3CIDCFGR 0x48
+#define RCC_R3SEMCR 0x4C
+#define RCC_R4CIDCFGR 0x50
+#define RCC_R4SEMCR 0x54
+#define RCC_R5CIDCFGR 0x58
+#define RCC_R5SEMCR 0x5C
+#define RCC_R6CIDCFGR 0x60
+#define RCC_R6SEMCR 0x64
+#define RCC_R7CIDCFGR 0x68
+#define RCC_R7SEMCR 0x6C
+#define RCC_R8CIDCFGR 0x70
+#define RCC_R8SEMCR 0x74
+#define RCC_R9CIDCFGR 0x78
+#define RCC_R9SEMCR 0x7C
+#define RCC_R10CIDCFGR 0x80
+#define RCC_R10SEMCR 0x84
+#define RCC_R11CIDCFGR 0x88
+#define RCC_R11SEMCR 0x8C
+#define RCC_R12CIDCFGR 0x90
+#define RCC_R12SEMCR 0x94
+#define RCC_R13CIDCFGR 0x98
+#define RCC_R13SEMCR 0x9C
+#define RCC_R14CIDCFGR 0xA0
+#define RCC_R14SEMCR 0xA4
+#define RCC_R15CIDCFGR 0xA8
+#define RCC_R15SEMCR 0xAC
+#define RCC_R16CIDCFGR 0xB0
+#define RCC_R16SEMCR 0xB4
+#define RCC_R17CIDCFGR 0xB8
+#define RCC_R17SEMCR 0xBC
+#define RCC_R18CIDCFGR 0xC0
+#define RCC_R18SEMCR 0xC4
+#define RCC_R19CIDCFGR 0xC8
+#define RCC_R19SEMCR 0xCC
+#define RCC_R20CIDCFGR 0xD0
+#define RCC_R20SEMCR 0xD4
+#define RCC_R21CIDCFGR 0xD8
+#define RCC_R21SEMCR 0xDC
+#define RCC_R22CIDCFGR 0xE0
+#define RCC_R22SEMCR 0xE4
+#define RCC_R23CIDCFGR 0xE8
+#define RCC_R23SEMCR 0xEC
+#define RCC_R24CIDCFGR 0xF0
+#define RCC_R24SEMCR 0xF4
+#define RCC_R25CIDCFGR 0xF8
+#define RCC_R25SEMCR 0xFC
+#define RCC_R26CIDCFGR 0x100
+#define RCC_R26SEMCR 0x104
+#define RCC_R27CIDCFGR 0x108
+#define RCC_R27SEMCR 0x10C
+#define RCC_R28CIDCFGR 0x110
+#define RCC_R28SEMCR 0x114
+#define RCC_R29CIDCFGR 0x118
+#define RCC_R29SEMCR 0x11C
+#define RCC_R30CIDCFGR 0x120
+#define RCC_R30SEMCR 0x124
+#define RCC_R31CIDCFGR 0x128
+#define RCC_R31SEMCR 0x12C
+#define RCC_R32CIDCFGR 0x130
+#define RCC_R32SEMCR 0x134
+#define RCC_R33CIDCFGR 0x138
+#define RCC_R33SEMCR 0x13C
+#define RCC_R34CIDCFGR 0x140
+#define RCC_R34SEMCR 0x144
+#define RCC_R35CIDCFGR 0x148
+#define RCC_R35SEMCR 0x14C
+#define RCC_R36CIDCFGR 0x150
+#define RCC_R36SEMCR 0x154
+#define RCC_R37CIDCFGR 0x158
+#define RCC_R37SEMCR 0x15C
+#define RCC_R38CIDCFGR 0x160
+#define RCC_R38SEMCR 0x164
+#define RCC_R39CIDCFGR 0x168
+#define RCC_R39SEMCR 0x16C
+#define RCC_R40CIDCFGR 0x170
+#define RCC_R40SEMCR 0x174
+#define RCC_R41CIDCFGR 0x178
+#define RCC_R41SEMCR 0x17C
+#define RCC_R42CIDCFGR 0x180
+#define RCC_R42SEMCR 0x184
+#define RCC_R43CIDCFGR 0x188
+#define RCC_R43SEMCR 0x18C
+#define RCC_R44CIDCFGR 0x190
+#define RCC_R44SEMCR 0x194
+#define RCC_R45CIDCFGR 0x198
+#define RCC_R45SEMCR 0x19C
+#define RCC_R46CIDCFGR 0x1A0
+#define RCC_R46SEMCR 0x1A4
+#define RCC_R47CIDCFGR 0x1A8
+#define RCC_R47SEMCR 0x1AC
+#define RCC_R48CIDCFGR 0x1B0
+#define RCC_R48SEMCR 0x1B4
+#define RCC_R49CIDCFGR 0x1B8
+#define RCC_R49SEMCR 0x1BC
+#define RCC_R50CIDCFGR 0x1C0
+#define RCC_R50SEMCR 0x1C4
+#define RCC_R51CIDCFGR 0x1C8
+#define RCC_R51SEMCR 0x1CC
+#define RCC_R52CIDCFGR 0x1D0
+#define RCC_R52SEMCR 0x1D4
+#define RCC_R53CIDCFGR 0x1D8
+#define RCC_R53SEMCR 0x1DC
+#define RCC_R54CIDCFGR 0x1E0
+#define RCC_R54SEMCR 0x1E4
+#define RCC_R55CIDCFGR 0x1E8
+#define RCC_R55SEMCR 0x1EC
+#define RCC_R56CIDCFGR 0x1F0
+#define RCC_R56SEMCR 0x1F4
+#define RCC_R57CIDCFGR 0x1F8
+#define RCC_R57SEMCR 0x1FC
+#define RCC_R58CIDCFGR 0x200
+#define RCC_R58SEMCR 0x204
+#define RCC_R59CIDCFGR 0x208
+#define RCC_R59SEMCR 0x20C
+#define RCC_R60CIDCFGR 0x210
+#define RCC_R60SEMCR 0x214
+#define RCC_R61CIDCFGR 0x218
+#define RCC_R61SEMCR 0x21C
+#define RCC_R62CIDCFGR 0x220
+#define RCC_R62SEMCR 0x224
+#define RCC_R63CIDCFGR 0x228
+#define RCC_R63SEMCR 0x22C
+#define RCC_R64CIDCFGR 0x230
+#define RCC_R64SEMCR 0x234
+#define RCC_R65CIDCFGR 0x238
+#define RCC_R65SEMCR 0x23C
+#define RCC_R66CIDCFGR 0x240
+#define RCC_R66SEMCR 0x244
+#define RCC_R67CIDCFGR 0x248
+#define RCC_R67SEMCR 0x24C
+#define RCC_R68CIDCFGR 0x250
+#define RCC_R68SEMCR 0x254
+#define RCC_R69CIDCFGR 0x258
+#define RCC_R69SEMCR 0x25C
+#define RCC_R70CIDCFGR 0x260
+#define RCC_R70SEMCR 0x264
+#define RCC_R71CIDCFGR 0x268
+#define RCC_R71SEMCR 0x26C
+#define RCC_R73CIDCFGR 0x278
+#define RCC_R73SEMCR 0x27C
+#define RCC_R74CIDCFGR 0x280
+#define RCC_R74SEMCR 0x284
+#define RCC_R75CIDCFGR 0x288
+#define RCC_R75SEMCR 0x28C
+#define RCC_R76CIDCFGR 0x290
+#define RCC_R76SEMCR 0x294
+#define RCC_R77CIDCFGR 0x298
+#define RCC_R77SEMCR 0x29C
+#define RCC_R78CIDCFGR 0x2A0
+#define RCC_R78SEMCR 0x2A4
+#define RCC_R79CIDCFGR 0x2A8
+#define RCC_R79SEMCR 0x2AC
+#define RCC_R83CIDCFGR 0x2C8
+#define RCC_R83SEMCR 0x2CC
+#define RCC_R84CIDCFGR 0x2D0
+#define RCC_R84SEMCR 0x2D4
+#define RCC_R85CIDCFGR 0x2D8
+#define RCC_R85SEMCR 0x2DC
+#define RCC_R86CIDCFGR 0x2E0
+#define RCC_R86SEMCR 0x2E4
+#define RCC_R87CIDCFGR 0x2E8
+#define RCC_R87SEMCR 0x2EC
+#define RCC_R88CIDCFGR 0x2F0
+#define RCC_R88SEMCR 0x2F4
+#define RCC_R90CIDCFGR 0x300
+#define RCC_R90SEMCR 0x304
+#define RCC_R91CIDCFGR 0x308
+#define RCC_R91SEMCR 0x30C
+#define RCC_R92CIDCFGR 0x310
+#define RCC_R92SEMCR 0x314
+#define RCC_R93CIDCFGR 0x318
+#define RCC_R93SEMCR 0x31C
+#define RCC_R94CIDCFGR 0x320
+#define RCC_R94SEMCR 0x324
+#define RCC_R95CIDCFGR 0x328
+#define RCC_R95SEMCR 0x32C
+#define RCC_R96CIDCFGR 0x330
+#define RCC_R96SEMCR 0x334
+#define RCC_R97CIDCFGR 0x338
+#define RCC_R97SEMCR 0x33C
+#define RCC_R98CIDCFGR 0x340
+#define RCC_R98SEMCR 0x344
+#define RCC_R101CIDCFGR 0x358
+#define RCC_R101SEMCR 0x35C
+#define RCC_R102CIDCFGR 0x360
+#define RCC_R102SEMCR 0x364
+#define RCC_R103CIDCFGR 0x368
+#define RCC_R103SEMCR 0x36C
+#define RCC_R104CIDCFGR 0x370
+#define RCC_R104SEMCR 0x374
+#define RCC_R105CIDCFGR 0x378
+#define RCC_R105SEMCR 0x37C
+#define RCC_R106CIDCFGR 0x380
+#define RCC_R106SEMCR 0x384
+#define RCC_R108CIDCFGR 0x390
+#define RCC_R108SEMCR 0x394
+#define RCC_R109CIDCFGR 0x398
+#define RCC_R109SEMCR 0x39C
+#define RCC_R110CIDCFGR 0x3A0
+#define RCC_R110SEMCR 0x3A4
+#define RCC_R111CIDCFGR 0x3A8
+#define RCC_R111SEMCR 0x3AC
+#define RCC_R112CIDCFGR 0x3B0
+#define RCC_R112SEMCR 0x3B4
+#define RCC_R113CIDCFGR 0x3B8
+#define RCC_R113SEMCR 0x3BC
+#define RCC_GRSTCSETR 0x400
+#define RCC_C1RSTCSETR 0x404
+#define RCC_C2RSTCSETR 0x40C
+#define RCC_HWRSTSCLRR 0x410
+#define RCC_C1HWRSTSCLRR 0x414
+#define RCC_C2HWRSTSCLRR 0x418
+#define RCC_C1BOOTRSTSSETR 0x41C
+#define RCC_C1BOOTRSTSCLRR 0x420
+#define RCC_C2BOOTRSTSSETR 0x424
+#define RCC_C2BOOTRSTSCLRR 0x428
+#define RCC_C1SREQSETR 0x42C
+#define RCC_C1SREQCLRR 0x430
+#define RCC_CPUBOOTCR 0x434
+#define RCC_STBYBOOTCR 0x438
+#define RCC_LEGBOOTCR 0x43C
+#define RCC_BDCR 0x440
+#define RCC_RDCR 0x44C
+#define RCC_C1MSRDCR 0x450
+#define RCC_PWRLPDLYCR 0x454
+#define RCC_C1CIESETR 0x458
+#define RCC_C1CIFCLRR 0x45C
+#define RCC_C2CIESETR 0x460
+#define RCC_C2CIFCLRR 0x464
+#define RCC_IWDGC1FZSETR 0x468
+#define RCC_IWDGC1FZCLRR 0x46C
+#define RCC_IWDGC1CFGSETR 0x470
+#define RCC_IWDGC1CFGCLRR 0x474
+#define RCC_IWDGC2FZSETR 0x478
+#define RCC_IWDGC2FZCLRR 0x47C
+#define RCC_IWDGC2CFGSETR 0x480
+#define RCC_IWDGC2CFGCLRR 0x484
+#define RCC_MCO1CFGR 0x488
+#define RCC_MCO2CFGR 0x48C
+#define RCC_OCENSETR 0x490
+#define RCC_OCENCLRR 0x494
+#define RCC_OCRDYR 0x498
+#define RCC_HSICFGR 0x49C
+#define RCC_MSICFGR 0x4A0
+#define RCC_LSICR 0x4A4
+#define RCC_RTCDIVR 0x4A8
+#define RCC_APB1DIVR 0x4AC
+#define RCC_APB2DIVR 0x4B0
+#define RCC_APB3DIVR 0x4B4
+#define RCC_APB4DIVR 0x4B8
+#define RCC_APB5DIVR 0x4BC
+#define RCC_APBDBGDIVR 0x4C0
+#define RCC_TIMG1PRER 0x4C8
+#define RCC_TIMG2PRER 0x4CC
+#define RCC_LSMCUDIVR 0x4D0
+#define RCC_DDRCPCFGR 0x4D4
+#define RCC_DDRCAPBCFGR 0x4D8
+#define RCC_DDRPHYCAPBCFGR 0x4DC
+#define RCC_DDRPHYCCFGR 0x4E0
+#define RCC_DDRCFGR 0x4E4
+#define RCC_DDRITFCFGR 0x4E8
+#define RCC_SYSRAMCFGR 0x4F0
+#define RCC_SRAM1CFGR 0x4F8
+#define RCC_RETRAMCFGR 0x500
+#define RCC_BKPSRAMCFGR 0x504
+#define RCC_OSPI1CFGR 0x514
+#define RCC_FMCCFGR 0x51C
+#define RCC_DBGCFGR 0x520
+#define RCC_STMCFGR 0x524
+#define RCC_ETRCFGR 0x528
+#define RCC_GPIOACFGR 0x52C
+#define RCC_GPIOBCFGR 0x530
+#define RCC_GPIOCCFGR 0x534
+#define RCC_GPIODCFGR 0x538
+#define RCC_GPIOECFGR 0x53C
+#define RCC_GPIOFCFGR 0x540
+#define RCC_GPIOGCFGR 0x544
+#define RCC_GPIOHCFGR 0x548
+#define RCC_GPIOICFGR 0x54C
+#define RCC_GPIOZCFGR 0x558
+#define RCC_HPDMA1CFGR 0x55C
+#define RCC_HPDMA2CFGR 0x560
+#define RCC_HPDMA3CFGR 0x564
+#define RCC_IPCC1CFGR 0x570
+#define RCC_RTCCFGR 0x578
+#define RCC_SYSCPU1CFGR 0x580
+#define RCC_BSECCFGR 0x584
+#define RCC_PLL2CFGR1 0x590
+#define RCC_PLL2CFGR2 0x594
+#define RCC_PLL2CFGR3 0x598
+#define RCC_PLL2CFGR4 0x59C
+#define RCC_PLL2CFGR5 0x5A0
+#define RCC_PLL2CFGR6 0x5A8
+#define RCC_PLL2CFGR7 0x5AC
+#define RCC_HSIFMONCR 0x5E0
+#define RCC_HSIFVALR 0x5E4
+#define RCC_MSIFMONCR 0x5E8
+#define RCC_MSIFVALR 0x5EC
+#define RCC_TIM1CFGR 0x700
+#define RCC_TIM2CFGR 0x704
+#define RCC_TIM3CFGR 0x708
+#define RCC_TIM4CFGR 0x70C
+#define RCC_TIM5CFGR 0x710
+#define RCC_TIM6CFGR 0x714
+#define RCC_TIM7CFGR 0x718
+#define RCC_TIM8CFGR 0x71C
+#define RCC_TIM10CFGR 0x720
+#define RCC_TIM11CFGR 0x724
+#define RCC_TIM12CFGR 0x728
+#define RCC_TIM13CFGR 0x72C
+#define RCC_TIM14CFGR 0x730
+#define RCC_TIM15CFGR 0x734
+#define RCC_TIM16CFGR 0x738
+#define RCC_TIM17CFGR 0x73C
+#define RCC_LPTIM1CFGR 0x744
+#define RCC_LPTIM2CFGR 0x748
+#define RCC_LPTIM3CFGR 0x74C
+#define RCC_LPTIM4CFGR 0x750
+#define RCC_LPTIM5CFGR 0x754
+#define RCC_SPI1CFGR 0x758
+#define RCC_SPI2CFGR 0x75C
+#define RCC_SPI3CFGR 0x760
+#define RCC_SPI4CFGR 0x764
+#define RCC_SPI5CFGR 0x768
+#define RCC_SPI6CFGR 0x76C
+#define RCC_SPDIFRXCFGR 0x778
+#define RCC_USART1CFGR 0x77C
+#define RCC_USART2CFGR 0x780
+#define RCC_USART3CFGR 0x784
+#define RCC_UART4CFGR 0x788
+#define RCC_UART5CFGR 0x78C
+#define RCC_USART6CFGR 0x790
+#define RCC_UART7CFGR 0x794
+#define RCC_LPUART1CFGR 0x7A0
+#define RCC_I2C1CFGR 0x7A4
+#define RCC_I2C2CFGR 0x7A8
+#define RCC_I2C3CFGR 0x7AC
+#define RCC_SAI1CFGR 0x7C4
+#define RCC_SAI2CFGR 0x7C8
+#define RCC_SAI3CFGR 0x7CC
+#define RCC_SAI4CFGR 0x7D0
+#define RCC_MDF1CFGR 0x7D8
+#define RCC_FDCANCFGR 0x7E0
+#define RCC_HDPCFGR 0x7E4
+#define RCC_ADC1CFGR 0x7E8
+#define RCC_ADC2CFGR 0x7EC
+#define RCC_ETH1CFGR 0x7F0
+#define RCC_ETH2CFGR 0x7F4
+#define RCC_USBHCFGR 0x7FC
+#define RCC_USB2PHY1CFGR 0x800
+#define RCC_OTGCFGR 0x808
+#define RCC_USB2PHY2CFGR 0x80C
+#define RCC_STGENCFGR 0x824
+#define RCC_SDMMC1CFGR 0x830
+#define RCC_SDMMC2CFGR 0x834
+#define RCC_SDMMC3CFGR 0x838
+#define RCC_LTDCCFGR 0x840
+#define RCC_CSICFGR 0x858
+#define RCC_DCMIPPCFGR 0x85C
+#define RCC_DCMIPSSICFGR 0x860
+#define RCC_RNG1CFGR 0x870
+#define RCC_RNG2CFGR 0x874
+#define RCC_PKACFGR 0x878
+#define RCC_SAESCFGR 0x87C
+#define RCC_HASH1CFGR 0x880
+#define RCC_HASH2CFGR 0x884
+#define RCC_CRYP1CFGR 0x888
+#define RCC_CRYP2CFGR 0x88C
+#define RCC_IWDG1CFGR 0x894
+#define RCC_IWDG2CFGR 0x898
+#define RCC_IWDG3CFGR 0x89C
+#define RCC_IWDG4CFGR 0x8A0
+#define RCC_WWDG1CFGR 0x8A4
+#define RCC_VREFCFGR 0x8AC
+#define RCC_DTSCFGR 0x8B0
+#define RCC_CRCCFGR 0x8B4
+#define RCC_SERCCFGR 0x8B8
+#define RCC_DDRPERFMCFGR 0x8C0
+#define RCC_I3C1CFGR 0x8C8
+#define RCC_I3C2CFGR 0x8CC
+#define RCC_I3C3CFGR 0x8D0
+#define RCC_MUXSELCFGR 0x1000
+#define RCC_XBAR0CFGR 0x1018
+#define RCC_XBAR1CFGR 0x101C
+#define RCC_XBAR2CFGR 0x1020
+#define RCC_XBAR3CFGR 0x1024
+#define RCC_XBAR4CFGR 0x1028
+#define RCC_XBAR5CFGR 0x102C
+#define RCC_XBAR6CFGR 0x1030
+#define RCC_XBAR7CFGR 0x1034
+#define RCC_XBAR8CFGR 0x1038
+#define RCC_XBAR9CFGR 0x103C
+#define RCC_XBAR10CFGR 0x1040
+#define RCC_XBAR11CFGR 0x1044
+#define RCC_XBAR12CFGR 0x1048
+#define RCC_XBAR13CFGR 0x104C
+#define RCC_XBAR14CFGR 0x1050
+#define RCC_XBAR15CFGR 0x1054
+#define RCC_XBAR16CFGR 0x1058
+#define RCC_XBAR17CFGR 0x105C
+#define RCC_XBAR18CFGR 0x1060
+#define RCC_XBAR19CFGR 0x1064
+#define RCC_XBAR20CFGR 0x1068
+#define RCC_XBAR21CFGR 0x106C
+#define RCC_XBAR22CFGR 0x1070
+#define RCC_XBAR23CFGR 0x1074
+#define RCC_XBAR24CFGR 0x1078
+#define RCC_XBAR25CFGR 0x107C
+#define RCC_XBAR26CFGR 0x1080
+#define RCC_XBAR27CFGR 0x1084
+#define RCC_XBAR28CFGR 0x1088
+#define RCC_XBAR29CFGR 0x108C
+#define RCC_XBAR30CFGR 0x1090
+#define RCC_XBAR31CFGR 0x1094
+#define RCC_XBAR32CFGR 0x1098
+#define RCC_XBAR33CFGR 0x109C
+#define RCC_XBAR34CFGR 0x10A0
+#define RCC_XBAR35CFGR 0x10A4
+#define RCC_XBAR36CFGR 0x10A8
+#define RCC_XBAR37CFGR 0x10AC
+#define RCC_XBAR38CFGR 0x10B0
+#define RCC_XBAR39CFGR 0x10B4
+#define RCC_XBAR40CFGR 0x10B8
+#define RCC_XBAR41CFGR 0x10BC
+#define RCC_XBAR42CFGR 0x10C0
+#define RCC_XBAR43CFGR 0x10C4
+#define RCC_XBAR44CFGR 0x10C8
+#define RCC_XBAR45CFGR 0x10CC
+#define RCC_XBAR46CFGR 0x10D0
+#define RCC_XBAR47CFGR 0x10D4
+#define RCC_XBAR48CFGR 0x10D8
+#define RCC_XBAR49CFGR 0x10DC
+#define RCC_XBAR50CFGR 0x10E0
+#define RCC_XBAR51CFGR 0x10E4
+#define RCC_XBAR52CFGR 0x10E8
+#define RCC_XBAR53CFGR 0x10EC
+#define RCC_XBAR54CFGR 0x10F0
+#define RCC_XBAR55CFGR 0x10F4
+#define RCC_XBAR56CFGR 0x10F8
+#define RCC_XBAR57CFGR 0x10FC
+#define RCC_XBAR58CFGR 0x1100
+#define RCC_XBAR59CFGR 0x1104
+#define RCC_XBAR60CFGR 0x1108
+#define RCC_XBAR61CFGR 0x110C
+#define RCC_XBAR62CFGR 0x1110
+#define RCC_XBAR63CFGR 0x1114
+#define RCC_PREDIV0CFGR 0x1118
+#define RCC_PREDIV1CFGR 0x111C
+#define RCC_PREDIV2CFGR 0x1120
+#define RCC_PREDIV3CFGR 0x1124
+#define RCC_PREDIV4CFGR 0x1128
+#define RCC_PREDIV5CFGR 0x112C
+#define RCC_PREDIV6CFGR 0x1130
+#define RCC_PREDIV7CFGR 0x1134
+#define RCC_PREDIV8CFGR 0x1138
+#define RCC_PREDIV9CFGR 0x113C
+#define RCC_PREDIV10CFGR 0x1140
+#define RCC_PREDIV11CFGR 0x1144
+#define RCC_PREDIV12CFGR 0x1148
+#define RCC_PREDIV13CFGR 0x114C
+#define RCC_PREDIV14CFGR 0x1150
+#define RCC_PREDIV15CFGR 0x1154
+#define RCC_PREDIV16CFGR 0x1158
+#define RCC_PREDIV17CFGR 0x115C
+#define RCC_PREDIV18CFGR 0x1160
+#define RCC_PREDIV19CFGR 0x1164
+#define RCC_PREDIV20CFGR 0x1168
+#define RCC_PREDIV21CFGR 0x116C
+#define RCC_PREDIV22CFGR 0x1170
+#define RCC_PREDIV23CFGR 0x1174
+#define RCC_PREDIV24CFGR 0x1178
+#define RCC_PREDIV25CFGR 0x117C
+#define RCC_PREDIV26CFGR 0x1180
+#define RCC_PREDIV27CFGR 0x1184
+#define RCC_PREDIV28CFGR 0x1188
+#define RCC_PREDIV29CFGR 0x118C
+#define RCC_PREDIV30CFGR 0x1190
+#define RCC_PREDIV31CFGR 0x1194
+#define RCC_PREDIV32CFGR 0x1198
+#define RCC_PREDIV33CFGR 0x119C
+#define RCC_PREDIV34CFGR 0x11A0
+#define RCC_PREDIV35CFGR 0x11A4
+#define RCC_PREDIV36CFGR 0x11A8
+#define RCC_PREDIV37CFGR 0x11AC
+#define RCC_PREDIV38CFGR 0x11B0
+#define RCC_PREDIV39CFGR 0x11B4
+#define RCC_PREDIV40CFGR 0x11B8
+#define RCC_PREDIV41CFGR 0x11BC
+#define RCC_PREDIV42CFGR 0x11C0
+#define RCC_PREDIV43CFGR 0x11C4
+#define RCC_PREDIV44CFGR 0x11C8
+#define RCC_PREDIV45CFGR 0x11CC
+#define RCC_PREDIV46CFGR 0x11D0
+#define RCC_PREDIV47CFGR 0x11D4
+#define RCC_PREDIV48CFGR 0x11D8
+#define RCC_PREDIV49CFGR 0x11DC
+#define RCC_PREDIV50CFGR 0x11E0
+#define RCC_PREDIV51CFGR 0x11E4
+#define RCC_PREDIV52CFGR 0x11E8
+#define RCC_PREDIV53CFGR 0x11EC
+#define RCC_PREDIV54CFGR 0x11F0
+#define RCC_PREDIV55CFGR 0x11F4
+#define RCC_PREDIV56CFGR 0x11F8
+#define RCC_PREDIV57CFGR 0x11FC
+#define RCC_PREDIV58CFGR 0x1200
+#define RCC_PREDIV59CFGR 0x1204
+#define RCC_PREDIV60CFGR 0x1208
+#define RCC_PREDIV61CFGR 0x120C
+#define RCC_PREDIV62CFGR 0x1210
+#define RCC_PREDIV63CFGR 0x1214
+#define RCC_PREDIVSR1 0x1218
+#define RCC_PREDIVSR2 0x121C
+#define RCC_FINDIV0CFGR 0x1224
+#define RCC_FINDIV1CFGR 0x1228
+#define RCC_FINDIV2CFGR 0x122C
+#define RCC_FINDIV3CFGR 0x1230
+#define RCC_FINDIV4CFGR 0x1234
+#define RCC_FINDIV5CFGR 0x1238
+#define RCC_FINDIV6CFGR 0x123C
+#define RCC_FINDIV7CFGR 0x1240
+#define RCC_FINDIV8CFGR 0x1244
+#define RCC_FINDIV9CFGR 0x1248
+#define RCC_FINDIV10CFGR 0x124C
+#define RCC_FINDIV11CFGR 0x1250
+#define RCC_FINDIV12CFGR 0x1254
+#define RCC_FINDIV13CFGR 0x1258
+#define RCC_FINDIV14CFGR 0x125C
+#define RCC_FINDIV15CFGR 0x1260
+#define RCC_FINDIV16CFGR 0x1264
+#define RCC_FINDIV17CFGR 0x1268
+#define RCC_FINDIV18CFGR 0x126C
+#define RCC_FINDIV19CFGR 0x1270
+#define RCC_FINDIV20CFGR 0x1274
+#define RCC_FINDIV21CFGR 0x1278
+#define RCC_FINDIV22CFGR 0x127C
+#define RCC_FINDIV23CFGR 0x1280
+#define RCC_FINDIV24CFGR 0x1284
+#define RCC_FINDIV25CFGR 0x1288
+#define RCC_FINDIV26CFGR 0x128C
+#define RCC_FINDIV27CFGR 0x1290
+#define RCC_FINDIV28CFGR 0x1294
+#define RCC_FINDIV29CFGR 0x1298
+#define RCC_FINDIV30CFGR 0x129C
+#define RCC_FINDIV31CFGR 0x12A0
+#define RCC_FINDIV32CFGR 0x12A4
+#define RCC_FINDIV33CFGR 0x12A8
+#define RCC_FINDIV34CFGR 0x12AC
+#define RCC_FINDIV35CFGR 0x12B0
+#define RCC_FINDIV36CFGR 0x12B4
+#define RCC_FINDIV37CFGR 0x12B8
+#define RCC_FINDIV38CFGR 0x12BC
+#define RCC_FINDIV39CFGR 0x12C0
+#define RCC_FINDIV40CFGR 0x12C4
+#define RCC_FINDIV41CFGR 0x12C8
+#define RCC_FINDIV42CFGR 0x12CC
+#define RCC_FINDIV43CFGR 0x12D0
+#define RCC_FINDIV44CFGR 0x12D4
+#define RCC_FINDIV45CFGR 0x12D8
+#define RCC_FINDIV46CFGR 0x12DC
+#define RCC_FINDIV47CFGR 0x12E0
+#define RCC_FINDIV48CFGR 0x12E4
+#define RCC_FINDIV49CFGR 0x12E8
+#define RCC_FINDIV50CFGR 0x12EC
+#define RCC_FINDIV51CFGR 0x12F0
+#define RCC_FINDIV52CFGR 0x12F4
+#define RCC_FINDIV53CFGR 0x12F8
+#define RCC_FINDIV54CFGR 0x12FC
+#define RCC_FINDIV55CFGR 0x1300
+#define RCC_FINDIV56CFGR 0x1304
+#define RCC_FINDIV57CFGR 0x1308
+#define RCC_FINDIV58CFGR 0x130C
+#define RCC_FINDIV59CFGR 0x1310
+#define RCC_FINDIV60CFGR 0x1314
+#define RCC_FINDIV61CFGR 0x1318
+#define RCC_FINDIV62CFGR 0x131C
+#define RCC_FINDIV63CFGR 0x1320
+#define RCC_FINDIVSR1 0x1324
+#define RCC_FINDIVSR2 0x1328
+#define RCC_FCALCOBS0CFGR 0x1340
+#define RCC_FCALCOBS1CFGR 0x1344
+#define RCC_FCALCREFCFGR 0x1348
+#define RCC_FCALCCR1 0x134C
+#define RCC_FCALCCR2 0x1354
+#define RCC_FCALCSR 0x1358
+#define RCC_PLL4CFGR1 0x1360
+#define RCC_PLL4CFGR2 0x1364
+#define RCC_PLL4CFGR3 0x1368
+#define RCC_PLL4CFGR4 0x136C
+#define RCC_PLL4CFGR5 0x1370
+#define RCC_PLL4CFGR6 0x1378
+#define RCC_PLL4CFGR7 0x137C
+#define RCC_PLL5CFGR1 0x1388
+#define RCC_PLL5CFGR2 0x138C
+#define RCC_PLL5CFGR3 0x1390
+#define RCC_PLL5CFGR4 0x1394
+#define RCC_PLL5CFGR5 0x1398
+#define RCC_PLL5CFGR6 0x13A0
+#define RCC_PLL5CFGR7 0x13A4
+#define RCC_PLL6CFGR1 0x13B0
+#define RCC_PLL6CFGR2 0x13B4
+#define RCC_PLL6CFGR3 0x13B8
+#define RCC_PLL6CFGR4 0x13BC
+#define RCC_PLL6CFGR5 0x13C0
+#define RCC_PLL6CFGR6 0x13C8
+#define RCC_PLL6CFGR7 0x13CC
+#define RCC_PLL7CFGR1 0x13D8
+#define RCC_PLL7CFGR2 0x13DC
+#define RCC_PLL7CFGR3 0x13E0
+#define RCC_PLL7CFGR4 0x13E4
+#define RCC_PLL7CFGR5 0x13E8
+#define RCC_PLL7CFGR6 0x13F0
+#define RCC_PLL7CFGR7 0x13F4
+#define RCC_PLL8CFGR1 0x1400
+#define RCC_PLL8CFGR2 0x1404
+#define RCC_PLL8CFGR3 0x1408
+#define RCC_PLL8CFGR4 0x140C
+#define RCC_PLL8CFGR5 0x1410
+#define RCC_PLL8CFGR6 0x1418
+#define RCC_PLL8CFGR7 0x141C
+#define RCC_VERR 0xFFF4
+#define RCC_IDR 0xFFF8
+#define RCC_SIDR 0xFFFC
+
+#endif /* STM32MP21_RCC_H */
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b547198a2c65..6af2d020e03e 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,113 +9,128 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
tristate "Support for the Allwinner D1/R528/T113 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default y
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
+config SUN55I_A523_CCU
+ tristate "Support for the Allwinner A523/T527 CCU"
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
+config SUN55I_A523_MCU_CCU
+ tristate "Support for the Allwinner A523/T527 MCU CCU"
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
+config SUN55I_A523_R_CCU
+ tristate "Support for the Allwinner A523/T527 PRCM CCU"
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 6b3ae2b620db..a1c4087d7241 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -33,6 +33,9 @@ obj-$(CONFIG_SUN50I_A100_R_CCU) += sun50i-a100-r-ccu.o
obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
+obj-$(CONFIG_SUN55I_A523_CCU) += sun55i-a523-ccu.o
+obj-$(CONFIG_SUN55I_A523_MCU_CCU) += sun55i-a523-mcu-ccu.o
+obj-$(CONFIG_SUN55I_A523_R_CCU) += sun55i-a523-r-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o
@@ -58,6 +61,9 @@ sun50i-a100-r-ccu-y += ccu-sun50i-a100-r.o
sun50i-h6-ccu-y += ccu-sun50i-h6.o
sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
sun50i-h616-ccu-y += ccu-sun50i-h616.o
+sun55i-a523-ccu-y += ccu-sun55i-a523.o
+sun55i-a523-mcu-ccu-y += ccu-sun55i-a523-mcu.o
+sun55i-a523-r-ccu-y += ccu-sun55i-a523-r.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
sun6i-a31-ccu-y += ccu-sun6i-a31.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
index de36e21d3eaf..44b2ebdebdac 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
@@ -91,7 +91,7 @@ static struct clk_hw_onecell_data sun20i_d1_r_hw_clks = {
},
};
-static struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
+static const struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
[RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
[RST_BUS_R_TWD] = { 0x12c, BIT(16) },
[RST_BUS_R_PPU] = { 0x1ac, BIT(16) },
@@ -137,6 +137,6 @@ static struct platform_driver sun20i_d1_r_ccu_driver = {
};
module_platform_driver(sun20i_d1_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner D1/R528/T113 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 9b5cfac2ee70..e83d4fd40240 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
-
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
+ mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
+ mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);
@@ -1232,7 +1238,7 @@ static struct clk_hw_onecell_data sun20i_d1_hw_clks = {
},
};
-static struct ccu_reset_map sun20i_d1_ccu_resets[] = {
+static const struct ccu_reset_map sun20i_d1_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
[RST_BUS_DI] = { 0x62c, BIT(16) },
@@ -1371,7 +1377,7 @@ static int sun20i_d1_ccu_probe(struct platform_device *pdev)
/* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG);
- val &= ~BIT(1) | BIT(0);
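+ /* The old mask "~BIT(1) | BIT(0)" evaluates to ~BIT(1), so bit 0 was
+ * never actually cleared. */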
+ val &= ~(BIT(1) | BIT(0));
writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG);
/* Force fanout-27M factor N to 0. */
@@ -1406,6 +1412,6 @@ static struct platform_driver sun20i_d1_ccu_driver = {
};
module_platform_driver(sun20i_d1_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner D1/R528/T113 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index d1a1683baff4..409feb085021 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1382,7 +1382,7 @@ static struct clk_hw_onecell_data sun7i_a20_hw_clks = {
.num = CLK_NUMBER_SUN7I,
};
-static struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = {
+static const struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1493,6 +1493,6 @@ static struct platform_driver sun4i_a10_ccu_driver = {
};
module_platform_driver(sun4i_a10_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A10/A20 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index 2c791761a646..cb0f8d110c32 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -166,7 +166,7 @@ static struct clk_hw_onecell_data sun50i_a100_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a100_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a100_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_BUS_PWM] = { 0x13c, BIT(16) },
[RST_R_APB1_PPU] = { 0x17c, BIT(16) },
@@ -214,6 +214,6 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
};
module_platform_driver(sun50i_a100_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A100 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index bbaa82978716..1f81c7ac41af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
0, 4, /* M */
@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
0, 4, /* M */
@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
@@ -1061,7 +1061,7 @@ static struct clk_hw_onecell_data sun50i_a100_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a100_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a100_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1276,6 +1276,6 @@ static struct platform_driver sun50i_a100_ccu_driver = {
};
module_platform_driver(sun50i_a100_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A100 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index c255dba2c96d..ba1ad267f123 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
CLK_SET_RATE_PARENT);
/*
- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
- * the mux from reparenting.
+ * Experiments showed that RGB output requires pll-video0-2x, while DSI
+ * requires pll-mipi. The output does not work with the wrong clock; the
+ * screen stays blank.
+ * sun50i-a64.dtsi assigns pll-mipi as the TCON0 parent by default.
*/
-#define SUN50I_A64_TCON0_CLK_REG 0x118
-
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
@@ -858,7 +858,7 @@ static struct clk_hw_onecell_data sun50i_a64_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a64_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a64_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- /* Set PLL MIPI as parent for TCON0 */
- val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
- val &= ~GENMASK(26, 24);
- writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
-
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
@@ -994,6 +989,6 @@ static struct platform_driver sun50i_a64_ccu_driver = {
};
module_platform_driver(sun50i_a64_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A64 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index a8c11c0b4e06..dfba88a5ad0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -21,7 +21,6 @@
/* PLL_VIDEO0 exported for HDMI PHY */
-#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -32,7 +31,6 @@
#define CLK_PLL_PERIPH1_2X 14
#define CLK_PLL_VIDEO1 15
#define CLK_PLL_GPU 16
-#define CLK_PLL_MIPI 17
#define CLK_PLL_HSIC 18
#define CLK_PLL_DE 19
#define CLK_PLL_DDR1 20
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index c72815841111..d24fa3449303 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -80,7 +80,7 @@ static struct ccu_div r_apb2_clk = {
* in the BSP source code, although most of them are unused. The existence
* of the hardware block is verified with "3.1 Memory Mapping" chapter in
* "Allwinner H6 V200 User Manual V1.1"; and the parent APB buses are verified
- * with "3.3.2.1 System Bus Tree" chapter inthe same document.
+ * with "3.3.2.1 System Bus Tree" chapter in the same document.
*/
static SUNXI_CCU_GATE(r_apb1_timer_clk, "r-apb1-timer", "r-apb1",
0x11c, BIT(0), 0);
@@ -179,7 +179,7 @@ static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB1_PWM] = { 0x13c, BIT(16) },
@@ -190,7 +190,7 @@ static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_W1] = { 0x1ec, BIT(16) },
};
-static struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB2_I2C] = { 0x19c, BIT(16) },
[RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
@@ -256,6 +256,6 @@ static struct platform_driver sun50i_h6_r_ccu_driver = {
};
module_platform_driver(sun50i_h6_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H6 and H616 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index a20b621ad8f1..7fccda96d444 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1076,7 +1076,7 @@ static struct clk_hw_onecell_data sun50i_h6_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h6_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h6_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1286,6 +1286,6 @@ static struct platform_driver sun50i_h6_ccu_driver = {
};
module_platform_driver(sun50i_h6_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H6 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 84e406ddf9d1..955c614830fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -216,19 +216,29 @@ static struct ccu_nkmp pll_de_clk = {
};
/*
- * TODO: Determine SDM settings for the audio PLL. The manual suggests
- * PLL_FACTOR_N=16, PLL_POST_DIV_P=2, OUTPUT_DIV=2, pattern=0xe000c49b
- * for 24.576 MHz, and PLL_FACTOR_N=22, PLL_POST_DIV_P=3, OUTPUT_DIV=2,
- * pattern=0xe001288c for 22.5792 MHz.
- * This clashes with our fixed PLL_POST_DIV_P.
+ * Sigma-delta modulation settings table obtained from the vendor SDK driver.
+ * There are additional M0 and M1 divider bits not modelled here, so they are
+ * forced to fixed values in the probe routine. Sigma-delta modulation
+ * provides a fractional-N divider in the PLL, which helps reach those
+ * specific frequencies with less error.
*/
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc001288d, .m = 3, .n = 22 },
+ { .rate = 98304000, .pattern = 0xc001eb85, .m = 5, .n = 40 },
+};
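+
+/*
+ * A rough worked example, assuming rate = parent * N / M / fixed-post-div
+ * with SDM supplying the fractional part of N: the integer factors of the
+ * 98.304 MHz entry give 24 MHz * 40 / 5 / 2 = 96 MHz, and the SDM pattern
+ * dithers N around an effective 40.96, so the averaged output reaches
+ * 24 MHz * 40.96 / 5 / 2 = 98.304 MHz.
+ */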
+
#define SUN50I_H616_PLL_AUDIO_REG 0x078
static struct ccu_nm pll_audio_hs_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
- .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table,
+ BIT(24), 0x178, BIT(31)),
+ .fixed_post_div = 2,
.common = {
+ .features = CCU_FEATURE_FIXED_POSTDIV |
+ CCU_FEATURE_SIGMA_DELTA_MOD,
.reg = 0x078,
.hw.init = CLK_HW_INIT("pll-audio-hs", "osc24M",
&ccu_nm_ops,
@@ -318,10 +328,16 @@ static SUNXI_CCU_M_WITH_MUX_GATE(gpu0_clk, "gpu0", gpu0_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
+
+/*
+ * This clk is needed as a temporary fallback during GPU PLL frequency
+ * changes. Set the CLK_IS_CRITICAL flag to prevent it from being disabled.
+ */
+#define SUN50I_H616_GPU_CLK1_REG 0x674
static SUNXI_CCU_M_WITH_GATE(gpu1_clk, "gpu1", "pll-periph0-2x", 0x674,
0, 2, /* M */
BIT(31),/* gate */
- 0);
+ CLK_IS_CRITICAL);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
0x67c, BIT(0), 0);
@@ -635,6 +651,20 @@ static const char * const tcon_tv_parents[] = { "pll-video0",
"pll-video0-4x",
"pll-video1",
"pll-video1-4x" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
+ tcon_tv_parents, 0xb60,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1",
+ tcon_tv_parents, 0xb64,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
+ 0xb7c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon_lcd1_clk, "bus-tcon-lcd1", "ahb3",
+ 0xb7c, BIT(1), 0);
static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
tcon_tv_parents, 0xb80,
0, 4, /* M */
@@ -685,18 +715,20 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
};
/*
- * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
- * rates can be set exactly in conjunction with sigma-delta modulation.
+ * The PLL_AUDIO_4X clock defaults to 24.5714 MHz according to the manual, with
+ * a final divider of 1. The 2X and 1X clocks use 2 and 4 respectively. The 1X
+ * clock is set to either 24576000 or 22579200 for 48 kHz and 44.1 kHz (and
+ * multiples).
*/
static CLK_FIXED_FACTOR_HWS(pll_audio_1x_clk, "pll-audio-1x",
clk_parent_pll_audio,
- 96, 1, CLK_SET_RATE_PARENT);
+ 4, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
- 48, 1, CLK_SET_RATE_PARENT);
+ 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x",
clk_parent_pll_audio,
- 24, 1, CLK_SET_RATE_PARENT);
+ 1, 1, CLK_SET_RATE_PARENT);
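+
+/*
+ * For example (a sketch based on the SDM table rates above): with
+ * pll-audio-hs at 98.304 MHz these factors give 4X = 98.304 MHz,
+ * 2X = 49.152 MHz and 1X = 24.576 MHz; at 90.3168 MHz the 1X output is
+ * the 22.5792 MHz base needed for 44.1 kHz rates.
+ */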
static const struct clk_hw *pll_periph0_parents[] = {
&pll_periph0_clk.common.hw
@@ -843,8 +875,12 @@ static struct ccu_common *sun50i_h616_ccu_clks[] = {
&hdmi_cec_clk.common,
&bus_hdmi_clk.common,
&bus_tcon_top_clk.common,
+ &tcon_lcd0_clk.common,
+ &tcon_lcd1_clk.common,
&tcon_tv0_clk.common,
&tcon_tv1_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &bus_tcon_lcd1_clk.common,
&bus_tcon_tv0_clk.common,
&bus_tcon_tv1_clk.common,
&tve0_clk.common,
@@ -977,8 +1013,12 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
[CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
[CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
[CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw,
[CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
[CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw,
[CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
[CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
[CLK_TVE0] = &tve0_clk.common.hw,
@@ -990,7 +1030,7 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h616_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1050,8 +1090,11 @@ static struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_HDMI] = { 0xb1c, BIT(16) },
[RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
[RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
[RST_BUS_TVE0] = { 0xbbc, BIT(17) },
[RST_BUS_HDCP] = { 0xc4c, BIT(16) },
@@ -1095,11 +1138,37 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h616_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 4, /* PLL_PERI0@600MHz, as recommended by manual */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
+static struct ccu_mux_nb sun50i_h616_gpu_nb = {
+ .common = &gpu0_clk.common,
+ .cm = &gpu0_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 1, /* GPU_CLK1@400MHz */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_gpu_nb = {
+ .common = &pll_gpu_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
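+
+/*
+ * A rough sketch of what the notifier pairs above do (a summary of the
+ * common CCU notifier helpers, not H616-specific behaviour): before a PLL
+ * rate change the mux notifier switches consumers to the bypass_index
+ * parent, and after the change the PLL notifier re-enables lock detection
+ * and waits for the PLL to lock before consumers are switched back.
+ */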
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- int i;
+ int ret, i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1136,12 +1205,22 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
}
/*
- * Force the post-divider of pll-audio to 12 and the output divider
- * of it to 2, so 24576000 and 22579200 rates can be set exactly.
+ * Set the output-divider for the pll-audio clocks (M0) to 2 and the
+ * input divider (M1) to 1 as recommended by the manual when using
+ * SDM.
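+ * Bit 0 appears to hold M0 - 1 and bit 1 M1 - 1 (an assumption inferred
+ * from this description rather than the manual), so clearing bit 1 and
+ * setting bit 0 below selects M1 = 1 and M0 = 2.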
*/
val = readl(reg + SUN50I_H616_PLL_AUDIO_REG);
- val &= ~(GENMASK(21, 16) | BIT(0));
- writel(val | (11 << 16) | BIT(0), reg + SUN50I_H616_PLL_AUDIO_REG);
+ val &= ~BIT(1);
+ val |= BIT(0);
+ writel(val, reg + SUN50I_H616_PLL_AUDIO_REG);
+
+ /*
+ * Set the input divider for the gpu1 clock to 3 (register value 2, as the
+ * field encodes divider - 1), to reach a safe 400 MHz.
+ */
+ val = readl(reg + SUN50I_H616_GPU_CLK1_REG);
+ val &= ~GENMASK(1, 0);
+ val |= 2;
+ writel(val, reg + SUN50I_H616_GPU_CLK1_REG);
/*
* First clock parent (osc32K) is unusable for CEC. But since there
@@ -1152,7 +1231,25 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during CPU PLL rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h616_cpu_nb);
+
+ /* Re-lock the CPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+
+ /* Reparent GPU during GPU PLL rate changes */
+ ccu_mux_notifier_register(pll_gpu_clk.common.hw.clk,
+ &sun50i_h616_gpu_nb);
+
+ /* Re-lock the GPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_gpu_nb);
+
+ return 0;
}
static const struct of_device_id sun50i_h616_ccu_ids[] = {
@@ -1171,6 +1268,6 @@ static struct platform_driver sun50i_h616_ccu_driver = {
};
module_platform_driver(sun50i_h616_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H616 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
index a75803b49f6a..7056f293a8e0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
@@ -51,6 +51,6 @@
#define CLK_BUS_DRAM 56
-#define CLK_NUMBER (CLK_BUS_GPADC + 1)
+#define CLK_NUMBER (CLK_BUS_TCON_LCD1 + 1)
#endif /* _CCU_SUN50I_H616_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
new file mode 100644
index 000000000000..197844f0fe4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on the A523 CCU driver:
+ * Copyright (C) 2023-2024 Arm Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sun55i-a523-mcu-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-mcu-ccu.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nm.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+static const struct clk_parent_data ahb[] = {
+ { .fw_name = "r-ahb" }
+};
+
+static const struct clk_parent_data apb[] = {
+ { .fw_name = "r-apb0" }
+};
+
+#define SUN55I_A523_PLL_AUDIO1_REG 0x00c
+static struct ccu_sdm_setting pll_audio1_sdm_table[] = {
+ { .rate = 2167603200, .pattern = 0xa000a234, .m = 1, .n = 90 }, /* div2->22.5792 */
+ { .rate = 2359296000, .pattern = 0xa0009ba6, .m = 1, .n = 98 }, /* div2->24.576 */
+ { .rate = 1806336000, .pattern = 0xa000872b, .m = 1, .n = 75 }, /* div5->22.5792 */
+};
+
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .sdm = _SUNXI_CCU_SDM(pll_audio1_sdm_table, BIT(24),
+ 0x010, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3500000000U,
+ .common = {
+ .reg = 0x00c,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+/*
+ * /2 and /5 dividers are actually programmable, but we just use the
+ * values from the BSP, since the audio PLL only needs to provide a
+ * couple of clock rates. This also matches the names given in the manual.
+ */
+static const struct clk_hw *pll_audio1_div_parents[] = { &pll_audio1_clk.common.hw };
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_div_parents, 2, 1,
+ CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_div_parents, 5, 1,
+ CLK_SET_RATE_PARENT);
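+
+/*
+ * For example (sketch): with pll-audio1 at 2359296000 Hz from the SDM
+ * table, div2 yields 1179648000 Hz (48 * 24.576 MHz); with it at
+ * 1806336000 Hz, div5 yields 361267200 Hz (16 * 22.5792 MHz).
+ */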
+
+static SUNXI_CCU_M_WITH_GATE(audio_out_clk, "audio-out",
+ "pll-audio1-div2", 0x01c,
+ 0, 5, BIT(31), CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ /*
+ * The order of the following two parents is from the BSP code. It is
+ * the opposite in the manual. Testing with the DSP is required to
+ * figure out the real order.
+ */
+ { .hw = &pll_audio1_div5_clk.hw },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .fw_name = "dsp" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "mcu-dsp", dsp_parents, 0x0020,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data i2s_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s0_clk, "i2s0", i2s_parents, 0x02c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s1_clk, "i2s1", i2s_parents, 0x030,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s2_clk, "i2s2", i2s_parents, 0x034,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_clk, "i2s3", i2s_parents, 0x038,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data i2s3_asrc_parents[] = {
+ { .fw_name = "pll-periph0-300m" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_asrc_clk, "i2s3-asrc",
+ i2s3_asrc_parents, 0x03c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_i2s0_clk, "bus-i2s0", apb, 0x040, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s1_clk, "bus-i2s1", apb, 0x040, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s2_clk, "bus-i2s2", apb, 0x040, BIT(2), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s3_clk, "bus-i2s3", apb, 0x040, BIT(3), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_tx_clk, "spdif-tx",
+ audio_parents, 0x044,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_rx_clk, "spdif-rx",
+ i2s3_asrc_parents, 0x048,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_spdif_clk, "bus-spdif", apb, 0x04c, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(dmic_clk, "dmic", audio_parents, 0x050,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_dmic_clk, "bus-dmic", apb, 0x054, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_dac_clk, "audio-dac",
+ audio_parents, 0x058,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_adc_clk, "audio-adc",
+ audio_parents, 0x05c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_audio_codec_clk, "bus-audio-codec",
+ apb, 0x060, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_dsp_msgbox_clk, "bus-dsp-msgbox",
+ ahb, 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_dsp_cfg_clk, "bus-dsp-cfg",
+ apb, 0x06c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_npu_hclk, "bus-npu-hclk", ahb, 0x070, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_npu_aclk, "bus-npu-aclk", ahb, 0x070, BIT(2), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "r-ahb" }
+};
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer0_clk, "mcu-timer0", timer_parents,
+ 0x074,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer1_clk, "mcu-timer1", timer_parents,
+ 0x078,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer2_clk, "mcu-timer2", timer_parents,
+ 0x07c,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer3_clk, "mcu-timer3", timer_parents,
+ 0x080,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer4_clk, "mcu-timer4", timer_parents,
+ 0x084,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer5_clk, "mcu-timer5", timer_parents,
+ 0x088,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_timer_clk, "bus-mcu-timer", ahb, 0x08c, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_dma_clk, "bus-mcu-dma", ahb, 0x104, BIT(0), 0);
+/* tzma* only found in BSP code. */
+static SUNXI_CCU_GATE_DATA(tzma0_clk, "tzma0", ahb, 0x108, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(tzma1_clk, "tzma1", ahb, 0x10c, BIT(0), 0);
+/* parent is a guess as this block is not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_pubsram_clk, "bus-pubsram", ahb, 0x114, BIT(0), 0);
+
+/*
+ * user manual has "mbus" clock as parent of both clocks below,
+ * but this makes more sense, since BSP MCU DMA controller has
+ * reference to both of them, likely needing both enabled.
+ */
+static SUNXI_CCU_GATE_FW(mbus_mcu_clk, "mbus-mcu", "mbus", 0x11c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(mbus_mcu_dma_clk, "mbus-mcu-dma",
+ &mbus_mcu_clk.common.hw, 0x11c, BIT(0), 0);
+
+static const struct clk_parent_data riscv_pwm_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(riscv_clk, "riscv",
+ riscv_pwm_parents, 0x120,
+ 27, 3, BIT(31), 0);
+/* Parents are guesses as these two blocks are not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_riscv_cfg_clk, "bus-riscv-cfg", ahb,
+ 0x124, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_riscv_msgbox_clk, "bus-riscv-msgbox", ahb,
+ 0x128, BIT(0), 0);
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(mcu_pwm0_clk, "mcu-pwm0",
+ riscv_pwm_parents, 0x130,
+ 24, 3, BIT(31), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_pwm0_clk, "bus-mcu-pwm0", apb,
+ 0x134, BIT(0), 0);
+
+/*
+ * Contains all clocks that are controlled by a hardware register. They
+ * have a (sunxi) .common member, which needs to be initialised by the common
+ * sunxi CCU code, to be filled with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_mcu_ccu_clks[] = {
+ &pll_audio1_clk.common,
+ &audio_out_clk.common,
+ &dsp_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s3_clk.common,
+ &i2s3_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_codec_clk.common,
+ &bus_dsp_msgbox_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &bus_npu_aclk.common,
+ &bus_npu_hclk.common,
+ &mcu_timer0_clk.common,
+ &mcu_timer1_clk.common,
+ &mcu_timer2_clk.common,
+ &mcu_timer3_clk.common,
+ &mcu_timer4_clk.common,
+ &mcu_timer5_clk.common,
+ &bus_mcu_timer_clk.common,
+ &bus_mcu_dma_clk.common,
+ &tzma0_clk.common,
+ &tzma1_clk.common,
+ &bus_pubsram_clk.common,
+ &mbus_mcu_dma_clk.common,
+ &mbus_mcu_clk.common,
+ &riscv_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &bus_riscv_msgbox_clk.common,
+ &mcu_pwm0_clk.common,
+ &bus_mcu_pwm0_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_mcu_hw_clks = {
+ .hws = {
+ [CLK_MCU_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.hw,
+ [CLK_MCU_AUDIO_OUT] = &audio_out_clk.common.hw,
+ [CLK_MCU_DSP] = &dsp_clk.common.hw,
+ [CLK_MCU_I2S0] = &i2s0_clk.common.hw,
+ [CLK_MCU_I2S1] = &i2s1_clk.common.hw,
+ [CLK_MCU_I2S2] = &i2s2_clk.common.hw,
+ [CLK_MCU_I2S3] = &i2s3_clk.common.hw,
+ [CLK_MCU_I2S3_ASRC] = &i2s3_asrc_clk.common.hw,
+ [CLK_BUS_MCU_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_MCU_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_MCU_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_MCU_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_MCU_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_MCU_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_MCU_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_MCU_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_MCU_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_DAC] = &audio_dac_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_MCU_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_BUS_MCU_DSP_MSGBOX] = &bus_dsp_msgbox_clk.common.hw,
+ [CLK_BUS_MCU_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_BUS_MCU_NPU_HCLK] = &bus_npu_hclk.common.hw,
+ [CLK_BUS_MCU_NPU_ACLK] = &bus_npu_aclk.common.hw,
+ [CLK_MCU_TIMER0] = &mcu_timer0_clk.common.hw,
+ [CLK_MCU_TIMER1] = &mcu_timer1_clk.common.hw,
+ [CLK_MCU_TIMER2] = &mcu_timer2_clk.common.hw,
+ [CLK_MCU_TIMER3] = &mcu_timer3_clk.common.hw,
+ [CLK_MCU_TIMER4] = &mcu_timer4_clk.common.hw,
+ [CLK_MCU_TIMER5] = &mcu_timer5_clk.common.hw,
+ [CLK_BUS_MCU_TIMER] = &bus_mcu_timer_clk.common.hw,
+ [CLK_BUS_MCU_DMA] = &bus_mcu_dma_clk.common.hw,
+ [CLK_MCU_TZMA0] = &tzma0_clk.common.hw,
+ [CLK_MCU_TZMA1] = &tzma1_clk.common.hw,
+ [CLK_BUS_MCU_PUBSRAM] = &bus_pubsram_clk.common.hw,
+ [CLK_MCU_MBUS_DMA] = &mbus_mcu_dma_clk.common.hw,
+ [CLK_MCU_MBUS] = &mbus_mcu_clk.common.hw,
+ [CLK_MCU_RISCV] = &riscv_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_MSGBOX] = &bus_riscv_msgbox_clk.common.hw,
+ [CLK_MCU_PWM0] = &mcu_pwm0_clk.common.hw,
+ [CLK_BUS_MCU_PWM0] = &bus_mcu_pwm0_clk.common.hw,
+ },
+ .num = CLK_BUS_MCU_PWM0 + 1,
+};
+
+static struct ccu_reset_map sun55i_a523_mcu_ccu_resets[] = {
+ [RST_BUS_MCU_I2S0] = { 0x0040, BIT(16) },
+ [RST_BUS_MCU_I2S1] = { 0x0040, BIT(17) },
+ [RST_BUS_MCU_I2S2] = { 0x0040, BIT(18) },
+ [RST_BUS_MCU_I2S3] = { 0x0040, BIT(19) },
+ [RST_BUS_MCU_SPDIF] = { 0x004c, BIT(16) },
+ [RST_BUS_MCU_DMIC] = { 0x0054, BIT(16) },
+ [RST_BUS_MCU_AUDIO_CODEC] = { 0x0060, BIT(16) },
+ [RST_BUS_MCU_DSP_MSGBOX] = { 0x0068, BIT(16) },
+ [RST_BUS_MCU_DSP_CFG] = { 0x006c, BIT(16) },
+ [RST_BUS_MCU_NPU] = { 0x0070, BIT(16) },
+ [RST_BUS_MCU_TIMER] = { 0x008c, BIT(16) },
+ /* dsp and dsp_debug resets only found in BSP code. */
+ [RST_BUS_MCU_DSP_DEBUG] = { 0x0100, BIT(16) },
+ [RST_BUS_MCU_DSP] = { 0x0100, BIT(17) },
+ [RST_BUS_MCU_DMA] = { 0x0104, BIT(16) },
+ [RST_BUS_MCU_PUBSRAM] = { 0x0114, BIT(16) },
+ [RST_BUS_MCU_RISCV_CFG] = { 0x0124, BIT(16) },
+ [RST_BUS_MCU_RISCV_DEBUG] = { 0x0124, BIT(17) },
+ [RST_BUS_MCU_RISCV_CORE] = { 0x0124, BIT(18) },
+ [RST_BUS_MCU_RISCV_MSGBOX] = { 0x0128, BIT(16) },
+ [RST_BUS_MCU_PWM0] = { 0x0134, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_mcu_ccu_desc = {
+ .ccu_clks = sun55i_a523_mcu_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_mcu_ccu_clks),
+
+ .hw_clks = &sun55i_a523_mcu_hw_clks,
+
+ .resets = sun55i_a523_mcu_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_mcu_ccu_resets),
+};
+
+static int sun55i_a523_mcu_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+ int ret;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ val = readl(reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+ /*
+ * The PLL clock code does not model all bits, for instance it does
+ * not support a separate enable and gate bit. We present the
+ * gate bit (27) as the enable bit, but then have to set the
+ * PLL Enable, LDO Enable, and Lock Enable bits on the PLL here.
+ */
+ val |= BIT(31) | BIT(30) | BIT(29);
+
+ /* Enforce p1 = 5, p0 = 2 (the default) for PLL_AUDIO1 */
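+ /* The p1/p0 fields appear to encode divider - 1, hence the 4 and 1 below. */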
+ val &= ~(GENMASK(22, 20) | GENMASK(18, 16));
+ val |= (4 << 20) | (1 << 16);
+
+ writel(val, reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_mcu_ccu_desc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id sun55i_a523_mcu_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-mcu-ccu" },
+ { }
+};
+
+static struct platform_driver sun55i_a523_mcu_ccu_driver = {
+ .probe = sun55i_a523_mcu_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-mcu-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_mcu_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_mcu_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 MCU CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
new file mode 100644
index 000000000000..70ce0ca0cb7d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ * Based on the D1 CCU driver:
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+
+#include "ccu-sun55i-a523-r.h"
+
+static const struct clk_parent_data r_ahb_apb_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "pll-periph" },
+ { .fw_name = "pll-audio" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(r_ahb_clk, "r-ahb",
+ r_ahb_apb_parents, 0x000,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX(r_apb0_clk, "r-apb0",
+ r_ahb_apb_parents, 0x00c,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX(r_apb1_clk, "r-apb1",
+ r_ahb_apb_parents, 0x010,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer0, "r-timer0",
+ r_ahb_apb_parents, 0x100,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer1, "r-timer1",
+ r_ahb_apb_parents, 0x104,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer2, "r-timer2",
+ r_ahb_apb_parents, 0x108,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+
+static SUNXI_CCU_GATE_HW(bus_r_timer_clk, "bus-r-timer", &r_ahb_clk.common.hw,
+ 0x11c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_twd_clk, "bus-r-twd", &r_apb0_clk.common.hw,
+ 0x12c, BIT(0), 0);
+
+static const struct clk_parent_data r_pwmctrl_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(r_pwmctrl_clk, "r-pwmctrl",
+ r_pwmctrl_parents, 0x130,
+ 24, 2, /* mux */
+ BIT(31),
+ 0);
+static SUNXI_CCU_GATE_HW(bus_r_pwmctrl_clk, "bus-r-pwmctrl",
+ &r_apb0_clk.common.hw, 0x13c, BIT(0), 0);
+
+/* SPI clock is /M/N (same as new MMC?) */
+static SUNXI_CCU_GATE_HW(bus_r_spi_clk, "bus-r-spi",
+ &r_ahb_clk.common.hw, 0x15c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_spinlock_clk, "bus-r-spinlock",
+ &r_ahb_clk.common.hw, 0x16c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_msgbox_clk, "bus-r-msgbox",
+ &r_ahb_clk.common.hw, 0x17c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_uart0_clk, "bus-r-uart0",
+ &r_apb1_clk.common.hw, 0x18c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_uart1_clk, "bus-r-uart1",
+ &r_apb1_clk.common.hw, 0x18c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c0_clk, "bus-r-i2c0",
+ &r_apb1_clk.common.hw, 0x19c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c1_clk, "bus-r-i2c1",
+ &r_apb1_clk.common.hw, 0x19c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c2_clk, "bus-r-i2c2",
+ &r_apb1_clk.common.hw, 0x19c, BIT(2), 0);
+static SUNXI_CCU_GATE_HW(bus_r_ppu0_clk, "bus-r-ppu0",
+ &r_apb0_clk.common.hw, 0x1ac, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_ppu1_clk, "bus-r-ppu1",
+ &r_apb0_clk.common.hw, 0x1ac, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_cpu_bist_clk, "bus-r-cpu-bist",
+ &r_apb0_clk.common.hw, 0x1bc, BIT(0), 0);
+
+static const struct clk_parent_data r_ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(r_ir_rx_clk, "r-ir-rx",
+ r_ir_rx_parents, 0x1c0,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
+ &r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
+ &r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
+ &r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
+ &r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
+
+static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
+ &r_ahb_clk.common,
+ &r_apb0_clk.common,
+ &r_apb1_clk.common,
+ &r_cpu_timer0.common,
+ &r_cpu_timer1.common,
+ &r_cpu_timer2.common,
+ &bus_r_timer_clk.common,
+ &bus_r_twd_clk.common,
+ &r_pwmctrl_clk.common,
+ &bus_r_pwmctrl_clk.common,
+ &bus_r_spi_clk.common,
+ &bus_r_spinlock_clk.common,
+ &bus_r_msgbox_clk.common,
+ &bus_r_uart0_clk.common,
+ &bus_r_uart1_clk.common,
+ &bus_r_i2c0_clk.common,
+ &bus_r_i2c1_clk.common,
+ &bus_r_i2c2_clk.common,
+ &bus_r_ppu0_clk.common,
+ &bus_r_ppu1_clk.common,
+ &bus_r_cpu_bist_clk.common,
+ &r_ir_rx_clk.common,
+ &bus_r_ir_rx_clk.common,
+ &bus_r_dma_clk.common,
+ &bus_r_rtc_clk.common,
+ &bus_r_cpucfg_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_r_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.common.hw,
+ [CLK_R_APB0] = &r_apb0_clk.common.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_TIMER0] = &r_cpu_timer0.common.hw,
+ [CLK_R_TIMER1] = &r_cpu_timer1.common.hw,
+ [CLK_R_TIMER2] = &r_cpu_timer2.common.hw,
+ [CLK_BUS_R_TIMER] = &bus_r_timer_clk.common.hw,
+ [CLK_BUS_R_TWD] = &bus_r_twd_clk.common.hw,
+ [CLK_R_PWMCTRL] = &r_pwmctrl_clk.common.hw,
+ [CLK_BUS_R_PWMCTRL] = &bus_r_pwmctrl_clk.common.hw,
+ [CLK_BUS_R_SPI] = &bus_r_spi_clk.common.hw,
+ [CLK_BUS_R_SPINLOCK] = &bus_r_spinlock_clk.common.hw,
+ [CLK_BUS_R_MSGBOX] = &bus_r_msgbox_clk.common.hw,
+ [CLK_BUS_R_UART0] = &bus_r_uart0_clk.common.hw,
+ [CLK_BUS_R_UART1] = &bus_r_uart1_clk.common.hw,
+ [CLK_BUS_R_I2C0] = &bus_r_i2c0_clk.common.hw,
+ [CLK_BUS_R_I2C1] = &bus_r_i2c1_clk.common.hw,
+ [CLK_BUS_R_I2C2] = &bus_r_i2c2_clk.common.hw,
+ [CLK_BUS_R_PPU0] = &bus_r_ppu0_clk.common.hw,
+ [CLK_BUS_R_PPU1] = &bus_r_ppu1_clk.common.hw,
+ [CLK_BUS_R_CPU_BIST] = &bus_r_cpu_bist_clk.common.hw,
+ [CLK_R_IR_RX] = &r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_IR_RX] = &bus_r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_DMA] = &bus_r_dma_clk.common.hw,
+ [CLK_BUS_R_RTC] = &bus_r_rtc_clk.common.hw,
+ [CLK_BUS_R_CPUCFG] = &bus_r_cpucfg_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun55i_a523_r_ccu_resets[] = {
+ [RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
+ [RST_BUS_R_TWD] = { 0x12c, BIT(16) },
+ [RST_BUS_R_PWMCTRL] = { 0x13c, BIT(16) },
+ [RST_BUS_R_SPI] = { 0x15c, BIT(16) },
+ [RST_BUS_R_SPINLOCK] = { 0x16c, BIT(16) },
+ [RST_BUS_R_MSGBOX] = { 0x17c, BIT(16) },
+ [RST_BUS_R_UART0] = { 0x18c, BIT(16) },
+ [RST_BUS_R_UART1] = { 0x18c, BIT(17) },
+ [RST_BUS_R_I2C0] = { 0x19c, BIT(16) },
+ [RST_BUS_R_I2C1] = { 0x19c, BIT(17) },
+ [RST_BUS_R_I2C2] = { 0x19c, BIT(18) },
+ [RST_BUS_R_PPU1] = { 0x1ac, BIT(17) },
+ [RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
+ [RST_BUS_R_RTC] = { 0x20c, BIT(16) },
+ [RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+ [RST_BUS_R_PPU0] = { 0x1ac, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_r_ccu_desc = {
+ .ccu_clks = sun55i_a523_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_r_ccu_clks),
+
+ .hw_clks = &sun55i_a523_r_hw_clks,
+
+ .resets = sun55i_a523_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_r_ccu_resets),
+};
+
+static int sun55i_a523_r_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_r_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_r_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-r-ccu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_a523_r_ccu_ids);
+
+static struct platform_driver sun55i_a523_r_ccu_driver = {
+ .probe = sun55i_a523_r_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_r_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_r_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 PRCM CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h
new file mode 100644
index 000000000000..d50f46ac4f3f
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Arm Ltd.
+ */
+
+#ifndef _CCU_SUN55I_A523_R_H
+#define _CCU_SUN55I_A523_R_H
+
+#include <dt-bindings/clock/sun55i-a523-r-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-r-ccu.h>
+
+#define CLK_NUMBER (CLK_BUS_R_CPUCFG + 1)
+
+#endif /* _CCU_SUN55I_A523_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
new file mode 100644
index 000000000000..acb532f8361b
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -0,0 +1,1701 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024 Arm Ltd.
+ * Based on the D1 CCU driver:
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+
+#include "../clk.h"
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+/*
+ * The 24 MHz oscillator, the root of most of the clock tree.
+ * .fw_name is the string used in the DT "clock-names" property; it
+ * identifies the corresponding clock in the "clocks" property.
+ */
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
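+
+/*
+ * For illustration only (not part of this driver): the DT node probing
+ * this CCU is expected to provide those inputs roughly as below. The
+ * unit address and the provider phandles/indices are assumptions; the
+ * compatible string and the "hosc"/"losc"/"iosc" names come from this
+ * file.
+ *
+ *	ccu: clock-controller@2001000 {
+ *		compatible = "allwinner,sun55i-a523-ccu";
+ *		reg = <0x2001000 0x1000>;
+ *		clocks = <&osc24M>, <&rtc 0>, <&rtc 2>;
+ *		clock-names = "hosc", "losc", "iosc";
+ *		#clock-cells = <1>;
+ *		#reset-cells = <1>;
+ *	};
+ */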
+
+/**************************************************************************
+ * PLLs *
+ **************************************************************************/
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K. */
+#define SUN55I_A523_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ddr0", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE |
+ CLK_IS_CRITICAL),
+ },
+};
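+
+/*
+ * Worked example for the formula above, with hypothetical factor values:
+ * N = 50, input divider M = 1 and output divider P = 1 turn the 24 MHz
+ * oscillator into 24 MHz * 50 / 1 / 1 = 1200 MHz; K is fixed at 1.
+ */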
+
+/*
+ * There is no actual clock output at the PLL's base frequency (2.4 GHz);
+ * instead the PLL has multiple outputs with adjustable dividers from that
+ * base frequency. Model them separately as divider clocks of that parent
+ * here.
+ */
+#define SUN55I_A523_PLL_PERIPH0_REG 0x020
+static struct ccu_nm pll_periph0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph0-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+/*
+ * Most clock-defining macros expect an *array* of parent clocks, even if
+ * they do not contain a muxer to select between different parents.
+ * The macros ending in just _HW take a simple clock pointer, but then
+ * create a single-entry array out of that. The macros using _HWS take such
+ * an array directly (even when it has a single entry), which avoids having
+ * those helper arrays created inside *every* clock definition.
+ * This means that for every clock referenced more than once it is useful
+ * to create such a dummy array and use _HWS.
+ */
+static const struct clk_hw *pll_periph0_4x_hws[] = {
+ &pll_periph0_4x_clk.common.hw
+};
+
+static SUNXI_CCU_M_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_4x_hws, 0x020, 16, 3, 0);
+static const struct clk_hw *pll_periph0_2x_hws[] = {
+ &pll_periph0_2x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph0_800M_clk, "pll-periph0-800M",
+ pll_periph0_4x_hws, 0x020, 20, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph0_480M_clk, "pll-periph0-480M",
+ pll_periph0_4x_hws, 0x020, 2, 3, 0);
+static const struct clk_hw *pll_periph0_480M_hws[] = {
+ &pll_periph0_480M_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph0_600M_clk, "pll-periph0-600M",
+ pll_periph0_2x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_400M_clk, "pll-periph0-400M",
+ pll_periph0_2x_hws, 3, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_300M_clk, "pll-periph0-300M",
+ pll_periph0_2x_hws, 4, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_200M_clk, "pll-periph0-200M",
+ pll_periph0_2x_hws, 6, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_150M_clk, "pll-periph0-150M",
+ pll_periph0_2x_hws, 8, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_160M_clk, "pll-periph0-160M",
+ pll_periph0_480M_hws, 3, 1, 0);
+static const struct clk_hw *pll_periph0_150M_hws[] = {
+ &pll_periph0_150M_clk.hw
+};
+
+#define SUN55I_A523_PLL_PERIPH1_REG 0x028
+static struct ccu_nm pll_periph1_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x028,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph1-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_periph1_4x_hws[] = {
+ &pll_periph1_4x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph1_2x_clk, "pll-periph1-2x",
+ pll_periph1_4x_hws, 0x028, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph1_800M_clk, "pll-periph1-800M",
+ pll_periph1_4x_hws, 0x028, 20, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph1_480M_clk, "pll-periph1-480M",
+ pll_periph1_4x_hws, 0x028, 2, 3, 0);
+
+static const struct clk_hw *pll_periph1_2x_hws[] = {
+ &pll_periph1_2x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph1_600M_clk, "pll-periph1-600M",
+ pll_periph1_2x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_400M_clk, "pll-periph1-400M",
+ pll_periph1_2x_hws, 3, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_300M_clk, "pll-periph1-300M",
+ pll_periph1_2x_hws, 4, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_200M_clk, "pll-periph1-200M",
+ pll_periph1_2x_hws, 6, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_150M_clk, "pll-periph1-150M",
+ pll_periph1_2x_hws, 8, 1, 0);
+static const struct clk_hw *pll_periph1_480M_hws[] = {
+ &pll_periph1_480M_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph1_160M_clk, "pll-periph1-160M",
+ pll_periph1_480M_hws, 3, 1, 0);
+
+#define SUN55I_A523_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-gpu", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+#define SUN55I_A523_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video0_8x_hws[] = {
+ &pll_video0_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video0_4x_clk, "pll-video0-4x",
+ pll_video0_8x_hws, 0x040, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video0_3x_clk, "pll-video0-3x",
+ pll_video0_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x048,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video1_8x_hws[] = {
+ &pll_video1_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video1_4x_clk, "pll-video1-4x",
+ pll_video1_8x_hws, 0x048, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video1_3x_clk, "pll-video1-3x",
+ pll_video1_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VIDEO2_REG 0x050
+static struct ccu_nm pll_video2_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x050,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video2-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video2_8x_hws[] = {
+ &pll_video2_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video2_4x_clk, "pll-video2-4x",
+ pll_video2_8x_hws, 0x050, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video2_3x_clk, "pll-video2-3x",
+ pll_video2_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ve", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+#define SUN55I_A523_PLL_VIDEO3_REG 0x068
+static struct ccu_nm pll_video3_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x068,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video3-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video3_8x_hws[] = {
+ &pll_video3_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video3_4x_clk, "pll-video3-4x",
+ pll_video3_8x_hws, 0x068, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video3_3x_clk, "pll-video3-3x",
+ pll_video3_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+/*
+ * PLL_AUDIO0 has m0 and m1 dividers in addition to the usual N, M factors.
+ * Since we only need two fixed frequencies from this PLL (22.5792 MHz x 4
+ * and 24.576 MHz x 4), ignore those dividers and force both of them to 1
+ * (encoded as 0) in the probe function below.
+ * The M factor must be an even number to produce a 50% duty cycle output.
+ */
+#define SUN55I_A523_PLL_AUDIO0_REG 0x078
+static struct ccu_sdm_setting pll_audio0_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc000872b, .m = 20, .n = 75 },
+ { .rate = 98304000, .pattern = 0xc0004dd3, .m = 12, .n = 49 },
+};
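+
+/*
+ * Sanity check for the table above: the SDM pattern shifts the effective
+ * multiplier to a fractional value, about N = 75.264 and N = 49.152, so
+ * that 24 MHz * 75.264 / 20 = 90.3168 MHz (22.5792 MHz * 4) and
+ * 24 MHz * 49.152 / 12 = 98.304 MHz (24.576 MHz * 4). The integer factors
+ * alone would only yield 90.0 MHz and 98.0 MHz.
+ */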
+
+static struct ccu_nm pll_audio0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
+ 0x178, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
+ .common = {
+ .reg = 0x078,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio0-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static CLK_FIXED_FACTOR_HW(pll_audio0_2x_clk, "pll-audio0-2x",
+ &pll_audio0_4x_clk.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(pll_audio0_clk, "pll-audio0",
+ &pll_audio0_4x_clk.common.hw, 4, 1, 0);
+
+#define SUN55I_A523_PLL_NPU_REG 0x080
+static struct ccu_nm pll_npu_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+		.reg		= 0x080,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-npu-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+static CLK_FIXED_FACTOR_HW(pll_npu_2x_clk, "pll-npu-2x",
+ &pll_npu_4x_clk.common.hw, 2, 1, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR_HW(pll_npu_1x_clk, "pll-npu-1x",
+ &pll_npu_4x_clk.common.hw, 4, 1, 0);
+
+/**************************************************************************
+ * bus clocks *
+ **************************************************************************/
+
+static const struct clk_parent_data ahb_apb0_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_600M_clk.hw },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX(ahb_clk, "ahb", ahb_apb0_parents, 0x510,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ 0);
+static const struct clk_hw *ahb_hws[] = { &ahb_clk.common.hw };
+
+static SUNXI_CCU_M_DATA_WITH_MUX(apb0_clk, "apb0", ahb_apb0_parents, 0x520,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ 0);
+static const struct clk_hw *apb0_hws[] = { &apb0_clk.common.hw };
+
+static const struct clk_parent_data apb1_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(apb1_clk, "apb1", apb1_parents, 0x524,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *apb1_hws[] = { &apb1_clk.common.hw };
+
+static const struct clk_parent_data mbus_parents[] = {
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph1_600M_clk.hw },
+ { .hw = &pll_periph1_480M_clk.common.hw },
+ { .hw = &pll_periph1_400M_clk.hw },
+ { .hw = &pll_periph1_150M_clk.hw },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(mbus_clk, "mbus", mbus_parents,
+ 0x540,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL,
+ CCU_FEATURE_UPDATE_BIT);
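+
+/*
+ * CCU_FEATURE_UPDATE_BIT marks registers where a changed divider or mux
+ * setting only takes effect after an additional update bit is written;
+ * the shared sunxi CCU helpers perform that write.
+ */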
+
+static const struct clk_hw *mbus_hws[] = { &mbus_clk.common.hw };
+
+/**************************************************************************
+ * mod clocks with gates *
+ **************************************************************************/
+
+static const struct clk_hw *de_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_video3_3x_clk.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_de_clk, "bus-de", ahb_hws, 0x60c, BIT(0), 0);
+
+static const struct clk_hw *di_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(di_clk, "di", di_parents, 0x620,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_di_clk, "bus-di", ahb_hws, 0x62c, BIT(0), 0);
+
+static const struct clk_hw *g2d_parents[] = {
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(g2d_clk, "g2d", g2d_parents, 0x630,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_g2d_clk, "bus-g2d", ahb_hws, 0x63c, BIT(0), 0);
+
+static const struct clk_hw *gpu_parents[] = {
+ &pll_gpu_clk.common.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_200M_clk.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_gpu_clk, "bus-gpu", ahb_hws, 0x67c, BIT(0), 0);
+
+static const struct clk_parent_data ce_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_300M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ce_clk, "bus-ce", ahb_hws, 0x68c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ce_sys_clk, "bus-ce-sys", ahb_hws, 0x68c,
+ BIT(1), 0);
+
+static const struct clk_hw *ve_parents[] = {
+ &pll_ve_clk.common.hw,
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", ahb_hws, 0x69c, BIT(0), 0);
+
+static const struct clk_hw *npu_parents[] = {
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_npu_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(npu_clk, "npu", npu_parents, 0x6e0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", ahb_hws, 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_msgbox_clk, "bus-msgbox", ahb_hws, 0x71c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spinlock_clk, "bus-spinlock", ahb_hws, 0x72c,
+ BIT(0), 0);
+
+static const struct clk_parent_data hstimer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "losc" },
+ { .hw = &pll_periph0_200M_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer0_clk, "hstimer0",
+ hstimer_parents, 0x730,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer1_clk, "hstimer1",
+ hstimer_parents,
+ 0x734,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer2_clk, "hstimer2",
+ hstimer_parents,
+ 0x738,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer3_clk, "hstimer3",
+ hstimer_parents,
+ 0x73c,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer4_clk, "hstimer4",
+ hstimer_parents,
+ 0x740,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer5_clk, "hstimer5",
+ hstimer_parents,
+ 0x744,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hstimer_clk, "bus-hstimer", ahb_hws, 0x74c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dbg_clk, "bus-dbg", ahb_hws, 0x78c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_pwm0_clk, "bus-pwm0", apb1_hws, 0x7ac, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_pwm1_clk, "bus-pwm1", apb1_hws, 0x7ac, BIT(1), 0);
+
+static const struct clk_parent_data iommu_parents[] = {
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+ { .fw_name = "hosc" },
+};
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(iommu_clk, "iommu", iommu_parents,
+ 0x7b0,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT,
+ CCU_FEATURE_UPDATE_BIT);
+
+static SUNXI_CCU_GATE_HWS(bus_iommu_clk, "bus-iommu", apb0_hws, 0x7bc,
+ BIT(0), 0);
+
+static const struct clk_parent_data dram_parents[] = {
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(dram_clk, "dram", dram_parents,
+ 0x800,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL,
+ CCU_FEATURE_UPDATE_BIT);
+
+static SUNXI_CCU_GATE_HWS(mbus_dma_clk, "mbus-dma", mbus_hws,
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ve_clk, "mbus-ve", mbus_hws,
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ce_clk, "mbus-ce", mbus_hws,
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(mbus_nand_clk, "mbus-nand", mbus_hws,
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(mbus_usb3_clk, "mbus-usb3", mbus_hws,
+ 0x804, BIT(6), 0);
+static SUNXI_CCU_GATE_HWS(mbus_csi_clk, "mbus-csi", mbus_hws,
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE_HWS(mbus_isp_clk, "mbus-isp", mbus_hws,
+ 0x804, BIT(9), 0);
+static SUNXI_CCU_GATE_HWS(mbus_gmac1_clk, "mbus-gmac1", mbus_hws,
+ 0x804, BIT(12), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dram_clk, "bus-dram", ahb_hws, 0x80c,
+ BIT(0), CLK_IS_CRITICAL);
+
+static const struct clk_parent_data nand_mmc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_300M_clk.hw },
+ { .hw = &pll_periph1_400M_clk.hw },
+ { .hw = &pll_periph1_300M_clk.hw },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(nand0_clk, "nand0", nand_mmc_parents,
+ 0x810,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(nand1_clk, "nand1", nand_mmc_parents,
+ 0x814,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_nand_clk, "bus-nand", ahb_hws, 0x82c,
+ BIT(0), 0);
+
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc0_clk, "mmc0", nand_mmc_parents,
+ 0x830,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc1_clk, "mmc1", nand_mmc_parents,
+ 0x834,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static const struct clk_parent_data mmc2_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph1_800M_clk.common.hw },
+ { .hw = &pll_periph1_600M_clk.hw },
+};
+
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", ahb_hws, 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc1_clk, "bus-mmc1", ahb_hws, 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc2_clk, "bus-mmc2", ahb_hws, 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_sysdap_clk, "bus-sysdap", apb1_hws, 0x88c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_uart0_clk, "bus-uart0", apb1_hws, 0x90c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart1_clk, "bus-uart1", apb1_hws, 0x90c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart2_clk, "bus-uart2", apb1_hws, 0x90c,
+ BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart3_clk, "bus-uart3", apb1_hws, 0x90c,
+ BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart4_clk, "bus-uart4", apb1_hws, 0x90c,
+ BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart5_clk, "bus-uart5", apb1_hws, 0x90c,
+ BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart6_clk, "bus-uart6", apb1_hws, 0x90c,
+ BIT(6), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart7_clk, "bus-uart7", apb1_hws, 0x90c,
+ BIT(7), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2c0_clk, "bus-i2c0", apb1_hws, 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c1_clk, "bus-i2c1", apb1_hws, 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c2_clk, "bus-i2c2", apb1_hws, 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c3_clk, "bus-i2c3", apb1_hws, 0x91c, BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c4_clk, "bus-i2c4", apb1_hws, 0x91c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c5_clk, "bus-i2c5", apb1_hws, 0x91c, BIT(5), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_can_clk, "bus-can", apb1_hws, 0x92c, BIT(0), 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_300M_clk.hw },
+ { .hw = &pll_periph0_200M_clk.hw },
+ { .hw = &pll_periph1_300M_clk.hw },
+ { .hw = &pll_periph1_200M_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi0_clk, "spi0", spi_parents, 0x940,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi1_clk, "spi1", spi_parents, 0x944,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi2_clk, "spi2", spi_parents, 0x948,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spifc_clk, "spifc", nand_mmc_parents, 0x950,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_spi0_clk, "bus-spi0", ahb_hws, 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi1_clk, "bus-spi1", ahb_hws, 0x96c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi2_clk, "bus-spi2", ahb_hws, 0x96c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_spifc_clk, "bus-spifc", ahb_hws, 0x96c,
+ BIT(3), 0);
+
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac0_25M_clk, "emac0-25M",
+ pll_periph0_150M_hws,
+ 0x970, BIT(31) | BIT(30), 6, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac1_25M_clk, "emac1-25M",
+ pll_periph0_150M_hws,
+ 0x974, BIT(31) | BIT(30), 6, 0);
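+
+/* Both 25M gates above: 150 MHz / 6 = 25 MHz, the ethernet PHY reference. */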
+static SUNXI_CCU_GATE_HWS(bus_emac0_clk, "bus-emac0", ahb_hws, 0x97c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_emac1_clk, "bus-emac1", ahb_hws, 0x98c,
+ BIT(0), 0);
+
+static const struct clk_parent_data ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ir_rx_clk, "ir-rx", ir_rx_parents, 0x990,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_ir_rx_clk, "bus-ir-rx", apb0_hws, 0x99c,
+ BIT(0), 0);
+
+static const struct clk_parent_data ir_tx_ledc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph1_600M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_ledc_parents,
+ 0x9c0,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_ir_tx_clk, "bus-ir-tx", apb0_hws, 0x9cc,
+ BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_GATE(gpadc0_clk, "gpadc0", "hosc", 0x9e0,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_M_WITH_GATE(gpadc1_clk, "gpadc1", "hosc", 0x9e4,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_gpadc0_clk, "bus-gpadc0", ahb_hws, 0x9ec,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_gpadc1_clk, "bus-gpadc1", ahb_hws, 0x9ec,
+ BIT(1), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ths_clk, "bus-ths", apb0_hws, 0x9fc, BIT(0), 0);
+
+/*
+ * The first parent is a 48 MHz input clock divided by 4. That 48 MHz clock is
+ * a 2x multiplier from osc24M synchronized by pll-periph0, and is also used by
+ * the OHCI module.
+ */
+static const struct clk_parent_data usb_ohci_parents[] = {
+ { .hw = &pll_periph0_4x_clk.common.hw },
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+static const struct ccu_mux_fixed_prediv usb_ohci_predivs[] = {
+ { .index = 0, .div = 50 },
+ { .index = 1, .div = 2 },
+};
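+
+/*
+ * Resulting mux inputs, for reference: index 0 is 2.4 GHz / 50 = 48 MHz,
+ * index 1 is 24 MHz / 2 = 12 MHz; losc and iosc are used undivided.
+ */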
+
+static struct ccu_mux usb_ohci0_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa70,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci0",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static struct ccu_mux usb_ohci1_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa74,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci1",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE_HWS(bus_ohci0_clk, "bus-ohci0", ahb_hws, 0xa8c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ohci1_clk, "bus-ohci1", ahb_hws, 0xa8c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci0_clk, "bus-ehci0", ahb_hws, 0xa8c,
+ BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci1_clk, "bus-ehci1", ahb_hws, 0xa8c,
+ BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_otg_clk, "bus-otg", ahb_hws, 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_lradc_clk, "bus-lradc", apb0_hws, 0xa9c,
+ BIT(0), 0);
+
+static const struct clk_parent_data losc_hosc_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(pcie_aux_clk, "pcie-aux",
+ losc_hosc_parents, 0xaa0,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_display0_top_clk, "bus-display0-top", ahb_hws,
+ 0xabc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_display1_top_clk, "bus-display1-top", ahb_hws,
+ 0xacc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(hdmi_24M_clk, "hdmi-24M", osc24M, 0xb04, BIT(31), 0);
+
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(hdmi_cec_32k_clk, "hdmi-cec-32k",
+ pll_periph0_2x_hws,
+ 0xb10, BIT(30), 36621, 0);
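+
+/* 1.2 GHz / 36621 is about 32768.1 Hz, approximating the 32.768 kHz rate. */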
+
+static const struct clk_parent_data hdmi_cec_parents[] = {
+ { .fw_name = "losc" },
+ { .hw = &hdmi_cec_32k_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(hdmi_cec_clk, "hdmi-cec", hdmi_cec_parents,
+ 0xb10,
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hdmi_clk, "bus-hdmi", ahb_hws, 0xb1c, BIT(0), 0);
+
+static const struct clk_parent_data mipi_dsi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_200M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi0_clk, "mipi-dsi0",
+ mipi_dsi_parents, 0xb24,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi1_clk, "mipi-dsi1",
+ mipi_dsi_parents, 0xb28,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi0_clk, "bus-mipi-dsi0", ahb_hws, 0xb4c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi1_clk, "bus-mipi-dsi1", ahb_hws, 0xb4c,
+ BIT(1), 0);
+
+static const struct clk_hw *tcon_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_3x_clk.hw,
+ &pll_video1_3x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
+ 0xb60,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
+ 0xb64,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *tcon_tv_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd2_clk, "tcon-lcd2",
+ tcon_tv_parents, 0xb68,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(combophy_dsi0_clk, "combophy-dsi0",
+ tcon_parents, 0xb6c,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(combophy_dsi1_clk, "combophy-dsi1",
+ tcon_parents, 0xb70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd0_clk, "bus-tcon-lcd0", ahb_hws, 0xb7c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd1_clk, "bus-tcon-lcd1", ahb_hws, 0xb7c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd2_clk, "bus-tcon-lcd2", ahb_hws, 0xb7c,
+ BIT(2), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_tv_parents,
+ 0xb80,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_tv_parents,
+ 0xb84,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv0_clk, "bus-tcon-tv0", ahb_hws, 0xb9c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv1_clk, "bus-tcon-tv1", ahb_hws, 0xb9c,
+ BIT(1), 0);
+
+static const struct clk_hw *edp_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(edp_clk, "edp", edp_parents, 0xbb0,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_edp_clk, "bus-edp", ahb_hws, 0xbbc, BIT(0), 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ledc_clk, "ledc", ir_tx_ledc_parents,
+ 0xbf0,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ledc_clk, "bus-ledc", apb0_hws, 0xbfc, BIT(0), 0);
+
+static const struct clk_hw *csi_top_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_480M_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_video3_3x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(csi_top_clk, "csi-top", csi_top_parents,
+ 0xc04,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data csi_mclk_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_video3_4x_clk.common.hw },
+ { .hw = &pll_video0_4x_clk.common.hw },
+ { .hw = &pll_video1_4x_clk.common.hw },
+ { .hw = &pll_video2_4x_clk.common.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk0_clk, "csi-mclk0", csi_mclk_parents,
+ 0xc08,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk1_clk, "csi-mclk1", csi_mclk_parents,
+ 0xc0c,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk2_clk, "csi-mclk2", csi_mclk_parents,
+ 0xc10,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk3_clk, "csi-mclk3", csi_mclk_parents,
+ 0xc14,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_csi_clk, "bus-csi", ahb_hws, 0xc1c, BIT(0), 0);
+
+static const struct clk_hw *isp_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(isp_clk, "isp", isp_parents, 0xc20,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw, },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "dsp", dsp_parents, 0xc70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_DATA(fanout_24M_clk, "fanout-24M", osc24M,
+ 0xf30, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA_WITH_PREDIV(fanout_12M_clk, "fanout-12M", osc24M,
+ 0xf30, BIT(1), 2, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_16M_clk, "fanout-16M",
+ pll_periph0_480M_hws,
+ 0xf30, BIT(2), 30, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_25M_clk, "fanout-25M",
+ pll_periph0_2x_hws,
+ 0xf30, BIT(3), 48, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_50M_clk, "fanout-50M",
+ pll_periph0_2x_hws,
+ 0xf30, BIT(4), 24, 0);
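+
+/*
+ * Predividers above: 24 MHz / 2 = 12 MHz, 480 MHz / 30 = 16 MHz,
+ * 1.2 GHz / 48 = 25 MHz and 1.2 GHz / 24 = 50 MHz.
+ */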
+
+static const struct clk_parent_data fanout_27M_parents[] = {
+ { .hw = &pll_video0_4x_clk.common.hw },
+ { .hw = &pll_video1_4x_clk.common.hw },
+ { .hw = &pll_video2_4x_clk.common.hw },
+ { .hw = &pll_video3_4x_clk.common.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(fanout_27M_clk, "fanout-27M",
+ fanout_27M_parents, 0xf34,
+ 0, 5, /* div0 */
+ 8, 5, /* div1 */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data fanout_pclk_parents[] = {
+ { .hw = &apb0_clk.common.hw }
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(fanout_pclk_clk, "fanout-pclk",
+ fanout_pclk_parents,
+ 0xf38,
+ 0, 5, /* div0 */
+ 5, 5, /* div1 */
+ 0, 0, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data fanout_parents[] = {
+ { .fw_name = "losc-fanout" },
+ { .hw = &fanout_12M_clk.common.hw, },
+ { .hw = &fanout_16M_clk.common.hw, },
+ { .hw = &fanout_24M_clk.common.hw, },
+ { .hw = &fanout_25M_clk.common.hw, },
+ { .hw = &fanout_27M_clk.common.hw, },
+ { .hw = &fanout_pclk_clk.common.hw, },
+ { .hw = &fanout_50M_clk.common.hw, },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout0_clk, "fanout0", fanout_parents,
+ 0xf3c,
+ 0, 3, /* mux */
+ BIT(21), /* gate */
+ 0);
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout1_clk, "fanout1", fanout_parents,
+ 0xf3c,
+ 3, 3, /* mux */
+ BIT(22), /* gate */
+ 0);
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout2_clk, "fanout2", fanout_parents,
+ 0xf3c,
+ 6, 3, /* mux */
+ BIT(23), /* gate */
+ 0);
+
+/*
+ * Contains all clocks that are controlled by a hardware register. They
+ * have a (sunxi) .common member, which the common sunxi CCU code
+ * initialises with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_ccu_clks[] = {
+ &pll_ddr_clk.common,
+ &pll_periph0_4x_clk.common,
+ &pll_periph0_2x_clk.common,
+ &pll_periph0_800M_clk.common,
+ &pll_periph0_480M_clk.common,
+ &pll_periph1_4x_clk.common,
+ &pll_periph1_2x_clk.common,
+ &pll_periph1_800M_clk.common,
+ &pll_periph1_480M_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_8x_clk.common,
+ &pll_video0_4x_clk.common,
+ &pll_video1_8x_clk.common,
+ &pll_video1_4x_clk.common,
+ &pll_video2_8x_clk.common,
+ &pll_video2_4x_clk.common,
+ &pll_video3_8x_clk.common,
+ &pll_video3_4x_clk.common,
+ &pll_ve_clk.common,
+ &pll_audio0_4x_clk.common,
+ &pll_npu_4x_clk.common,
+ &ahb_clk.common,
+ &apb0_clk.common,
+ &apb1_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &di_clk.common,
+ &bus_di_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &gpu_clk.common,
+ &bus_gpu_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &bus_ce_sys_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &npu_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &hstimer0_clk.common,
+ &hstimer1_clk.common,
+ &hstimer2_clk.common,
+ &hstimer3_clk.common,
+ &hstimer4_clk.common,
+ &hstimer5_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_dbg_clk.common,
+ &bus_pwm0_clk.common,
+ &bus_pwm1_clk.common,
+ &iommu_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_usb3_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_isp_clk.common,
+ &mbus_gmac1_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_sysdap_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_uart6_clk.common,
+ &bus_uart7_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_i2c4_clk.common,
+ &bus_i2c5_clk.common,
+ &bus_can_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spifc_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_spi2_clk.common,
+ &bus_spifc_clk.common,
+ &emac0_25M_clk.common,
+ &emac1_25M_clk.common,
+ &bus_emac0_clk.common,
+ &bus_emac1_clk.common,
+ &ir_rx_clk.common,
+ &bus_ir_rx_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &gpadc0_clk.common,
+ &gpadc1_clk.common,
+ &bus_gpadc0_clk.common,
+ &bus_gpadc1_clk.common,
+ &bus_ths_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_otg_clk.common,
+ &bus_lradc_clk.common,
+ &pcie_aux_clk.common,
+ &bus_display0_top_clk.common,
+ &bus_display1_top_clk.common,
+ &hdmi_24M_clk.common,
+ &hdmi_cec_32k_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &mipi_dsi0_clk.common,
+ &mipi_dsi1_clk.common,
+ &bus_mipi_dsi0_clk.common,
+ &bus_mipi_dsi1_clk.common,
+ &tcon_lcd0_clk.common,
+ &tcon_lcd1_clk.common,
+ &tcon_lcd2_clk.common,
+ &combophy_dsi0_clk.common,
+ &combophy_dsi1_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &bus_tcon_lcd1_clk.common,
+ &bus_tcon_lcd2_clk.common,
+ &tcon_tv0_clk.common,
+ &tcon_tv1_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &bus_tcon_tv1_clk.common,
+ &edp_clk.common,
+ &bus_edp_clk.common,
+ &ledc_clk.common,
+ &bus_ledc_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk0_clk.common,
+ &csi_mclk1_clk.common,
+ &csi_mclk2_clk.common,
+ &csi_mclk3_clk.common,
+ &bus_csi_clk.common,
+ &isp_clk.common,
+ &dsp_clk.common,
+ &fanout_24M_clk.common,
+ &fanout_12M_clk.common,
+ &fanout_16M_clk.common,
+	&fanout_25M_clk.common,
+	&fanout_50M_clk.common,
+ &fanout_27M_clk.common,
+ &fanout_pclk_clk.common,
+ &fanout0_clk.common,
+ &fanout1_clk.common,
+ &fanout2_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
+ .hws = {
+ [CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.common.hw,
+ [CLK_PLL_PERIPH0_800M] = &pll_periph0_800M_clk.common.hw,
+ [CLK_PLL_PERIPH0_480M] = &pll_periph0_480M_clk.common.hw,
+ [CLK_PLL_PERIPH0_600M] = &pll_periph0_600M_clk.hw,
+ [CLK_PLL_PERIPH0_400M] = &pll_periph0_400M_clk.hw,
+ [CLK_PLL_PERIPH0_300M] = &pll_periph0_300M_clk.hw,
+ [CLK_PLL_PERIPH0_200M] = &pll_periph0_200M_clk.hw,
+ [CLK_PLL_PERIPH0_160M] = &pll_periph0_160M_clk.hw,
+ [CLK_PLL_PERIPH0_150M] = &pll_periph0_150M_clk.hw,
+ [CLK_PLL_PERIPH1_4X] = &pll_periph1_4x_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.common.hw,
+ [CLK_PLL_PERIPH1_800M] = &pll_periph1_800M_clk.common.hw,
+ [CLK_PLL_PERIPH1_480M] = &pll_periph1_480M_clk.common.hw,
+ [CLK_PLL_PERIPH1_600M] = &pll_periph1_600M_clk.hw,
+ [CLK_PLL_PERIPH1_400M] = &pll_periph1_400M_clk.hw,
+ [CLK_PLL_PERIPH1_300M] = &pll_periph1_300M_clk.hw,
+ [CLK_PLL_PERIPH1_200M] = &pll_periph1_200M_clk.hw,
+ [CLK_PLL_PERIPH1_160M] = &pll_periph1_160M_clk.hw,
+ [CLK_PLL_PERIPH1_150M] = &pll_periph1_150M_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0_8X] = &pll_video0_8x_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.common.hw,
+ [CLK_PLL_VIDEO0_3X] = &pll_video0_3x_clk.hw,
+ [CLK_PLL_VIDEO1_8X] = &pll_video1_8x_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.common.hw,
+ [CLK_PLL_VIDEO1_3X] = &pll_video1_3x_clk.hw,
+ [CLK_PLL_VIDEO2_8X] = &pll_video2_8x_clk.common.hw,
+ [CLK_PLL_VIDEO2_4X] = &pll_video2_4x_clk.common.hw,
+ [CLK_PLL_VIDEO2_3X] = &pll_video2_3x_clk.hw,
+ [CLK_PLL_VIDEO3_8X] = &pll_video3_8x_clk.common.hw,
+ [CLK_PLL_VIDEO3_4X] = &pll_video3_4x_clk.common.hw,
+ [CLK_PLL_VIDEO3_3X] = &pll_video3_3x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_AUDIO0_4X] = &pll_audio0_4x_clk.common.hw,
+ [CLK_PLL_AUDIO0_2X] = &pll_audio0_2x_clk.hw,
+ [CLK_PLL_AUDIO0] = &pll_audio0_clk.hw,
+ [CLK_PLL_NPU_4X] = &pll_npu_4x_clk.common.hw,
+ [CLK_PLL_NPU_2X] = &pll_npu_2x_clk.hw,
+ [CLK_PLL_NPU] = &pll_npu_1x_clk.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DI] = &di_clk.common.hw,
+ [CLK_BUS_DI] = &bus_di_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_CE_SYS] = &bus_ce_sys_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_HSTIMER0] = &hstimer0_clk.common.hw,
+ [CLK_HSTIMER1] = &hstimer1_clk.common.hw,
+ [CLK_HSTIMER2] = &hstimer2_clk.common.hw,
+ [CLK_HSTIMER3] = &hstimer3_clk.common.hw,
+ [CLK_HSTIMER4] = &hstimer4_clk.common.hw,
+ [CLK_HSTIMER5] = &hstimer5_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PWM0] = &bus_pwm0_clk.common.hw,
+ [CLK_BUS_PWM1] = &bus_pwm1_clk.common.hw,
+ [CLK_IOMMU] = &iommu_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_ISP] = &mbus_isp_clk.common.hw,
+ [CLK_MBUS_EMAC1] = &mbus_gmac1_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_SYSDAP] = &bus_sysdap_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_UART6] = &bus_uart6_clk.common.hw,
+ [CLK_BUS_UART7] = &bus_uart7_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw,
+ [CLK_BUS_I2C5] = &bus_i2c5_clk.common.hw,
+ [CLK_BUS_CAN] = &bus_can_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPIFC] = &spifc_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw,
+ [CLK_BUS_SPIFC] = &bus_spifc_clk.common.hw,
+ [CLK_EMAC0_25M] = &emac0_25M_clk.common.hw,
+ [CLK_EMAC1_25M] = &emac1_25M_clk.common.hw,
+ [CLK_BUS_EMAC0] = &bus_emac0_clk.common.hw,
+ [CLK_BUS_EMAC1] = &bus_emac1_clk.common.hw,
+ [CLK_IR_RX] = &ir_rx_clk.common.hw,
+ [CLK_BUS_IR_RX] = &bus_ir_rx_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_GPADC0] = &gpadc0_clk.common.hw,
+ [CLK_GPADC1] = &gpadc1_clk.common.hw,
+ [CLK_BUS_GPADC0] = &bus_gpadc0_clk.common.hw,
+ [CLK_BUS_GPADC1] = &bus_gpadc1_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw,
+ [CLK_PCIE_AUX] = &pcie_aux_clk.common.hw,
+ [CLK_BUS_DISPLAY0_TOP] = &bus_display0_top_clk.common.hw,
+ [CLK_BUS_DISPLAY1_TOP] = &bus_display1_top_clk.common.hw,
+ [CLK_HDMI_24M] = &hdmi_24M_clk.common.hw,
+ [CLK_HDMI_CEC_32K] = &hdmi_cec_32k_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_MIPI_DSI0] = &mipi_dsi0_clk.common.hw,
+ [CLK_MIPI_DSI1] = &mipi_dsi1_clk.common.hw,
+ [CLK_BUS_MIPI_DSI0] = &bus_mipi_dsi0_clk.common.hw,
+ [CLK_BUS_MIPI_DSI1] = &bus_mipi_dsi1_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw,
+ [CLK_TCON_LCD2] = &tcon_lcd2_clk.common.hw,
+ [CLK_COMBOPHY_DSI0] = &combophy_dsi0_clk.common.hw,
+ [CLK_COMBOPHY_DSI1] = &combophy_dsi1_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw,
+ [CLK_BUS_TCON_LCD2] = &bus_tcon_lcd2_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
+ [CLK_EDP] = &edp_clk.common.hw,
+ [CLK_BUS_EDP] = &bus_edp_clk.common.hw,
+ [CLK_LEDC] = &ledc_clk.common.hw,
+ [CLK_BUS_LEDC] = &bus_ledc_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK0] = &csi_mclk0_clk.common.hw,
+ [CLK_CSI_MCLK1] = &csi_mclk1_clk.common.hw,
+ [CLK_CSI_MCLK2] = &csi_mclk2_clk.common.hw,
+ [CLK_CSI_MCLK3] = &csi_mclk3_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DSP] = &dsp_clk.common.hw,
+ [CLK_FANOUT_24M] = &fanout_24M_clk.common.hw,
+ [CLK_FANOUT_12M] = &fanout_12M_clk.common.hw,
+ [CLK_FANOUT_16M] = &fanout_16M_clk.common.hw,
+ [CLK_FANOUT_25M] = &fanout_25M_clk.common.hw,
+ [CLK_FANOUT_27M] = &fanout_27M_clk.common.hw,
+ [CLK_FANOUT_PCLK] = &fanout_pclk_clk.common.hw,
+ [CLK_FANOUT0] = &fanout0_clk.common.hw,
+ [CLK_FANOUT1] = &fanout1_clk.common.hw,
+ [CLK_FANOUT2] = &fanout2_clk.common.hw,
+ [CLK_NPU] = &npu_clk.common.hw,
+ },
+ .num = CLK_NPU + 1,
+};
+
+static const struct ccu_reset_map sun55i_a523_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+ [RST_BUS_NSI] = { 0x54c, BIT(16) },
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DI] = { 0x62c, BIT(16) },
+ [RST_BUS_G2D] = { 0x63c, BIT(16) },
+ [RST_BUS_SYS] = { 0x64c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_SYS_CE] = { 0x68c, BIT(17) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX] = { 0x71c, BIT(16) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_CPUXTIMER] = { 0x74c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PWM0] = { 0x7ac, BIT(16) },
+ [RST_BUS_PWM1] = { 0x7ac, BIT(17) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_SYSDAP] = { 0x88c, BIT(16) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_UART6] = { 0x90c, BIT(22) },
+ [RST_BUS_UART7] = { 0x90c, BIT(23) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_I2C4] = { 0x91c, BIT(20) },
+ [RST_BUS_I2C5] = { 0x91c, BIT(21) },
+ [RST_BUS_CAN] = { 0x92c, BIT(16) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_SPI2] = { 0x96c, BIT(18) },
+ [RST_BUS_SPIFC] = { 0x96c, BIT(19) },
+ [RST_BUS_EMAC0] = { 0x97c, BIT(16) },
+ [RST_BUS_EMAC1] = { 0x98c, BIT(16) | BIT(17) }, /* GMAC1-AXI */
+ [RST_BUS_IR_RX] = { 0x99c, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_GPADC0] = { 0x9ec, BIT(16) },
+ [RST_BUS_GPADC1] = { 0x9ec, BIT(17) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_3] = { 0xa8c, BIT(25) }, /* BSP + register */
+ [RST_BUS_LRADC] = { 0xa9c, BIT(16) },
+ [RST_BUS_PCIE_USB3] = { 0xaac, BIT(16) },
+ [RST_BUS_DISPLAY0_TOP] = { 0xabc, BIT(16) },
+ [RST_BUS_DISPLAY1_TOP] = { 0xacc, BIT(16) },
+ [RST_BUS_HDMI_MAIN] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_MIPI_DSI0] = { 0xb4c, BIT(16) },
+ [RST_BUS_MIPI_DSI1] = { 0xb4c, BIT(17) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
+ [RST_BUS_TCON_LCD2] = { 0xb7c, BIT(18) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS0] = { 0xbac, BIT(16) },
+ [RST_BUS_LVDS1] = { 0xbac, BIT(17) },
+ [RST_BUS_EDP] = { 0xbbc, BIT(16) },
+ [RST_BUS_VIDEO_OUT0] = { 0xbcc, BIT(16) },
+ [RST_BUS_VIDEO_OUT1] = { 0xbcc, BIT(17) },
+ [RST_BUS_LEDC] = { 0xbfc, BIT(16) },
+ [RST_BUS_CSI] = { 0xc1c, BIT(16) },
+ [RST_BUS_ISP] = { 0xc2c, BIT(16) }, /* BSP + register */
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_ccu_desc = {
+ .ccu_clks = sun55i_a523_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_ccu_clks),
+
+ .hw_clks = &sun55i_a523_hw_clks,
+
+ .resets = sun55i_a523_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN55I_A523_PLL_DDR0_REG,
+ SUN55I_A523_PLL_PERIPH0_REG,
+ SUN55I_A523_PLL_PERIPH1_REG,
+ SUN55I_A523_PLL_GPU_REG,
+ SUN55I_A523_PLL_VIDEO0_REG,
+ SUN55I_A523_PLL_VIDEO1_REG,
+ SUN55I_A523_PLL_VIDEO2_REG,
+ SUN55I_A523_PLL_VE_REG,
+ SUN55I_A523_PLL_VIDEO3_REG,
+ SUN55I_A523_PLL_AUDIO0_REG,
+ SUN55I_A523_PLL_NPU_REG,
+};
+
+static int sun55i_a523_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+	int i;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+	/*
+	 * The PLL clock code does not model all bits; for instance, it does
+	 * not support a separate enable and gate bit. We present the
+	 * gate bit (27) as the enable bit, but then have to set the
+	 * PLL Enable, LDO Enable, and Lock Enable bits on all PLLs here.
+	 */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(31) | BIT(30) | BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
+ val = readl(reg + SUN55I_A523_PLL_AUDIO0_REG);
+ val &= ~(BIT(1) | BIT(0));
+ writel(val, reg + SUN55I_A523_PLL_AUDIO0_REG);
+
+	return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-ccu" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun55i_a523_ccu_ids);
+
+static struct platform_driver sun55i_a523_ccu_driver = {
+ .probe = sun55i_a523_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 1f4bc0e773a7..c9bf1fdb8a8a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -731,7 +731,7 @@ static struct clk_hw_onecell_data sun5i_a10s_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun5i_a10s_ccu_resets[] = {
+static const struct ccu_reset_map sun5i_a10s_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index e8b8d2dd7f2c..bab65cfe9501 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1146,7 +1146,7 @@ static struct clk_hw_onecell_data sun6i_a31_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun6i_a31_ccu_resets[] = {
+static const struct ccu_reset_map sun6i_a31_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1283,6 +1283,6 @@ static struct platform_driver sun6i_a31_ccu_driver = {
};
module_platform_driver(sun6i_a31_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A31/A31s CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 87e23d16ed0f..f6bfeba009e8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -325,6 +325,13 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
.osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
};
+static const struct sun6i_rtc_match_data sun55i_a523_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .have_iosc_calibration = true,
+ .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
.compatible = "allwinner,sun50i-h616-rtc",
@@ -334,6 +341,10 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {
+ .compatible = "allwinner,sun55i-a523-rtc",
+ .data = &sun55i_a523_rtc_ccu_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match);
@@ -356,7 +367,7 @@ int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
const char *fw_name;
/* ext-osc32k was the only input clock in the old binding. */
- fw_name = of_property_read_bool(dev->of_node, "clock-names")
+ fw_name = of_property_present(dev->of_node, "clock-names")
? "ext-osc32k" : NULL;
ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
if (IS_ERR(ext_osc32k_clk))
@@ -381,6 +392,6 @@ int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
}
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H616/R329 RTC CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 6c2a08f722a8..78cf3818ab09 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -668,7 +668,7 @@ static struct clk_hw_onecell_data sun8i_a23_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a23_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a23_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -763,6 +763,6 @@ static struct platform_driver sun8i_a23_ccu_driver = {
};
module_platform_driver(sun8i_a23_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A23 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 5e0bc08a9ce3..b039d419512c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -712,7 +712,7 @@ static struct clk_hw_onecell_data sun8i_a33_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a33_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a33_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -835,6 +835,6 @@ static struct platform_driver sun8i_a33_ccu_driver = {
};
module_platform_driver(sun8i_a33_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A33 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index cb4c6b16c467..60e918965a72 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -797,7 +797,7 @@ static struct clk_hw_onecell_data sun8i_a83t_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a83t_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -923,6 +923,6 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
};
module_platform_driver(sun8i_a83t_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A83T CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 7683ea08d8e3..a6cd0f988859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -146,7 +147,7 @@ static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = {
.num = CLK_NUMBER_WITH_ROT,
};
-static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
* Mixer1 reset line is shared with wb, so only RST_WB is
@@ -156,7 +157,7 @@ static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun8i_h3_de2_resets[] = {
+static const struct ccu_reset_map sun8i_h3_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
* Mixer1 reset line is shared with wb, so only RST_WB is
@@ -166,14 +167,14 @@ static struct ccu_reset_map sun8i_h3_de2_resets[] = {
[RST_WB] = { 0x08, BIT(2) },
};
-static struct ccu_reset_map sun50i_a64_de2_resets[] = {
+static const struct ccu_reset_map sun50i_a64_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
[RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun50i_h5_de2_resets[] = {
+static const struct ccu_reset_map sun50i_h5_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
@@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = {
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct clk *bus_clk, *mod_clk;
@@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
+ /*
+	 * The DE33 requires these additional (unknown) registers to be
+	 * set during initialisation.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h616-de33-clk")) {
+ writel(0, reg + 0x24);
+ writel(0x0000a980, reg + 0x28);
+ }
+
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
@@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h6-de3-clk",
.data = &sun50i_h5_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-clk",
+ .data = &sun50i_h616_de33_clk_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
@@ -348,6 +373,6 @@ static struct platform_driver sunxi_de2_clk_driver = {
};
module_platform_driver(sunxi_de2_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner SoCs DE2 CCU");
MODULE_LICENSE("GPL");
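The DE33 support above reuses the H5 DE2 clock and reset tables and differs only in the two writes to undocumented registers, gated on the new compatible so older SoCs are untouched. The per-compatible descriptor follows the usual of_device_id match-data pattern; a hedged sketch of how a probe typically retrieves it (the actual probe code is not shown in this hunk):

    /* Sketch; assumed retrieval via match data. */
    const struct sunxi_ccu_desc *ccu_desc;

    ccu_desc = of_device_get_match_data(&pdev->dev);
    if (!ccu_desc)
            return -EINVAL;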
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 13e57db2f8d5..740c4c97331c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -876,7 +876,7 @@ static struct clk_hw_onecell_data sun50i_h5_hw_clks = {
.num = CLK_NUMBER_H5,
};
-static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -939,7 +939,7 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_SCR0] = { 0x2d8, BIT(20) },
};
-static struct ccu_reset_map sun50i_h5_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h5_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1094,6 +1094,6 @@ static struct platform_driver sun8i_h3_ccu_driver = {
};
module_platform_driver(sun8i_h3_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H3 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index da6569334d68..0e324344673b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -178,7 +178,7 @@ static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_RSB] = { 0xb0, BIT(3) },
@@ -186,14 +186,14 @@ static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
[RST_APB0_I2C] = { 0xb0, BIT(6) },
};
-static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_h3_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_UART] = { 0xb0, BIT(4) },
[RST_APB0_I2C] = { 0xb0, BIT(6) },
};
-static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_RSB] = { 0xb0, BIT(3) },
@@ -274,6 +274,6 @@ static struct platform_driver sun8i_r_ccu_driver = {
};
module_platform_driver(sun8i_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for Allwinner SoCs' PRCM CCUs");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 2f51ceab8016..44565830881d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -439,7 +439,7 @@ static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2",
0x06c, BIT(3), 0);
/*
- * In datasheet here's "Reserved", however the gate exists in BSP soucre
+ * The datasheet says "Reserved" here; however, the gate exists in the BSP source
* code.
*/
static SUNXI_CCU_GATE(bus_can_clk, "bus-can", "apb2",
@@ -1162,7 +1162,7 @@ static struct clk_hw_onecell_data sun8i_r40_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_r40_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_r40_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1375,6 +1375,6 @@ static struct platform_driver sun8i_r40_ccu_driver = {
};
module_platform_driver(sun8i_r40_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner R40 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index d24c0d8dfee4..05595ac51b76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -347,12 +347,13 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",
static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31),
- CLK_SET_RATE_PARENT);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
-static const char * const tcon_parents[] = { "pll-video" };
+static const char * const tcon_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
0x130, BIT(31), 0);
@@ -362,11 +363,11 @@ static const char * const csi_mclk_parents[] = { "osc24M", "pll-video",
static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents,
0x130, 0, 5, 8, 3, BIT(15), 0);
-static const char * const csi1_sclk_parents[] = { "pll-video", "pll-isp" };
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_sclk_clk, "csi-sclk", csi1_sclk_parents,
+static const char * const csi_sclk_parents[] = { "pll-video", "pll-isp" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi-mclk", csi_mclk_parents,
+static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
@@ -452,7 +453,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&tcon_clk.common,
&csi_misc_clk.common,
&csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
+ &csi_sclk_clk.common,
&csi1_mclk_clk.common,
&ve_clk.common,
&ac_dig_clk.common,
@@ -551,7 +552,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
@@ -633,7 +634,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
@@ -644,7 +645,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
.num = CLK_I2S0 + 1,
};
-static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_MBUS] = { 0x0fc, BIT(31) },
@@ -679,7 +680,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_UART2] = { 0x2d8, BIT(18) },
};
-static struct ccu_reset_map sun8i_v3_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_v3_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_MBUS] = { 0x0fc, BIT(31) },
@@ -754,6 +755,21 @@ static int sun8i_v3s_ccu_probe(struct platform_device *pdev)
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
+ /*
+ * Assign the DE and TCON clock to the video PLL. Both clocks need to
+ * have the same parent for the units to work together.
+ */
+
+ val = readl(reg + de_clk.common.reg);
+ val &= ~GENMASK(de_clk.mux.shift + de_clk.mux.width - 1,
+ de_clk.mux.shift);
+ writel(val, reg + de_clk.common.reg);
+
+ val = readl(reg + tcon_clk.common.reg);
+ val &= ~GENMASK(tcon_clk.mux.shift + tcon_clk.mux.width - 1,
+ tcon_clk.mux.shift);
+ writel(val, reg + tcon_clk.common.reg);
+
return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
@@ -780,6 +796,6 @@ static struct platform_driver sun8i_v3s_ccu_driver = {
};
module_platform_driver(sun8i_v3s_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner V3s CCU");
MODULE_LICENSE("GPL");
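The new probe code above forces both the DE and TCON muxes to parent index 0 (pll-video) so the two blocks share a parent, and the CLK_SET_RATE_NO_REPARENT flags added to the macros keep the framework from silently undoing that on a later set_rate call. Worked out for the tcon mux, using the register, shift and width from its macro (0x118, 24, 3):

    u32 mask = GENMASK(24 + 3 - 1, 24);     /* bits 26:24 -- the mux field */

    val = readl(reg + 0x118);               /* tcon clock register */
    val &= ~mask;                           /* field = 0 selects pll-video */
    writel(val, reg + 0x118);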
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 0975ac58949f..91e5dc448bc0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -177,7 +177,7 @@ static struct clk_hw_onecell_data sun9i_a80_de_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_de_resets[] = {
+static const struct ccu_reset_map sun9i_a80_de_resets[] = {
[RST_FE0] = { 0x0c, BIT(0) },
[RST_FE1] = { 0x0c, BIT(1) },
[RST_FE2] = { 0x0c, BIT(2) },
@@ -266,6 +266,6 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
};
module_platform_driver(sun9i_a80_de_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 Display Engine CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index e5527c8cc64f..62063f525616 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -68,7 +68,7 @@ static struct clk_hw_onecell_data sun9i_a80_usb_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_usb_resets[] = {
+static const struct ccu_reset_map sun9i_a80_usb_resets[] = {
[RST_USB0_HCI] = { 0x0, BIT(17) },
[RST_USB1_HCI] = { 0x0, BIT(18) },
[RST_USB2_HCI] = { 0x0, BIT(19) },
@@ -138,6 +138,6 @@ static struct platform_driver sun9i_a80_usb_clk_driver = {
};
module_platform_driver(sun9i_a80_usb_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 USB CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 756dd8fca6b0..337751998005 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1108,7 +1108,7 @@ static struct clk_hw_onecell_data sun9i_a80_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_ccu_resets[] = {
+static const struct ccu_reset_map sun9i_a80_ccu_resets[] = {
/* AHB0 reset controls */
[RST_BUS_FD] = { 0x5a0, BIT(0) },
[RST_BUS_VE] = { 0x5a0, BIT(1) },
@@ -1248,6 +1248,6 @@ static struct platform_driver sun9i_a80_ccu_driver = {
};
module_platform_driver(sun9i_a80_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 52f1a04269f8..35935423145e 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -477,7 +477,7 @@ static struct clk_hw_onecell_data suniv_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map suniv_ccu_resets[] = {
+static const struct ccu_reset_map suniv_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_BUS_DMA] = { 0x2c0, BIT(6) },
@@ -577,6 +577,6 @@ static struct platform_driver suniv_f1c100s_ccu_driver = {
};
module_platform_driver(suniv_f1c100s_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner newer F1C100s CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 4117b0bea267..c7e00f0c29a5 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -37,7 +37,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
-EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, "SUNXI_CCU");
bool ccu_is_better_rate(struct ccu_common *common,
unsigned long target_rate,
@@ -59,14 +59,14 @@ bool ccu_is_better_rate(struct ccu_common *common,
return current_rate <= target_rate && current_rate > best_rate;
}
-EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, "SUNXI_CCU");
/*
* This clock notifier is called when the frequency of a PLL clock is
* changed. In common PLL designs, changes to the dividers take effect
* almost immediately, while changes to the multipliers (implemented
* as dividers in the feedback loop) take a few cycles to work into
- * the feedback loop for the PLL to stablize.
+ * the feedback loop for the PLL to stabilize.
*
* Sometimes when the PLL clock rate is changed, the decrease in the
* divider is too much for the decrease in the multiplier to catch up.
@@ -107,7 +107,7 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
return clk_notifier_register(pll_nb->common->hw.clk,
&pll_nb->clk_nb);
}
-EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, "SUNXI_CCU");
static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
struct device_node *node, void __iomem *reg,
@@ -234,7 +234,7 @@ int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, "SUNXI_CCU");
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
const struct sunxi_ccu_desc *desc)
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 329734f8cf42..bbec283b9d99 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -19,10 +19,15 @@
#define CCU_FEATURE_SIGMA_DELTA_MOD BIT(7)
#define CCU_FEATURE_KEY_FIELD BIT(8)
#define CCU_FEATURE_CLOSEST_RATE BIT(9)
+#define CCU_FEATURE_DUAL_DIV BIT(10)
+#define CCU_FEATURE_UPDATE_BIT BIT(11)
/* MMC timing mode switch bit */
#define CCU_MMC_NEW_TIMING_MODE BIT(30)
+/* Some clocks need this bit to actually apply register changes */
+#define CCU_SUNXI_UPDATE_BIT BIT(27)
+
struct device_node;
struct ccu_common {
@@ -50,7 +55,7 @@ struct sunxi_ccu_desc {
struct clk_hw_onecell_data *hw_clks;
- struct ccu_reset_map *resets;
+ const struct ccu_reset_map *resets;
unsigned long num_resets;
};
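CCU_FEATURE_UPDATE_BIT, added above, marks clocks whose register writes only take effect once bit 27 (CCU_SUNXI_UPDATE_BIT) is written together with the new value; the gate, divider and mux helpers later in this diff all OR it into their read-modify-write sequence. The shared pattern, reduced:

    reg = readl(common->base + common->reg);
    if (common->features & CCU_FEATURE_UPDATE_BIT)
            reg |= CCU_SUNXI_UPDATE_BIT;    /* BIT(27): latch the change */
    writel(reg | value, common->base + common->reg);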
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index cb10a3ea23f9..916d6da6d8a3 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -106,6 +106,8 @@ static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(cd->common.base + cd->common.reg);
reg &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ if (cd->common.features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg | (val << cd->div.shift),
cd->common.base + cd->common.reg);
@@ -141,4 +143,4 @@ const struct clk_ops ccu_div_ops = {
.recalc_rate = ccu_div_recalc_rate,
.set_rate = ccu_div_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_div_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_div_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 90d49ee8e0cc..be00b3277e97 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -274,6 +274,24 @@ struct ccu_div {
SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_P_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV_FLAGS(_mshift, _mwidth, \
+ CLK_DIVIDER_POWER_OF_TWO), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
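SUNXI_CCU_P_DATA_WITH_MUX_GATE, added above, builds a divider whose register field is an exponent (CLK_DIVIDER_POWER_OF_TWO) rather than a plain divisor, so the output rate is the parent shifted right by the field value:

    /* With CLK_DIVIDER_POWER_OF_TWO, a field value p divides by 2^p: */
    rate = parent_rate >> p;        /* p = 3 -> parent / 8 */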
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index b31f3ad946d6..75323912608a 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -18,7 +18,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
return !(readl(common->base + common->reg) & cf->enable);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, "SUNXI_CCU");
void ccu_frac_helper_enable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -34,7 +34,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
writel(reg & ~cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, "SUNXI_CCU");
void ccu_frac_helper_disable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -50,7 +50,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
writel(reg | cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, "SUNXI_CCU");
bool ccu_frac_helper_has_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -61,7 +61,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
return (cf->rates[0] == rate) || (cf->rates[1] == rate);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, "SUNXI_CCU");
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -83,7 +83,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, "SUNXI_CCU");
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -112,4 +112,4 @@ int ccu_frac_helper_set_rate(struct ccu_common *common,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index a2115a21807d..30673fe4e3c2 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -20,11 +20,13 @@ void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg & ~gate, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, "SUNXI_CCU");
static void ccu_gate_disable(struct clk_hw *hw)
{
@@ -44,13 +46,15 @@ int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg | gate, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, "SUNXI_CCU");
static int ccu_gate_enable(struct clk_hw *hw)
{
@@ -66,7 +70,7 @@ int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate)
return readl(common->base + common->reg) & gate;
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, "SUNXI_CCU");
static int ccu_gate_is_enabled(struct clk_hw *hw)
{
@@ -87,8 +91,8 @@ static unsigned long ccu_gate_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_gate *cg = hw_to_ccu_gate(hw);
int div = 1;
@@ -97,14 +101,16 @@ static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
div = cg->common.prediv;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate;
+ unsigned long best_parent = req->rate;
if (cg->common.features & CCU_FEATURE_ALL_PREDIV)
best_parent *= div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int ccu_gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -123,8 +129,8 @@ const struct clk_ops ccu_gate_ops = {
.disable = ccu_gate_disable,
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_gate_round_rate,
+ .determine_rate = ccu_gate_determine_rate,
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, "SUNXI_CCU");
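The ccu_gate conversion above is the template for the round_rate → determine_rate changes that repeat through the rest of this diff (ccu_nk, ccu_nkmp, ccu_nm, and the Tegra drivers): the rate argument and the parent-rate out-parameter collapse into struct clk_rate_request, the chosen rate is returned through req->rate, and the return value becomes a plain error code. A reduced sketch (foo_* is a hypothetical clock, shown with a fixed divide-by-two policy):

    /* Old shape: rate (or a negative error) returned through the long. */
    static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long *prate);

    /* New shape: everything travels inside the request. */
    static int foo_determine_rate(struct clk_hw *hw,
                                  struct clk_rate_request *req)
    {
            req->rate = req->best_parent_rate / 2;  /* example policy */
            return 0;                               /* or -EINVAL */
    }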
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index cc94a694cb67..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -10,15 +10,23 @@
#include "ccu_gate.h"
#include "ccu_mp.h"
+static unsigned int next_div(unsigned int div, bool shift)
+{
+ if (shift)
+ return div << 1;
+ return div + 1;
+}
+
static unsigned long ccu_mp_find_best(unsigned long parent, unsigned long rate,
unsigned int max_m, unsigned int max_p,
+ bool shift,
unsigned int *m, unsigned int *p)
{
unsigned long best_rate = 0;
unsigned int best_m = 0, best_p = 0;
unsigned int _m, _p;
- for (_p = 1; _p <= max_p; _p <<= 1) {
+ for (_p = 1; _p <= max_p; _p = next_div(_p, shift)) {
for (_m = 1; _m <= max_m; _m++) {
unsigned long tmp_rate = parent / _p / _m;
@@ -43,7 +51,8 @@ static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
unsigned long *parent,
unsigned long rate,
unsigned int max_m,
- unsigned int max_p)
+ unsigned int max_p,
+ bool shift)
{
unsigned long parent_rate_saved;
unsigned long parent_rate, now;
@@ -60,7 +69,7 @@ static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
maxdiv = max_m * max_p;
maxdiv = min(ULONG_MAX / rate, maxdiv);
- for (_p = 1; _p <= max_p; _p <<= 1) {
+ for (_p = 1; _p <= max_p; _p = next_div(_p, shift)) {
for (_m = 1; _m <= max_m; _m++) {
div = _m * _p;
@@ -103,18 +112,26 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
struct ccu_mp *cmp = data;
unsigned int max_m, max_p;
unsigned int m, p;
+ bool shift = true;
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= cmp->fixed_post_div;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ shift = false;
+
max_m = cmp->m.max ?: 1 << cmp->m.width;
- max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ if (shift)
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ else
+ max_p = cmp->p.max ?: 1 << cmp->p.width;
if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
- rate = ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
+ rate = ccu_mp_find_best(*parent_rate, rate, max_m, max_p, shift,
+ &m, &p);
} else {
rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
- max_m, max_p);
+ max_m, max_p, shift);
}
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
@@ -167,7 +184,11 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p = reg >> cmp->p.shift;
p &= (1 << cmp->p.width) - 1;
- rate = (parent_rate >> p) / m;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
+ else
+ rate = (parent_rate >> p) / m;
+
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= cmp->fixed_post_div;
@@ -190,20 +211,27 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
unsigned int max_m, max_p;
unsigned int m, p;
+ bool shift = true;
u32 reg;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ shift = false;
+
/* Adjust parent_rate according to pre-dividers */
parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
parent_rate);
max_m = cmp->m.max ?: 1 << cmp->m.width;
- max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ if (shift)
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ else
+ max_p = cmp->p.max ?: 1 << cmp->p.width;
/* Adjust target rate according to post-dividers */
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate * cmp->fixed_post_div;
- ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
+ ccu_mp_find_best(parent_rate, rate, max_m, max_p, shift, &m, &p);
spin_lock_irqsave(cmp->common.lock, flags);
@@ -211,7 +239,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
reg |= (m - cmp->m.offset) << cmp->m.shift;
- reg |= ilog2(p) << cmp->p.shift;
+ if (shift)
+ reg |= ilog2(p) << cmp->p.shift;
+ else
+ reg |= (p - cmp->p.offset) << cmp->p.shift;
writel(reg, cmp->common.base + cmp->common.reg);
@@ -246,7 +277,7 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, "SUNXI_CCU");
/*
* Support for MMC timing mode switching
@@ -327,4 +358,4 @@ const struct clk_ops ccu_mp_mmc_ops = {
.recalc_rate = ccu_mp_mmc_recalc_rate,
.set_rate = ccu_mp_mmc_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, "SUNXI_CCU");
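CCU_FEATURE_DUAL_DIV switches the P divider of an MP clock from a power-of-two exponent to a linear divider. As the hunks above show, that changes three things: the candidate step in the search loops (next_div), the maximum divider, and how the register field is encoded and decoded. Side by side, with "field" being the raw register value:

    /* Default (exponent): candidates double, field stores ilog2(p). */
    for (p = 1; p <= max_p; p <<= 1) { /* try 1, 2, 4, 8, ... */ }
    rate = (parent_rate >> field) / m;

    /* CCU_FEATURE_DUAL_DIV (linear): candidates increment,
     * field stores p - offset. */
    for (p = 1; p <= max_p; p++) { /* try 1, 2, 3, 4, ... */ }
    rate = parent_rate / (field + offset) / m;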
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 6e50f3728fb5..bb09c649bfa3 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
+ _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, _flags)\
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
@@ -82,18 +104,22 @@ struct ccu_mp {
_muxshift, _muxwidth, \
0, _flags)
-#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
- _mshift, _mwidth, \
- _pshift, _pwidth, \
- _muxshift, _muxwidth, \
- _gate, _flags) \
+#define SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, \
+ _flags) \
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
.p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
.mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
.common = { \
.reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV | \
+ CCU_FEATURE_DUAL_DIV, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
_parents, \
&ccu_mp_ops, \
@@ -101,6 +127,49 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags, _features) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .features = _features, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, \
+ _reg, _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags, 0)
+
+#define SUNXI_CCU_DUALDIV_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, \
+ _reg, _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags, \
+ CCU_FEATURE_DUAL_DIV)
+
#define SUNXI_CCU_MP_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 7bee217ef111..8d5720f3dec1 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -170,4 +170,4 @@ const struct clk_ops ccu_mult_ops = {
.recalc_rate = ccu_mult_recalc_rate,
.set_rate = ccu_mult_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 5edc63b46651..74f9e98a5d35 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -66,7 +66,7 @@ unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
{
return parent_rate / ccu_mux_get_prediv(common, cm, parent_index);
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, "SUNXI_CCU");
static unsigned long ccu_mux_helper_unapply_prediv(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -155,7 +155,7 @@ out:
req->rate = best_rate;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, "SUNXI_CCU");
u8 ccu_mux_helper_get_parent(struct ccu_common *common,
struct ccu_mux_internal *cm)
@@ -178,7 +178,7 @@ u8 ccu_mux_helper_get_parent(struct ccu_common *common,
return parent;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, "SUNXI_CCU");
int ccu_mux_helper_set_parent(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -197,6 +197,8 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
/* The key field always reads as zero. */
if (common->features & CCU_FEATURE_KEY_FIELD)
reg |= CCU_MUX_KEY_VALUE;
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
writel(reg | (index << cm->shift), common->base + common->reg);
@@ -205,7 +207,7 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, "SUNXI_CCU");
static void ccu_mux_disable(struct clk_hw *hw)
{
@@ -273,7 +275,7 @@ const struct clk_ops ccu_mux_ops = {
.determine_rate = ccu_mux_determine_rate,
.recalc_rate = ccu_mux_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, "SUNXI_CCU");
/*
* This clock notifier is called when the frequency of the parent
@@ -308,4 +310,4 @@ int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb)
return clk_notifier_register(clk, &mux_nb->clk_nb);
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 8aa35d5804f3..5db748fbb5bd 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -92,26 +92,26 @@ static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
struct _ccu_nk _nk;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nk->fixed_post_div;
+ req->rate *= nk->fixed_post_div;
_nk.min_n = nk->n.min ?: 1;
_nk.max_n = nk->n.max ?: 1 << nk->n.width;
_nk.min_k = nk->k.min ?: 1;
_nk.max_k = nk->k.max ?: 1 << nk->k.width;
- rate = ccu_nk_find_best(*parent_rate, rate, &_nk);
+ req->rate = ccu_nk_find_best(req->best_parent_rate, req->rate, &_nk);
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nk->fixed_post_div;
+ req->rate = req->rate / nk->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +155,7 @@ const struct clk_ops ccu_nk_ops = {
.is_enabled = ccu_nk_is_enabled,
.recalc_rate = ccu_nk_recalc_rate,
- .round_rate = ccu_nk_round_rate,
+ .determine_rate = ccu_nk_determine_rate,
.set_rate = ccu_nk_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 1168d894d636..784eec9ac997 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -267,4 +267,4 @@ const struct clk_ops ccu_nkm_ops = {
.recalc_rate = ccu_nkm_recalc_rate,
.set_rate = ccu_nkm_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 99359a06892d..25efb5b37607 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -127,20 +127,20 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nkmp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nkmp->fixed_post_div;
+ req->rate *= nkmp->fixed_post_div;
- if (nkmp->max_rate && rate > nkmp->max_rate) {
- rate = nkmp->max_rate;
+ if (nkmp->max_rate && req->rate > nkmp->max_rate) {
+ req->rate = nkmp->max_rate;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nkmp->fixed_post_div;
- return rate;
+ req->rate /= nkmp->fixed_post_div;
+ return 0;
}
_nkmp.min_n = nkmp->n.min ?: 1;
@@ -152,12 +152,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
- rate = ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
+ req->rate = ccu_nkmp_find_best(req->best_parent_rate, req->rate,
+ &_nkmp);
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nkmp->fixed_post_div;
+ req->rate = req->rate / nkmp->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -227,7 +228,7 @@ const struct clk_ops ccu_nkmp_ops = {
.is_enabled = ccu_nkmp_is_enabled,
.recalc_rate = ccu_nkmp_recalc_rate,
- .round_rate = ccu_nkmp_round_rate,
+ .determine_rate = ccu_nkmp_determine_rate,
.set_rate = ccu_nkmp_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index ffac3deb89d6..df01ed3b37a6 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -116,39 +116,39 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
struct _ccu_nm _nm;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nm->fixed_post_div;
+ req->rate *= nm->fixed_post_div;
- if (rate < nm->min_rate) {
- rate = nm->min_rate;
+ if (req->rate < nm->min_rate) {
+ req->rate = nm->min_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (nm->max_rate && rate > nm->max_rate) {
- rate = nm->max_rate;
+ if (nm->max_rate && req->rate > nm->max_rate) {
+ req->rate = nm->max_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
_nm.min_n = nm->n.min ?: 1;
@@ -156,12 +156,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rate = ccu_nm_find_best(&nm->common, *parent_rate, rate, &_nm);
+ req->rate = ccu_nm_find_best(&nm->common, req->best_parent_rate,
+ req->rate, &_nm);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
+ req->rate /= nm->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -233,7 +234,7 @@ const struct clk_ops ccu_nm_ops = {
.is_enabled = ccu_nm_is_enabled,
.recalc_rate = ccu_nm_recalc_rate,
- .round_rate = ccu_nm_round_rate,
+ .determine_rate = ccu_nm_determine_rate,
.set_rate = ccu_nm_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index e4cae2afe9db..ca43cf448666 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -121,4 +121,4 @@ const struct clk_ops ccu_phase_ops = {
.get_phase = ccu_phase_get_phase,
.set_phase = ccu_phase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 6577aa18cb01..55bc7c7cda0f 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -75,4 +75,4 @@ const struct reset_control_ops ccu_reset_ops = {
.reset = ccu_reset_reset,
.status = ccu_reset_status,
};
-EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h
index e9b973cae4af..941276a8ec2e 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.h
+++ b/drivers/clk/sunxi-ng/ccu_reset.h
@@ -17,7 +17,7 @@ struct ccu_reset_map {
struct ccu_reset {
void __iomem *base;
- struct ccu_reset_map *reset_map;
+ const struct ccu_reset_map *reset_map;
spinlock_t *lock;
struct reset_controller_dev rcdev;
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 41937ed0766d..c564e5f9e610 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -20,7 +20,7 @@ bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, "SUNXI_CCU");
void ccu_sdm_helper_enable(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -50,7 +50,7 @@ void ccu_sdm_helper_enable(struct ccu_common *common,
writel(reg | sdm->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, "SUNXI_CCU");
void ccu_sdm_helper_disable(struct ccu_common *common,
struct ccu_sdm_internal *sdm)
@@ -71,7 +71,7 @@ void ccu_sdm_helper_disable(struct ccu_common *common,
writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, "SUNXI_CCU");
/*
* Sigma delta modulation provides a way to do fractional-N frequency
@@ -105,7 +105,7 @@ bool ccu_sdm_helper_has_rate(struct ccu_common *common,
return false;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, "SUNXI_CCU");
unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -136,7 +136,7 @@ unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
/* We can't calculate the effective clock rate, so just fail. */
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, "SUNXI_CCU");
int ccu_sdm_helper_get_factors(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -158,4 +158,4 @@ int ccu_sdm_helper_get_factors(struct ccu_common *common,
/* nothing found */
return -EINVAL;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 1c4e543366dd..5e2f92bfe412 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -2,13 +2,13 @@
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
- default y
+ default (ARM && ARCH_SUNXI)
if CLK_SUNXI
config CLK_SUNXI_CLOCKS
bool "Legacy clock drivers"
- default y
+ default ARCH_SUNXI
help
Legacy clock drivers being used on older (A10, A13, A20,
A23, A31, A80) SoCs. These drivers are kept around for
@@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A31 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the sun8i family PRCM clocks.
Those are usually needed for the PMIC communication,
@@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I
config CLK_SUNXI_PRCM_SUN9I
bool "Legacy A80 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A80 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 90df619dc087..62147a069606 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -4,7 +4,7 @@ config CLK_TEGRA_BPMP
depends on TEGRA_BPMP
config TEGRA_CLK_DFLL
- depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_114_SOC || ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 2c4bb96eae16..468a4403f147 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -17,15 +17,15 @@ static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
return sync->rate;
}
-static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sync_source_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
- if (rate > sync->max_rate)
+ if (req->rate > sync->max_rate)
return -EINVAL;
else
- return rate;
+ return 0;
}
static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -38,7 +38,7 @@ static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops tegra_clk_sync_source_ops = {
- .round_rate = clk_sync_source_round_rate,
+ .determine_rate = clk_sync_source_determine_rate,
.set_rate = clk_sync_source_set_rate,
.recalc_rate = clk_sync_source_recalc_rate,
};
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 7bfba0afd778..77a2586dbe00 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -174,7 +174,7 @@ static int tegra_bpmp_clk_determine_rate(struct clk_hw *hw,
unsigned long rate;
int err;
- rate = min(max(rate_req->rate, rate_req->min_rate), rate_req->max_rate);
+ rate = clamp(rate_req->rate, rate_req->min_rate, rate_req->max_rate);
memset(&request, 0, sizeof(request));
request.rate = min_t(u64, rate, S64_MAX);
@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
bpmp->num_clocks = count;
- bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
+ bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
if (!bpmp->clocks)
return -ENOMEM;
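Two small cleanups in clk-bpmp.c above: clamp() expresses min(max(x, lo), hi) in a single call, and sizing the allocation from the pointer keeps the element type in one place if it ever changes. For example:

    /* clamp(5, 1, 3) == 3; clamp(0, 1, 3) == 1; clamp(2, 1, 3) == 2 */
    rate = clamp(rate_req->rate, rate_req->min_rate, rate_req->max_rate);

    /* sizeof(*bpmp->clocks) tracks the pointee type automatically. */
    bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks),
                                GFP_KERNEL);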
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 58fa5a59e0c7..22dc29432eff 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -882,7 +882,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td,
{
u32 val = 0;
int force_val;
- int coef = 128; /* FIXME: td->cg_scale? */;
+ int coef = 128; /* FIXME: td->cg_scale? */
force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 38daf483ddf1..37439fcb3ac0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -58,23 +58,31 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div, mul;
- unsigned long output_rate = *prate;
+ unsigned long output_rate = req->best_parent_rate;
- if (!rate)
- return output_rate;
+ if (!req->rate) {
+ req->rate = output_rate;
- div = get_div(divider, rate, output_rate);
- if (div < 0)
- return *prate;
+ return 0;
+ }
+
+ div = get_div(divider, req->rate, output_rate);
+ if (div < 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
mul = get_mul(divider);
- return DIV_ROUND_UP(output_rate * mul, div + mul);
+ req->rate = DIV_ROUND_UP(output_rate * mul, div + mul);
+
+ return 0;
}
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +135,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
- .round_rate = clk_frac_div_round_rate,
+ .determine_rate = clk_frac_div_determine_rate,
.restore_context = clk_divider_restore_context,
};
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 0626650a7011..6ebeaa7cb656 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -51,16 +51,10 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *div_ops = periph->div_ops;
struct clk_hw *div_hw = &periph->divider.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = div_ops->round_rate(div_hw, req->rate, &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return div_ops->determine_rate(div_hw, req);
}
static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -132,7 +126,7 @@ static void clk_periph_restore_context(struct clk_hw *hw)
clk_periph_set_parent(hw, parent_id);
}
-const struct clk_ops tegra_clk_periph_ops = {
+static const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.recalc_rate = clk_periph_recalc_rate,
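With the frac divider converted to determine_rate, the periph wrapper above (and clk-super.c below) no longer has to translate between calling conventions and can pass the clk_rate_request straight through to the child ops. This also removes a latent bug in the deleted code: rate was an unsigned long, so the `rate < 0` error check could never trigger. The whole wrapper shrinks to:

    __clk_hw_set_clk(div_hw, hw);   /* point the sub-clock at our regs */
    return div_ops->determine_rate(div_hw, req);    /* fills req->rate */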
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 100b5d9b7e26..591b9f0c155a 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -840,8 +840,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
@@ -849,15 +849,20 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (pll->params->flags & TEGRA_PLL_FIXED) {
/* PLLM/MB are used for memory; we do not change rate */
if (pll->params->flags & (TEGRA_PLLM | TEGRA_PLLMB))
- return clk_hw_get_rate(hw);
- return pll->params->fixed_rate;
+ req->rate = clk_hw_get_rate(hw);
+ else
+ req->rate = pll->params->fixed_rate;
+
+ return 0;
}
- if (_get_table_rate(hw, &cfg, rate, *prate) &&
- pll->params->calc_rate(hw, &cfg, rate, *prate))
+ if (_get_table_rate(hw, &cfg, req->rate, req->best_parent_rate) &&
+ pll->params->calc_rate(hw, &cfg, req->rate, req->best_parent_rate))
return -EINVAL;
- return cfg.output_rate;
+ req->rate = cfg.output_rate;
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
@@ -1057,7 +1062,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
@@ -1195,7 +1200,7 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -1353,15 +1358,15 @@ static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_ramp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
int ret, p_div;
- u64 output_rate = *prate;
+ u64 output_rate = req->best_parent_rate;
- ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
+ ret = _pll_ramp_calc_pll(hw, &cfg, req->rate, req->best_parent_rate);
if (ret < 0)
return ret;
@@ -1375,7 +1380,9 @@ static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
output_rate *= cfg.n;
do_div(output_rate, cfg.m * p_div);
- return output_rate;
+ req->rate = output_rate;
+
+ return 0;
}
static void _pllcx_strobe(struct tegra_clk_pll *pll)
@@ -1598,12 +1605,15 @@ static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllre_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- return _pllre_calc_rate(pll, NULL, rate, *prate);
+ req->rate = _pllre_calc_rate(pll, NULL, req->rate,
+ req->best_parent_rate);
+
+ return 0;
}
static int clk_plle_tegra114_enable(struct clk_hw *hw)
@@ -2003,7 +2013,7 @@ static const struct clk_ops tegra_clk_pllxc_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
};
@@ -2012,7 +2022,7 @@ static const struct clk_ops tegra_clk_pllc_ops = {
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllc_set_rate,
};
@@ -2021,7 +2031,7 @@ static const struct clk_ops tegra_clk_pllre_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pllre_recalc_rate,
- .round_rate = clk_pllre_round_rate,
+ .determine_rate = clk_pllre_determine_rate,
.set_rate = clk_pllre_set_rate,
};
@@ -2321,7 +2331,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 7ec47942720c..51fb356e770e 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -147,17 +147,10 @@ static int clk_super_determine_rate(struct clk_hw *hw,
{
struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
struct clk_hw *div_hw = &super->frac_div.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = super->div_ops->round_rate(div_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return super->div_ops->determine_rate(div_hw, req);
}
static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 73303458e886..6c8e053311c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra114-car.h>
+#include <dt-bindings/reset/nvidia,tegra114-car.h>
#include "clk.h"
#include "clk-id.h"
@@ -1272,7 +1273,7 @@ EXPORT_SYMBOL(tegra114_clock_tune_cpu_trimmers_init);
*
* Assert the reset line of the DFLL's DVCO. No return value.
*/
-void tegra114_clock_assert_dfll_dvco_reset(void)
+static void tegra114_clock_assert_dfll_dvco_reset(void)
{
u32 v;
@@ -1281,7 +1282,6 @@ void tegra114_clock_assert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
/**
* tegra114_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
* Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
* operate. No return value.
*/
-void tegra114_clock_deassert_dfll_dvco_reset(void)
+static void tegra114_clock_deassert_dfll_dvco_reset(void)
{
u32 v;
@@ -1298,7 +1298,26 @@ void tegra114_clock_deassert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
+
+static int tegra114_reset_assert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_assert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra114_reset_deassert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_deassert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
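
For context, a hypothetical consumer sketch of the newly wired special reset (the "dvco" lookup name and dev are assumptions, not from this patch):

	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, "dvco");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* lands in tegra114_reset_assert() */
	reset_control_deassert(rst);	/* ... and tegra114_reset_deassert() */

Any id other than TEGRA114_RST_DFLL_DVCO is rejected with -EINVAL, since this CAR exposes exactly one special reset (hence the count of 1 passed to tegra_init_special_resets() below).
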
static void __init tegra114_clock_init(struct device_node *np)
{
@@ -1344,6 +1363,9 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
+ tegra_init_special_resets(1, tegra114_reset_assert,
+ tegra114_reset_deassert);
+
tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 0251618b82c8..457a77c5bb62 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -29,6 +29,99 @@ struct dfll_fcpu_data {
};
/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long tegra114_cpu_max_freq_table[] = {
+ [0] = 2040000000UL,
+ [1] = 1810500000UL,
+ [2] = 1912500000UL,
+ [3] = 1810500000UL,
+};
+
+#define T114_CPU_CVB_TABLE \
+ .min_millivolts = 1000, \
+ .max_millivolts = 1320, \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 306000000UL, { 2190643, -141851, 3576 } }, \
+ { 408000000UL, { 2250968, -144331, 3576 } }, \
+ { 510000000UL, { 2313333, -146811, 3576 } }, \
+ { 612000000UL, { 2377738, -149291, 3576 } }, \
+ { 714000000UL, { 2444183, -151771, 3576 } }, \
+ { 816000000UL, { 2512669, -154251, 3576 } }, \
+ { 918000000UL, { 2583194, -156731, 3576 } }, \
+ { 1020000000UL, { 2655759, -159211, 3576 } }, \
+ { 1122000000UL, { 2730365, -161691, 3576 } }, \
+ { 1224000000UL, { 2807010, -164171, 3576 } }, \
+ { 1326000000UL, { 2885696, -166651, 3576 } }, \
+ { 1428000000UL, { 2966422, -169131, 3576 } }, \
+ { 1530000000UL, { 3049183, -171601, 3576 } }, \
+ { 1606500000UL, { 3112179, -173451, 3576 } }, \
+ { 1708500000UL, { 3198504, -175931, 3576 } }, \
+ { 1810500000UL, { 3304747, -179126, 3576 } }, \
+ { 1912500000UL, { 3395401, -181606, 3576 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }, \
+ .cpu_dfll_data = { \
+ .tune0_low = 0x00b0039d, \
+ .tune0_high = 0x00b0009d, \
+ .tune1 = 0x0000001f, \
+ .tune_high_min_millivolts = 1050, \
+ }
+
+static const struct cvb_table tegra114_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 0,
+ .process_id = -1,
+ .min_millivolts = 1000,
+ .max_millivolts = 1250,
+ .speedo_scale = 100,
+ .voltage_scale = 100,
+ .entries = {
+ { 306000000UL, { 107330, -1569, 0 } },
+ { 408000000UL, { 111250, -1666, 0 } },
+ { 510000000UL, { 110000, -1460, 0 } },
+ { 612000000UL, { 117290, -1745, 0 } },
+ { 714000000UL, { 122700, -1910, 0 } },
+ { 816000000UL, { 125620, -1945, 0 } },
+ { 918000000UL, { 130560, -2076, 0 } },
+ { 1020000000UL, { 137280, -2303, 0 } },
+ { 1122000000UL, { 146440, -2660, 0 } },
+ { 1224000000UL, { 152190, -2825, 0 } },
+ { 1326000000UL, { 157520, -2953, 0 } },
+ { 1428000000UL, { 166100, -3261, 0 } },
+ { 1530000000UL, { 176410, -3647, 0 } },
+ { 1632000000UL, { 189620, -4186, 0 } },
+ { 1734000000UL, { 203190, -4725, 0 } },
+ { 1836000000UL, { 222670, -5573, 0 } },
+ { 1938000000UL, { 256210, -7165, 0 } },
+ { 2040000000UL, { 250050, -6544, 0 } },
+ { 0UL, { 0, 0, 0 } },
+ },
+ .cpu_dfll_data = {
+ .tune0_low = 0x00b0019d,
+ .tune0_high = 0x00b0019d,
+ .tune1 = 0x0000001f,
+ .tune_high_min_millivolts = 1000,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 2,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 3,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+};
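
A CVB entry maps the chip's speedo value to a minimum voltage via the quadratic coefficients {c0, c1, c2}; a simplified sketch of that evaluation (modeled on drivers/clk/tegra/cvb.c, with the alignment and rounding details omitted):

static int cvb_millivolts(int speedo, int s_scale, int v_scale,
			  int c0, int c1, int c2)
{
	int mv;

	/* mv = c0 + c1 * (speedo / s_scale) + c2 * (speedo / s_scale)^2,
	 * evaluated in fixed point via Horner's rule */
	mv = DIV_ROUND_CLOSEST(c2 * speedo, s_scale);
	mv = DIV_ROUND_CLOSEST((mv + c1) * speedo, s_scale) + c0;

	/* table coefficients carry an extra voltage_scale factor */
	return DIV_ROUND_CLOSEST(mv, v_scale);
}

The result is then expected to be clamped to the table's min_millivolts/max_millivolts range before being handed to the regulator.
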
+
+/* Maximum CPU frequency, indexed by CPU speedo id */
static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
@@ -93,7 +186,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
[10] = 1504500000UL,
};
-#define CPU_CVB_TABLE \
+#define TEGRA210_CPU_CVB_TABLE \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -120,7 +213,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_XA \
+#define TEGRA210_CPU_CVB_TABLE_XA \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -143,7 +236,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM1 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM1 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -166,7 +259,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -188,7 +281,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -209,7 +302,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_ODN \
+#define TEGRA210_CPU_CVB_TABLE_ODN \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -238,7 +331,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -251,7 +344,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -264,7 +357,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -276,7 +369,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -288,7 +381,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -300,7 +393,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -312,7 +405,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -325,7 +418,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -338,7 +431,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -349,7 +442,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -360,7 +453,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -373,7 +466,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -386,7 +479,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 918,
.max_millivolts = 1113,
- CPU_CVB_TABLE_XA,
+ TEGRA210_CPU_CVB_TABLE_XA,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x17711BD,
@@ -397,7 +490,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -410,7 +503,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -423,7 +516,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -434,7 +527,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -445,7 +538,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -458,7 +551,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -471,7 +564,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -484,7 +577,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -494,6 +587,13 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
},
};
+static const struct dfll_fcpu_data tegra114_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra114_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra114_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra114_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra114_cpu_cvb_tables)
+};
+
static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
.cpu_max_freq_table = tegra124_cpu_max_freq_table,
.cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
@@ -510,6 +610,10 @@ static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{
+ .compatible = "nvidia,tegra114-dfll",
+ .data = &tegra114_dfll_fcpu_data,
+ },
+ {
.compatible = "nvidia,tegra124-dfll",
.data = &tegra124_dfll_fcpu_data,
},
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
index 672ca8c184d2..fbf3c894eb56 100644
--- a/drivers/clk/tegra/clk-tegra210-emc.c
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -86,22 +86,30 @@ static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP(parent_rate * 2, div);
}
-static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int tegra210_clk_emc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
struct tegra210_clk_emc_provider *provider = emc->provider;
unsigned int i;
- if (!provider || !provider->configs || provider->num_configs == 0)
- return clk_hw_get_rate(hw);
+ if (!provider || !provider->configs || provider->num_configs == 0) {
+ req->rate = clk_hw_get_rate(hw);
+
+ return 0;
+ }
for (i = 0; i < provider->num_configs; i++) {
- if (provider->configs[i].rate >= rate)
- return provider->configs[i].rate;
+ if (provider->configs[i].rate >= req->rate) {
+ req->rate = provider->configs[i].rate;
+
+ return 0;
+ }
}
- return provider->configs[i - 1].rate;
+ req->rate = provider->configs[i - 1].rate;
+
+ return 0;
}
static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
@@ -259,7 +267,7 @@ static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops tegra210_clk_emc_ops = {
.get_parent = tegra210_clk_emc_get_parent,
.recalc_rate = tegra210_clk_emc_recalc_rate,
- .round_rate = tegra210_clk_emc_round_rate,
+ .determine_rate = tegra210_clk_emc_determine_rate,
.set_rate = tegra210_clk_emc_set_rate,
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index a3488aaac3f7..412902f573b5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -255,7 +255,7 @@
/* VIC register to handle during MBIST WAR */
#define NV_PVIC_THI_SLCG_OVERRIDE_LOW 0x8c
-/* APE, DISPA and VIC base addesses needed for MBIST WAR */
+/* APE, DISPA and VIC base addresses needed for MBIST WAR */
#define TEGRA210_AHUB_BASE 0x702d0000
#define TEGRA210_DISPA_BASE 0x54200000
#define TEGRA210_VIC_BASE 0x54340000
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 82a8cb9545eb..e7ebb63970d3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -53,6 +53,7 @@
#define SYSTEM_CLK_RATE 0x030
#define TEGRA30_CLK_PERIPH_BANKS 5
+#define TEGRA30_CLK_CLK_MAX 311
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 5d80d8b79b8e..73efd2ff37c9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -629,7 +629,6 @@ struct tegra_clk_periph {
#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
-extern const struct clk_ops tegra_clk_periph_ops;
struct clk *tegra_clk_register_periph(const char *name,
const char * const *parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
@@ -898,8 +897,6 @@ static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
-void tegra114_clock_assert_dfll_dvco_reset(void);
-void tegra114_clock_deassert_dfll_dvco_reset(void);
typedef void (*tegra_clk_apply_init_table_func)(void);
extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 17e32ae08720..71ad03a998e8 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -18,6 +18,7 @@
#define TH1520_PLL_FBDIV GENMASK(19, 8)
#define TH1520_PLL_REFDIV GENMASK(5, 0)
#define TH1520_PLL_BYPASS BIT(30)
+#define TH1520_PLL_VCO_RST BIT(29)
#define TH1520_PLL_DSMPD BIT(24)
#define TH1520_PLL_FRAC GENMASK(23, 0)
#define TH1520_PLL_FRAC_BITS 24
@@ -42,17 +43,20 @@ struct ccu_common {
};
struct ccu_mux {
- struct ccu_internal mux;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_mux mux;
};
struct ccu_gate {
- u32 enable;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_gate gate;
};
struct ccu_div {
u32 enable;
+ u32 div_en;
struct ccu_div_internal div;
struct ccu_internal mux;
struct ccu_common common;
@@ -75,12 +79,23 @@ struct ccu_pll {
.flags = _flags, \
}
-#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _gate, _flags) \
+#define TH_CCU_MUX(_name, _parents, _shift, _width) \
+ { \
+ .mask = GENMASK(_width - 1, 0), \
+ .shift = _shift, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA( \
+ _name, \
+ _parents, \
+ &clk_mux_ops, \
+ 0), \
+ }
+
+#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _bit, _flags) \
struct ccu_gate _struct = { \
- .enable = _gate, \
- .common = { \
- .clkid = _clkid, \
- .cfg0 = _reg, \
+ .clkid = _clkid, \
+ .reg = _reg, \
+ .gate = { \
+ .bit_idx = _bit, \
.hw.init = CLK_HW_INIT_PARENTS_DATA( \
_name, \
_parent, \
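
Illustrative expansion of the reworked macro (CLK_FOO/foo_clk are hypothetical names): CCU_GATE(CLK_FOO, foo_clk, "foo", bar_pd, 0x204, 7, 0) now produces a plain struct clk_gate with .bit_idx = 7 wrapped in struct ccu_gate, instead of the old .enable = BIT(7) mask inside ccu_common. That lets the generic clk_gate_ops drive the gate directly once probe patches cg->gate.reg to base + cg->reg.
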
@@ -94,13 +109,6 @@ static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
return container_of(hw, struct ccu_common, hw);
}
-static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_mux, common);
-}
-
static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
@@ -115,13 +123,6 @@ static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
return container_of(common, struct ccu_div, common);
}
-static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_gate, common);
-}
-
static u8 ccu_get_parent_helper(struct ccu_common *common,
struct ccu_internal *mux)
{
@@ -192,6 +193,55 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int ccu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned int val;
+
+ if (cd->div_en)
+ return divider_determine_rate(hw, req, NULL,
+ cd->div.width, cd->div.flags);
+
+ regmap_read(cd->common.map, cd->common.cfg0, &val);
+ val = val >> cd->div.shift;
+ val &= GENMASK(cd->div.width - 1, 0);
+ return divider_ro_determine_rate(hw, req, NULL, cd->div.width,
+ cd->div.flags, val);
+}
+
+static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ int val = divider_get_val(rate, parent_rate, NULL,
+ cd->div.width, cd->div.flags);
+ unsigned int curr_val, reg_val;
+
+ if (val < 0)
+ return val;
+
+ regmap_read(cd->common.map, cd->common.cfg0, &reg_val);
+ curr_val = reg_val >> cd->div.shift;
+ curr_val &= GENMASK(cd->div.width - 1, 0);
+
+ if (!cd->div_en && curr_val != val)
+ return -EINVAL;
+
+ reg_val &= ~cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+ udelay(1);
+
+ reg_val &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ reg_val |= val << cd->div.shift;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ reg_val |= cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ return 0;
+}
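
The write sequence above is deliberate; sketched for a hypothetical divider with div_en = BIT(8), shift = 0, width = 8:

	/* 1. clear BIT(8): gate the divider output                    */
	/* 2. write the new divider value while gated (the udelay      */
	/*    gives the gated state time to settle)                    */
	/* 3. set BIT(8): ungate with the new ratio in place           */

Dividers without a div_en bit are treated as read-only: set_rate only succeeds if the requested value matches what is already programmed, mirroring the divider_ro_determine_rate() path above.
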
+
static u8 ccu_div_get_parent(struct clk_hw *hw)
{
struct ccu_div *cd = hw_to_ccu_div(hw);
@@ -234,9 +284,34 @@ static const struct clk_ops ccu_div_ops = {
.get_parent = ccu_div_get_parent,
.set_parent = ccu_div_set_parent,
.recalc_rate = ccu_div_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .set_rate = ccu_div_set_rate,
+ .determine_rate = ccu_div_determine_rate,
};
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ regmap_set_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return regmap_clear_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return !regmap_test_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
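
A note on the semantics: the VCO reset bit doubles as the PLL gate, so enable clears TH1520_PLL_VCO_RST to release the VCO, disable sets it, and is_enabled reports the inverse of the bit: regmap_test_bits() returns 1 only when the bit is set, and the '!' maps that onto the enabled state.
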
+
static unsigned long th1520_pll_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -294,6 +369,9 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops clk_pll_ops = {
+ .disable = ccu_pll_disable,
+ .enable = ccu_pll_enable,
+ .is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
};
@@ -309,7 +387,7 @@ static struct ccu_pll cpu_pll0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll0",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -321,7 +399,7 @@ static struct ccu_pll cpu_pll1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll1",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -333,7 +411,7 @@ static struct ccu_pll gmac_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("gmac-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -353,7 +431,7 @@ static struct ccu_pll video_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("video-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -405,7 +483,7 @@ static struct ccu_pll tee_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("tee-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -415,32 +493,20 @@ static const struct clk_parent_data c910_i0_parents[] = {
};
static struct ccu_mux c910_i0_clk = {
- .mux = TH_CCU_ARG(1, 1),
- .common = {
- .clkid = CLK_C910_I0,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910-i0",
- c910_i0_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910_I0,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910-i0", c910_i0_parents, 1, 1),
};
static const struct clk_parent_data c910_parents[] = {
- { .hw = &c910_i0_clk.common.hw },
+ { .hw = &c910_i0_clk.mux.hw },
{ .hw = &cpu_pll1_clk.common.hw }
};
static struct ccu_mux c910_clk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_C910,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910",
- c910_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910", c910_parents, 0, 1),
};
static const struct clk_parent_data ahb2_cpusys_parents[] = {
@@ -493,7 +559,7 @@ static struct ccu_div axi4_cpusys2_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("axi4-cpusys2-aclk",
gmac_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -515,7 +581,7 @@ static struct ccu_div axi_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("axi-aclk",
axi_parents,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -582,7 +648,14 @@ static const struct clk_parent_data peri2sys_apb_pclk_pd[] = {
{ .hw = &peri2sys_apb_pclk.common.hw }
};
-static CLK_FIXED_FACTOR_FW_NAME(osc12m_clk, "osc_12m", "osc_24m", 2, 1, 0);
+static struct clk_fixed_factor osc12m_clk = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("osc_12m",
+ osc_24m_clk,
+ &clk_fixed_factor_ops,
+ 0),
+};
static const char * const out_parents[] = { "osc_24m", "osc_12m" };
@@ -657,7 +730,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -688,7 +761,7 @@ static struct ccu_div vi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -713,7 +786,7 @@ static struct ccu_div vo_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vo-axi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -738,7 +811,7 @@ static struct ccu_div vp_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
video_pll_clk_parent,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -756,6 +829,7 @@ static struct ccu_div venc_clk = {
};
static struct ccu_div dpu0_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU0,
@@ -763,11 +837,16 @@ static struct ccu_div dpu0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu0",
dpu0_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu0_clk_pd[] = {
+ { .hw = &dpu0_clk.common.hw }
+};
+
static struct ccu_div dpu1_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU1,
@@ -775,70 +854,147 @@ static struct ccu_div dpu1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu1",
dpu1_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
-static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
-static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+static const struct clk_parent_data dpu1_clk_pd[] = {
+ { .hw = &dpu1_clk.common.hw }
+};
+
+static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
+ &video_pll_clk.common.hw, 4, 1, 0);
+
+static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
+ { .hw = &emmc_sdio_ref_clk.hw },
+};
+
+static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, 4, 0);
+static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, 5, 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
- 0x134, BIT(8), 0);
+ 0x134, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
- 0x134, BIT(7), 0);
-static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
+ 0x134, 7, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
+ 0x138, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), 0);
+ 0x140, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(9), 0);
+ 0x150, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), 0);
+ 0x150, 10, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), 0);
+ 0x150, 11, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(12), 0);
-static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
-static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
-static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
-static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
-static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
-static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb_pclk_pd, 0x204, BIT(22), 0);
-static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, BIT(21), 0);
-static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, BIT(20), 0);
-static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, BIT(19), 0);
-static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, BIT(18), 0);
-static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, BIT(17), 0);
-static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, BIT(16), 0);
-static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, BIT(15), 0);
-static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, BIT(14), 0);
-static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, BIT(13), 0);
-static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, BIT(12), 0);
-static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, BIT(11), 0);
-static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, BIT(10), 0);
-static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, BIT(9), 0);
-static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, BIT(8), 0);
-static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, BIT(7), 0);
-static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, BIT(6), 0);
-static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, BIT(5), 0);
-static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, BIT(4), 0);
-static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, BIT(3), 0);
-static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, BIT(2), 0);
-static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, BIT(1), 0);
-static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, BIT(0), 0);
-static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, BIT(10), 0);
-static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, BIT(8), 0);
-static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, BIT(7), 0);
-static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, BIT(6), 0);
-static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, BIT(5), 0);
-static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, BIT(4), 0);
-static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, BIT(3), 0);
-static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, BIT(2), 0);
-static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, BIT(1), 0);
-static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, BIT(0), 0);
-static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, BIT(4), 0);
-static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
-static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
-static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+ 0x150, 12, 0);
+static const struct clk_parent_data perisys_apb4_hclk_pd[] = {
+ { .hw = &perisys_apb4_hclk.gate.hw },
+};
+
+static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, 5, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, 13, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, 30, 0);
+static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, 26, 0);
+static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, 24, 0);
+static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, 23, 0);
+static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb4_hclk_pd, 0x204, 22, 0);
+static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, 21, 0);
+static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, 20, 0);
+static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, 19, 0);
+static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, 18, 0);
+static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, 17, 0);
+static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, 16, 0);
+static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, 15, 0);
+static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, 14, 0);
+static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, 13, 0);
+static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, 12, 0);
+static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, 11, 0);
+static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, 10, 0);
+static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, 9, 0);
+static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, 8, 0);
+static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, 7, 0);
+static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, 6, 0);
+static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, 5, 0);
+static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, 4, 0);
+static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, 3, 0);
+static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, 2, 0);
+static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, 1, 0);
+static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, 0, 0);
+static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, 10, 0);
+static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, 8, 0);
+static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, 7, 0);
+static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, 6, 0);
+static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, 5, 0);
+static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, 4, 0);
+static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, 3, 0);
+static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, 2, 0);
+static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, 1, 0);
+static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, 0, 0);
+static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, 4, 0);
+static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, 3, 0);
+static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, 2, 0);
+static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, 1, 0);
+
+static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
+ video_pll_clk_pd, 0x0, 0, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
+ 0x0, 3, 0);
+static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
+ video_pll_clk_pd, 0x0, 4, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
+ dpu0_clk_pd, 0x0, 5, CLK_SET_RATE_PARENT);
+static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
+ dpu1_clk_pd, 0x0, 6, CLK_SET_RATE_PARENT);
+static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
+ 7, 0);
+static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
+ 8, 0);
+static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
+ 9, 0);
+static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
+ 0x0, 10, 0);
+static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
+ 11, 0);
+static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
+ 0x0, 12, 0);
+static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
+ video_pll_clk_pd, 0x0, 13, 0);
+static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
+ video_pll_clk_pd, 0x0, 14, 0);
+static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
+ video_pll_clk_pd, 0x0, 15, 0);
+static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
+ video_pll_clk_pd, 0x0, 16, 0);
+static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
+ video_pll_clk_pd, 0x0, 17, 0);
+static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
+ video_pll_clk_pd, 0x0, 18, 0);
+static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
+ 0x0, 19, 0);
+static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
+ video_pll_clk_pd, 0x0, 20, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
+ video_pll_clk_pd, 0x0, 21, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
+ video_pll_clk_pd, 0x0, 22, 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, 23, 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, 24, 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, 25, 0);
+static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
+ video_pll_clk_pd, 0x0, 27, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
+ video_pll_clk_pd, 0x0, 28, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
+ video_pll_clk_pd, 0x0, 29, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
+ video_pll_clk_pd, 0x0, 30, 0);
+static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
+ video_pll_clk_pd, 0x0, 31, 0);
+static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
+ 0x4, 0, 0);
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -849,15 +1005,9 @@ static const struct clk_parent_data uart_sclk_parents[] = {
};
static struct ccu_mux uart_sclk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_UART_SCLK,
- .cfg0 = 0x210,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("uart-sclk",
- uart_sclk_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_UART_SCLK,
+ .reg = 0x210,
+ .mux = TH_CCU_MUX("uart-sclk", uart_sclk_parents, 0, 1),
};
static struct ccu_common *th1520_pll_clks[] = {
@@ -889,97 +1039,167 @@ static struct ccu_common *th1520_div_clks[] = {
&vo_axi_clk.common,
&vp_apb_clk.common,
&vp_axi_clk.common,
- &cpu2vp_clk.common,
&venc_clk.common,
&dpu0_clk.common,
&dpu1_clk.common,
};
-static struct ccu_common *th1520_mux_clks[] = {
- &c910_i0_clk.common,
- &c910_clk.common,
- &uart_sclk.common,
-};
-
-static struct ccu_common *th1520_gate_clks[] = {
- &emmc_sdio_clk.common,
- &aon2cpu_a2x_clk.common,
- &x2x_cpusys_clk.common,
- &brom_clk.common,
- &bmu_clk.common,
- &cpu2aon_x2h_clk.common,
- &cpu2peri_x2h_clk.common,
- &perisys_apb1_hclk.common,
- &perisys_apb2_hclk.common,
- &perisys_apb3_hclk.common,
- &perisys_apb4_hclk.common,
- &npu_axi_clk.common,
- &gmac1_clk.common,
- &padctrl1_clk.common,
- &dsmart_clk.common,
- &padctrl0_clk.common,
- &gmac_axi_clk.common,
- &gpio3_clk.common,
- &gmac0_clk.common,
- &pwm_clk.common,
- &qspi0_clk.common,
- &qspi1_clk.common,
- &spi_clk.common,
- &uart0_pclk.common,
- &uart1_pclk.common,
- &uart2_pclk.common,
- &uart3_pclk.common,
- &uart4_pclk.common,
- &uart5_pclk.common,
- &gpio0_clk.common,
- &gpio1_clk.common,
- &gpio2_clk.common,
- &i2c0_clk.common,
- &i2c1_clk.common,
- &i2c2_clk.common,
- &i2c3_clk.common,
- &i2c4_clk.common,
- &i2c5_clk.common,
- &spinlock_clk.common,
- &dma_clk.common,
- &mbox0_clk.common,
- &mbox1_clk.common,
- &mbox2_clk.common,
- &mbox3_clk.common,
- &wdt0_clk.common,
- &wdt1_clk.common,
- &timer0_clk.common,
- &timer1_clk.common,
- &sram0_clk.common,
- &sram1_clk.common,
- &sram2_clk.common,
- &sram3_clk.common,
-};
-
-#define NR_CLKS (CLK_UART_SCLK + 1)
+static struct ccu_mux *th1520_mux_clks[] = {
+ &c910_i0_clk,
+ &c910_clk,
+ &uart_sclk,
+};
+
+static struct ccu_gate *th1520_gate_clks[] = {
+ &emmc_sdio_clk,
+ &aon2cpu_a2x_clk,
+ &x2x_cpusys_clk,
+ &brom_clk,
+ &bmu_clk,
+ &cpu2aon_x2h_clk,
+ &cpu2peri_x2h_clk,
+ &cpu2vp_clk,
+ &perisys_apb1_hclk,
+ &perisys_apb2_hclk,
+ &perisys_apb3_hclk,
+ &perisys_apb4_hclk,
+ &npu_axi_clk,
+ &gmac1_clk,
+ &padctrl1_clk,
+ &dsmart_clk,
+ &padctrl0_clk,
+ &gmac_axi_clk,
+ &gpio3_clk,
+ &gmac0_clk,
+ &pwm_clk,
+ &qspi0_clk,
+ &qspi1_clk,
+ &spi_clk,
+ &uart0_pclk,
+ &uart1_pclk,
+ &uart2_pclk,
+ &uart3_pclk,
+ &uart4_pclk,
+ &uart5_pclk,
+ &gpio0_clk,
+ &gpio1_clk,
+ &gpio2_clk,
+ &i2c0_clk,
+ &i2c1_clk,
+ &i2c2_clk,
+ &i2c3_clk,
+ &i2c4_clk,
+ &i2c5_clk,
+ &spinlock_clk,
+ &dma_clk,
+ &mbox0_clk,
+ &mbox1_clk,
+ &mbox2_clk,
+ &mbox3_clk,
+ &wdt0_clk,
+ &wdt1_clk,
+ &timer0_clk,
+ &timer1_clk,
+ &sram0_clk,
+ &sram1_clk,
+ &sram2_clk,
+ &sram3_clk,
+};
+
+static struct ccu_gate *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk,
+ &gpu_core_clk,
+ &gpu_cfg_aclk,
+ &dpu0_pixelclk,
+ &dpu1_pixelclk,
+ &dpu_hclk,
+ &dpu_aclk,
+ &dpu_cclk,
+ &hdmi_sfr_clk,
+ &hdmi_pclk,
+ &hdmi_cec_clk,
+ &mipi_dsi0_pclk,
+ &mipi_dsi1_pclk,
+ &mipi_dsi0_cfg_clk,
+ &mipi_dsi1_cfg_clk,
+ &mipi_dsi0_refclk,
+ &mipi_dsi1_refclk,
+ &hdmi_i2s_clk,
+ &x2h_dpu1_aclk,
+ &x2h_dpu_aclk,
+ &axi4_vo_pclk,
+ &iopmp_vosys_dpu_pclk,
+ &iopmp_vosys_dpu1_pclk,
+ &iopmp_vosys_gpu_pclk,
+ &iopmp_dpu1_aclk,
+ &iopmp_dpu_aclk,
+ &iopmp_gpu_aclk,
+ &mipi_dsi0_pixclk,
+ &mipi_dsi1_pixclk,
+ &hdmi_pixclk
+};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
+};
+
+struct th1520_plat_data {
+ struct ccu_common **th1520_pll_clks;
+ struct ccu_common **th1520_div_clks;
+ struct ccu_mux **th1520_mux_clks;
+ struct ccu_gate **th1520_gate_clks;
+
+ int nr_clks;
+ int nr_pll_clks;
+ int nr_div_clks;
+ int nr_mux_clks;
+ int nr_gate_clks;
+};
+
+static const struct th1520_plat_data th1520_ap_platdata = {
+ .th1520_pll_clks = th1520_pll_clks,
+ .th1520_div_clks = th1520_div_clks,
+ .th1520_mux_clks = th1520_mux_clks,
+ .th1520_gate_clks = th1520_gate_clks,
+
+ .nr_clks = CLK_UART_SCLK + 1,
+
+ .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks),
+ .nr_div_clks = ARRAY_SIZE(th1520_div_clks),
+ .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks),
+ .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks),
+};
+
+static const struct th1520_plat_data th1520_vo_platdata = {
+ .th1520_gate_clks = th1520_vo_gate_clks,
+
+ .nr_clks = CLK_HDMI_PIXCLK + 1,
+
+ .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks),
};
static int th1520_clk_probe(struct platform_device *pdev)
{
+ const struct th1520_plat_data *plat_data;
struct device *dev = &pdev->dev;
struct clk_hw_onecell_data *priv;
struct regmap *map;
void __iomem *base;
- struct clk_hw *hw;
int ret, i;
- priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL);
+ plat_data = device_get_match_data(&pdev->dev);
+ if (!plat_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "No device match data found\n");
+
+ priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = NR_CLKS;
+ priv->num = plat_data->nr_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -989,75 +1209,69 @@ static int th1520_clk_probe(struct platform_device *pdev)
if (IS_ERR(map))
return PTR_ERR(map);
- for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) {
- struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_pll_clks; i++) {
+ struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw);
- th1520_pll_clks[i]->map = map;
+ plat_data->th1520_pll_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_pll_clks[i]->hw);
if (ret)
return ret;
priv->hws[cp->common.clkid] = &cp->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) {
- struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_div_clks; i++) {
+ struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw);
- th1520_div_clks[i]->map = map;
+ plat_data->th1520_div_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw);
if (ret)
return ret;
priv->hws[cd->common.clkid] = &cd->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw);
- const struct clk_init_data *init = cm->common.hw.init;
-
- th1520_mux_clks[i]->map = map;
- hw = devm_clk_hw_register_mux_parent_data_table(dev,
- init->name,
- init->parent_data,
- init->num_parents,
- 0,
- base + cm->common.cfg0,
- cm->mux.shift,
- cm->mux.width,
- 0, NULL, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- priv->hws[cm->common.clkid] = hw;
+ for (i = 0; i < plat_data->nr_mux_clks; i++) {
+ struct ccu_mux *cm = plat_data->th1520_mux_clks[i];
+
+ cm->mux.reg = base + cm->reg;
+
+ ret = devm_clk_hw_register(dev, &cm->mux.hw);
+ if (ret)
+ return ret;
+
+ priv->hws[cm->clkid] = &cm->mux.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_gate_clks; i++) {
+ struct ccu_gate *cg = plat_data->th1520_gate_clks[i];
- th1520_gate_clks[i]->map = map;
+ cg->gate.reg = base + cg->reg;
- hw = devm_clk_hw_register_gate_parent_data(dev,
- cg->common.hw.init->name,
- cg->common.hw.init->parent_data,
- 0, base + cg->common.cfg0,
- ffs(cg->enable) - 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ ret = devm_clk_hw_register(dev, &cg->gate.hw);
+ if (ret)
+ return ret;
- priv->hws[cg->common.clkid] = hw;
+ priv->hws[cg->clkid] = &cg->gate.hw;
}
- ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
- if (ret)
- return ret;
- priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
+ if (plat_data == &th1520_ap_platdata) {
+ ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
- ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
- if (ret)
- return ret;
- priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+ }
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
@@ -1069,6 +1283,11 @@ static int th1520_clk_probe(struct platform_device *pdev)
static const struct of_device_id th1520_clk_match[] = {
{
.compatible = "thead,th1520-clk-ap",
+ .data = &th1520_ap_platdata,
+ },
+ {
+ .compatible = "thead,th1520-clk-vo",
+ .data = &th1520_vo_platdata,
},
{ /* sentinel */ },
};
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 27e6b9cb1881..a99aaf2e7684 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -30,7 +30,7 @@ static LIST_HEAD(autoidle_clks);
/*
* we have some non-atomic read/write
- * operations behind it, so lets
+ * operations behind it, so let's
* take one lock for handling autoidle
* of all clocks
*/
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 85c50ea39e6d..9269e6a0db6a 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -258,6 +258,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* WKUP_DEBUGSS_CLKCTRL - disable fails, AM335x Errata Advisory 1.0.42 */
+ "l3-aon-clkctrl:0000:0",
/* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
"l3-clkctrl:00bc:0",
"l4hs_gclk",
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index f24f6eb2157a..35af3079c002 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -286,7 +286,7 @@ int __init am43xx_dt_clk_init(void)
/*
* cpsw_cpts_rft_clk has got the choice of 3 clocksources
* dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
- * By default dpll_core_m4_ck is selected, witn this as clock
+ * By default dpll_core_m4_ck is selected, with this as clock
 * source the CPTS does not work properly. It gives clockcheck errors
* while running PTP.
* clockcheck: clock jumped backward or running slower than expected!
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 0eab7f3e2eab..b02f84d49b96 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -120,16 +120,18 @@ static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
return parent_rate / cdesc->divider;
}
-static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int atl_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned divider;
- divider = (*parent_rate + rate / 2) / rate;
+ divider = (req->best_parent_rate + req->rate / 2) / req->rate;
if (divider > DRA7_ATL_DIVIDER_MASK + 1)
divider = DRA7_ATL_DIVIDER_MASK + 1;
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
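
Worked example (hypothetical rates): with req->best_parent_rate = 100 MHz and req->rate = 5632000 Hz,

	divider   = (100000000 + 5632000 / 2) / 5632000 = 18
	req->rate = 100000000 / 18 = 5555555 Hz

The "+ rate / 2" term makes the integer division round to the nearest divider rather than truncating, and the divider is clamped to DRA7_ATL_DIVIDER_MASK + 1 when the request is slower than the hardware can divide down to.
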
static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -156,7 +158,7 @@ static const struct clk_ops atl_clk_ops = {
.disable = atl_clk_disable,
.is_enabled = atl_clk_is_enabled,
.recalc_rate = atl_clk_recalc_rate,
- .round_rate = atl_clk_round_rate,
+ .determine_rate = atl_clk_determine_rate,
.set_rate = atl_clk_set_rate,
};
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f2117fef7c7d..693a4459a01b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -118,13 +118,10 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
* Eventually we could standardize to using '_' for clk-*.c files to follow the
* TRM naming.
*/
-static struct device_node *ti_find_clock_provider(struct device_node *from,
- const char *name)
+static struct device_node *ti_find_clock_provider(const char *name)
{
char *tmp __free(kfree) = NULL;
struct device_node *np;
- bool found = false;
- const char *n;
char *p;
tmp = kstrdup_and_replace(name, '-', '_', GFP_KERNEL);
@@ -137,25 +134,13 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
*p = '\0';
/* Node named "clock" with "clock-output-names" */
- for_each_of_allnodes_from(from, np) {
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &n))
- continue;
-
- if (!strncmp(n, tmp, strlen(tmp))) {
- of_node_get(np);
- found = true;
- break;
- }
- }
-
- if (found) {
- of_node_put(from);
- return np;
+ for_each_node_with_property(np, "clock-output-names") {
+ if (of_property_match_string(np, "clock-output-names", tmp) == 0)
+ return np;
}
/* Fall back to using old node name base provider name */
- return of_find_node_by_name(from, tmp);
+ return of_find_node_by_name(NULL, tmp);
}
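
Two behavioural points in the rewrite worth noting: of_property_match_string() does an exact match against any index of "clock-output-names", where the old loop prefix-matched only index 0; and for_each_node_with_property() already returns the matched node with a reference held, so the explicit of_node_get()/of_node_put() bookkeeping could be dropped.
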
/**
@@ -208,7 +193,7 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
if (num_args && clkctrl_nodes_missing)
continue;
- node = ti_find_clock_provider(NULL, buf);
+ node = ti_find_clock_provider(buf);
if (num_args && compat_mode) {
parent = node;
child = of_get_child_by_name(parent, "clock");
@@ -449,10 +434,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
- if (!io)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*io));
+ io = memblock_alloc_or_panic(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index dfaa4d1f0b64..2ecd66968af4 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -268,20 +268,18 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/* DPLL rate rounding code */
/**
- * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * omap2_dpll_determine_rate - round a target rate for an OMAP DPLL
* @hw: struct clk_hw containing the struct clk * for a DPLL
- * @target_rate: desired DPLL clock rate
- * @parent_rate: parent's DPLL clock rate
+ * @req: rate request
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
* minimum possible n. Stores the computed (m, n) in the DPLL's
* dpll_data structure so set_rate() will not need to call this
- * (expensive) function again. Returns ~0 if the target rate cannot
- * be rounded, or the rounded rate upon success.
+ * (expensive) function again. Returns -EINVAL if the target rate
+ * cannot be rounded, or 0 upon success, with the rounded rate
+ * stored in @req.
*/
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate)
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int m, n, r, scaled_max_m;
@@ -295,19 +293,19 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
const char *clk_name;
if (!clk || !clk->dpll_data)
- return ~0;
+ return -EINVAL;
dd = clk->dpll_data;
- if (dd->max_rate && target_rate > dd->max_rate)
- target_rate = dd->max_rate;
+ if (dd->max_rate && req->rate > dd->max_rate)
+ req->rate = dd->max_rate;
ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
- clk_name, target_rate);
+ clk_name, req->rate);
- scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_rt_rp = req->rate / (ref_rate / DPLL_SCALE_FACTOR);
scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
dd->last_rounded_rate = 0;
@@ -332,7 +330,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (m > scaled_max_m)
break;
- r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ r = _dpll_test_mult(&m, n, &new_rate, req->rate,
ref_rate);
/* m can't be set low enough for this n - try with a larger n */
@@ -340,7 +338,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
continue;
/* skip rates above our target rate */
- delta = target_rate - new_rate;
+ delta = req->rate - new_rate;
if (delta < 0)
continue;
@@ -359,13 +357,15 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (prev_min_delta == LONG_MAX) {
pr_debug("clock: %s: cannot round to rate %lu\n",
- clk_name, target_rate);
- return ~0;
+ clk_name, req->rate);
+ return -EINVAL;
}
dd->last_rounded_m = min_delta_m;
dd->last_rounded_n = min_delta_n;
- dd->last_rounded_rate = target_rate - prev_min_delta;
+ dd->last_rounded_rate = req->rate - prev_min_delta;
- return dd->last_rounded_rate;
+ req->rate = dd->last_rounded_rate;
+
+ return 0;
}
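
For reference, a minimal sketch of how a caller hands a request to the new callback (simplified, not the verbatim drivers/clk/clk.c path):

	struct clk_rate_request req = {
		.rate	  = 1400000000,
		.min_rate = 0,
		.max_rate = ULONG_MAX,
	};
	int ret = omap2_dpll_determine_rate(hw, &req);

	if (!ret)
		pr_info("rounded to %lu Hz\n", req.rate);

On success the rounded rate comes back in req.rate and the chosen (m, n) pair is cached in dpll_data for the subsequent set_rate() call.
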
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 2de7acea1ea0..d5e24fe4ae3a 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -273,8 +273,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
u8 index);
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
@@ -296,9 +295,6 @@ void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index b85382c370f7..8cba259188d4 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -26,8 +26,8 @@ static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_composite_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
return -EINVAL;
}
@@ -40,7 +40,7 @@ static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ti_composite_divider_ops = {
.recalc_rate = &ti_composite_recalc_rate,
- .round_rate = &ti_composite_round_rate,
+ .determine_rate = &ti_composite_determine_rate,
.set_rate = &ti_composite_set_rate,
};
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ade99ab6cfa9..6f58a0f2e74a 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -223,13 +223,15 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int div;
- div = ti_clk_divider_bestdiv(hw, rate, prate);
+ div = ti_clk_divider_bestdiv(hw, req->rate, &req->best_parent_rate);
- return DIV_ROUND_UP(*prate, div);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
+
+ return 0;
}
static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -299,7 +301,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops ti_clk_divider_ops = {
.recalc_rate = ti_clk_divider_recalc_rate,
- .round_rate = ti_clk_divider_round_rate,
+ .determine_rate = ti_clk_divider_determine_rate,
.set_rate = ti_clk_divider_set_rate,
.save_context = clk_divider_save_context,
.restore_context = clk_divider_restore_context,
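
Nothing changes for consumers: clk_round_rate() and clk_set_rate() keep their signatures, and the core services them through .determine_rate when the provider implements it. A hedged consumer-side sketch (the "pix" clock id and the function are hypothetical):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int foo_pick_pixel_clock(struct device *dev, unsigned long target)
    {
    	struct clk *clk = devm_clk_get(dev, "pix");	/* assumed clock id */
    	long rounded;

    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	/* Resolved internally via the provider's .determine_rate hook. */
    	rounded = clk_round_rate(clk, target);
    	if (rounded <= 0)
    		return -EINVAL;

    	return clk_set_rate(clk, rounded);
    }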
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 3386bd1903df..971adafd9a8b 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,7 +25,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap4_dpll_regm4xen_recalc,
- .round_rate = &omap4_dpll_regm4xen_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -48,7 +47,6 @@ static const struct clk_ops dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -61,7 +59,6 @@ static const struct clk_ops dpll_ck_ops = {
static const struct clk_ops dpll_no_gate_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.get_parent = &omap2_init_dpll_parent,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -80,7 +77,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
static const struct clk_ops omap2_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap2_dpllcore_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
.set_rate = &omap2_reprogram_dpllcore,
};
#else
@@ -91,7 +88,7 @@ static const struct clk_ops omap2_dpll_core_ck_ops = {};
static const struct clk_ops omap3_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
};
static const struct clk_ops omap3_dpll_ck_ops = {
@@ -103,7 +100,6 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll5_ck_ops = {
@@ -115,7 +111,6 @@ static const struct clk_ops omap3_dpll5_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll_per_ck_ops = {
@@ -127,7 +122,6 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 00680486b1bd..8c51b988a04f 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -587,6 +587,7 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct dpll_data *dd;
+ int ret;
if (!req->rate)
return -EINVAL;
@@ -599,8 +600,10 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap2_dpll_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ ret = omap2_dpll_determine_rate(hw, req);
+ if (ret != 0)
+ return ret;
+
req->best_parent_hw = dd->clk_ref;
}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 3fc2cab69a3f..08ed57f181b4 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -134,68 +134,13 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
}
/**
- * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for
- * @target_rate: the desired rate of the DPLL
- * @parent_rate: clock rate of the DPLL parent
- *
- * Compute the rate that would be programmed into the DPLL hardware
- * for @clk if set_rate() were to be provided with the rate
- * @target_rate. Takes the REGM4XEN bit into consideration, which is
- * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
- * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
- * ~0 if an error occurred in omap2_dpll_round_rate().
- */
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct dpll_data *dd;
- long r;
-
- if (!clk || !clk->dpll_data)
- return -EINVAL;
-
- dd = clk->dpll_data;
-
- dd->last_rounded_m4xen = 0;
-
- /*
- * First try to compute the DPLL configuration for
- * target rate without using the 4X multiplier.
- */
- r = omap2_dpll_round_rate(hw, target_rate, NULL);
- if (r != ~0)
- goto out;
-
- /*
- * If we did not find a valid DPLL configuration, try again, but
- * this time see if using the 4X multiplier can help. Enabling the
- * 4X multiplier is equivalent to dividing the target rate by 4.
- */
- r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
- NULL);
- if (r == ~0)
- return r;
-
- dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
- dd->last_rounded_m4xen = 1;
-
-out:
- omap4_dpll_lpmode_recalc(dd);
-
- return dd->last_rounded_rate;
-}
-
-/**
* omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
* @hw: pointer to the clock to determine rate for
* @req: target rate request
*
* Determines which DPLL mode to use for reaching a desired rate.
* Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
+ * locked, calculates the M,N values for the DPLL.
* Returns 0 on success and a negative error value otherwise.
*/
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
@@ -215,8 +160,36 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ struct clk_rate_request tmp_req;
+ long r;
+
+ clk_hw_init_rate_request(hw, &tmp_req, req->rate);
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0) {
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ tmp_req.rate /= OMAP4430_REGM4XEN_MULT;
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+ }
+
+ omap4_dpll_lpmode_recalc(dd);
+
+ req->rate = dd->last_rounded_rate;
req->best_parent_hw = dd->clk_ref;
}
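
Note the use of clk_hw_init_rate_request() above to build a scratch request, so the 4X-multiplier fallback can be probed without clobbering the caller's req until a workable configuration is found. The same probing idiom, reduced to its essentials (bar_determine_rate and the halving factor are hypothetical):

    /* Probe an alternative configuration through a temporary request. */
    static int bar_try_prescaled(struct clk_hw *hw,
    			     struct clk_rate_request *req)
    {
    	struct clk_rate_request tmp;
    	int ret;

    	clk_hw_init_rate_request(hw, &tmp, req->rate / 2);

    	ret = bar_determine_rate(hw, &tmp);	/* hypothetical helper */
    	if (ret)
    		return ret;

    	req->rate = tmp.rate * 2;	/* undo the pre-scaling */

    	return 0;
    }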
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 2db3fc4a443e..4f28138d2d8a 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -214,24 +214,27 @@ static int ti_fapll_set_div_mult(unsigned long rate,
return 0;
}
-static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 pre_div_p, mult_n;
int error;
- if (!rate)
+ if (!req->rate)
return -EINVAL;
- error = ti_fapll_set_div_mult(rate, *parent_rate,
+ error = ti_fapll_set_div_mult(req->rate, req->best_parent_rate,
&pre_div_p, &mult_n);
- if (error)
- return error;
+ if (error) {
+ req->rate = error;
- rate = *parent_rate / pre_div_p;
- rate *= mult_n;
+ return 0;
+ }
- return rate;
+ req->rate = req->best_parent_rate / pre_div_p;
+ req->rate *= mult_n;
+
+ return 0;
}
static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -268,7 +271,7 @@ static const struct clk_ops ti_fapll_ops = {
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
- .round_rate = ti_fapll_round_rate,
+ .determine_rate = ti_fapll_determine_rate,
.set_rate = ti_fapll_set_rate,
};
@@ -399,14 +402,14 @@ static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
return post_div_m;
}
-static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_synth_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long r;
- if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !req->rate)
return -EINVAL;
/* Only post divider m available with no fractional divider? */
@@ -414,23 +417,26 @@ static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long frac_rate;
u32 synth_post_div_m;
- frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
- synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ frac_rate = ti_fapll_synth_get_frac_rate(hw,
+ req->best_parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, req->rate);
r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
goto out;
}
- r = *parent_rate * SYNTH_PHASE_K;
- if (rate > r)
+ r = req->best_parent_rate * SYNTH_PHASE_K;
+ if (req->rate > r)
goto out;
r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
- if (rate < r)
+ if (req->rate < r)
goto out;
- r = rate;
+ r = req->rate;
out:
- return r;
+ req->rate = r;
+
+ return 0;
}
static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -477,7 +483,7 @@ static const struct clk_ops ti_fapll_synt_ops = {
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
- .round_rate = ti_fapll_synth_round_rate,
+ .determine_rate = ti_fapll_synth_determine_rate,
.set_rate = ti_fapll_synth_set_rate,
};
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 216d85d6aac6..d6d247ff2be5 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -84,7 +84,7 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
/**
- * clk_mux_save_context - Save the parent selcted in the mux
+ * clk_mux_save_context - Save the parent selected in the mux
* @hw: pointer struct clk_hw
*
* Save the parent mux value.
@@ -180,7 +180,7 @@ static void of_mux_clk_setup(struct device_node *node)
pr_err("mux-clock %pOFn must have parents\n", node);
return;
}
- parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
goto cleanup;
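
The kcalloc() change is a hardening fix rather than a behavioural one: kcalloc(n, size, flags) checks the n * size multiplication for overflow and returns NULL if it would wrap, whereas the open-coded sizeof(char *) * num_parents could silently overflow on a hostile or corrupt device tree. The idiom in isolation (function name is illustrative):

    #include <linux/slab.h>

    static int foo_alloc_names(unsigned int num_parents, const char ***out)
    {
    	const char **parents;

    	/* Overflow-checked: NULL if num_parents * sizeof(*parents) wraps. */
    	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
    	if (!parents)
    		return -ENOMEM;

    	*out = parents;
    	return 0;
    }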
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 5cbf24c94606..f775e18acd46 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -53,11 +53,13 @@ static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw,
return prcmu_clock_rate(clk->cg_sel);
}
-static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_prcmu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- return prcmu_round_clock_rate(clk->cg_sel, rate);
+ req->rate = prcmu_round_clock_rate(clk->cg_sel, req->rate);
+
+ return 0;
}
static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -157,7 +159,7 @@ static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -169,7 +171,7 @@ static const struct clk_ops clk_prcmu_gate_ops = {
static const struct clk_ops clk_prcmu_scalable_rate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -187,7 +189,7 @@ static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index d5cb372f0901..86ca04ad9fab 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -194,7 +194,7 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
pr_err("ICST error: tried to use RDW != 22\n");
break;
default:
- /* Regular auxilary oscillator */
+ /* Regular auxiliary oscillator */
mask = VERSATILE_AUX_OSC_BITS;
val = vco.v | (vco.r << 9) | (vco.s << 16);
break;
@@ -234,39 +234,51 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
return icst->rate;
}
-static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int icst_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
if (icst->ctype == ICST_INTEGRATOR_AP_CM ||
icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
- if (rate <= 12000000)
- return 12000000;
- if (rate >= 160000000)
- return 160000000;
- /* Slam to closest megahertz */
- return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000;
+ if (req->rate <= 12000000)
+ req->rate = 12000000;
+ else if (req->rate >= 160000000)
+ req->rate = 160000000;
+ else {
+ /* Slam to closest megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 1000000) * 1000000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
- if (rate <= 6000000)
- return 6000000;
- if (rate >= 66000000)
- return 66000000;
- /* Slam to closest 0.5 megahertz */
- return DIV_ROUND_CLOSEST(rate, 500000) * 500000;
+ if (req->rate <= 6000000)
+ req->rate = 6000000;
+ else if (req->rate >= 66000000)
+ req->rate = 66000000;
+ else {
+ /* Slam to closest 0.5 megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 500000) * 500000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
/* Divides between 3 and 50 MHz in steps of 0.25 MHz */
- if (rate <= 3000000)
- return 3000000;
- if (rate >= 50000000)
- return 5000000;
- /* Slam to closest 0.25 MHz */
- return DIV_ROUND_CLOSEST(rate, 250000) * 250000;
+ if (req->rate <= 3000000)
+ req->rate = 3000000;
+ else if (req->rate >= 50000000)
+ req->rate = 5000000;
+ else {
+ /* Slam to closest 0.25 MHz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 250000) * 250000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
@@ -274,14 +286,20 @@ static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
* If we're below or less than halfway from 25 to 33 MHz
* select 25 MHz
*/
- if (rate <= 25000000 || rate < 29000000)
- return 25000000;
- /* Else just return the default frequency */
- return 33000000;
+ if (req->rate <= 25000000 || req->rate < 29000000)
+ req->rate = 25000000;
+ else {
+ /* Else just return the default frequency */
+ req->rate = 33000000;
+ }
+
+ return 0;
}
- vco = icst_hz_to_vco(icst->params, rate);
- return icst_hz(icst->params, vco);
+ vco = icst_hz_to_vco(icst->params, req->rate);
+ req->rate = icst_hz(icst->params, vco);
+
+ return 0;
}
static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -329,7 +347,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops icst_ops = {
.recalc_rate = icst_recalc_rate,
- .round_rate = icst_round_rate,
+ .determine_rate = icst_determine_rate,
.set_rate = icst_set_rate,
};
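
The ICST conversion keeps each frequency band's rounding exactly as before; note that the pre-existing 5000000 value in the AP_SYS upper-bound branch is carried over verbatim. The "slam to closest step" arithmetic is plain round-to-nearest; a small stand-alone demo (userspace C, values arbitrary):

    #include <stdio.h>

    /* Equivalent of DIV_ROUND_CLOSEST(rate, step) * step. */
    static unsigned long slam(unsigned long rate, unsigned long step)
    {
    	return (rate + step / 2) / step * step;
    }

    int main(void)
    {
    	printf("%lu\n", slam(48123456, 1000000));	/* 48000000 */
    	printf("%lu\n", slam(48650000, 500000));	/* 48500000 */
    	return 0;
    }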
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index c385ca2f4a74..9adbf5c33bd1 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -33,18 +33,18 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vexpress_osc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (osc->rate_min && rate < osc->rate_min)
- rate = osc->rate_min;
+ if (osc->rate_min && req->rate < osc->rate_min)
+ req->rate = osc->rate_min;
- if (osc->rate_max && rate > osc->rate_max)
- rate = osc->rate_max;
+ if (osc->rate_max && req->rate > osc->rate_max)
+ req->rate = osc->rate_max;
- return rate;
+ return 0;
}
static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -57,7 +57,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vexpress_osc_ops = {
.recalc_rate = vexpress_osc_recalc_rate,
- .round_rate = vexpress_osc_round_rate,
+ .determine_rate = vexpress_osc_determine_rate,
.set_rate = vexpress_osc_set_rate,
};
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index 3f929cf8dd2f..681721d85032 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -100,20 +100,25 @@ static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
return rate_table[0].rate;
}
-static long visconti_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int visconti_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct visconti_pll *pll = to_visconti_pll(hw);
const struct visconti_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
@@ -232,7 +237,7 @@ static const struct clk_ops visconti_pll_ops = {
.enable = visconti_pll_enable,
.disable = visconti_pll_disable,
.is_enabled = visconti_pll_is_enabled,
- .round_rate = visconti_pll_round_rate,
+ .determine_rate = visconti_pll_determine_rate,
.recalc_rate = visconti_pll_recalc_rate,
.set_rate = visconti_pll_set_rate,
};
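
The Visconti lookup relies on the rate table being sorted in descending order: the first entry not larger than the request wins, and a request below the table floor gets the smallest supported rate. A stand-alone demo of the same walk (table values are made up):

    #include <stdio.h>

    static const unsigned long table[] = {
    	1600000000UL, 1200000000UL, 800000000UL,
    };
    #define N (sizeof(table) / sizeof(table[0]))

    static unsigned long pick(unsigned long rate)
    {
    	unsigned int i;

    	for (i = 0; i < N; i++)
    		if (rate >= table[i])
    			return table[i];

    	return table[i - 1];	/* below minimum: clamp to smallest */
    }

    int main(void)
    {
    	printf("%lu\n", pick(1500000000UL));	/* 1200000000 */
    	printf("%lu\n", pick(100000000UL));	/* 800000000 */
    	return 0;
    }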
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 89b53f280aee..d099667355f8 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -132,14 +132,15 @@ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
divider->flags, divider->width);
}
-static long
-lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static int
@@ -182,7 +183,7 @@ static void lgm_clk_divider_disable(struct clk_hw *hw)
static const struct clk_ops lgm_clk_divider_ops = {
.recalc_rate = lgm_clk_divider_recalc_rate,
- .round_rate = lgm_clk_divider_round_rate,
+ .determine_rate = lgm_clk_divider_determine_rate,
.set_rate = lgm_clk_divider_set_rate,
.enable = lgm_clk_divider_enable,
.disable = lgm_clk_divider_disable,
@@ -487,15 +488,14 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long
-lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
u64 rate64;
- div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+ div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate);
/* if predivide bit is enabled, modify div by factor of 2.5 */
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
@@ -503,14 +503,17 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
- if (div <= 0)
- return *prate;
+ if (div <= 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
return -EINVAL;
- rate64 = *prate;
+ rate64 = req->best_parent_rate;
do_div(rate64, ddiv1);
do_div(rate64, ddiv2);
@@ -520,7 +523,9 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
}
- return rate64;
+ req->rate = rate64;
+
+ return 0;
}
static const struct clk_ops lgm_clk_ddiv_ops = {
@@ -528,7 +533,7 @@ static const struct clk_ops lgm_clk_ddiv_ops = {
.enable = lgm_clk_ddiv_enable,
.disable = lgm_clk_ddiv_disable,
.set_rate = lgm_clk_ddiv_set_rate,
- .round_rate = lgm_clk_ddiv_round_rate,
+ .determine_rate = lgm_clk_ddiv_determine_rate,
};
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 7a0269bdfbb3..4a0136349f71 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/math64.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/err.h>
#include <linux/iopoll.h>
@@ -51,6 +52,8 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_MULT_FRAC_MASK GENMASK(25, 16)
+#define WZRD_CLKFBOUT_O_MASK GENMASK(7, 0)
#define WZRD_CLKFBOUT_L_SHIFT 0
#define WZRD_CLKFBOUT_H_SHIFT 8
#define WZRD_CLKFBOUT_L_MASK GENMASK(7, 0)
@@ -86,14 +89,14 @@
#define DIV_O 0x01
#define DIV_ALL 0x03
-#define WZRD_M_MIN 2
-#define WZRD_M_MAX 128
-#define WZRD_D_MIN 1
-#define WZRD_D_MAX 106
-#define WZRD_VCO_MIN 800000000
-#define WZRD_VCO_MAX 1600000000
-#define WZRD_O_MIN 1
-#define WZRD_O_MAX 128
+#define WZRD_M_MIN 2ULL
+#define WZRD_M_MAX 128ULL
+#define WZRD_D_MIN 1ULL
+#define WZRD_D_MAX 106ULL
+#define WZRD_VCO_MIN 800000000ULL
+#define WZRD_VCO_MAX 1600000000ULL
+#define WZRD_O_MIN 2ULL
+#define WZRD_O_MAX 128ULL
#define VER_WZRD_M_MIN 4
#define VER_WZRD_M_MAX 432
#define VER_WZRD_D_MIN 1
@@ -121,26 +124,24 @@ enum clk_wzrd_int_clks {
/**
* struct clk_wzrd - Clock wizard private data structure
*
- * @clk_data: Clock data
* @nb: Notifier block
* @base: Memory base
* @clk_in1: Handle to input clock 'clk_in1'
* @axi_clk: Handle to input clock 's_axi_aclk'
* @clks_internal: Internal clocks
- * @clkout: Output clocks
* @speed_grade: Speed grade of the device
* @suspended: Flag indicating power state of the device
+ * @clk_data: Output clock data
*/
struct clk_wzrd {
- struct clk_onecell_data clk_data;
struct notifier_block nb;
void __iomem *base;
struct clk *clk_in1;
struct clk *axi_clk;
- struct clk *clks_internal[wzrd_clk_int_max];
- struct clk *clkout[WZRD_NUM_OUTPUTS];
+ struct clk_hw *clks_internal[wzrd_clk_int_max];
unsigned int speed_grade;
bool suspended;
+ struct clk_hw_onecell_data clk_data;
};
/**
@@ -154,8 +155,10 @@ struct clk_wzrd {
* @flags: clk_wzrd divider flags
* @table: array of value/divider pairs, last entry should have div = 0
* @m: value of the multiplier
+ * @m_frac: fractional value of the multiplier
* @d: value of the common divider
* @o: value of the leaf divider
+ * @o_frac: value of the fractional leaf divider
* @lock: register lock
*/
struct clk_wzrd_divider {
@@ -167,8 +170,10 @@ struct clk_wzrd_divider {
u8 flags;
const struct clk_div_table *table;
u32 m;
+ u32 m_frac;
u32 d;
u32 o;
+ u32 o_frac;
spinlock_t *lock; /* divider lock */
};
@@ -317,8 +322,8 @@ err_reconfig:
return err;
}
-static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u8 div;
@@ -326,16 +331,18 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
* since we don't change parent rate we just round rate to closest
* achievable
*/
- div = DIV_ROUND_CLOSEST(*prate, rate);
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
u32 m, d, o;
u32 mmin, mmax, dmin, dmax, omin, omax;
@@ -351,60 +358,66 @@ static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
for (m = mmin; m <= mmax; m++) {
for (d = dmin; d <= dmax; d++) {
vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ if (vco_freq < vcomin || vco_freq > vcomax)
+ continue;
+
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ if (!diff)
+ return 0;
}
}
}
- return -EBUSY;
+ return 0;
}
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
- u32 m, d, o;
- u32 mmin, mmax, dmin, dmax, omin, omax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
+ u64 m, d, o;
+ u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;
- mmin = WZRD_M_MIN;
- mmax = WZRD_M_MAX;
+ mmin = WZRD_M_MIN << 3;
+ mmax = WZRD_M_MAX << 3;
dmin = WZRD_D_MIN;
dmax = WZRD_D_MAX;
- omin = WZRD_O_MIN;
- omax = WZRD_O_MAX;
- vcomin = WZRD_VCO_MIN;
- vcomax = WZRD_VCO_MAX;
+ omin = WZRD_O_MIN << 3;
+ omax = WZRD_O_MAX << 3;
+ vcomin = WZRD_VCO_MIN << 3;
+ vcomax = WZRD_VCO_MAX << 3;
for (m = mmin; m <= mmax; m++) {
- for (d = dmin; d <= dmax; d++) {
- vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
+ mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
+ for (d = mdmin; d <= mdmax; d++) {
+ vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = freq - rate;
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m >> 3;
+ divider->m_frac = (m - (divider->m << 3)) * 125;
+ divider->d = d;
+ divider->o = o >> 3;
+ divider->o_frac = (o - (divider->o << 3)) * 125;
}
}
}
- return -EBUSY;
+ return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
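
The rewritten clk_wzrd_get_divisors() searches m and o in steps of 1/8 by shifting the integer bounds left by 3 bits; once the best candidate is found, >> 3 recovers the integer part and the remaining eighths are multiplied by 125 to express the fraction in thousandths (1/8 = 0.125), matching the *1000 fixed point used by the recalc and determine_rate paths below. A stand-alone demo of the encoding (example value arbitrary):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long m = (5ULL << 3) + 3;	/* 5.375 in 1/8 steps */
    	unsigned long long m_int  = m >> 3;			/* 5 */
    	unsigned long long m_frac = (m - (m_int << 3)) * 125;	/* 375 */

    	/* Feeds rate = parent * (m_int*1000 + m_frac) / (d * (o*1000 + o_frac)) */
    	printf("m = %llu.%03llu\n", m_int, m_frac);	/* prints "m = 5.375" */
    	return 0;
    }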
@@ -497,33 +510,22 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long vco_freq, rate_div, clockout0_div;
void __iomem *div_addr;
- u32 reg, pre, f;
+ u32 reg;
int err;
err = clk_wzrd_get_divisors(hw, rate, parent_rate);
if (err)
return err;
- vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
- rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
-
- clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);
-
- pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
- f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
- f &= WZRD_CLKOUT_FRAC_MASK;
-
- reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
- FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
+ reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
+ FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
- /* Set divisor and clear phase offset */
reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
+ FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
- writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
return clk_wzrd_reconfig(divider, div_addr);
@@ -565,18 +567,19 @@ static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u32 m, d, o, div, reg, f;
+ u32 m, d, o, reg, f, mf;
+ u64 mul;
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
+ mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
- div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
- return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
- divider->flags, divider->width);
+ mul = m * 1000 + mf;
+ return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
@@ -645,15 +648,35 @@ static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 m, d, o;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate);
+ if (err)
+ return err;
+
+ m = divider->m;
+ d = divider->d;
+ o = divider->o;
+
+ req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac),
+ d * (o * 1000 + divider->o_frac));
+ return 0;
+}
+
+static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -662,36 +685,38 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
o = divider->o;
div = d * o;
- int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
+ int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div,
+ divider->table,
divider->flags, divider->width);
- if (rate > int_freq) {
- f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
- rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
+ if (req->rate > int_freq) {
+ f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS,
+ int_freq);
+ req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
}
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_ver_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_ver_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate_ver,
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .determine_rate = clk_wzrd_ver_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
static const struct clk_ops clk_wzrd_clk_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate,
};
static const struct clk_ops clk_wzrd_clk_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .determine_rate = clk_wzrd_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all,
.recalc_rate = clk_wzrd_recalc_rate_all,
};
@@ -753,19 +778,19 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}
-static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_f(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
- .round_rate = clk_wzrd_round_rate_f,
+ .determine_rate = clk_wzrd_determine_rate_f,
.set_rate = clk_wzrd_dynamic_reconfig_f,
.recalc_rate = clk_wzrd_recalc_ratef,
};
-static struct clk *clk_wzrd_register_divf(struct device *dev,
+static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -805,10 +830,10 @@ static struct clk *clk_wzrd_register_divf(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
-static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
+static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -852,10 +877,10 @@ static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
-static struct clk *clk_wzrd_register_divider(struct device *dev,
+static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -898,7 +923,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
@@ -963,81 +988,30 @@ static const struct versal_clk_data versal_data = {
.is_versal = true,
};
-static int clk_wzrd_probe(struct platform_device *pdev)
+static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
{
const char *clkout_name, *clk_name, *clk_mul_name;
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
u32 regl, regh, edge, regld, reghd, edged, div;
- struct device_node *np = pdev->dev.of_node;
const struct versal_clk_data *data;
- struct clk_wzrd *clk_wzrd;
unsigned long flags = 0;
+ bool is_versal = false;
void __iomem *ctrl_reg;
u32 reg, reg_f, mult;
- bool is_versal = false;
- unsigned long rate;
- int nr_outputs;
- int i, ret;
-
- clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
- if (!clk_wzrd)
- return -ENOMEM;
- platform_set_drvdata(pdev, clk_wzrd);
-
- clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(clk_wzrd->base))
- return PTR_ERR(clk_wzrd->base);
-
- ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
- if (!ret) {
- if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
- dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
- clk_wzrd->speed_grade);
- clk_wzrd->speed_grade = 0;
- }
- }
-
- clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1))
- return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
- "clk_in1 not found\n");
-
- clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
- "s_axi_aclk not found\n");
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
- return ret;
- }
- rate = clk_get_rate(clk_wzrd->axi_clk);
- if (rate > WZRD_ACLK_MAX_FREQ) {
- dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
- rate);
- ret = -EINVAL;
- goto err_disable_clk;
- }
+ int i;
- data = device_get_match_data(&pdev->dev);
+ data = device_get_match_data(dev);
if (data)
is_versal = data->is_versal;
- ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
- if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_out0", dev_name(&pdev->dev));
- if (!clkout_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
+ clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
+ if (!clkout_name)
+ return -ENOMEM;
if (is_versal) {
if (nr_outputs == 1) {
- clk_wzrd->clkout[0] = clk_wzrd_ver_register_divider
- (&pdev->dev, clkout_name,
+ clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
+ (dev, clkout_name,
__clk_get_name(clk_wzrd->clk_in1), 0,
clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
WZRD_CLKOUT_DIVIDE_SHIFT,
@@ -1045,7 +1019,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_ALL, &clkwzrd_lock);
- goto out;
+ return 0;
}
/* register multiplier */
edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
@@ -1069,8 +1043,8 @@ static int clk_wzrd_probe(struct platform_device *pdev)
div = 64;
} else {
if (nr_outputs == 1) {
- clk_wzrd->clkout[0] = clk_wzrd_register_divider
- (&pdev->dev, clkout_name,
+ clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
+ (dev, clkout_name,
__clk_get_name(clk_wzrd->clk_in1), 0,
clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
WZRD_CLKOUT_DIVIDE_SHIFT,
@@ -1078,7 +1052,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_ALL, &clkwzrd_lock);
- goto out;
+ return 0;
}
reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
@@ -1089,26 +1063,21 @@ static int clk_wzrd_probe(struct platform_device *pdev)
mult = (reg * 1000) + reg_f;
div = 1000;
}
- clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
- clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
- (&pdev->dev, clk_name,
+ clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
+ if (!clk_name)
+ return -ENOMEM;
+ clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
+ (dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
0, mult, div);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
- dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
- goto err_disable_clk;
+ dev_err(dev, "unable to register fixed-factor clock\n");
+ return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
}
- clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
+ clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
+ if (!clk_name)
+ return -ENOMEM;
if (is_versal) {
edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
@@ -1121,39 +1090,34 @@ static int clk_wzrd_probe(struct platform_device *pdev)
if (!div)
div = 1;
- clk_mul_name = __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
+ clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
clk_wzrd->clks_internal[wzrd_clk_mul_div] =
- clk_register_fixed_factor(&pdev->dev, clk_name,
- clk_mul_name, 0, 1, div);
+ devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
} else {
ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
- clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
- (&pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
+ (dev, clk_name,
+ clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
}
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
- dev_err(&pdev->dev, "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
- goto err_rm_int_clk;
+ dev_err(dev, "unable to register divider clock\n");
+ return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
}
/* register div per output */
for (i = nr_outputs - 1; i >= 0 ; i--) {
- clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%s_out%d", dev_name(&pdev->dev), i);
- if (!clkout_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
+ clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
+ if (!clkout_name)
+ return -ENOMEM;
if (is_versal) {
- clk_wzrd->clkout[i] = clk_wzrd_ver_register_divider
- (&pdev->dev,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
+ (dev,
clkout_name, clk_name, 0,
clk_wzrd->base,
- (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 8),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED |
@@ -1161,84 +1125,108 @@ static int clk_wzrd_probe(struct platform_device *pdev)
DIV_O, &clkwzrd_lock);
} else {
if (!i)
- clk_wzrd->clkout[i] = clk_wzrd_register_divf
- (&pdev->dev, clkout_name, clk_name, flags, clk_wzrd->base,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
+ (dev, clkout_name, clk_name, flags, clk_wzrd->base,
(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_O, &clkwzrd_lock);
else
- clk_wzrd->clkout[i] = clk_wzrd_register_divider
- (&pdev->dev, clkout_name, clk_name, 0, clk_wzrd->base,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
+ (dev, clkout_name, clk_name, 0, clk_wzrd->base,
(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_O, &clkwzrd_lock);
}
- if (IS_ERR(clk_wzrd->clkout[i])) {
- int j;
-
- for (j = i + 1; j < nr_outputs; j++)
- clk_unregister(clk_wzrd->clkout[j]);
- dev_err(&pdev->dev,
- "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clkout[i]);
- goto err_rm_int_clks;
+ if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
+ dev_err(dev, "unable to register divider clock\n");
+ return PTR_ERR(clk_wzrd->clk_data.hws[i]);
}
}
-out:
- clk_wzrd->clk_data.clks = clk_wzrd->clkout;
- clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+ return 0;
+}
- if (clk_wzrd->speed_grade) {
- clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_wzrd *clk_wzrd;
+ unsigned long rate;
+ int nr_outputs;
+ int ret;
- ret = clk_notifier_register(clk_wzrd->clk_in1,
- &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
+ ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
+ if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
+ return -EINVAL;
- ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
- }
+ clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
+ GFP_KERNEL);
+ if (!clk_wzrd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_wzrd);
- return 0;
+ clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_wzrd->base))
+ return PTR_ERR(clk_wzrd->base);
-err_rm_int_clks:
- clk_unregister(clk_wzrd->clks_internal[1]);
-err_rm_int_clk:
- clk_unregister(clk_wzrd->clks_internal[0]);
-err_disable_clk:
- clk_disable_unprepare(clk_wzrd->axi_clk);
+ clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clk_wzrd->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
+ "s_axi_aclk not found\n");
+ rate = clk_get_rate(clk_wzrd->axi_clk);
+ if (rate > WZRD_ACLK_MAX_FREQ) {
+ dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
+ return -EINVAL;
+ }
- return ret;
-}
+ if (!of_property_present(np, "xlnx,static-config")) {
+ ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
+ if (!ret) {
+ if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
+ dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
+ clk_wzrd->speed_grade);
+ clk_wzrd->speed_grade = 0;
+ }
+ }
-static void clk_wzrd_remove(struct platform_device *pdev)
-{
- int i;
- struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+ clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+ if (IS_ERR(clk_wzrd->clk_in1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
+ "clk_in1 not found\n");
- of_clk_del_provider(pdev->dev.of_node);
+ ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
+ if (ret)
+ return ret;
+
+ clk_wzrd->clk_data.num = nr_outputs;
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ &clk_wzrd->clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register clock provider\n");
+ return ret;
+ }
+
+ if (clk_wzrd->speed_grade) {
+ clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
- for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
- clk_unregister(clk_wzrd->clkout[i]);
- for (i = 0; i < wzrd_clk_int_max; i++)
- clk_unregister(clk_wzrd->clks_internal[i]);
+ ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
- if (clk_wzrd->speed_grade) {
- clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
- clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
+ ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+ }
}
- clk_disable_unprepare(clk_wzrd->axi_clk);
+ return 0;
}
static const struct of_device_id clk_wzrd_ids[] = {
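
Sizing the allocation with struct_size() is what makes the new flexible clk_data.hws[] array work: it adds the array bytes to the struct size with overflow checking, saturating to SIZE_MAX so the allocator fails cleanly instead of under-allocating. The idiom on its own, with a hypothetical struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo {
    	unsigned int n;
    	struct clk_hw *hws[];	/* flexible array member, must be last */
    };

    static struct foo *foo_alloc(unsigned int nr)
    {
    	struct foo *f;

    	/* sizeof(*f) + nr * sizeof(f->hws[0]), overflow-checked */
    	f = kzalloc(struct_size(f, hws, nr), GFP_KERNEL);
    	if (f)
    		f->n = nr;

    	return f;
    }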
@@ -1257,7 +1245,6 @@ static struct platform_driver clk_wzrd_driver = {
.pm = &clk_wzrd_dev_pm_ops,
},
.probe = clk_wzrd_probe,
- .remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 81501b48412e..02699bc0f82c 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/xlnx-vcu.h>
@@ -51,6 +52,7 @@
* @dev: Platform device
* @pll_ref: pll ref clock source
* @aclk: axi clock source
+ * @reset_gpio: vcu reset gpio
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
* @pll: handle for the VCU PLL
@@ -61,6 +63,7 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
+ struct gpio_desc *reset_gpio;
struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
struct clk_hw *pll;
@@ -308,18 +311,21 @@ static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
return 0;
}
-static long xvcu_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int xvcu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vcu_pll *pll = to_vcu_pll(hw);
unsigned int feedback_div;
- rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+ req->rate = clamp_t(unsigned long, req->rate, pll->fvco_min,
+ pll->fvco_max);
- feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = DIV_ROUND_CLOSEST_ULL(req->rate, req->best_parent_rate);
feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
- return *parent_rate * feedback_div;
+ req->rate = req->best_parent_rate * feedback_div;
+
+ return 0;
}
static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
@@ -391,7 +397,7 @@ static void xvcu_pll_disable(struct clk_hw *hw)
static const struct clk_ops vcu_pll_ops = {
.enable = xvcu_pll_enable,
.disable = xvcu_pll_disable,
- .round_rate = xvcu_pll_round_rate,
+ .determine_rate = xvcu_pll_determine_rate,
.recalc_rate = xvcu_pll_recalc_rate,
.set_rate = xvcu_pll_set_rate,
};
@@ -587,8 +593,8 @@ static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
-
- clk_hw_unregister_fixed_factor(xvcu->pll_post);
+ if (!IS_ERR_OR_NULL(xvcu->pll_post))
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
}
/**
@@ -676,6 +682,24 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err_probe(&pdev->dev, ret, "failed to get reset gpio for vcu.\n");
+ goto error_get_gpio;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+		/* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_dbg(&pdev->dev, "No reset gpio info found in dts for VCU. This may result in incorrect functionality if VCU isolation is removed after initialization in designs where the VCU reset is driven by gpio.\n");
+ }
+
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
ret = xvcu_register_clock_provider(xvcu);
@@ -690,6 +714,7 @@ static int xvcu_probe(struct platform_device *pdev)
error_clk_provider:
xvcu_unregister_clock_provider(xvcu);
+error_get_gpio:
clk_disable_unprepare(xvcu->aclk);
return ret;
}
@@ -711,6 +736,13 @@ static void xvcu_remove(struct platform_device *pdev)
xvcu_unregister_clock_provider(xvcu);
/* Add the Gasket isolation and put the VCU in reset. */
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+		/* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ }
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->aclk);
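
devm_gpiod_get_optional() returns NULL rather than an error when no reset GPIO is described, which is why both the probe and remove paths guard the toggling. The pulse itself, reduced to a sketch (function name hypothetical; timing per the comment above, two cycles of the slowest 33.33 kHz pll_ref being 60 us):

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int foo_pulse_reset(struct device *dev)
    {
    	struct gpio_desc *g;

    	g = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
    	if (IS_ERR(g))
    		return PTR_ERR(g);
    	if (!g)
    		return 0;	/* no reset-gpios property: nothing to do */

    	gpiod_set_value(g, 0);
    	usleep_range(60, 120);	/* >= 2 pll_ref cycles at 33.33 kHz */
    	gpiod_set_value(g, 1);
    	usleep_range(60, 120);

    	return 0;
    }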
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index e5f8fb704df2..5eca1c14981a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -48,18 +48,20 @@ struct zynq_pll {
* @prate: Clock frequency of parent clock
* Return: frequency closest to @rate the hardware can generate.
*/
-static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynq_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN)
fbdiv = PLL_FBDIV_MIN;
else if (fbdiv > PLL_FBDIV_MAX)
fbdiv = PLL_FBDIV_MAX;
- return *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
+
+ return 0;
}
/**
@@ -167,7 +169,7 @@ static const struct clk_ops zynq_pll_ops = {
.enable = zynq_pll_enable,
.disable = zynq_pll_disable,
.is_enabled = zynq_pll_is_enabled,
- .round_rate = zynq_pll_round_rate,
+ .determine_rate = zynq_pll_determine_rate,
.recalc_rate = zynq_pll_recalc_rate
};
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 5a00487ae408..c824eeacd8eb 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -118,9 +118,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
*
* Return: 0 on success else error+reason
*/
-static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
@@ -145,17 +144,21 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
bestdiv = 1 << bestdiv;
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
+
+ return 0;
}
width = fls(divider->max_div);
- rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL, width, divider->flags);
- if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
- *prate = rate;
+ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
+ (req->rate % req->best_parent_rate))
+ req->best_parent_rate = req->rate;
- return rate;
+ return 0;
}
/**
@@ -199,13 +202,13 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops zynqmp_clk_divider_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
.set_rate = zynqmp_clk_divider_set_rate,
};
static const struct clk_ops zynqmp_clk_divider_ro_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
};
/**
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 7411a7fd50ac..630a3936c97c 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -98,29 +98,29 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
*
* Return: Frequency closest to @rate the hardware can generate
*/
-static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynqmp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
u32 mult, div;
/* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
- if (rate > PS_PLL_VCO_MAX) {
- div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
- rate = rate / div;
+ if (req->rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(req->rate, PS_PLL_VCO_MAX);
+ req->rate = req->rate / div;
}
- if (rate < PS_PLL_VCO_MIN) {
- mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * mult;
+ if (req->rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, req->rate);
+ req->rate = req->rate * mult;
}
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- rate = *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
}
- return rate;
+ return 0;
}
/**
@@ -294,7 +294,7 @@ static const struct clk_ops zynqmp_pll_ops = {
.enable = zynqmp_pll_enable,
.disable = zynqmp_pll_disable,
.is_enabled = zynqmp_pll_is_enabled,
- .round_rate = zynqmp_pll_round_rate,
+ .determine_rate = zynqmp_pll_determine_rate,
.recalc_rate = zynqmp_pll_recalc_rate,
.set_rate = zynqmp_pll_set_rate,
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 95dd4660b5b6..ffcd23668763 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -73,6 +73,14 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select TIMER_OF
+config ECONET_EN751221_TIMER
+ bool "EcoNet EN751221 High Precision Timer" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+	  Support for the CPU timer found on EcoNet MIPS-based SoCs.
+
config FTTMR010_TIMER
bool "Faraday Technology timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -387,8 +395,7 @@ config ARM_GLOBAL_TIMER
config ARM_GT_INITIAL_PRESCALER_VAL
int "ARM global timer initial prescaler value"
- default 2 if ARCH_ZYNQ
- default 1
+ default 0
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
@@ -398,9 +405,11 @@ config ARM_GT_INITIAL_PRESCALER_VAL
bounds about how much the parent clock is allowed to decrease or
increase wrt the initial clock value.
This affects CPU_FREQ max delta from the initial frequency.
+	  Use 0 to let the driver auto-detect the prescaler value.
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module" if COMPILE_TEST
+ bool "Support for Dual Timer SP804 module"
+ depends on ARM || ARM64 || COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && HAVE_CLK
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -436,8 +445,8 @@ config ATMEL_ST
config ATMEL_TCB_CLKSRC
bool "Atmel TC Block timer driver" if COMPILE_TEST
- depends on ARM && HAS_IOMEM
- select TIMER_OF if OF
+ depends on ARM && OF && HAS_IOMEM
+ select TIMER_OF
help
Support for Timer Counter Blocks on Atmel SoCs.
@@ -465,11 +474,14 @@ config FSL_FTM_TIMER
help
Support for Freescale FlexTimer Module (FTM) timer.
-config VF_PIT_TIMER
- bool
+config NXP_PIT_TIMER
+ bool "NXP Periodic Interrupt Timer" if COMPILE_TEST
select CLKSRC_MMIO
help
- Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale / NXP
+ SoCs. This periodic timer is found on the Vybrid Family and
+	  the Automotive S32G2/3 platforms. It contains 4 channels,
+	  two of which can be coupled to form a 64-bit channel.
config SYS_SUPPORTS_SH_CMT
bool
@@ -753,4 +765,21 @@ config EP93XX_TIMER
Enables support for the Cirrus Logic timer block
EP93XX.
+config RALINK_TIMER
+ bool "Ralink System Tick Counter"
+ depends on SOC_RT305X || SOC_MT7620 || COMPILE_TEST
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+	  Enables support for the system tick counter present on
+ Ralink SoCs RT3352 and MT7620.
+
+config NXP_STM_TIMER
+ bool "NXP System Timer Module driver"
+ depends on ARCH_S32 || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  Enables support for the NXP System Timer Module found in the
+	  NXP S32G platform series.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 22743785299e..ec4452ee958f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
+obj-$(CONFIG_ECONET_EN751221_TIMER) += timer-econet-en751221.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -48,7 +49,7 @@ obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
-obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_NXP_PIT_TIMER) += timer-nxp-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
@@ -91,3 +93,5 @@ obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
+obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
+obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 03733101e231..90aeff44a276 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -34,42 +34,12 @@
#include <clocksource/arm_arch_timer.h>
-#define CNTTIDR 0x08
-#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
-
-#define CNTACR(n) (0x40 + ((n) * 4))
-#define CNTACR_RPCT BIT(0)
-#define CNTACR_RVCT BIT(1)
-#define CNTACR_RFRQ BIT(2)
-#define CNTACR_RVOFF BIT(3)
-#define CNTACR_RWVT BIT(4)
-#define CNTACR_RWPT BIT(5)
-
-#define CNTPCT_LO 0x00
-#define CNTVCT_LO 0x08
-#define CNTFRQ 0x10
-#define CNTP_CVAL_LO 0x20
-#define CNTP_CTL 0x2c
-#define CNTV_CVAL_LO 0x30
-#define CNTV_CTL 0x3c
-
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static unsigned arch_timers_present __initdata;
-
-struct arch_timer {
- void __iomem *base;
- struct clock_event_device evt;
-};
-
-static struct arch_timer *arch_timer_mem __ro_after_init;
-
-#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
@@ -85,7 +55,6 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
-static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
@@ -121,76 +90,6 @@ static int arch_counter_get_width(void)
/*
* Architected system timer support.
*/
-
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
- struct clock_event_device *clk)
-{
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTP_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /*
- * Not guaranteed to be atomic, so the timer
- * must be disabled at this point.
- */
- writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTV_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /* Same restriction as above */
- writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- arch_timer_reg_write_cp15(access, reg, val);
- }
-}
-
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
- struct clock_event_device *clk)
-{
- u32 val;
-
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTP_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTV_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- val = arch_timer_reg_read_cp15(access, reg);
- }
-
- return val;
-}
-
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
@@ -243,7 +142,7 @@ static u64 arch_counter_read(struct clocksource *cs)
return arch_timer_read_counter();
}
-static u64 arch_counter_read_cc(const struct cyclecounter *cc)
+static u64 arch_counter_read_cc(struct cyclecounter *cc)
{
return arch_timer_read_counter();
}
@@ -424,7 +323,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
unsigned long ctrl;
u64 cval;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -436,7 +335,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
write_sysreg(cval, cntv_cval_el0);
}
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
@@ -667,10 +566,10 @@ static __always_inline irqreturn_t timer_handler(const int access,
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -692,28 +591,14 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
-}
-
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
@@ -728,23 +613,13 @@ static int arch_timer_shutdown_phys(struct clock_event_device *clk)
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
-}
-
-static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
-}
-
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -753,8 +628,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
else
cnt = __arch_counter_get_cntvct();
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
@@ -771,60 +646,6 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
-{
- u32 cnt_lo, cnt_hi, tmp_hi;
-
- do {
- cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
- tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- } while (cnt_hi != tmp_hi);
-
- return ((u64) cnt_hi << 32) | cnt_lo;
-}
-
-static __always_inline void set_next_event_mem(const int access, unsigned long evt,
- struct clock_event_device *clk)
-{
- struct arch_timer *timer = to_arch_timer(clk);
- unsigned long ctrl;
- u64 cnt;
-
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-
- /* Timer must be disabled before programming CVAL */
- if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- }
-
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
- if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
- cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
- else
- cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
-
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int arch_timer_set_next_event_virt_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
- return 0;
-}
-
-static int arch_timer_set_next_event_phys_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
- return 0;
-}
-
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
@@ -842,7 +663,7 @@ static u64 __arch_timer_check_delta(void)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ if (is_midr_in_range_list(broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
@@ -850,63 +671,41 @@ static u64 __arch_timer_check_delta(void)
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
-static void __arch_timer_setup(unsigned type,
- struct clock_event_device *clk)
+static void __arch_timer_setup(struct clock_event_device *clk)
{
+ typeof(clk->set_next_event) sne;
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
- if (type == ARCH_TIMER_TYPE_CP15) {
- typeof(clk->set_next_event) sne;
-
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
-
- if (arch_timer_c3stop)
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- clk->cpumask = cpumask_of(smp_processor_id());
- clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
- switch (arch_timer_uses_ppi) {
- case ARCH_TIMER_VIRT_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_virt;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- sne = erratum_handler(set_next_event_virt);
- break;
- case ARCH_TIMER_PHYS_SECURE_PPI:
- case ARCH_TIMER_PHYS_NONSECURE_PPI:
- case ARCH_TIMER_HYP_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_phys;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- sne = erratum_handler(set_next_event_phys);
- break;
- default:
- BUG();
- }
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
- clk->set_next_event = sne;
- max_delta = __arch_timer_check_delta();
- } else {
- clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
- clk->name = "arch_mem_timer";
- clk->rating = 400;
- clk->cpumask = cpu_possible_mask;
- if (arch_timer_mem_use_virtual) {
- clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
- clk->set_next_event =
- arch_timer_set_next_event_virt_mem;
- } else {
- clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
- clk->set_next_event =
- arch_timer_set_next_event_phys_mem;
- }
-
- max_delta = CLOCKSOURCE_MASK(56);
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case ARCH_TIMER_VIRT_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
+ sne = erratum_handler(set_next_event_virt);
+ break;
+ case ARCH_TIMER_PHYS_SECURE_PPI:
+ case ARCH_TIMER_PHYS_NONSECURE_PPI:
+ case ARCH_TIMER_HYP_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
+ sne = erratum_handler(set_next_event_phys);
+ break;
+ default:
+ BUG();
}
+ clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
+
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
@@ -1029,7 +828,7 @@ static int arch_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
- __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
+ __arch_timer_setup(clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
@@ -1075,22 +874,12 @@ static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np
pr_warn("frequency not available\n");
}
-static void __init arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(void)
{
- pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
- type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
- " and " : "",
- type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
+ pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- type & ARCH_TIMER_TYPE_CP15 ?
- (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
- "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
- type & ARCH_TIMER_TYPE_MEM ?
- arch_timer_mem_use_virtual ? "virt" : "phys" :
- "");
+ (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}
u32 arch_timer_get_rate(void)
@@ -1108,11 +897,6 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static noinstr u64 arch_counter_get_cntvct_mem(void)
-{
- return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
-}
-
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -1120,42 +904,35 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
return &arch_timer_kvm_info;
}
-static void __init arch_counter_register(unsigned type)
+static void __init arch_counter_register(void)
{
u64 (*scr)(void);
+ u64 (*rd)(void);
u64 start_count;
int width;
- /* Register the CP15 based counter if we have one */
- if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntvct_stable;
- scr = raw_counter_get_cntvct_stable;
- } else {
- rd = arch_counter_get_cntvct;
- scr = arch_counter_get_cntvct;
- }
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntvct_stable;
+ scr = raw_counter_get_cntvct_stable;
} else {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntpct_stable;
- scr = raw_counter_get_cntpct_stable;
- } else {
- rd = arch_counter_get_cntpct;
- scr = arch_counter_get_cntpct;
- }
+ rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
}
-
- arch_timer_read_counter = rd;
- clocksource_counter.vdso_clock_mode = vdso_default;
} else {
- arch_timer_read_counter = arch_counter_get_cntvct_mem;
- scr = arch_counter_get_cntvct_mem;
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntpct_stable;
+ scr = raw_counter_get_cntpct_stable;
+ } else {
+ rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
+ arch_timer_read_counter = rd;
+ clocksource_counter.vdso_clock_mode = vdso_default;
+
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
@@ -1179,8 +956,6 @@ static void arch_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
if (arch_timer_has_nonsecure_ppi())
disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
-
- clk->set_state_shutdown(clk);
}
static int arch_timer_dying_cpu(unsigned int cpu)
@@ -1305,76 +1080,10 @@ out:
return err;
}
-static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
-{
- int ret;
- irq_handler_t func;
-
- arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
- if (!arch_timer_mem)
- return -ENOMEM;
-
- arch_timer_mem->base = base;
- arch_timer_mem->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
-
- if (arch_timer_mem_use_virtual)
- func = arch_timer_handler_virt_mem;
- else
- func = arch_timer_handler_phys_mem;
-
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
- if (ret) {
- pr_err("Failed to request mem timer irq\n");
- kfree(arch_timer_mem);
- arch_timer_mem = NULL;
- }
-
- return ret;
-}
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer-mem", },
- {},
-};
-
-static bool __init arch_timer_needs_of_probing(void)
-{
- struct device_node *dn;
- bool needs_probing = false;
- unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
-
- /* We have two timers, and both device-tree nodes are probed. */
- if ((arch_timers_present & mask) == mask)
- return false;
-
- /*
- * Only one type of timer is probed,
- * check if we have another type of timer node in device-tree.
- */
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
- dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
- else
- dn = of_find_matching_node(NULL, arch_timer_of_match);
-
- if (dn && of_device_is_available(dn))
- needs_probing = true;
-
- of_node_put(dn);
-
- return needs_probing;
-}
-
static int __init arch_timer_common_init(void)
{
- arch_timer_banner(arch_timers_present);
- arch_counter_register(arch_timers_present);
+ arch_timer_banner();
+ arch_counter_register();
return arch_timer_arch_init();
}
@@ -1423,14 +1132,12 @@ static int __init arch_timer_of_init(struct device_node *np)
u32 rate;
bool has_names;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- has_names = of_property_read_bool(np, "interrupt-names");
+ has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
if (has_names)
@@ -1474,283 +1181,22 @@ static int __init arch_timer_of_init(struct device_node *np)
if (ret)
return ret;
- if (arch_timer_needs_of_probing())
- return 0;
-
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static u32 __init
-arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- u32 rate;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
- return 0;
- }
-
- rate = readl_relaxed(base + CNTFRQ);
-
- iounmap(base);
-
- return rate;
-}
-
-static struct arch_timer_mem_frame * __init
-arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- void __iomem *cntctlbase;
- u32 cnttidr;
- int i;
-
- cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
- if (!cntctlbase) {
- pr_err("Can't map CNTCTLBase @ %pa\n",
- &timer_mem->cntctlbase);
- return NULL;
- }
-
- cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-
- /*
- * Try to find a virtual capable frame. Otherwise fall back to a
- * physical capable frame.
- */
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
- CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
-
- frame = &timer_mem->frame[i];
- if (!frame->valid)
- continue;
-
- /* Try enabling everything, and see what sticks */
- writel_relaxed(cntacr, cntctlbase + CNTACR(i));
- cntacr = readl_relaxed(cntctlbase + CNTACR(i));
-
- if ((cnttidr & CNTTIDR_VIRT(i)) &&
- !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
- best_frame = frame;
- arch_timer_mem_use_virtual = true;
- break;
- }
-
- if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
- continue;
-
- best_frame = frame;
- }
-
- iounmap(cntctlbase);
-
- return best_frame;
-}
-
-static int __init
-arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- int ret, irq;
-
- if (arch_timer_mem_use_virtual)
- irq = frame->virt_irq;
- else
- irq = frame->phys_irq;
-
- if (!irq) {
- pr_err("Frame missing %s irq.\n",
- arch_timer_mem_use_virtual ? "virt" : "phys");
- return -EINVAL;
- }
-
- if (!request_mem_region(frame->cntbase, frame->size,
- "arch_mem_timer"))
- return -EBUSY;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Can't map frame's registers\n");
- return -ENXIO;
- }
-
- ret = arch_timer_mem_register(base, irq);
- if (ret) {
- iounmap(base);
- return ret;
- }
-
- arch_timers_present |= ARCH_TIMER_TYPE_MEM;
-
- return 0;
-}
-
-static int __init arch_timer_mem_of_init(struct device_node *np)
-{
- struct arch_timer_mem *timer_mem;
- struct arch_timer_mem_frame *frame;
- struct resource res;
- int ret = -EINVAL;
- u32 rate;
-
- timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
- if (!timer_mem)
- return -ENOMEM;
-
- if (of_address_to_resource(np, 0, &res))
- goto out;
- timer_mem->cntctlbase = res.start;
- timer_mem->size = resource_size(&res);
-
- for_each_available_child_of_node_scoped(np, frame_node) {
- u32 n;
- struct arch_timer_mem_frame *frame;
-
- if (of_property_read_u32(frame_node, "frame-number", &n)) {
- pr_err(FW_BUG "Missing frame-number.\n");
- goto out;
- }
- if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
- pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
- ARCH_TIMER_MEM_MAX_FRAMES - 1);
- goto out;
- }
- frame = &timer_mem->frame[n];
-
- if (frame->valid) {
- pr_err(FW_BUG "Duplicated frame-number.\n");
- goto out;
- }
-
- if (of_address_to_resource(frame_node, 0, &res))
- goto out;
-
- frame->cntbase = res.start;
- frame->size = resource_size(&res);
-
- frame->virt_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_VIRT_SPI);
- frame->phys_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_PHYS_SPI);
-
- frame->valid = true;
- }
-
- frame = arch_timer_mem_find_best_frame(timer_mem);
- if (!frame) {
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
- ret = -EINVAL;
- goto out;
- }
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- arch_timer_of_configure_rate(rate, np);
-
- ret = arch_timer_mem_frame_register(frame);
- if (!ret && !arch_timer_needs_of_probing())
- ret = arch_timer_common_init();
-out:
- kfree(timer_mem);
- return ret;
-}
-TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
- arch_timer_mem_of_init);
-
#ifdef CONFIG_ACPI_GTDT
-static int __init
-arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame;
- u32 rate;
- int i;
-
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- frame = &timer_mem->frame[i];
-
- if (!frame->valid)
- continue;
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- if (rate == arch_timer_rate)
- continue;
-
- pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
- &frame->cntbase,
- (unsigned long)rate, (unsigned long)arch_timer_rate);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init arch_timer_mem_acpi_init(int platform_timer_count)
-{
- struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- int timer_count, i, ret = 0;
-
- timers = kcalloc(platform_timer_count, sizeof(*timers),
- GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
-
- ret = acpi_arch_timer_mem_init(timers, &timer_count);
- if (ret || !timer_count)
- goto out;
-
- /*
- * While unlikely, it's theoretically possible that none of the frames
- * in a timer expose the combination of feature we want.
- */
- for (i = 0; i < timer_count; i++) {
- timer = &timers[i];
-
- frame = arch_timer_mem_find_best_frame(timer);
- if (!best_frame)
- best_frame = frame;
-
- ret = arch_timer_mem_verify_cntfrq(timer);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
-
- if (!best_frame) /* implies !frame */
- /*
- * Only complain about missing suitable frames if we
- * haven't already found one in a previous iteration.
- */
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer->cntctlbase);
- }
-
- if (best_frame)
- ret = arch_timer_mem_frame_register(best_frame);
-out:
- kfree(timers);
- return ret;
-}
-
-/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
- int ret, platform_timer_count;
+ int ret;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- ret = acpi_gtdt_init(table, &platform_timer_count);
+ ret = acpi_gtdt_init(table, NULL);
if (ret)
return ret;
@@ -1792,10 +1238,6 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
if (ret)
return ret;
- if (platform_timer_count &&
- arch_timer_mem_acpi_init(platform_timer_count))
- pr_err("Failed to initialize memory-mapped timer.\n");
-
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
new file mode 100644
index 000000000000..ebe1987d651e
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Generic Memory Mapped Timer support
+ *
+ * Split from drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#define pr_fmt(fmt) "arch_timer_mmio: " fmt
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CTL 0x2c
+#define CNTV_CVAL_LO 0x30
+#define CNTV_CTL 0x3c
+
+enum arch_timer_access {
+ PHYS_ACCESS,
+ VIRT_ACCESS,
+};
+
+struct arch_timer {
+ struct clock_event_device evt;
+ struct clocksource cs;
+ struct arch_timer_mem *gt_block;
+ void __iomem *base;
+ enum arch_timer_access access;
+ u32 rate;
+};
+
+#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
+#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
+
+static void arch_timer_mmio_write(struct arch_timer *timer,
+ enum arch_timer_reg reg, u64 val)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
+ return;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
+ return;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+}
+
+static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTP_CTL);
+ default:
+ break;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTV_CTL);
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
+{
+ int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static u64 arch_mmio_counter_read(struct clocksource *cs)
+{
+ struct arch_timer *at = cs_to_arch_timer(cs);
+
+ return arch_counter_mmio_get_cnt(at);
+}
+
+static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
+{
+ struct arch_timer *at = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+
+ return 0;
+}
+
+static int arch_timer_mmio_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ }
+
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ cnt = arch_counter_mmio_get_cnt(timer);
+
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ return 0;
+}
+
+static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct arch_timer *at = evt_to_arch_timer(evt);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ void __iomem *cntctlbase;
+ u32 cnttidr;
+
+ cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
+ if (!cntctlbase) {
+ dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return NULL;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
+ u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+
+ frame = &at->gt_block->frame[i];
+ if (!frame->valid)
+ continue;
+
+ /* Try enabling everything, and see what sticks */
+ writel_relaxed(cntacr, cntctlbase + CNTACR(i));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(i));
+
+ /* Pick a suitable frame for which we have an IRQ */
+ if ((cnttidr & CNTTIDR_VIRT(i)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
+ frame->virt_irq) {
+ best_frame = frame;
+ at->access = VIRT_ACCESS;
+ break;
+ }
+
+ if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
+ !frame->phys_irq)
+ continue;
+
+ at->access = PHYS_ACCESS;
+ best_frame = frame;
+ }
+
+ iounmap(cntctlbase);
+
+ return best_frame;
+}
+
+static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
+{
+ at->evt = (struct clock_event_device) {
+ .features = (CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ),
+ .name = "arch_mem_timer",
+ .rating = 400,
+ .cpumask = cpu_possible_mask,
+ .irq = irq,
+ .set_next_event = arch_timer_mmio_set_next_event,
+ .set_state_oneshot_stopped = arch_timer_mmio_shutdown,
+ .set_state_shutdown = arch_timer_mmio_shutdown,
+ };
+
+ at->evt.set_state_shutdown(&at->evt);
+
+ clockevents_config_and_register(&at->evt, at->rate, 0xf,
+ (unsigned long)CLOCKSOURCE_MASK(56));
+
+ enable_irq(at->evt.irq);
+
+ at->cs = (struct clocksource) {
+ .name = "arch_mmio_counter",
+ .rating = 300,
+ .read = arch_mmio_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+ clocksource_register_hz(&at->cs, at->rate);
+}
+
+static int arch_timer_mmio_frame_register(struct platform_device *pdev,
+ struct arch_timer_mem_frame *frame)
+{
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret, irq;
+ u32 rate;
+
+ if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
+ "arch_mem_timer"))
+ return -EBUSY;
+
+ at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
+ if (!at->base) {
+ dev_err(&pdev->dev, "Can't map frame's registers\n");
+ return -ENXIO;
+ }
+
+ /*
+	 * Allow "clock-frequency" to override the probed rate. If neither
+	 * leads to something useful, use the CPU timer frequency as the
+	 * fallback. The nice thing about that last point is that we wouldn't
+	 * have made it here if we didn't have a valid frequency.
+ */
+ rate = readl_relaxed(at->base + CNTFRQ);
+
+ if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
+ at->rate = rate;
+
+ if (!at->rate)
+ at->rate = arch_timer_get_rate();
+
+ irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
+ ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
+ IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
+ &at->evt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mem timer irq\n");
+ return ret;
+ }
+
+	/* After this point, we're not allowed to fail anymore */
+ arch_timer_mmio_setup(at, irq);
+ return 0;
+}
+
+static int of_populate_gt_block(struct platform_device *pdev,
+ struct arch_timer *at)
+{
+ struct resource res;
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res))
+ return -EINVAL;
+
+ at->gt_block->cntctlbase = res.start;
+ at->gt_block->size = resource_size(&res);
+
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
+ struct arch_timer_mem_frame *frame;
+ u32 n;
+
+ if (of_property_read_u32(frame_node, "frame-number", &n)) {
+ dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
+ return -EINVAL;
+ }
+ if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
+ dev_err(&pdev->dev,
+ FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
+ ARCH_TIMER_MEM_MAX_FRAMES - 1);
+ return -EINVAL;
+ }
+
+ frame = &at->gt_block->frame[n];
+
+ if (frame->valid) {
+ dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(frame_node, 0, &res))
+ return -EINVAL;
+
+ frame->cntbase = res.start;
+ frame->size = resource_size(&res);
+
+ frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
+ frame->virt_irq = irq_of_parse_and_map(frame_node, 1);
+
+ frame->valid = true;
+ }
+
+ return 0;
+}
+
+static int arch_timer_mmio_probe(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame;
+ struct arch_timer *at;
+ struct device_node *np;
+ int ret;
+
+ np = pdev->dev.of_node;
+
+ at = devm_kmalloc(&pdev->dev, sizeof(*at), GFP_KERNEL | __GFP_ZERO);
+ if (!at)
+ return -ENOMEM;
+
+ if (np) {
+ at->gt_block = devm_kmalloc(&pdev->dev, sizeof(*at->gt_block),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!at->gt_block)
+ return -ENOMEM;
+ ret = of_populate_gt_block(pdev, at);
+ if (ret)
+ return ret;
+ } else {
+ at->gt_block = dev_get_platdata(&pdev->dev);
+ }
+
+ platform_set_drvdata(pdev, at);
+
+ frame = find_best_frame(pdev);
+ if (!frame) {
+ dev_err(&pdev->dev,
+ "Unable to find a suitable frame in timer @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mmio_frame_register(pdev, frame);
+ if (!ret)
+ dev_info(&pdev->dev,
+ "mmio timer running at %lu.%02luMHz (%s)\n",
+ (unsigned long)at->rate / 1000000,
+ (unsigned long)(at->rate / 10000) % 100,
+ at->access == VIRT_ACCESS ? "virt" : "phys");
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_mmio_of_table[] = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {}
+};
+
+static struct platform_driver arch_timer_mmio_drv = {
+ .driver = {
+ .name = "arch-timer-mmio",
+ .of_match_table = arch_timer_mmio_of_table,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_drv);
+
+static struct platform_driver arch_timer_mmio_acpi_drv = {
+ .driver = {
+ .name = "gtdt-arm-mmio-timer",
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_acpi_drv);
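
arch_counter_mmio_get_cnt() above preserves the lock-free sequence for composing
a 64-bit counter value from two 32-bit MMIO registers: read the high word, then
the low word, then the high word again, and retry if the high word changed in
between. Reduced to its bare shape, with read_hi() and read_lo() standing in for
the two register reads (both are assumptions of this sketch, not real API):

#include <stdint.h>

extern uint32_t read_hi(void);	/* upper 32 bits, hypothetical */
extern uint32_t read_lo(void);	/* lower 32 bits, hypothetical */

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = read_hi();
		lo  = read_lo();
		tmp = read_hi();
		/* hi moved: lo wrapped between the reads, so retry */
	} while (hi != tmp);

	return ((uint64_t)hi << 32) | lo;
}

Without the retry, a reader sampling lo just as it wraps from 0xffffffff to 0
could pair the old low word with the new high word and return a value off by
about 2^32 ticks.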
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index a05cfaab5f84..5e3d6bb7e437 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -195,7 +195,6 @@ static int gt_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(gt_evt);
- gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
return 0;
}
@@ -264,14 +263,13 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static int __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(unsigned int psv)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
- CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv - 1) |
GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -339,11 +337,45 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct gt_prescaler_config {
+ const char *compatible;
+ unsigned long prescaler;
+};
+
+static const struct gt_prescaler_config gt_prescaler_configs[] = {
+ /*
+ * On am43 the global timer clock is a child of the clock used for CPU
+ * OPPs, so the initial prescaler has to be compatible with all OPPs
+ * which are 300, 600, 720, 800 and 1000 with a fixed divider of 2, this
+ * gives us a GCD of 10. Initial frequency is 1000, so the prescaler is
+ * 50.
+ */
+ { .compatible = "ti,am43", .prescaler = 50 },
+ { .compatible = "xlnx,zynq-7000", .prescaler = 2 },
+ { .compatible = NULL }
+};
+
+static unsigned long gt_get_initial_prescaler_value(struct device_node *np)
+{
+ const struct gt_prescaler_config *config;
+
+ if (CONFIG_ARM_GT_INITIAL_PRESCALER_VAL != 0)
+ return CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+
+ for (config = gt_prescaler_configs; config->compatible; config++) {
+ if (of_machine_is_compatible(config->compatible))
+ return config->prescaler;
+ }
+
+ return 1;
+}
+
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err;
+ unsigned long psv;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -379,8 +411,9 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_unmap;
}
+ psv = gt_get_initial_prescaler_value(np);
gt_clk_rate = clk_get_rate(gt_clk);
- gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_target_rate = gt_clk_rate / psv;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
@@ -405,7 +438,7 @@ static int __init global_timer_of_register(struct device_node *np)
}
/* Register and immediately configure the timer on the boot CPU */
- err = gt_clocksource_init();
+ err = gt_clocksource_init(psv);
if (err)
goto out_irq;
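
The am43 entry in gt_prescaler_configs above is easy to check by hand: CPU OPPs
of 300, 600, 720, 800 and 1000 MHz with a fixed /2 divider give global timer
rates of 150, 300, 360, 400 and 500 MHz, whose greatest common divisor is
10 MHz. Booting at the 1000 MHz OPP (a 500 MHz timer clock), a prescaler of 50
lands exactly on that common rate, so later OPP changes can keep the timer
frequency constant with an integer prescaler. A standalone check of the
arithmetic, in plain userspace C:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	static const unsigned long opp_mhz[] = { 300, 600, 720, 800, 1000 };
	unsigned long g = 0;

	for (unsigned int i = 0; i < sizeof(opp_mhz) / sizeof(opp_mhz[0]); i++)
		g = gcd(g, opp_mhz[i] / 2);	/* fixed /2 divider */

	printf("common timer rate: %lu MHz\n", g);	/* prints 10 */
	printf("prescaler at 1000 MHz boot OPP: %lu\n",
	       (1000 / 2) / g);				/* prints 50 */
	return 0;
}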
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index e95fdc49c226..bbceb0289d45 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ int ret = 0;
if (!base)
return -ENOMEM;
- if (!irq)
- return -EINVAL;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
+ if (!irq) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto unmap_io;
+ }
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
- return _clps711x_clkevt_init(clock, base, irq);
+ ret = _clps711x_clkevt_init(clock, base, irq);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+unmap_io:
+ iounmap(base);
+ return ret;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
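
The clps711x fix above is the standard kernel error-unwind idiom: once a
resource such as the of_iomap() mapping exists, failure paths must funnel
through a label that releases it instead of returning directly and leaking it.
The idiom in isolation, under illustrative my_* names (my_setup() is a stand-in
for whatever can fail after the mapping is made):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of_address.h>

static int my_setup(void __iomem *base);	/* hypothetical */

static int __init my_timer_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);
	int ret;

	if (!base)
		return -ENOMEM;	/* nothing held yet, a bare return is fine */

	ret = my_setup(base);
	if (ret)
		goto unmap_io;	/* everything after the map must unwind it */

	return 0;		/* success: the mapping stays in use */

unmap_io:
	iounmap(base);
	return ret;
}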
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f5f24a95ee82..3a55ae5fe225 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -68,25 +68,6 @@ static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
writel_relaxed(val, timer->base + offs);
}
-static void apbt_disable_int(struct dw_apb_timer *timer)
-{
- u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
-
- ctrl |= APBTMR_CONTROL_INT;
- apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
-}
-
-/**
- * dw_apb_clockevent_pause() - stop the clock_event_device from running
- *
- * @dw_ced: The APB clock to stop generating events.
- */
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
-{
- disable_irq(dw_ced->timer.irq);
- apbt_disable_int(&dw_ced->timer);
-}
-
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl_relaxed(timer, APBTMR_N_EOI);
@@ -285,26 +266,6 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
}
/**
- * dw_apb_clockevent_resume() - resume a clock that has been paused.
- *
- * @dw_ced: The APB clock to resume.
- */
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
-{
- enable_irq(dw_ced->timer.irq);
-}
-
-/**
- * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
- *
- * @dw_ced: The APB clock to stop generating the events.
- */
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
-{
- free_irq(dw_ced->timer.irq, &dw_ced->ced);
-}
-
-/**
* dw_apb_clockevent_register() - register the clock with the generic layer
*
* @dw_ced: The APB clock to register as a clock_event_device.
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ef8cb1b71be4..da09f467a6bb 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -238,7 +238,7 @@ static cycles_t exynos4_read_current_timer(void)
static int __init exynos4_clocksource_init(bool frc_shared)
{
/*
- * When the frc is shared, the main processer should have already
+ * When the frc is shared, the main processor should have already
* turned it on and we shouldn't be writing to TCON.
*/
if (frc_shared)
@@ -496,7 +496,6 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- evt->set_state_shutdown(evt);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 99177835cade..10356d4ec55c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,12 +22,14 @@
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
-static u64 hv_sched_clock_offset __ro_after_init;
+/* Note: offset can hold negative values after hibernation. */
+static u64 hv_sched_clock_offset __read_mostly;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -470,6 +472,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
+/*
+ * Called during resume from hibernation, from overridden
+ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
+ * used to calculate time for hv tsc page based sched_clock, to account for
+ * time spent before hibernation.
+ */
+void hv_adj_sched_clock_offset(u64 offset)
+{
+ hv_sched_clock_offset -= offset;
+}
+
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
@@ -536,14 +549,22 @@ static void __init hv_init_tsc_clocksource(void)
union hv_reference_tsc_msr tsc_msr;
/*
+ * When running as a guest partition:
+ *
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
+ *
+ * When running as the root partition:
+ *
+ * There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
+ * of the Hyper-V Reference TSC.
*/
- if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
+ hv_root_partition()) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}
@@ -570,7 +591,7 @@ static void __init hv_init_tsc_clocksource(void)
* mapped.
*/
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
- if (hv_root_partition)
+ if (hv_root_partition())
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
@@ -615,7 +636,7 @@ void __init hv_remap_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
- if (!hv_root_partition) {
+ if (!hv_root_partition()) {
WARN(1, "%s: attempt to remap TSC page in guest partition\n",
__func__);
return;
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 39f7c2d736d1..b603c25f3dfa 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
- raw_spin_lock(&i8253_lock);
+ guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
-
- raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
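
The clockevent_i8253_disable() hunk above swaps an open-coded lock/unlock pair
for the scope-based guard() helper from <linux/cleanup.h>. Two things change at
once: the irqsave variant also disables interrupts while the lock is held, and
the lock is released automatically when the scope ends, which is why the
explicit raw_spin_unlock() at the end of the function is deleted rather than
converted. The pattern in isolation, with illustrative my_* names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);

static void my_hw_quiesce(void);	/* hypothetical register pokes */

static void my_disable(void)
{
	/* Lock taken with IRQs off for the remainder of the scope */
	guard(raw_spinlock_irqsave)(&my_lock);

	my_hw_quiesce();
}	/* lock dropped and IRQ state restored here, on every return path */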
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index cb6fc2f152d4..e79cfb0b8e05 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -127,18 +127,23 @@ static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
return 2; /* /16 divider */
}
-static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int ingenic_ost_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long rate = *parent_rate;
+ unsigned long rate = req->best_parent_rate;
u8 prescale;
- if (req_rate > rate)
- return rate;
+ if (req->rate > rate) {
+ req->rate = rate;
- prescale = ingenic_ost_get_prescale(rate, req_rate);
+ return 0;
+ }
+
+ prescale = ingenic_ost_get_prescale(rate, req->rate);
- return rate >> (prescale * 2);
+ req->rate = rate >> (prescale * 2);
+
+ return 0;
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -175,14 +180,14 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_percpu_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_global_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a3fe98cd3838..82815428f8f9 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+ enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int jcore_pit_local_teardown(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+
+ pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
+
+ disable_percpu_irq(pit->ced.irq);
return 0;
}
@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
+ irq_set_percpu_devid(pit_irq);
err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
"jcore_pit", jcore_pit_percpu);
if (err) {
@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
- jcore_pit_local_init, NULL);
+ jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}
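
The jcore-pit change completes the CPU-hotplug pairing: the startup callback now
enables the percpu IRQ on each CPU as it comes up, and the new teardown callback
disables it again when the CPU goes offline, where NULL was passed before. The
general shape of such a pairing, with illustrative my_* names and stub bodies:

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_pit_irq;	/* hypothetical percpu timer IRQ */

static int my_pit_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(my_pit_irq, IRQ_TYPE_NONE);
	return 0;
}

static int my_pit_dying_cpu(unsigned int cpu)
{
	/* Undo exactly what the startup callback did on this CPU */
	disable_percpu_irq(my_pit_irq);
	return 0;
}

static int __init my_pit_register(void)
{
	return cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
				 "clockevents/my_pit:starting",
				 my_pit_starting_cpu,
				 my_pit_dying_cpu);
}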
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 110347707ff9..abb685a080a5 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -115,6 +115,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu)
{
+ /* Ensure the GIC counter is running */
+ clear_gic_config(GIC_CONFIG_COUNTSTOP);
+
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@@ -166,6 +169,37 @@ static u64 gic_hpt_read(struct clocksource *cs)
return gic_read_count();
}
+static u64 gic_hpt_read_multicluster(struct clocksource *cs)
+{
+ unsigned int hi, hi2, lo;
+ u64 count;
+
+ mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ if (mips_cm_is64) {
+ count = read_gic_redir_counter();
+ goto out;
+ }
+
+ hi = read_gic_redir_counter_32h();
+ while (true) {
+ lo = read_gic_redir_counter_32l();
+
+ /* If hi didn't change then lo didn't wrap & we're done */
+ hi2 = read_gic_redir_counter_32h();
+ if (hi2 == hi)
+ break;
+
+ /* Otherwise, repeat with the latest hi value */
+ hi = hi2;
+ }
+
+ count = (((u64)hi) << 32) + lo;
+out:
+ mips_cm_unlock_other();
+ return count;
+}
+
static struct clocksource gic_clocksource = {
.name = "GIC",
.read = gic_hpt_read,
@@ -203,6 +237,11 @@ static int __init __gic_clocksource_init(void)
gic_clocksource.rating = 200;
gic_clocksource.rating += clamp(gic_frequency / 10000000, 0, 99);
+ if (mips_cps_multicluster_cpus()) {
+ gic_clocksource.read = &gic_hpt_read_multicluster;
+ gic_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ }
+
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("Unable to register clocksource\n");
@@ -252,16 +291,14 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}
- /* And finally start the counter */
- clear_gic_config(GIC_CONFIG_COUNTSTOP);
-
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with
* stable CPU frequency or on the platforms with CM3 and CPU frequency
* change performed by the CPC core clocks divider.
*/
- if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
+ if ((mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) &&
+ !mips_cps_multicluster_cpus()) {
sched_clock_register(mips_cm_is64 ?
gic_read_count_64 : gic_read_count_2x32,
gic_count_width, gic_frequency);
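The multicluster read above is an instance of the classic split-counter technique: re-read the high half until it is stable, so a wrap of the low half between reads cannot produce a torn 64-bit value. A minimal standalone sketch of the pattern follows; read_counter_hi()/read_counter_lo() are hypothetical stand-ins for the GIC redirect register reads.

#include <linux/types.h>

/* Hypothetical 32-bit accessors standing in for the GIC redirect reads. */
extern u32 read_counter_hi(void);
extern u32 read_counter_lo(void);

/* Compose a coherent 64-bit count from two 32-bit halves. */
static u64 read_counter64(void)
{
	u32 hi, hi2, lo;

	hi = read_counter_hi();
	for (;;) {
		lo = read_counter_lo();
		hi2 = read_counter_hi();
		if (hi2 == hi)
			break;	/* hi stable, so lo did not wrap */
		hi = hi2;	/* lo wrapped between reads, retry */
	}

	return ((u64)hi << 32) | lo;
}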
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3fcbd02b2483..2089aeaae225 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -225,7 +225,6 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -233,7 +232,7 @@ static int __init ostm_probe(struct platform_device *pdev)
return ostm_init(dev->of_node);
}
-static const struct of_device_id ostm_of_table[] = {
+static const struct of_device_id __maybe_unused ostm_of_table[] = {
{ .compatible = "renesas,ostm", },
{ /* sentinel */ }
};
@@ -246,4 +245,3 @@ static struct platform_driver ostm_device_driver = {
},
};
builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
-#endif
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index c3536fffbe9a..5a99801a1657 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -52,6 +52,7 @@ static struct clocksource cs_hrt = {
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
+ .owner = THIS_MODULE,
};
static int __init init_hrt_clocksource(void)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b72b36e0abed..385eb94bbe7c 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -578,37 +578,74 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
{
int ret = 0;
unsigned long flags;
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
- ch->flags |= flag;
+
+ ch->flags |= FLAG_CLOCKSOURCE;
/* setup timeout if no clockevent */
- if (ch->cmt->num_channels == 1 &&
- flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+ ch->flags &= ~FLAG_CLOCKSOURCE;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ pm_runtime_put(&ch->cmt->pdev->dev);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ ret = sh_cmt_enable(ch);
+ }
+
+ if (ret)
+ goto out;
+
+ ch->flags |= FLAG_CLOCKEVENT;
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
{
unsigned long flags;
unsigned long f;
@@ -616,22 +653,19 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- ch->flags &= ~flag;
+
+ ch->flags &= ~FLAG_CLOCKEVENT;
if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
sh_cmt_disable(ch);
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_put(&ch->cmt->pdev->dev);
+ pm_runtime_put(&ch->cmt->pdev->dev);
}
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ if (ch->flags & FLAG_CLOCKSOURCE)
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
@@ -672,7 +706,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
ch->total_cycles = 0;
- ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start_clocksource(ch);
if (!ret)
ch->cs_enabled = true;
@@ -685,7 +719,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
ch->cs_enabled = false;
}
@@ -696,7 +730,7 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
if (!ch->cs_enabled)
return;
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
@@ -708,7 +742,7 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
- sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_start_clocksource(ch);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -740,7 +774,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- sh_cmt_start(ch, FLAG_CLOCKEVENT);
+ sh_cmt_start_clockevent(ch);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -752,7 +786,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
return 0;
}
@@ -763,7 +797,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
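The sh_cmt split above keeps one underlying rule: the channel hardware is enabled by the first role (clockevent or clocksource) that claims it and disabled by the last one to leave, with the roles tracked as flag bits under the channel lock. Below is a minimal sketch of that rule only, not the driver's code; struct ch, hw_enable() and hw_disable() are hypothetical.

#include <linux/bits.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define ROLE_CLOCKEVENT		BIT(0)
#define ROLE_CLOCKSOURCE	BIT(1)

struct ch {
	raw_spinlock_t lock;
	unsigned long flags;
};

int hw_enable(struct ch *ch);	/* hypothetical hardware helpers */
void hw_disable(struct ch *ch);

static int claim_role(struct ch *ch, unsigned long role)
{
	unsigned long irqflags;
	int ret = 0;

	raw_spin_lock_irqsave(&ch->lock, irqflags);
	if (!(ch->flags & (ROLE_CLOCKEVENT | ROLE_CLOCKSOURCE)))
		ret = hw_enable(ch);	/* first user powers the timer on */
	if (!ret)
		ch->flags |= role;
	raw_spin_unlock_irqrestore(&ch->lock, irqflags);

	return ret;
}

static void release_role(struct ch *ch, unsigned long role)
{
	unsigned long irqflags;
	bool was_active;

	raw_spin_lock_irqsave(&ch->lock, irqflags);
	was_active = ch->flags & (ROLE_CLOCKEVENT | ROLE_CLOCKSOURCE);
	ch->flags &= ~role;
	if (was_active && !(ch->flags & (ROLE_CLOCKEVENT | ROLE_CLOCKSOURCE)))
		hw_disable(ch);		/* last user powers it off */
	raw_spin_unlock_irqrestore(&ch->lock, irqflags);
}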
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 6ec565d6939a..54284c1c0651 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -201,7 +201,6 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index d47acfe848ae..8af666c39890 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -101,6 +101,7 @@ static struct clock_event_device cs5535_clockevent = {
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
+ .owner = THIS_MODULE,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
new file mode 100644
index 000000000000..4008076b1a21
--- /dev/null
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer present on EcoNet EN75xx MIPS based SoCs.
+ *
+ * Copyright (C) 2025 by Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+
+#define ECONET_BITS 32
+#define ECONET_MIN_DELTA 0x00001000
+#define ECONET_MAX_DELTA GENMASK(ECONET_BITS - 2, 0)
+/* 34Kc hardware has 1 block and 1004Kc has 2. */
+#define ECONET_NUM_BLOCKS DIV_ROUND_UP(NR_CPUS, 2)
+
+static struct {
+ void __iomem *membase[ECONET_NUM_BLOCKS];
+ u32 freq_hz;
+} econet_timer __ro_after_init;
+
+static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);
+
+/*
+ * Each memory block has two timers; the register order is:
+ * CTL, CMR0, CNT0, CMR1, CNT1
+ */
+static inline void __iomem *reg_ctl(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1];
+}
+
+static inline void __iomem *reg_compare(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
+}
+
+static inline void __iomem *reg_count(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
+}
+
+static inline u32 ctl_bit_enabled(u32 timer_n)
+{
+ return 1U << (timer_n & 1);
+}
+
+static inline u32 ctl_bit_pending(u32 timer_n)
+{
+ return 1U << ((timer_n & 1) + 16);
+}
+
+static bool cevt_is_pending(int cpu_id)
+{
+ return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
+}
+
+static irqreturn_t cevt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
+ int cpu = cpumask_first(dev->cpumask);
+
+	/*
+	 * Each VPE has its own events, so this can only happen on a
+	 * spurious interrupt.
+	 */
+ if (!cevt_is_pending(cpu))
+ return IRQ_NONE;
+
+ iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
+ dev->event_handler(dev);
+ return IRQ_HANDLED;
+}
+
+static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
+{
+ u32 next;
+ int cpu;
+
+ cpu = cpumask_first(dev->cpumask);
+ next = ioread32(reg_count(cpu)) + delta;
+ iowrite32(next, reg_compare(cpu));
+
+ if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
+ return -ETIME;
+
+ return 0;
+}
+
+static int cevt_init_cpu(uint cpu)
+{
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
+ u32 reg;
+
+ pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
+
+ reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
+ iowrite32(reg, reg_ctl(cpu));
+
+ enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);
+
+ /* Do this last because it synchronously configures the timer */
+ clockevents_config_and_register(cd, econet_timer.freq_hz,
+ ECONET_MIN_DELTA, ECONET_MAX_DELTA);
+
+ return 0;
+}
+
+static u64 notrace sched_clock_read(void)
+{
+ /* Always read from clock zero no matter the CPU */
+ return (u64)ioread32(reg_count(0));
+}
+
+/* Init */
+
+static void __init cevt_dev_init(uint cpu)
+{
+ iowrite32(0, reg_count(cpu));
+ iowrite32(U32_MAX, reg_compare(cpu));
+}
+
+static int __init cevt_init(struct device_node *np)
+{
+ int i, irq, ret;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
+ return -EINVAL;
+ }
+
+ ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
+
+ if (ret < 0) {
+ pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
+ goto err_unmap_irq;
+ }
+
+ for_each_possible_cpu(i) {
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
+
+ cd->rating = 310;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+ cd->set_next_event = cevt_set_next_event;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(i);
+ cd->name = np->name;
+
+ cevt_dev_init(i);
+ }
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "clockevents/econet/timer:starting",
+ cevt_init_cpu, NULL);
+ return 0;
+
+err_unmap_irq:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+static int __init timer_init(struct device_node *np)
+{
+ int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ econet_timer.freq_hz = clk_get_rate(clk);
+
+ for (int i = 0; i < num_blocks; i++) {
+ econet_timer.membase[i] = of_iomap(np, i);
+ if (!econet_timer.membase[i]) {
+ pr_err("%pOFn: failed to map register [%d]\n", np, i);
+ return -ENXIO;
+ }
+ }
+
+ /* For clocksource purposes always read clock zero, whatever the CPU */
+ ret = clocksource_mmio_init(reg_count(0), np->name,
+ econet_timer.freq_hz, 301, ECONET_BITS,
+ clocksource_mmio_readl_up);
+ if (ret) {
+		pr_err("%pOFn: clocksource_mmio_init failed: %d\n", np, ret);
+ return ret;
+ }
+
+ ret = cevt_init(np);
+ if (ret < 0)
+ return ret;
+
+ sched_clock_register(sched_clock_read, ECONET_BITS,
+ econet_timer.freq_hz);
+
+ pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
+ econet_timer.freq_hz / 1000000,
+ (econet_timer.freq_hz / 1000) % 1000);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
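To make the accessor arithmetic in the reg_* helpers above concrete, here is the address calculation for timer 3 under the documented layout (CTL at 0x00, CMR0 at 0x04, CNT0 at 0x08, CMR1 at 0x0c, CNT1 at 0x10):

/* timer_n = 3:
 *   block index = 3 >> 1          = 1     (second memory block)
 *   channel     = 3 & 1           = 1     (second timer in the block)
 *   reg_compare = 1 * 0x08 + 0x04 = 0x0c  -> CMR1
 *   reg_count   = 1 * 0x08 + 0x08 = 0x10  -> CNT1
 */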
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 57aa2e2cce53..48a73c101eb8 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -85,7 +85,7 @@ static int __init gxp_timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- ret = (int)PTR_ERR(clk);
+ ret = PTR_ERR(clk);
pr_err("%pOFn clock not found: %d\n", node, ret);
goto err_free;
}
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
new file mode 100644
index 000000000000..2d0a3554b6bf
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ */
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each PIT channel takes 0x10 bytes of register space
+ */
+#define PIT0_OFFSET 0x100
+#define PIT_CH(n) (PIT0_OFFSET + 0x10 * (n))
+
+#define PITMCR(__base) (__base)
+
+#define PITMCR_FRZ BIT(0)
+#define PITMCR_MDIS BIT(1)
+
+#define PITLDVAL(__base) (__base)
+#define PITTCTRL(__base) ((__base) + 0x08)
+
+#define PITCVAL_OFFSET 0x04
+#define PITCVAL(__base) ((__base) + 0x04)
+
+#define PITTCTRL_TEN BIT(0)
+#define PITTCTRL_TIE BIT(1)
+
+#define PITTFLG(__base) ((__base) + 0x0c)
+
+#define PITTFLG_TIF BIT(0)
+
+struct pit_timer {
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
+};
+
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global state for initializing multiple PIT instances
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
+static void __iomem *sched_clock_base;
+
+static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
+{
+ return container_of(ced, struct pit_timer, ced);
+}
+
+static inline struct pit_timer *cs_to_pit(struct clocksource *cs)
+{
+ return container_of(cs, struct pit_timer, cs);
+}
+
+static inline void pit_module_enable(void __iomem *base)
+{
+ writel(0, PITMCR(base));
+}
+
+static inline void pit_module_disable(void __iomem *base)
+{
+ writel(PITMCR_MDIS, PITMCR(base));
+}
+
+static inline void pit_timer_enable(void __iomem *base, bool tie)
+{
+ u32 val = PITTCTRL_TEN | (tie ? PITTCTRL_TIE : 0);
+
+ writel(val, PITTCTRL(base));
+}
+
+static inline void pit_timer_disable(void __iomem *base)
+{
+ writel(0, PITTCTRL(base));
+}
+
+static inline void pit_timer_set_counter(void __iomem *base, unsigned int cnt)
+{
+ writel(cnt, PITLDVAL(base));
+}
+
+static inline void pit_timer_irqack(struct pit_timer *pit)
+{
+ writel(PITTFLG_TIF, PITTFLG(pit->clkevt_base));
+}
+
+static u64 notrace pit_read_sched_clock(void)
+{
+ return ~readl(sched_clock_base);
+}
+
+static u64 pit_timer_clocksource_read(struct clocksource *cs)
+{
+ struct pit_timer *pit = cs_to_pit(cs);
+
+ return (u64)~readl(PITCVAL(pit->clksrc_base));
+}
+
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
+{
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 2 as the clocksource and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clksrc_base = base + PIT_CH(2);
+ pit->cs.name = name;
+ pit->cs.rating = 300;
+ pit->cs.read = pit_timer_clocksource_read;
+ pit->cs.mask = CLOCKSOURCE_MASK(32);
+ pit->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* set the max load value and start the clock source counter */
+ pit_timer_disable(pit->clksrc_base);
+ pit_timer_set_counter(pit->clksrc_base, ~0);
+ pit_timer_enable(pit->clksrc_base, 0);
+
+ sched_clock_base = pit->clksrc_base + PITCVAL_OFFSET;
+ sched_clock_register(pit_read_sched_clock, 32, rate);
+
+ return clocksource_register_hz(&pit->cs, rate);
+}
+
+static int pit_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ /*
+	 * Writing a new value to the PITLDVAL register does not restart the
+	 * timer. To abort the current cycle and start a period with the new
+	 * value, the timer must be disabled and enabled again, and PITLDVAL
+	 * must be set to delta minus one, as the PIT hardware requires.
+ */
+ pit_timer_disable(pit->clkevt_base);
+ pit_timer_set_counter(pit->clkevt_base, delta - 1);
+ pit_timer_enable(pit->clkevt_base, true);
+
+ return 0;
+}
+
+static int pit_shutdown(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_disable(pit->clkevt_base);
+
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_set_next_event(pit->rate / HZ, ced);
+
+ return 0;
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ced = dev_id;
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_irqack(pit);
+
+ /*
+	 * The PIT hardware doesn't support oneshot mode: when PITCVAL
+	 * reaches zero it generates an interrupt, reloads the counter
+	 * from PITLDVAL and starts counting again. So software needs to
+	 * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ pit_timer_disable(pit->clkevt_base);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
+{
+ int ret;
+
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 3 as the clockevent and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clkevt_base = base + PIT_CH(3);
+ pit->rate = rate;
+
+ pit_timer_disable(pit->clkevt_base);
+
+ pit_timer_irqack(pit);
+
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
+
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.irq = irq;
+
+ pit->ced.name = name;
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ pit->ced.set_state_shutdown = pit_shutdown;
+ pit->ced.set_state_periodic = pit_set_periodic;
+ pit->ced.set_next_event = pit_set_next_event;
+ pit->ced.rating = 300;
+
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
+{
+ pit_timer_disable(pit->clkevt_base);
+ free_irq(pit->ced.irq, &pit->ced);
+ per_cpu(pit_timers, cpu) = NULL;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret) {
+ pit_clockevent_per_cpu_exit(pit, cpu);
+ return ret;
+ }
+
+ /*
+	 * The LDVAL register trigger value is calculated as:
+	 *   LDVAL trigger = (period / clock period) - 1
+	 * The PIT is a 32-bit down-counting timer that generates an
+	 * interrupt when the counter reaches 0, so the minimal LDVAL
+	 * trigger value is 1. The min_delta is then the minimal LDVAL
+	 * trigger value + 1 (= 2), and the max_delta is the full 32 bits.
+ */
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
+
+ return 0;
+}
+
+static int pit_timer_init(struct device_node *np)
+{
+ struct pit_timer *pit;
+ struct clk *pit_clk;
+ void __iomem *timer_base;
+ const char *name = of_node_full_name(np);
+ unsigned long clk_rate;
+ int irq, ret;
+
+ pit = kzalloc(sizeof(*pit), GFP_KERNEL);
+ if (!pit)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ goto out_kfree;
+ }
+
+ ret = -EINVAL;
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Failed to irq_of_parse_and_map\n");
+ goto out_iounmap;
+ }
+
+ pit_clk = of_clk_get(np, 0);
+ if (IS_ERR(pit_clk)) {
+ ret = PTR_ERR(pit_clk);
+ goto out_irq_dispose_mapping;
+ }
+
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ goto out_clk_put;
+
+ clk_rate = clk_get_rate(pit_clk);
+
+ pit_module_disable(timer_base);
+
+ ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
+ goto out_pit_module_disable;
+ }
+
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
+ goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
+
+ return 0;
+
+out_pit_clocksource_unregister:
+ clocksource_unregister(&pit->cs);
+out_pit_module_disable:
+ pit_module_disable(timer_base);
+ clk_disable_unprepare(pit_clk);
+out_clk_put:
+ clk_put(pit_clk);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(irq);
+out_iounmap:
+ iounmap(timer_base);
+out_kfree:
+ kfree(pit);
+
+ return ret;
+}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ },
+ .probe = pit_timer_probe,
+};
+module_platform_driver(nxp_pit_driver);
+
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
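As a worked check of the "delta minus one" rule in pit_set_next_event(), assume a hypothetical 40 MHz module clock and HZ = 100 (neither figure comes from the driver):

/* Periodic period  = rate / HZ = 40,000,000 / 100 = 400,000 ticks
 * PITLDVAL written = ticks - 1 = 399,999
 * The counter then counts 399,999 -> 0, i.e. exactly 400,000 cycles
 * per interrupt, matching the requested 10 ms period.
 */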
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
new file mode 100644
index 000000000000..bbc40623728f
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ *
+ * NXP System Timer Module:
+ *
+ * STM supports commonly required system and application software
+ * timing functions. STM includes a 32-bit count-up timer and four
+ * 32-bit compare channels with a separate interrupt source for each
+ * channel. The timer is driven by the STM module clock divided by an
+ * 8-bit prescale value (1 to 256). The timer can also be stopped
+ * in Debug mode.
+ */
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched_clock.h>
+#include <linux/units.h>
+
+#define STM_CR(__base) (__base)
+
+#define STM_CR_TEN BIT(0)
+#define STM_CR_FRZ BIT(1)
+#define STM_CR_CPS_OFFSET 8u
+#define STM_CR_CPS_MASK GENMASK(15, STM_CR_CPS_OFFSET)
+
+#define STM_CNT(__base) ((__base) + 0x04)
+
+#define STM_CCR0(__base) ((__base) + 0x10)
+#define STM_CCR1(__base) ((__base) + 0x20)
+#define STM_CCR2(__base) ((__base) + 0x30)
+#define STM_CCR3(__base) ((__base) + 0x40)
+
+#define STM_CCR_CEN BIT(0)
+
+#define STM_CIR0(__base) ((__base) + 0x14)
+#define STM_CIR1(__base) ((__base) + 0x24)
+#define STM_CIR2(__base) ((__base) + 0x34)
+#define STM_CIR3(__base) ((__base) + 0x44)
+
+#define STM_CIR_CIF BIT(0)
+
+#define STM_CMP0(__base) ((__base) + 0x18)
+#define STM_CMP1(__base) ((__base) + 0x28)
+#define STM_CMP2(__base) ((__base) + 0x38)
+#define STM_CMP3(__base) ((__base) + 0x48)
+
+#define STM_ENABLE_MASK (STM_CR_FRZ | STM_CR_TEN)
+
+struct stm_timer {
+ void __iomem *base;
+ unsigned long rate;
+ unsigned long delta;
+ unsigned long counter;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ atomic_t refcnt;
+};
+
+static DEFINE_PER_CPU(struct stm_timer *, stm_timers);
+
+static struct stm_timer *stm_sched_clock;
+
+/*
+ * Global state for initializing multiple STM instances
+ */
+static int stm_instances;
+
+/*
+ * This global lock prevents races on stm_instances in case the
+ * driver is probed with the ASYNC option.
+ */
+static DEFINE_MUTEX(stm_instances_lock);
+
+DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+
+static struct stm_timer *cs_to_stm(struct clocksource *cs)
+{
+ return container_of(cs, struct stm_timer, cs);
+}
+
+static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
+{
+ return container_of(ced, struct stm_timer, ced);
+}
+
+static u64 notrace nxp_stm_read_sched_clock(void)
+{
+ return readl(STM_CNT(stm_sched_clock->base));
+}
+
+static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
+{
+ writel(cnt, STM_CNT(stm_timer->base));
+}
+
+static u64 nxp_stm_clocksource_read(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ return (u64)nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_module_enable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg |= STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_disable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg &= ~STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_put(struct stm_timer *stm_timer)
+{
+ if (atomic_dec_and_test(&stm_timer->refcnt))
+ nxp_stm_module_disable(stm_timer);
+}
+
+static void nxp_stm_module_get(struct stm_timer *stm_timer)
+{
+ if (atomic_inc_return(&stm_timer->refcnt) == 1)
+ nxp_stm_module_enable(stm_timer);
+}
+
+static int nxp_stm_clocksource_enable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_get(stm_timer);
+
+ return 0;
+}
+
+static void nxp_stm_clocksource_disable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clocksource_suspend(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_disable(cs);
+ stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_clocksource_resume(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
+ nxp_stm_clocksource_enable(cs);
+}
+
+static void __init devm_clocksource_unregister(void *data)
+{
+ struct stm_timer *stm_timer = data;
+
+ clocksource_unregister(&stm_timer->cs);
+}
+
+static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
+{
+ int ret;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->cs.name = name;
+ stm_timer->cs.rating = 460;
+ stm_timer->cs.read = nxp_stm_clocksource_read;
+ stm_timer->cs.enable = nxp_stm_clocksource_enable;
+ stm_timer->cs.disable = nxp_stm_clocksource_disable;
+ stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
+ stm_timer->cs.resume = nxp_stm_clocksource_resume;
+ stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
+ stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ stm_timer->cs.owner = THIS_MODULE;
+
+ ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
+ if (ret) {
+ clocksource_unregister(&stm_timer->cs);
+ return ret;
+ }
+
+ stm_sched_clock = stm_timer;
+
+ sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
+
+ dev_dbg(dev, "Registered clocksource %s\n", name);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
+{
+ writel(0, STM_CCR0(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
+{
+ writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
+}
+
+static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+ u32 val;
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ stm_timer->delta = delta;
+
+ val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+	 * The counter is shared across the channels and cannot be
+	 * stopped while we are setting the next event. If the delta
+	 * is very small, the counter may climb above the computed
+	 * 'val'; the min_delta value specified when registering the
+	 * clockevent prevents that. The second case is the counter
+	 * wrapping after 'val' is computed but before the comparator
+	 * register is written. We re-read the counter, check whether
+	 * we are back in time, and abort the timer with -ETIME.
+ */
+ if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
+ return -ETIME;
+
+ nxp_stm_clockevent_enable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
+}
+
+static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_get(stm_timer);
+}
+
+static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
+{
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->ced.name = name;
+ stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
+ stm_timer->ced.resume = nxp_stm_clockevent_resume;
+ stm_timer->ced.cpumask = cpumask_of(cpu);
+ stm_timer->ced.rating = 460;
+ stm_timer->ced.irq = irq;
+ stm_timer->ced.owner = THIS_MODULE;
+
+ per_cpu(stm_timers, cpu) = stm_timer;
+
+ nxp_stm_module_get(stm_timer);
+
+ dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
+ int ret;
+
+ if (WARN_ON(!stm_timer))
+ return -EFAULT;
+
+ ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ /*
+	 * Timing measurements show that reading the counter register
+	 * and writing the comparator register take at most 1100 ns at
+	 * a 133 MHz clock rate. The timer must be set above this value;
+	 * to be safe, we set the minimum to 2000 ns, i.e. 2 us.
+ *
+ * minimum ticks = (rate / MICRO) * 2
+ */
+ clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
+ (stm_timer->rate / MICRO) * 2, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
+{
+ struct stm_timer *stm_timer = dev_id;
+ struct clock_event_device *ced = &stm_timer->ced;
+ u32 val;
+
+ /*
+	 * The interrupt is shared across the channels in the module,
+	 * but this module is configured to run only one channel, so
+	 * there is no point in testing the interrupt flags first and
+	 * we can clear the channel 0 irq flag register directly.
+ */
+ writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));
+
+ /*
+ * Update STM_CMP value using the counter value
+ */
+ val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+	 * The STM hardware doesn't support oneshot mode: it generates an
+	 * interrupt and keeps the counter running, so software needs to
+	 * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ nxp_stm_clockevent_disable(stm_timer);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+{
+ struct stm_timer *stm_timer;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *name = of_node_full_name(np);
+ struct clk *clk;
+ void __iomem *base;
+ int irq, ret;
+
+ /*
+	 * The device tree can describe multiple STM nodes, which makes
+	 * this driver a good candidate for async probing. It is still
+	 * unclear whether the time framework correctly handles parallel
+	 * loading of timers, but at least this driver is ready to
+	 * support the option.
+ */
+ guard(stm_instances)(&stm_instances_lock);
+
+ /*
+ * The S32Gx are SoCs featuring a diverse set of cores. Linux
+ * is expected to run on Cortex-A53 cores, while other
+ * software stacks will operate on Cortex-M cores. The number
+ * of STM instances has been sized to include at most one
+ * instance per core.
+ *
+ * As we need a clocksource and a clockevent per cpu, we
+ * simply initialize a clocksource per cpu along with the
+ * clockevent which makes the resulting code simpler.
+ *
+ * However if the device tree is describing more STM instances
+ * than the number of cores, then we ignore them.
+ */
+ if (stm_instances >= num_possible_cpus())
+ return 0;
+
+ base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");
+
+ ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
+ if (ret)
+ return ret;
+
+ /*
+	 * Each probed STM becomes a per-CPU clockevent. Until we have
+	 * probed as many instances as there are CPUs on the system,
+	 * we only do a partial initialization here.
+ */
+ ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
+ base, irq, clk,
+ stm_instances);
+ if (ret)
+ return ret;
+
+ stm_instances++;
+
+ /*
+	 * Once the number of probed STMs equals the number of available
+	 * CPUs on the system, install the CPU hotplug callback to
+	 * finish the initialization by registering the clockevents.
+ */
+ if (stm_instances == num_possible_cpus()) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
+ nxp_stm_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nxp_stm_of_match[] = {
+ { .compatible = "nxp,s32g2-stm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
+
+static struct platform_driver nxp_stm_probe = {
+ .probe = nxp_stm_timer_probe,
+ .driver = {
+ .name = "nxp-stm",
+ .of_match_table = nxp_stm_of_match,
+ },
+};
+module_platform_driver(nxp_stm_probe);
+
+MODULE_DESCRIPTION("NXP System Timer Module driver");
+MODULE_LICENSE("GPL");
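Plugging the 133 MHz figure from the clockevent comment into the registration bound gives the minimum delta actually enforced:

/* rate / MICRO = 133,000,000 / 1,000,000 = 133 ticks per microsecond
 * min_delta    = 133 * 2                = 266 ticks  (= 2 us)
 * which stays safely above the measured 1100 ns worst case.
 */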
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index 49e86cb70a7a..61f1e27fc41e 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -43,7 +43,7 @@ static struct delay_timer orion_delay_timer = {
.read_current_timer = orion_read_timer,
};
-static void orion_delay_timer_init(unsigned long rate)
+static void __init orion_delay_timer_init(unsigned long rate)
{
orion_delay_timer.freq = rate;
register_current_timer_delay(&orion_delay_timer);
diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c
index eac4c95c6127..ddb1debe6a6b 100644
--- a/drivers/clocksource/timer-qcom.c
+++ b/drivers/clocksource/timer-qcom.c
@@ -130,7 +130,6 @@ static int msm_local_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
new file mode 100644
index 000000000000..6ecdb4228f76
--- /dev/null
+++ b/drivers/clocksource/timer-ralink.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ralink System Tick Counter driver present on RT3352 and MT7620 SoCs.
+ *
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define SYSTICK_FREQ (50 * 1000)
+
+#define SYSTICK_CONFIG 0x00
+#define SYSTICK_COMPARE 0x04
+#define SYSTICK_COUNT 0x08
+
+/* route systick irq to mips irq 7 instead of the r4k-timer */
+#define CFG_EXT_STK_EN 0x2
+/* enable the counter */
+#define CFG_CNT_EN 0x1
+
+struct systick_device {
+ void __iomem *membase;
+ struct clock_event_device dev;
+ int irq_requested;
+ int freq_scale;
+};
+
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
+
+static int systick_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+ u32 count;
+
+ sdev = container_of(evt, struct systick_device, dev);
+ count = ioread32(sdev->membase + SYSTICK_COUNT);
+ count = (count + delta) % SYSTICK_FREQ;
+ iowrite32(count, sdev->membase + SYSTICK_COMPARE);
+
+ return 0;
+}
+
+static void systick_event_handler(struct clock_event_device *dev)
+{
+	/* nothing to do here */
+}
+
+static irqreturn_t systick_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = (struct clock_event_device *)dev_id;
+
+ dev->event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static struct systick_device systick = {
+ .dev = {
+ /*
+ * cevt-r4k uses 300, make sure systick
+ * gets used if available
+ */
+ .rating = 310,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = systick_next_event,
+ .set_state_shutdown = systick_shutdown,
+ .set_state_oneshot = systick_set_oneshot,
+ .event_handler = systick_event_handler,
+ },
+};
+
+static int systick_shutdown(struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (sdev->irq_requested)
+ free_irq(systick.dev.irq, &systick.dev);
+ sdev->irq_requested = 0;
+ iowrite32(0, systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+ const char *name = systick.dev.name;
+ struct systick_device *sdev;
+ int irq = systick.dev.irq;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (!sdev->irq_requested) {
+ if (request_irq(irq, systick_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, name, &systick.dev))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+ }
+ sdev->irq_requested = 1;
+ iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+ systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int __init ralink_systick_init(struct device_node *np)
+{
+ int ret;
+
+ systick.membase = of_iomap(np, 0);
+ if (!systick.membase)
+ return -ENXIO;
+
+ systick.dev.name = np->name;
+ clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
+ systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
+ systick.dev.max_delta_ticks = 0x7fff;
+ systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
+ systick.dev.min_delta_ticks = 0x3;
+ systick.dev.irq = irq_of_parse_and_map(np, 0);
+ if (!systick.dev.irq) {
+		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
+ return -EINVAL;
+ }
+
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
+
+ clockevents_register_device(&systick.dev);
+
+ pr_info("%pOFn: running - mult: %d, shift: %d\n",
+ np, systick.dev.mult, systick.dev.shift);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 48ce50c5f5e6..4d7cf338824a 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -126,7 +126,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
static int riscv_timer_dying_cpu(unsigned int cpu)
{
+ /*
+ * Stop the timer when the cpu is going to be offline otherwise
+ * the timer interrupt may be pending while performing power-down.
+ */
+ riscv_clock_event_stop();
disable_percpu_irq(riscv_clock_event_irq);
+
return 0;
}
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
index 8a3068b36e75..6113d2fdd4de 100644
--- a/drivers/clocksource/timer-rtl-otto.c
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -38,14 +38,13 @@
#define RTTM_BIT_COUNT 28
#define RTTM_MIN_DELTA 8
#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+#define RTTM_MAX_DIVISOR GENMASK(15, 0)
/*
- * Timers are derived from the LXB clock frequency. Usually this is a fixed
- * multiple of the 25 MHz oscillator. The 930X SOC is an exception from that.
- * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
- * base. The only meaningful frequencies we can achieve from that are 175.000
- * MHz and 153.125 MHz. The greatest common divisor of all explained possible
- * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ * Timers are derived from the Lexra bus (LXB) clock frequency, which is 175 MHz
+ * on RTL930x and 200 MHz on the other platforms. Choose 3.125 MHz as a common
+ * divisor with enough range and resolution; this keeps behaviour comparable
+ * across the different platforms.
*/
#define RTTM_TICKS_PER_SEC 3125000
@@ -55,11 +54,6 @@ struct rttm_cs {
};
/* Simple internal register functions */
-static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
-{
- iowrite32(counter, base + RTTM_CNT);
-}
-
static inline unsigned int rttm_get_counter(void __iomem *base)
{
return ioread32(base + RTTM_CNT);
@@ -112,6 +106,22 @@ static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void rttm_bounce_timer(void __iomem *base, u32 mode)
+{
+ /*
+	 * When a running timer has less than ~5 us left, a stop/start
+	 * sequence might fail. While the details are unknown, the most
+	 * evident effect is that the subsequent interrupt is not fired.
+	 *
+	 * As a workaround, issue an intermediate restart at a very slow
+	 * frequency of ~3 kHz while keeping the target counter (>= 8), so
+	 * the follow-up restart is always issued outside the critical
+	 * window.
+ */
+
+ rttm_disable_timer(base);
+ rttm_enable_timer(base, mode, RTTM_MAX_DIVISOR);
+}
+
static void rttm_stop_timer(void __iomem *base)
{
rttm_disable_timer(base);
@@ -120,7 +130,6 @@ static void rttm_stop_timer(void __iomem *base)
static void rttm_start_timer(struct timer_of *to, u32 mode)
{
- rttm_set_counter(to->of_base.base, 0);
rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
}
@@ -129,7 +138,8 @@ static int rttm_next_event(unsigned long delta, struct clock_event_device *clkev
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, delta);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -141,7 +151,8 @@ static int rttm_state_oneshot(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -153,7 +164,8 @@ static int rttm_state_periodic(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_TIMER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_TIMER);
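The divisor arithmetic behind RTTM_TICKS_PER_SEC and the bounce workaround, using the LXB rates stated in the comment:

/* RTL930x: 175,000,000 / 3,125,000 = 56      (integer divisor)
 * others:  200,000,000 / 3,125,000 = 64      (integer divisor)
 * bounce:  200,000,000 / 65,535   ~= 3052 Hz, the ~3 kHz slow restart
 *          (RTTM_MAX_DIVISOR = GENMASK(15, 0) = 0xffff = 65,535)
 */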
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index a4c95161cb22..c2a699f5c1dd 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -5,6 +5,7 @@
* Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -24,7 +25,10 @@ struct stm32_lp_private {
struct regmap *reg;
struct clock_event_device clkevt;
unsigned long period;
+ u32 psc;
struct device *dev;
+ struct clk *clk;
+ u32 version;
};
static struct stm32_lp_private*
@@ -45,12 +49,46 @@ static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
return 0;
}
-static int stm32_clkevent_lp_set_timer(unsigned long evt,
- struct clock_event_device *clkevt,
- int is_periodic)
+static int stm32mp25_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
{
- struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+ u32 val;
+
+ regmap_read(priv->reg, STM32_LPTIM_CR, &val);
+ if (!FIELD_GET(STM32_LPTIM_ENABLE, val)) {
+ /* Enable LPTIMER to be able to write into IER and ARR registers */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /*
+ * After setting the ENABLE bit, a delay of two counter clock cycles is needed
+		 * before the LPTIM is actually enabled. At a 32 kHz rate this is approximately
+		 * 62.5 microseconds; round it up.
+ */
+ udelay(63);
+ }
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+
+ /* Poll DIEROK and ARROK to ensure register access has completed */
+ ret = regmap_read_poll_timeout_atomic(priv->reg, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_DIEROK_ARROK) ==
+ STM32_LPTIM_DIEROK_ARROK,
+ 10, 500);
+ if (ret) {
+ dev_err(priv->dev, "access to LPTIM timed out\n");
+ /* Disable LPTIMER */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ /* Clear DIEROK and ARROK flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_DIEROKCF_ARROKCF);
+ return 0;
+}
+
+static void stm32_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
+{
/* disable LPTIMER to be able to write into IER register*/
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
@@ -59,6 +97,22 @@ static int stm32_clkevent_lp_set_timer(unsigned long evt,
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+
+ if (priv->version == STM32_LPTIM_VERR_23) {
+ ret = stm32mp25_clkevent_lp_set_evt(priv, evt);
+ if (ret)
+ return ret;
+ } else {
+ stm32_clkevent_lp_set_evt(priv, evt);
+ }
/* start counter */
if (is_periodic)
@@ -120,6 +174,27 @@ static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
/* Adjust rate and period given the prescaler value */
*rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
priv->period = DIV_ROUND_UP(*rate, HZ);
+ priv->psc = i;
+}
+
+static void stm32_clkevent_lp_suspend(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ stm32_clkevent_lp_shutdown(clkevt);
+
+ /* balance clk_prepare_enable() from the probe */
+ clk_disable_unprepare(priv->clk);
+}
+
+static void stm32_clkevent_lp_resume(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ clk_prepare_enable(priv->clk);
+
+ /* restore prescaler */
+ regmap_write(priv->reg, STM32_LPTIM_CFGR, priv->psc << CFGR_PSC_OFFSET);
}
static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
@@ -134,6 +209,9 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
priv->clkevt.rating = STM32_LP_RATING;
+ priv->clkevt.suspend = stm32_clkevent_lp_suspend;
+ priv->clkevt.resume = stm32_clkevent_lp_resume;
+ priv->clkevt.owner = THIS_MODULE;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
@@ -151,11 +229,13 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
- ret = clk_prepare_enable(ddata->clk);
+ priv->version = ddata->version;
+ priv->clk = ddata->clk;
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return -EINVAL;
- rate = clk_get_rate(ddata->clk);
+ rate = clk_get_rate(priv->clk);
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
@@ -168,9 +248,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
}
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
- ret = device_init_wakeup(&pdev->dev, true);
- if (ret)
- goto out_clk_disable;
+ device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)
@@ -191,7 +269,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return 0;
out_clk_disable:
- clk_disable_unprepare(ddata->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
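A quick check of the udelay(63) above against the two plausible low-power clock rates (the 32 kHz figure is the one the comment assumes):

/* at 32,000 Hz: 2 cycles = 2 / 32,000  = 62.5 us -> round up to 63 us
 * at 32,768 Hz: 2 cycles = 2 / 32,768 ~= 61.0 us -> 63 us still covers it
 */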
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0d229a9058da..f827d3f98f60 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -185,6 +185,7 @@ static int sun5i_setup_clocksource(struct platform_device *pdev,
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.owner = THIS_MODULE;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
@@ -214,6 +215,7 @@ static int sun5i_setup_clockevent(struct platform_device *pdev,
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.owner = THIS_MODULE;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
@@ -318,7 +320,7 @@ MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
static struct platform_driver sun5i_timer_driver = {
.probe = sun5i_timer_probe,
- .remove_new = sun5i_timer_remove,
+ .remove = sun5i_timer_remove,
.driver = {
.name = "sun5i-timer",
.of_match_table = sun5i_timer_of_match,
diff --git a/drivers/clocksource/timer-tegra.c b/drivers/clocksource/timer-tegra.c
index e9635c25eef4..35b6ce9deffa 100644
--- a/drivers/clocksource/timer-tegra.c
+++ b/drivers/clocksource/timer-tegra.c
@@ -158,7 +158,6 @@ static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
- to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 304537dadf2c..355558893e5f 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2019-2025 NVIDIA Corporation. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -29,6 +30,7 @@
#define TMRSR 0x004
#define TMRSR_INTR_CLR BIT(30)
+#define TMRSR_PCV GENMASK(28, 0)
#define TMRCSSR 0x008
#define TMRCSSR_SRC_USEC (0 << 0)
@@ -45,6 +47,9 @@
#define WDTCR_TIMER_SOURCE_MASK 0xf
#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
+#define WDTSR 0x004
+#define WDTSR_CURRENT_EXPIRATION_COUNT GENMASK(14, 12)
+
#define WDTCMDR 0x008
#define WDTCMDR_DISABLE_COUNTER BIT(1)
#define WDTCMDR_START_COUNTER BIT(0)
@@ -154,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -169,18 +174,6 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
value &= ~WDTCR_PERIOD_MASK;
value |= WDTCR_PERIOD(1);
- /* enable local interrupt for WDT petting */
- value |= WDTCR_LOCAL_INT_ENABLE;
-
- /* enable local FIQ and remote interrupt for debug dump */
- if (0)
- value |= WDTCR_REMOTE_INT_ENABLE |
- WDTCR_LOCAL_FIQ_ENABLE;
-
- /* enable system debug reset (doesn't properly reboot) */
- if (0)
- value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
-
/* enable system POR reset */
value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
@@ -234,12 +227,74 @@ static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+ u32 expiration, val;
+ u32 timeleft;
+
+ if (!watchdog_active(&wdt->base)) {
+ /* return zero if the watchdog timer is not activated. */
+ return 0;
+ }
+
+ /*
+ * Reset occurs on the fifth expiration of the
+ * watchdog timer and so when the watchdog timer is configured,
+ * the actual value programmed into the counter is 1/5 of the
+ * timeout value. Once the counter reaches 0, expiration count
+ * will be increased by 1 and the down counter restarts.
+ * Hence to get the time left before system reset we must
+ * combine 2 parts:
+ * 1. value of the current down counter
+ * 2. (number of counter expirations remaining) * (timeout/5)
+ */
+
+ /* Get the current number of counter expirations. Should be a
+ * value between 0 and 4
+ */
+ val = readl_relaxed(wdt->regs + WDTSR);
+ expiration = FIELD_GET(WDTSR_CURRENT_EXPIRATION_COUNT, val);
+ if (WARN_ON_ONCE(expiration > 4))
+ return 0;
+
+ /* Get the current counter value in microsecond. */
+ val = readl_relaxed(wdt->tmr->regs + TMRSR);
+ timeleft = FIELD_GET(TMRSR_PCV, val);
+
+ /*
+ * Calculate the time remaining by adding the time for the
+ * counter value to the time of the counter expirations that
+ * remain.
+ * Note: Since wdt->base.timeout is bound to 255, the maximum
+ * value added to timeleft is
+ * 255 * (1,000,000 / 5) * 4
+ * = 255 * 200,000 * 4
+ * = 204,000,000
+ * TMRSR_PCV is a 29-bit field.
+ * Its maximum value is 0x1fffffff = 536,870,911.
+ * 204,000,000 + 536,870,911 = 740,870,911 = 0x2C28CAFF.
+ * timeleft can therefore not overflow, and 64-bit calculations
+ * are not necessary.
+ */
+ timeleft += (wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
+
+ /*
+ * Convert the current counter value to seconds,
+ * rounding to the nearest second.
+ */
+ timeleft = DIV_ROUND_CLOSEST(timeleft, USEC_PER_SEC);
+
+ return timeleft;
+}
+
static const struct watchdog_ops tegra186_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra186_wdt_start,
.stop = tegra186_wdt_stop,
.ping = tegra186_wdt_ping,
.set_timeout = tegra186_wdt_set_timeout,
+ .get_timeleft = tegra186_wdt_get_timeleft,
};
static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
@@ -278,16 +333,12 @@ static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
- if (err < 0) {
- dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
- if (err < 0) {
- dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
return wdt;
}
@@ -323,6 +374,7 @@ static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->tsc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
@@ -342,6 +394,7 @@ static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->osc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->osc, 38400000);
}
@@ -361,27 +414,15 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->usec.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
-static irqreturn_t tegra186_timer_irq(int irq, void *data)
-{
- struct tegra186_timer *tegra = data;
-
- if (watchdog_active(&tegra->wdt->base)) {
- tegra186_wdt_disable(tegra->wdt);
- tegra186_wdt_enable(tegra->wdt);
- }
-
- return IRQ_HANDLED;
-}
-
static int tegra186_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra186_timer *tegra;
- unsigned int irq;
int err;
tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
@@ -400,8 +441,6 @@ static int tegra186_timer_probe(struct platform_device *pdev)
if (err < 0)
return err;
- irq = err;
-
/* create a watchdog using a preconfigured timer */
tegra->wdt = tegra186_wdt_create(tegra, 0);
if (IS_ERR(tegra->wdt)) {
@@ -428,17 +467,8 @@ static int tegra186_timer_probe(struct platform_device *pdev)
goto unregister_osc;
}
- err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
- "tegra186-timer", tegra);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
- goto unregister_usec;
- }
-
return 0;
-unregister_usec:
- clocksource_unregister(&tegra->usec);
unregister_osc:
clocksource_unregister(&tegra->osc);
unregister_tsc:
@@ -502,7 +532,7 @@ static struct platform_driver tegra186_wdt_driver = {
.of_match_table = tegra186_timer_of_match,
},
.probe = tegra186_timer_probe,
- .remove_new = tegra186_timer_remove,
+ .remove = tegra186_timer_remove,
};
module_platform_driver(tegra186_wdt_driver);
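The overflow bound argued in the tegra186_wdt_get_timeleft() comment is easy to check in isolation. Below is a minimal userspace sketch of the same arithmetic; the constants are copied from the patch, while the helper name and sample values are illustrative only:

	#include <stdio.h>

	#define USEC_PER_SEC	1000000u
	#define TMRSR_PCV_MASK	0x1fffffffu	/* bits [28:0] of TMRSR */

	/* Recompute the driver's time-left arithmetic for sample values. */
	static unsigned int wdt_timeleft(unsigned int timeout_s,
					 unsigned int expiration,
					 unsigned int tmrsr)
	{
		unsigned int timeleft = tmrsr & TMRSR_PCV_MASK;

		/* Each of the (4 - expiration) remaining periods is timeout/5. */
		timeleft += timeout_s * (USEC_PER_SEC / 5) * (4 - expiration);

		/* Round to the nearest second, like DIV_ROUND_CLOSEST(). */
		return (timeleft + USEC_PER_SEC / 2) / USEC_PER_SEC;
	}

	int main(void)
	{
		/* Worst case: 255 s timeout, no expirations, counter full. */
		printf("%u\n", wdt_timeleft(255, 0, TMRSR_PCV_MASK));	/* 741 */
		/* Typical: 30 s timeout, 2 expirations, 3 s left in period. */
		printf("%u\n", wdt_timeleft(30, 2, 3 * USEC_PER_SEC));	/* 15 */
		return 0;
	}

The worst case yields 740,870,911 microseconds, well below 2^32 - 1, which is why the driver can stay with 32-bit arithmetic.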
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index c2dcd8d68e45..985a6d08512b 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -202,10 +202,10 @@ static bool __init dmtimer_is_preferred(struct device_node *np)
/* Secure gptimer12 is always clocked with a fixed source */
if (!of_property_read_bool(np, "ti,timer-secure")) {
- if (!of_property_read_bool(np, "assigned-clocks"))
+ if (!of_property_present(np, "assigned-clocks"))
return false;
- if (!of_property_read_bool(np, "assigned-clock-parents"))
+ if (!of_property_present(np, "assigned-clock-parents"))
return false;
}
@@ -686,9 +686,9 @@ subsys_initcall(dmtimer_percpu_timer_startup);
static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
{
- struct device_node *arm_timer;
+ struct device_node *arm_timer __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
- arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (of_device_is_available(arm_timer)) {
pr_warn_once("ARM architected timer wrap issue i940 detected\n");
return 0;
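The timer-ti-dm-systimer change above relies on scope-based resource management from <linux/cleanup.h>: the __free(device_node) annotation drops the reference taken by of_find_compatible_node() automatically on every exit path. A minimal sketch of the pattern, assuming a hypothetical caller:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	/*
	 * Scope-based cleanup: the node reference taken by
	 * of_find_compatible_node() is put automatically when "np" goes
	 * out of scope, on every return path. Function name is
	 * illustrative only.
	 */
	static bool example_has_armv7_timer(void)
	{
		struct device_node *np __free(device_node) =
			of_find_compatible_node(NULL, NULL, "arm,armv7-timer");

		/* of_device_is_available(NULL) is false, so no NULL check. */
		return of_device_is_available(np);
	}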
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index b7a34b1a975e..793e7cdcb1b1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -31,6 +31,7 @@
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
+#include <linux/delay.h>
/*
* timer errata flags
@@ -836,6 +837,48 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
return 0;
}
+static int omap_dm_timer_set_cap(struct omap_dm_timer *cookie,
+ int autoreload, bool config_period)
+{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+ u32 l;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+ /*
+ * 1. TIMER_TCLR[1] (AR bit): select autoreload mode.
+ * 2. TIMER_TCLR[14]: sets the functionality of the TIMER IO pin.
+ * 3. TIMER_TCLR[13]: capture mode select bit.
+ * 4. TIMER_TCLR[9-8]: select the transition capture mode.
+ */
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+
+ l |= OMAP_TIMER_CTRL_CAPTMODE | OMAP_TIMER_CTRL_GPOCFG;
+
+ if (config_period)
+ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH; /* Time Period config */
+ else
+ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES; /* Duty Cycle config */
+
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
@@ -1023,23 +1066,92 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
return __omap_dm_timer_read_counter(timer);
}
+static inline unsigned int __omap_dm_timer_cap(struct dmtimer *timer, int idx)
+{
+ return idx == 0 ? dmtimer_read(timer, OMAP_TIMER_CAPTURE_REG) :
+ dmtimer_read(timer, OMAP_TIMER_CAPTURE2_REG);
+}
+
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
+ struct device *dev;
timer = to_dmtimer(cookie);
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
+ if (unlikely(!timer)) {
+ pr_err("%s: timer not available.\n", __func__);
return -EINVAL;
}
+ dev = &timer->pdev->dev;
+
+ pm_runtime_resume_and_get(dev);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
+ pm_runtime_put_sync(dev);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
+/**
+ * omap_dm_timer_cap_counter() - Calculate the high count or period count
+ * depending on the configuration.
+ * @cookie: Pointer to OMAP DM timer
+ * @is_period: Whether to configure the timer in period or duty cycle mode
+ *
+ * Return: the high count or period count if the timer is enabled, else an
+ * appropriate error code.
+ */
+static unsigned int omap_dm_timer_cap_counter(struct omap_dm_timer *cookie, bool is_period)
+{
+ struct dmtimer *timer;
+ unsigned int cap1 = 0;
+ unsigned int cap2 = 0;
+ u32 l;
+ int ret;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
+ pr_err("%s:timer is not available or enabled.%p\n", __func__, (void *)timer);
+ return -EINVAL;
+ }
+
+ /* Stop the timer */
+ omap_dm_timer_stop(cookie);
+
+ /* Clear the timer counter value to 0 */
+ ret = omap_dm_timer_write_counter(cookie, 0);
+ if (ret)
+ return ret;
+
+ /* Sets the timer capture configuration for period/duty cycle calculation */
+ ret = omap_dm_timer_set_cap(cookie, true, is_period);
+ if (ret) {
+ pr_err("%s: Failed to set timer capture configuration.\n", __func__);
+ return ret;
+ }
+ /* Start the timer */
+ omap_dm_timer_start(cookie);
+
+ /*
+ * A 1 s delay gives the capture logic enough time to latch
+ * low-frequency input signals.
+ */
+ msleep(1000);
+
+ cap1 = __omap_dm_timer_cap(timer, 0);
+ cap2 = __omap_dm_timer_cap(timer, 1);
+
+ /*
+ * Clear the TCLR capture configuration, preserving the start
+ * bit (bit 0), which is already set since the timer is running.
+ */
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ l &= ~(0xffff) | 0x1;
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ return cap2 - cap1;
+}
+
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
@@ -1104,8 +1216,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENOMEM;
timer->irq = platform_get_irq(pdev, 0);
- if (timer->irq < 0)
- return timer->irq;
+ if (timer->irq < 0) {
+ if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
+ dev_info(dev, "Did not find timer interrupt, timer usable in PWM mode only\n");
+ else
+ return timer->irq;
+ }
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
@@ -1242,6 +1358,9 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
+ .set_cap = omap_dm_timer_set_cap,
+ .get_cap_status = omap_dm_timer_get_pwm_status,
+ .read_cap = omap_dm_timer_cap_counter,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
@@ -1291,7 +1410,7 @@ MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
- .remove_new = omap_dm_timer_remove,
+ .remove = omap_dm_timer_remove,
.driver = {
.name = "omap_timer",
.of_match_table = omap_timer_match,
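For context on the new set_cap/read_cap hooks: with rising-edge capture the two capture registers latch consecutive rising edges, so their difference is one input period in functional-clock ticks; in both-edges mode the difference is the signal's high time. A sketch of the conversion a consumer might perform; these helpers are illustrative and not part of the patch:

	/*
	 * Illustrative consumer-side helpers: convert the tick delta
	 * returned by the new read_cap hook. With is_period set, the
	 * delta spans two rising edges (one full input period); in
	 * both-edges mode it is the high time of the signal.
	 */
	static unsigned long example_cap_to_hz(unsigned int period_ticks,
					       unsigned long fclk_rate)
	{
		return period_ticks ? fclk_rate / period_ticks : 0;
	}

	static unsigned int example_duty_percent(unsigned int high_ticks,
						 unsigned int period_ticks)
	{
		return period_ticks ? high_ticks * 100u / period_ticks : 0;
	}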
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
deleted file mode 100644
index 911c92146eca..000000000000
--- a/drivers/clocksource/timer-vf-pit.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/*
- * Each pit takes 0x10 Bytes register space
- */
-#define PITMCR 0x00
-#define PIT0_OFFSET 0x100
-#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
-#define PITLDVAL 0x00
-#define PITCVAL 0x04
-#define PITTCTRL 0x08
-#define PITTFLG 0x0c
-
-#define PITMCR_MDIS (0x1 << 1)
-
-#define PITTCTRL_TEN (0x1 << 0)
-#define PITTCTRL_TIE (0x1 << 1)
-#define PITCTRL_CHN (0x1 << 2)
-
-#define PITTFLG_TIF 0x1
-
-static void __iomem *clksrc_base;
-static void __iomem *clkevt_base;
-static unsigned long cycle_per_jiffy;
-
-static inline void pit_timer_enable(void)
-{
- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_timer_disable(void)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_irq_acknowledge(void)
-{
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-}
-
-static u64 notrace pit_read_sched_clock(void)
-{
- return ~__raw_readl(clksrc_base + PITCVAL);
-}
-
-static int __init pit_clocksource_init(unsigned long rate)
-{
- /* set the max load value and start the clock source counter */
- __raw_writel(0, clksrc_base + PITTCTRL);
- __raw_writel(~0UL, clksrc_base + PITLDVAL);
- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-
- sched_clock_register(pit_read_sched_clock, 32, rate);
- return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
- 300, 32, clocksource_mmio_readl_down);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *unused)
-{
- /*
- * set a new value to PITLDVAL register will not restart the timer,
- * to abort the current cycle and start a timer period with the new
- * value, the timer must be disabled and enabled again.
- * and the PITLAVAL should be set to delta minus one according to pit
- * hardware requirement.
- */
- pit_timer_disable();
- __raw_writel(delta - 1, clkevt_base + PITLDVAL);
- pit_timer_enable();
-
- return 0;
-}
-
-static int pit_shutdown(struct clock_event_device *evt)
-{
- pit_timer_disable();
- return 0;
-}
-
-static int pit_set_periodic(struct clock_event_device *evt)
-{
- pit_set_next_event(cycle_per_jiffy, evt);
- return 0;
-}
-
-static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- pit_irq_acknowledge();
-
- /*
- * pit hardware doesn't support oneshot, it will generate an interrupt
- * and reload the counter value from PITLDVAL when PITCVAL reach zero,
- * and start the counter again. So software need to disable the timer
- * to stop the counter loop in ONESHOT mode.
- */
- if (likely(clockevent_state_oneshot(evt)))
- pit_timer_disable();
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_pit = {
- .name = "VF pit timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = pit_shutdown,
- .set_state_periodic = pit_set_periodic,
- .set_next_event = pit_set_next_event,
- .rating = 300,
-};
-
-static int __init pit_clockevent_init(unsigned long rate, int irq)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit));
-
- clockevent_pit.cpumask = cpumask_of(0);
- clockevent_pit.irq = irq;
- /*
- * The value for the LDVAL register trigger is calculated as:
- * LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the counter value
- * reaches 0, it will generate an interrupt, thus the minimal
- * LDVAL trigger value is 1. And then the min_delta is
- * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
- */
- clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
-
- return 0;
-}
-
-static int __init pit_timer_init(struct device_node *np)
-{
- struct clk *pit_clk;
- void __iomem *timer_base;
- unsigned long clk_rate;
- int irq, ret;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("Failed to iomap\n");
- return -ENXIO;
- }
-
- /*
- * PIT0 and PIT1 can be chained to build a 64-bit timer,
- * so choose PIT2 as clocksource, PIT3 as clockevent device,
- * and leave PIT0 and PIT1 unused for anyone else who needs them.
- */
- clksrc_base = timer_base + PITn_OFFSET(2);
- clkevt_base = timer_base + PITn_OFFSET(3);
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0)
- return -EINVAL;
-
- pit_clk = of_clk_get(np, 0);
- if (IS_ERR(pit_clk))
- return PTR_ERR(pit_clk);
-
- ret = clk_prepare_enable(pit_clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(pit_clk);
- cycle_per_jiffy = clk_rate / (HZ);
-
- /* enable the pit module */
- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
-
- ret = pit_clocksource_init(clk_rate);
- if (ret)
- return ret;
-
- return pit_clockevent_init(clk_rate, irq);
-}
-TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 93c68a40a17b..6dcc2567de6d 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -705,6 +705,15 @@ config COMEDI_ADL_PCI6208
To compile this driver as a module, choose M here: the module will be
called adl_pci6208.
+config COMEDI_ADL_PCI7250
+ tristate "ADLink PCI-7250 support"
+ help
+ Enable support for ADLink PCI-7250/LPCI-7250/LPCIe-7250 relay output
+ and isolated digital input boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7250.
+
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
depends on HAS_IOPORT
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 393966c09740..002c0e76baff 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -27,14 +27,12 @@ static void comedi_buf_map_kref_release(struct kref *kref)
if (bm->page_list) {
if (bm->dma_dir != DMA_NONE) {
- /*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
- */
- buf = &bm->page_list[0];
- dma_free_coherent(bm->dma_hw_dev,
- PAGE_SIZE * bm->n_pages,
- buf->virt_addr, buf->dma_addr);
+ for (i = 0; i < bm->n_pages; i++) {
+ buf = &bm->page_list[i];
+ dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ buf->virt_addr,
+ buf->dma_addr);
+ }
} else {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
@@ -56,13 +54,7 @@ static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_buf_map *bm;
unsigned long flags;
- if (async->prealloc_buf) {
- if (s->async_dma_dir == DMA_NONE)
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
-
+ async->prealloc_bufsz = 0;
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
async->buf_map = NULL;
@@ -94,26 +86,14 @@ comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
goto err;
if (bm->dma_dir != DMA_NONE) {
- void *virt_addr;
- dma_addr_t dma_addr;
-
- /*
- * Currently, the DMA buffer needs to be allocated as a
- * single block so that it can be mmap()'ed.
- */
- virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
- PAGE_SIZE * n_pages, &dma_addr,
- GFP_KERNEL);
- if (!virt_addr)
- goto err;
-
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
- buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
- buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
+ buf->virt_addr =
+ dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->virt_addr)
+ break;
}
-
- bm->n_pages = i;
} else {
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
@@ -123,11 +103,10 @@ comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
SetPageReserved(virt_to_page(buf->virt_addr));
}
-
- bm->n_pages = i;
- if (i < n_pages)
- goto err;
}
+ bm->n_pages = i;
+ if (i < n_pages)
+ goto err;
return bm;
@@ -141,11 +120,8 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
unsigned int n_pages)
{
struct comedi_async *async = s->async;
- struct page **pages = NULL;
struct comedi_buf_map *bm;
- struct comedi_buf_page *buf;
unsigned long flags;
- unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
@@ -160,30 +136,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
spin_lock_irqsave(&s->spin_lock, flags);
async->buf_map = bm;
spin_unlock_irqrestore(&s->spin_lock, flags);
-
- if (bm->dma_dir != DMA_NONE) {
- /*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
- */
- buf = &bm->page_list[0];
- async->prealloc_buf = buf->virt_addr;
- } else {
- pages = vmalloc(sizeof(struct page *) * n_pages);
- if (!pages)
- return;
-
- for (i = 0; i < n_pages; i++) {
- buf = &bm->page_list[i];
- pages[i] = virt_to_page(buf->virt_addr);
- }
-
- /* vmap the pages to prealloc_buf */
- async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
- COMEDI_PAGE_PROTECTION);
-
- vfree(pages);
- }
+ async->prealloc_bufsz = n_pages << PAGE_SHIFT;
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
@@ -264,7 +217,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
- if (async->prealloc_buf && async->prealloc_bufsz == new_size)
+ if (async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
@@ -275,14 +228,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
-
- if (!async->prealloc_buf) {
- /* allocation failed */
- __comedi_buf_free(dev, s);
+ if (!async->prealloc_bufsz)
return -ENOMEM;
- }
}
- async->prealloc_bufsz = new_size;
return 0;
}
@@ -365,6 +313,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
@@ -376,15 +325,16 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
- int block_size = num_bytes - count;
- unsigned int buf_end;
-
- buf_end = async->prealloc_bufsz - async->munge_ptr;
- if (block_size > buf_end)
- block_size = buf_end;
+ /*
+ * Do not munge beyond page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = async->munge_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(async->munge_ptr);
+ unsigned int block_size =
+ min(num_bytes - count, PAGE_SIZE - offset);
- s->munge(s->device, s,
- async->prealloc_buf + async->munge_ptr,
+ s->munge(s->device, s, buf_page_list[page].virt_addr + offset,
block_size, async->munge_chan);
/*
@@ -397,7 +347,8 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
- async->munge_ptr %= async->prealloc_bufsz;
+ if (async->munge_ptr == async->prealloc_bufsz)
+ async->munge_ptr = 0;
count += block_size;
}
@@ -558,46 +509,52 @@ static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
- unsigned int block_size;
-
- if (write_ptr + num_bytes > async->prealloc_bufsz)
- block_size = async->prealloc_bufsz - write_ptr;
- else
- block_size = num_bytes;
+ /*
+ * Do not copy beyond page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = write_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(write_ptr);
+ unsigned int block_size = min(num_bytes, PAGE_SIZE - offset);
- memcpy(async->prealloc_buf + write_ptr, data, block_size);
+ memcpy(buf_page_list[page].virt_addr + offset,
+ data, block_size);
data += block_size;
num_bytes -= block_size;
-
- write_ptr = 0;
+ write_ptr += block_size;
+ if (write_ptr == async->prealloc_bufsz)
+ write_ptr = 0;
}
}
static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
void *dest, unsigned int nbytes)
{
- void *src;
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
- unsigned int block_size;
-
- src = async->prealloc_buf + read_ptr;
-
- if (nbytes >= async->prealloc_bufsz - read_ptr)
- block_size = async->prealloc_bufsz - read_ptr;
- else
- block_size = nbytes;
+ /*
+ * Do not copy beyond page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = read_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(read_ptr);
+ unsigned int block_size = min(nbytes, PAGE_SIZE - offset);
- memcpy(dest, src, block_size);
+ memcpy(dest, buf_page_list[page].virt_addr + offset,
+ block_size);
nbytes -= block_size;
dest += block_size;
- read_ptr = 0;
+ read_ptr += block_size;
+ if (read_ptr == async->prealloc_bufsz)
+ read_ptr = 0;
}
}
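All of the comedi_buf changes above follow one addressing scheme: a linear buffer offset is split into a page index and an intra-page offset, and no copy crosses a page boundary, because the pages are no longer virtually contiguous. A standalone model of the scheme in userspace C; the names are illustrative:

	#include <string.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1u << PAGE_SHIFT)

	/* Model of comedi's per-page buffer: one virtual address per page. */
	struct buf_page { void *virt_addr; };

	/*
	 * Copy "n" bytes out of a ring buffer of "n_pages" non-contiguous
	 * pages, starting at linear offset "src". Mirrors the page/offset
	 * split used by comedi_buf_memcpy_from() in the patch; the buffer
	 * size is a multiple of PAGE_SIZE, so wraparound lands exactly on
	 * a page boundary.
	 */
	static void ring_copy_from(struct buf_page *pages, unsigned int n_pages,
				   void *dest, unsigned int src, unsigned int n)
	{
		while (n) {
			unsigned int page = src >> PAGE_SHIFT;
			unsigned int offset = src & (PAGE_SIZE - 1);
			unsigned int chunk = PAGE_SIZE - offset;

			if (chunk > n)
				chunk = n;
			memcpy(dest, (char *)pages[page].virt_addr + offset, chunk);
			dest = (char *)dest + chunk;
			n -= chunk;
			src += chunk;
			if (src == n_pages * PAGE_SIZE)
				src = 0;	/* ring wraparound */
		}
	}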
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 1b481731df96..7e2f2b1a1c36 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -787,6 +787,7 @@ static int is_device_busy(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
if (!dev->attached)
return 0;
@@ -795,7 +796,16 @@ static int is_device_busy(struct comedi_device *dev)
s = &dev->subdevices[i];
if (s->busy)
return 1;
- if (s->async && comedi_buf_is_mmapped(s))
+ if (!s->async)
+ continue;
+ if (comedi_buf_is_mmapped(s))
+ return 1;
+ /*
+ * There may be tasks still waiting on the subdevice's wait
+ * queue, although they should already be about to be removed
+ * from it since the subdevice has no active async command.
+ */
+ if (wq_has_sleeper(&s->async->wait_head))
return 1;
}
@@ -825,15 +835,22 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
return -EPERM;
if (!arg) {
- if (is_device_busy(dev))
- return -EBUSY;
+ int rc = 0;
+
if (dev->attached) {
- struct module *driver_module = dev->driver->module;
+ down_write(&dev->attach_lock);
+ if (is_device_busy(dev)) {
+ rc = -EBUSY;
+ } else {
+ struct module *driver_module =
+ dev->driver->module;
- comedi_device_detach(dev);
- module_put(driver_module);
+ comedi_device_detach_locked(dev);
+ module_put(driver_module);
+ }
+ up_write(&dev->attach_lock);
}
- return 0;
+ return rc;
}
if (copy_from_user(&it, arg, sizeof(it)))
@@ -1556,21 +1573,30 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
for (i = 0; i < n_insns; ++i) {
+ unsigned int n = insns[i].n;
+
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
+ if (n < MIN_SAMPLES) {
+ memset(&data[n], 0, (MIN_SAMPLES - n) *
+ sizeof(unsigned int));
+ }
+ } else {
+ memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
+ sizeof(unsigned int));
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
goto error;
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_to_user failed\n");
ret = -EFAULT;
@@ -1589,6 +1615,16 @@ error:
return i;
}
+#define MAX_INSNS MAX_SAMPLES
+static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
+{
+ if (n_insns > MAX_INSNS) {
+ dev_dbg(dev->class_dev, "insnlist length too large\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* COMEDI_INSN ioctl
* synchronous instruction
@@ -1633,6 +1669,12 @@ static int do_insn_ioctl(struct comedi_device *dev,
ret = -EFAULT;
goto error;
}
+ if (insn->n < MIN_SAMPLES) {
+ memset(&data[insn->n], 0,
+ (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
+ }
+ } else {
+ memset(data, 0, n_data * sizeof(unsigned int));
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
@@ -2239,6 +2281,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = -EFAULT;
break;
}
+ rc = check_insnlist_len(dev, insnlist.n_insns);
+ if (rc)
+ break;
insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns) {
rc = -ENOMEM;
@@ -2387,13 +2432,27 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
goto done;
}
if (bm->dma_dir != DMA_NONE) {
+ unsigned long vm_start = vma->vm_start;
+ unsigned long vm_end = vma->vm_end;
+
/*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
+ * Buffer pages are not contiguous, so temporarily modify VMA
+ * start and end addresses for each buffer page.
*/
- buf = &bm->page_list[0];
- retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr,
- buf->dma_addr, n_pages * PAGE_SIZE);
+ for (i = 0; i < n_pages; ++i) {
+ buf = &bm->page_list[i];
+ vma->vm_start = start;
+ vma->vm_end = start + PAGE_SIZE;
+ retval = dma_mmap_coherent(bm->dma_hw_dev, vma,
+ buf->virt_addr,
+ buf->dma_addr, PAGE_SIZE);
+ if (retval)
+ break;
+
+ start += PAGE_SIZE;
+ }
+ vma->vm_start = vm_start;
+ vma->vm_end = vm_end;
} else {
for (i = 0; i < n_pages; ++i) {
unsigned long pfn;
@@ -2409,6 +2468,17 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
}
+#ifdef CONFIG_MMU
+ /*
+ * Leaving behind a partial mapping of a buffer we're about to drop is
+ * unsafe, see remap_pfn_range_notrack(). We need to zap the range
+ * here ourselves instead of relying on the automatic zapping in
+ * remap_pfn_range() because we call remap_pfn_range() in a loop.
+ */
+ if (retval)
+ zap_vma_ptes(vma, vma->vm_start, size);
+#endif
+
if (retval == 0) {
vma->vm_ops = &comedi_vm_ops;
vma->vm_private_data = bm;
@@ -2463,6 +2533,62 @@ done:
return mask;
}
+static unsigned int comedi_buf_copy_to_user(struct comedi_subdevice *s,
+ void __user *dest, unsigned int src_offset, unsigned int n)
+{
+ struct comedi_buf_map *bm = s->async->buf_map;
+ struct comedi_buf_page *buf_page_list = bm->page_list;
+ unsigned int page = src_offset >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(src_offset);
+
+ while (n) {
+ unsigned int copy_amount = min(n, PAGE_SIZE - offset);
+ unsigned int uncopied;
+
+ uncopied = copy_to_user(dest, buf_page_list[page].virt_addr +
+ offset, copy_amount);
+ copy_amount -= uncopied;
+ n -= copy_amount;
+ if (uncopied)
+ break;
+
+ dest += copy_amount;
+ page++;
+ if (page == bm->n_pages)
+ page = 0; /* buffer wraparound */
+ offset = 0;
+ }
+ return n;
+}
+
+static unsigned int comedi_buf_copy_from_user(struct comedi_subdevice *s,
+ unsigned int dst_offset, const void __user *src, unsigned int n)
+{
+ struct comedi_buf_map *bm = s->async->buf_map;
+ struct comedi_buf_page *buf_page_list = bm->page_list;
+ unsigned int page = dst_offset >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(dst_offset);
+
+ while (n) {
+ unsigned int copy_amount = min(n, PAGE_SIZE - offset);
+ unsigned int uncopied;
+
+ uncopied = copy_from_user(buf_page_list[page].virt_addr +
+ offset, src, copy_amount);
+ copy_amount -= uncopied;
+ n -= copy_amount;
+ if (uncopied)
+ break;
+
+ src += copy_amount;
+ page++;
+ if (page == bm->n_pages)
+ page = 0; /* buffer wraparound */
+ offset = 0;
+ }
+ return n;
+}
+
static ssize_t comedi_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *offset)
{
@@ -2504,7 +2630,6 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
unsigned int runflags;
- unsigned int wp, n1, n2;
set_current_state(TASK_INTERRUPTIBLE);
@@ -2543,14 +2668,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
}
set_current_state(TASK_RUNNING);
- wp = async->buf_write_ptr;
- n1 = min(n, async->prealloc_bufsz - wp);
- n2 = n - n1;
- m = copy_from_user(async->prealloc_buf + wp, buf, n1);
- if (m)
- m += n2;
- else if (n2)
- m = copy_from_user(async->prealloc_buf, buf + n1, n2);
+ m = comedi_buf_copy_from_user(s, async->buf_write_ptr, buf, n);
if (m) {
n -= m;
retval = -EFAULT;
@@ -2639,8 +2757,6 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
- unsigned int rp, n1, n2;
-
set_current_state(TASK_INTERRUPTIBLE);
m = comedi_buf_read_n_available(s);
@@ -2677,14 +2793,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
}
set_current_state(TASK_RUNNING);
- rp = async->buf_read_ptr;
- n1 = min(n, async->prealloc_bufsz - rp);
- n2 = n - n1;
- m = copy_to_user(buf, async->prealloc_buf + rp, n1);
- if (m)
- m += n2;
- else if (n2)
- m = copy_to_user(buf + n1, async->prealloc_buf, n2);
+ m = comedi_buf_copy_to_user(s, buf, async->buf_read_ptr, n);
if (m) {
n -= m;
retval = -EFAULT;
@@ -3078,6 +3187,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
return -EFAULT;
+ rc = check_insnlist_len(dev, insnlist32.n_insns);
+ if (rc)
+ return rc;
insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
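The memset() additions in do_insn_ioctl() and do_insnlist_ioctl() above share one purpose: the instruction data buffer is zero-padded so that a driver which unconditionally accesses up to MIN_SAMPLES entries never reads stale kernel memory. A condensed sketch of the invariant; the MIN_SAMPLES value shown is an assumption for illustration:

	#include <string.h>

	#define MIN_SAMPLES 16	/* assumed value, for illustration only */

	/*
	 * Zero-pad a copied-in sample buffer so a driver that reads up to
	 * MIN_SAMPLES entries sees deterministic values rather than stale
	 * kernel memory.
	 */
	static void pad_samples(unsigned int *data, unsigned int n)
	{
		if (n < MIN_SAMPLES)
			memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(*data));
	}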
diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h
index 9b3631a654c8..cf10ba016ebc 100644
--- a/drivers/comedi/comedi_internal.h
+++ b/drivers/comedi/comedi_internal.h
@@ -50,6 +50,7 @@ extern struct mutex comedi_drivers_list_lock;
int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
+void comedi_device_detach_locked(struct comedi_device *dev);
void comedi_device_detach(struct comedi_device *dev);
int comedi_device_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 376130bfba8a..c9ebaadc5e82 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -158,7 +158,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
int i;
struct comedi_subdevice *s;
- lockdep_assert_held(&dev->attach_lock);
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
@@ -196,16 +196,23 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
comedi_clear_hw_dev(dev);
}
-void comedi_device_detach(struct comedi_device *dev)
+void comedi_device_detach_locked(struct comedi_device *dev)
{
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
comedi_device_cancel_all(dev);
- down_write(&dev->attach_lock);
dev->attached = false;
dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
comedi_device_detach_cleanup(dev);
+}
+
+void comedi_device_detach(struct comedi_device *dev)
+{
+ lockdep_assert_held(&dev->mutex);
+ down_write(&dev->attach_lock);
+ comedi_device_detach_locked(dev);
up_write(&dev->attach_lock);
}
@@ -339,10 +346,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int *data,
unsigned int mask)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if (!mask)
- mask = chan_mask;
+ if (!mask && chan < 32)
+ mask = 1U << chan;
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
@@ -382,7 +389,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data)
{
- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
+ unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
: 0xffffffff;
unsigned int mask = data[0] & chanmask;
unsigned int bits = data[1];
@@ -613,6 +620,7 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int base_chan = (chan < 32) ? 0 : chan;
unsigned int _data[2];
+ unsigned int i;
int ret;
memset(_data, 0, sizeof(_data));
@@ -625,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1 << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
}
+ for (i = 0; i < insn->n; i++) {
+ if (insn->insn == INSN_WRITE)
+ _data[1] = data[i] ? _data[0] : 0; /* bits */
- ret = s->insn_bits(dev, s, &_insn, _data);
- if (ret < 0)
- return ret;
+ ret = s->insn_bits(dev, s, &_insn, _data);
+ if (ret < 0)
+ return ret;
- if (insn->insn == INSN_READ)
- data[0] = (_data[1] >> (chan - base_chan)) & 1;
+ if (insn->insn == INSN_READ)
+ data[i] = (_data[1] >> (chan - base_chan)) & 1;
+ }
- return 1;
+ return insn->n;
}
static int __comedi_device_postconfig_async(struct comedi_device *dev,
@@ -709,7 +720,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
if (s->type == COMEDI_SUBD_DO) {
if (s->n_chan < 32)
- s->io_bits = (1 << s->n_chan) - 1;
+ s->io_bits = (1U << s->n_chan) - 1;
else
s->io_bits = 0xffffffff;
}
diff --git a/drivers/comedi/drivers/Makefile b/drivers/comedi/drivers/Makefile
index b24ac00cab73..7b99a431330d 100644
--- a/drivers/comedi/drivers/Makefile
+++ b/drivers/comedi/drivers/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7250) += adl_pci7250.o
obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
diff --git a/drivers/comedi/drivers/adl_pci7250.c b/drivers/comedi/drivers/adl_pci7250.c
new file mode 100644
index 000000000000..78c85a402435
--- /dev/null
+++ b/drivers/comedi/drivers/adl_pci7250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adl_pci7250.c
+ *
+ * Comedi driver for ADLink PCI-7250 series cards.
+ *
+ * Copyright (C) 2015, 2025 Ian Abbott <abbotti@mev.co.uk>
+ */
+
+/*
+ * Driver: adl_pci7250
+ * Description: Driver for the ADLINK PCI-7250 relay output & digital input card
+ * Devices: [ADLINK] PCI-7250 (adl_pci7250) LPCI-7250 LPCIe-7250
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Status: works
+ * Updated: Mon, 02 Jun 2025 13:54:11 +0100
+ *
+ * The driver assumes that 3 PCI-7251 modules are fitted to the PCI-7250,
+ * giving 32 channels of relay outputs and 32 channels of isolated digital
+ * inputs. That is also the case for the LPCI-7250 and older LPCIe-7250
+ * cards, although they do not physically support the PCI-7251 modules.
+ * Newer LPCIe-7250 cards have a different PCI subsystem device ID, and
+ * the driver limits those cards to 8 channels.
+ *
+ * Not fitting the PCI-7251 modules shouldn't do any harm, but the extra
+ * inputs and relay outputs won't work!
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
+
+static unsigned char adl_pci7250_read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio)
+ return inb(dev->iobase + offset);
+#endif
+ return readb(dev->mmio + offset);
+}
+
+static void adl_pci7250_write8(struct comedi_device *dev, unsigned int offset,
+ unsigned char val)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio) {
+ outb(val, dev->iobase + offset);
+ return;
+ }
+#endif
+ writeb(val, dev->mmio + offset);
+}
+
+static int adl_pci7250_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = comedi_dio_update_state(s, data);
+
+ if (mask) {
+ unsigned int state = s->state;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ if ((mask & 0xffu) != 0) {
+ /* write relay data to even offset registers */
+ adl_pci7250_write8(dev, i * 2, state & 0xffu);
+ }
+ state >>= 8;
+ mask >>= 8;
+ }
+ }
+
+ data[1] = s->state;
+
+ return 2;
+}
+
+static int adl_pci7250_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int value = 0;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ /* read DI value from odd offset registers */
+ value |= (unsigned int)adl_pci7250_read8(dev, i * 2 + 1) <<
+ (i * 8);
+ }
+
+ data[1] = value;
+
+ return 2;
+}
+
+static int pci7250_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ unsigned int max_chans;
+ unsigned int i;
+ int ret;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ if (pci_resource_len(pcidev, 2) < 8)
+ return -ENXIO;
+
+ /*
+ * Newer LPCIe-7250 boards use MMIO. Older LPCIe-7250, LPCI-7250, and
+ * PCI-7250 boards use Port I/O.
+ */
+ if (pci_resource_flags(pcidev, 2) & IORESOURCE_MEM) {
+ dev->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!dev->mmio)
+ return -ENOMEM;
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ dev->iobase = pci_resource_start(pcidev, 2);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
+ if (pcidev->subsystem_device == 0x7000) {
+ /*
+ * This is a newer LPCIe-7250 variant and cannot possibly
+ * have PCI-7251 modules fitted, so limit the number of
+ * channels to 8.
+ */
+ max_chans = 8;
+ } else {
+ /*
+ * It is unknown whether the board is a PCI-7250, an LPCI-7250,
+ * or an older LPCIe-7250 variant, so treat it as a PCI-7250
+ * and assume it can have PCI-7251 modules fitted to increase
+ * the number of channels to a maximum of 32.
+ */
+ max_chans = 32;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+
+ /* Relay digital output. */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_do_insn_bits;
+ /* Read initial state of relays from the even offset registers. */
+ s->state = 0;
+ for (i = 0; i * 8 < max_chans; i++) {
+ s->state |= (unsigned int)adl_pci7250_read8(dev, i * 2) <<
+ (i * 8);
+ }
+
+ /* Isolated digital input. */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_di_insn_bits;
+
+ return 0;
+}
+
+static struct comedi_driver adl_pci7250_driver = {
+ .driver_name = "adl_pci7250",
+ .module = THIS_MODULE,
+ .auto_attach = pci7250_auto_attach,
+ .detach = comedi_pci_detach,
+};
+
+static int adl_pci7250_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &adl_pci7250_driver,
+ id->driver_data);
+}
+
+static const struct pci_device_id adl_pci7250_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7250) },
+#endif
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7000) }, /* newer LPCIe-7250 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, adl_pci7250_pci_table);
+
+static struct pci_driver adl_pci7250_pci_driver = {
+ .name = "adl_pci7250",
+ .id_table = adl_pci7250_pci_table,
+ .probe = adl_pci7250_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(adl_pci7250_driver, adl_pci7250_pci_driver);
+
+MODULE_AUTHOR("Comedi https://www.comedi.org");
+MODULE_DESCRIPTION("Comedi driver for ADLink PCI-7250 series boards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c
index a76e2666d583..67c663892e48 100644
--- a/drivers/comedi/drivers/adl_pci9118.c
+++ b/drivers/comedi/drivers/adl_pci9118.c
@@ -32,7 +32,7 @@
* ranges).
*
* There are some hardware limitations:
- * a) You cann't use mixture of unipolar/bipoar ranges or differencial/single
+ * a) You can't use a mixture of unipolar/bipolar ranges or differential/single
* ended inputs.
* b) DMA transfers must have the length aligned to two samples (32 bit),
* so there is some problems if cmd->chanlist_len is odd. This driver tries
@@ -227,7 +227,7 @@ struct pci9118_private {
struct pci9118_dmabuf dmabuf[2];
int softsshdelay; /*
* >0 use software S&H,
- * numer is requested delay in ns
+ * number is requested delay in ns
*/
unsigned char softsshsample; /*
* polarity of S&H signal
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index b00fab0b89d4..739cc4db52ac 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
* Digital input change of state interrupts are optionally supported
* using IRQ 2-7, 10-12, 14, or 15.
*/
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
dev->board_name, dev);
if (ret == 0)
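This is the first of several drivers in the series (aio_iiro_16, das16m1, das6402, pcl726, pcl812) gaining the same guard: the user-supplied IRQ option must be range-checked before it is used as a shift count, since shifting by a negative or oversized amount is undefined behaviour. A hypothetical helper, not in the patch, that captures the pattern:

	#include <linux/types.h>

	/*
	 * The option must name an IRQ line in 1..15 before it can be
	 * tested against the board's IRQ bitmask; otherwise the shift
	 * below would be undefined.
	 */
	static bool example_irq_option_valid(int option, unsigned int valid_mask)
	{
		return option > 0 && option < 16 &&
		       (valid_mask & (1U << option)) != 0;
	}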
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 05ae9122823f..7984950f0f99 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -197,7 +197,8 @@ static unsigned short fake_waveform(struct comedi_device *dev,
*/
static void waveform_ai_timer(struct timer_list *t)
{
- struct waveform_private *devpriv = from_timer(devpriv, t, ai_timer);
+ struct waveform_private *devpriv = timer_container_of(devpriv, t,
+ ai_timer);
struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
@@ -418,9 +419,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
- del_timer(&devpriv->ai_timer);
+ timer_delete(&devpriv->ai_timer);
} else {
- del_timer_sync(&devpriv->ai_timer);
+ timer_delete_sync(&devpriv->ai_timer);
}
return 0;
}
@@ -444,7 +445,8 @@ static int waveform_ai_insn_read(struct comedi_device *dev,
*/
static void waveform_ao_timer(struct timer_list *t)
{
- struct waveform_private *devpriv = from_timer(devpriv, t, ao_timer);
+ struct waveform_private *devpriv = timer_container_of(devpriv, t,
+ ao_timer);
struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
@@ -628,9 +630,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
- del_timer(&devpriv->ao_timer);
+ timer_delete(&devpriv->ao_timer);
} else {
- del_timer_sync(&devpriv->ao_timer);
+ timer_delete_sync(&devpriv->ao_timer);
}
return 0;
}
@@ -790,9 +792,9 @@ static void waveform_detach(struct comedi_device *dev)
{
struct waveform_private *devpriv = dev->private;
- if (devpriv) {
- del_timer_sync(&devpriv->ai_timer);
- del_timer_sync(&devpriv->ao_timer);
+ if (devpriv && dev->n_subdevices) {
+ timer_delete_sync(&devpriv->ai_timer);
+ timer_delete_sync(&devpriv->ao_timer);
}
}
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index 4ed56a02150e..1f85572c21b4 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -517,7 +517,8 @@ static void das16_interrupt(struct comedi_device *dev)
static void das16_timer_interrupt(struct timer_list *t)
{
- struct das16_private_struct *devpriv = from_timer(devpriv, t, timer);
+ struct das16_private_struct *devpriv = timer_container_of(devpriv, t,
+ timer);
struct comedi_device *dev = devpriv->dev;
unsigned long flags;
@@ -775,7 +776,7 @@ static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
/* disable SW timer */
if (devpriv->timer_running) {
devpriv->timer_running = 0;
- del_timer(&devpriv->timer);
+ timer_delete(&devpriv->timer);
}
if (devpriv->can_burst)
@@ -940,7 +941,7 @@ static void das16_free_dma(struct comedi_device *dev)
struct das16_private_struct *devpriv = dev->private;
if (devpriv) {
- del_timer_sync(&devpriv->timer);
+ timer_delete_sync(&devpriv->timer);
comedi_isadma_free(devpriv->dma);
}
}
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index b8ea737ad3d1..1b638f5b5a4f 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] >= 2 && it->options[1] <= 15 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], das16m1_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 68f95330de45..7660487e563c 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
das6402_reset(dev);
/* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
- if ((1 << it->options[1]) & 0x8cec) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0x8cec) {
ret = request_irq(it->options[1], das6402_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index 951c23fa0369..61792d940a3d 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -562,7 +562,8 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
static void jr3_pci_poll_dev(struct timer_list *t)
{
- struct jr3_pci_dev_private *devpriv = from_timer(devpriv, t, timer);
+ struct jr3_pci_dev_private *devpriv = timer_container_of(devpriv, t,
+ timer);
struct comedi_device *dev = devpriv->dev;
struct jr3_pci_subdev_private *spriv;
struct comedi_subdevice *s;
@@ -758,7 +759,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv)
- del_timer_sync(&devpriv->timer);
+ timer_shutdown_sync(&devpriv->timer);
comedi_pci_detach(dev);
}
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 330ae1c58800..b4e759e5703f 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -215,7 +215,7 @@ static const int ni_irqpin[] = {
#include "ni_mio_common.c"
-static const struct pnp_device_id device_ids[] = {
+static const struct pnp_device_id __maybe_unused device_ids[] = {
{.id = "NIC1900", .driver_data = 0},
{.id = "NIC2400", .driver_data = 0},
{.id = "NIC2500", .driver_data = 0},
diff --git a/drivers/comedi/drivers/ni_pcidio.c b/drivers/comedi/drivers/ni_pcidio.c
index 2d58e83420e8..2c7bb9c1ea5b 100644
--- a/drivers/comedi/drivers/ni_pcidio.c
+++ b/drivers/comedi/drivers/ni_pcidio.c
@@ -747,8 +747,6 @@ static int ni_pcidio_change(struct comedi_device *dev,
if (ret < 0)
return ret;
- memset(s->async->prealloc_buf, 0xaa, s->async->prealloc_bufsz);
-
return 0;
}
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 0430630e6ebb..b542896fa0e4 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
* Hook up the external trigger source interrupt only if the
* user config option is valid and the board supports interrupts.
*/
- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (board->irq_mask & (1U << it->options[1]))) {
ret = request_irq(it->options[1], pcl726_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index 0df639c6a595..abca61a72cf7 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (IS_ERR(dev->pacer))
return PTR_ERR(dev->pacer);
- if ((1 << it->options[1]) & board->irq_bits) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & board->irq_bits) {
ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 4a6868b8f58b..ce81fc4e1ae7 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -1360,4 +1360,4 @@ module_isa_driver_with_irq(quad8_driver, num_quad8, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index 3ee75e1a78cd..23fdf0caf712 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -672,4 +672,4 @@ exit_early:
if (copied)
wake_up_poll(&counter->events_wait, EPOLLIN);
}
-EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_push_event, "COUNTER");
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 893b4f0726d2..50bd30ba3d03 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -74,7 +74,7 @@ void *counter_priv(const struct counter_device *const counter)
return &ch->privdata;
}
-EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_priv, "COUNTER");
/**
* counter_alloc - allocate a counter_device
@@ -134,13 +134,13 @@ err_ida_alloc:
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(counter_alloc, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_alloc, "COUNTER");
void counter_put(struct counter_device *counter)
{
put_device(&counter->dev);
}
-EXPORT_SYMBOL_NS_GPL(counter_put, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_put, "COUNTER");
/**
* counter_add - complete registration of a counter
@@ -167,7 +167,7 @@ int counter_add(struct counter_device *counter)
/* implies device_add(dev) */
return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_NS_GPL(counter_add, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_add, "COUNTER");
/**
* counter_unregister - unregister Counter from the system
@@ -189,7 +189,7 @@ void counter_unregister(struct counter_device *const counter)
mutex_unlock(&counter->ops_exist_lock);
}
-EXPORT_SYMBOL_NS_GPL(counter_unregister, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_unregister, "COUNTER");
static void devm_counter_release(void *counter)
{
@@ -224,7 +224,7 @@ struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv
return counter;
}
-EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, COUNTER);
+EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, "COUNTER");
/**
* devm_counter_add - complete registration of a counter
@@ -245,7 +245,7 @@ int devm_counter_add(struct device *dev,
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_NS_GPL(devm_counter_add, COUNTER);
+EXPORT_SYMBOL_NS_GPL(devm_counter_add, "COUNTER");
#define COUNTER_DEV_MAX 256
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 200876f3ec04..c47741292ae1 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -311,6 +311,7 @@ static const struct of_device_id ftm_quaddec_match[] = {
{ .compatible = "fsl,ftm-quaddec" },
{},
};
+MODULE_DEVICE_TABLE(of, ftm_quaddec_match);
static struct platform_driver ftm_quaddec_driver = {
.driver = {
@@ -326,4 +327,4 @@ MODULE_DESCRIPTION("Flex Timer Module Quadrature decoder");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com>");
MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/i8254.c b/drivers/counter/i8254.c
index 6d74e8ef92f0..95ad928725ec 100644
--- a/drivers/counter/i8254.c
+++ b/drivers/counter/i8254.c
@@ -439,9 +439,9 @@ int devm_i8254_regmap_register(struct device *const dev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, I8254);
+EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, "I8254");
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8254 Programmable Interval Timer");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index af5942e66f7d..c49c178056f4 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -408,13 +408,9 @@ static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
pci_set_master(pci);
- ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret)
- return ret;
-
- regs = pcim_iomap_table(pci)[0];
- if (!regs)
- return -ENOMEM;
+ regs = pcim_iomap_region(pci, 0, pci_name(pci));
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
qep->dev = dev;
qep->regs = regs;
@@ -523,4 +519,4 @@ MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
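The intel-qep conversion above replaces the two-step pcim_iomap_regions() + pcim_iomap_table() sequence with a single managed mapping call. A minimal probe sketch of the new API, with error handling and device specifics elided; this is an outline under those assumptions, not a complete driver:

	#include <linux/err.h>
	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pci)
	{
		void __iomem *regs;
		int ret;

		ret = pcim_enable_device(pci);
		if (ret)
			return ret;

		/* Map BAR 0; the mapping is device-managed (no unmap needed). */
		regs = pcim_iomap_region(pci, 0, pci_name(pci));
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		/* ... use regs ... */
		return 0;
	}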
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 229473855c5b..6c0c1d2d7027 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -3,22 +3,25 @@
* Copyright (c) 2021 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
+#include <linux/cleanup.h>
#include <linux/counter.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define INTERRUPT_CNT_NAME "interrupt-cnt"
struct interrupt_cnt_priv {
- atomic_t count;
+ atomic_long_t count;
struct gpio_desc *gpio;
int irq;
bool enabled;
+ struct mutex lock;
struct counter_signal signals;
struct counter_synapse synapses;
struct counter_count cnts;
@@ -29,7 +32,7 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
struct counter_device *counter = dev_id;
struct interrupt_cnt_priv *priv = counter_priv(counter);
- atomic_inc(&priv->count);
+ atomic_long_inc(&priv->count);
counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE, 0);
@@ -41,6 +44,8 @@ static int interrupt_cnt_enable_read(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
+ guard(mutex)(&priv->lock);
+
*enable = priv->enabled;
return 0;
@@ -51,6 +56,8 @@ static int interrupt_cnt_enable_write(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
+ guard(mutex)(&priv->lock);
+
if (priv->enabled == enable)
return 0;
@@ -89,7 +96,7 @@ static int interrupt_cnt_read(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
- *val = atomic_read(&priv->count);
+ *val = atomic_long_read(&priv->count);
return 0;
}
@@ -102,7 +109,7 @@ static int interrupt_cnt_write(struct counter_device *counter,
if (val != (typeof(priv->count.counter))val)
return -ERANGE;
- atomic_set(&priv->count, val);
+ atomic_long_set(&priv->count, val);
return 0;
}
@@ -227,6 +234,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ mutex_init(&priv->lock);
+
ret = devm_counter_add(dev, counter);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to add counter\n");
@@ -253,4 +262,4 @@ MODULE_ALIAS("platform:interrupt-counter");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Interrupt counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index b3e615cbd2ca..1a299d1f350b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -6,18 +6,24 @@
*/
#include <linux/clk.h>
#include <linux/counter.h>
+#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <uapi/linux/counter/microchip-tcb-capture.h>
#include <soc/at91/atmel_tcb.h>
#define ATMEL_TC_CMR_MASK (ATMEL_TC_LDRA_RISING | ATMEL_TC_LDRB_FALLING | \
ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_LDBDIS | \
ATMEL_TC_LDBSTOP)
+#define ATMEL_TC_DEF_IRQS (ATMEL_TC_ETRGS | ATMEL_TC_COVFS | \
+ ATMEL_TC_LDRAS | ATMEL_TC_LDRBS | ATMEL_TC_CPCS)
+
#define ATMEL_TC_QDEN BIT(8)
#define ATMEL_TC_POSEN BIT(9)
@@ -247,6 +253,112 @@ static int mchp_tc_count_read(struct counter_device *counter,
return 0;
}
+static int mchp_tc_count_cap_read(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), &cnt);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), &cnt);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_cap_write(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret;
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), val);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mchp_tc_count_compare_read(struct counter_device *counter, struct counter_count *count,
+ u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), &cnt);
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_compare_write(struct counter_device *counter, struct counter_count *count,
+ u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ return regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), val);
+}
+
+static DEFINE_COUNTER_ARRAY_CAPTURE(mchp_tc_cnt_cap_array, 2);
+
+static struct counter_comp mchp_tc_count_ext[] = {
+ COUNTER_COMP_ARRAY_CAPTURE(mchp_tc_count_cap_read, mchp_tc_count_cap_write,
+ mchp_tc_cnt_cap_array),
+ COUNTER_COMP_COMPARE(mchp_tc_count_compare_read, mchp_tc_count_compare_write),
+};
+
+static int mchp_tc_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel == COUNTER_MCHP_EVCHN_CV || watch->channel == COUNTER_MCHP_EVCHN_RA)
+ switch (watch->event) {
+ case COUNTER_EVENT_CHANGE_OF_STATE:
+ case COUNTER_EVENT_OVERFLOW:
+ case COUNTER_EVENT_CAPTURE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (watch->channel == COUNTER_MCHP_EVCHN_RB && watch->event == COUNTER_EVENT_CAPTURE)
+ return 0;
+
+ if (watch->channel == COUNTER_MCHP_EVCHN_RC && watch->event == COUNTER_EVENT_THRESHOLD)
+ return 0;
+
+ return -EINVAL;
+}
+
static struct counter_count mchp_tc_counts[] = {
{
.id = 0,
@@ -255,6 +367,8 @@ static struct counter_count mchp_tc_counts[] = {
.num_functions = ARRAY_SIZE(mchp_tc_count_functions),
.synapses = mchp_tc_count_synapses,
.num_synapses = ARRAY_SIZE(mchp_tc_count_synapses),
+ .ext = mchp_tc_count_ext,
+ .num_ext = ARRAY_SIZE(mchp_tc_count_ext),
},
};
@@ -264,7 +378,8 @@ static const struct counter_ops mchp_tc_ops = {
.function_read = mchp_tc_count_function_read,
.function_write = mchp_tc_count_function_write,
.action_read = mchp_tc_count_action_read,
- .action_write = mchp_tc_count_action_write
+ .action_write = mchp_tc_count_action_write,
+ .watch_validate = mchp_tc_watch_validate,
};
static const struct atmel_tcb_config tcb_rm9200_config = {
@@ -294,6 +409,65 @@ static const struct of_device_id atmel_tc_of_match[] = {
{ /* sentinel */ }
};
+static irqreturn_t mchp_tc_isr(int irq, void *dev_id)
+{
+ struct counter_device *const counter = dev_id;
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 sr, mask;
+
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], IMR), &mask);
+
+ sr &= mask;
+ if (!(sr & ATMEL_TC_ALL_IRQ))
+ return IRQ_NONE;
+
+ if (sr & ATMEL_TC_ETRGS)
+ counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE,
+ COUNTER_MCHP_EVCHN_CV);
+ if (sr & ATMEL_TC_LDRAS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RA);
+ if (sr & ATMEL_TC_LDRBS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RB);
+ if (sr & ATMEL_TC_CPCS)
+ counter_push_event(counter, COUNTER_EVENT_THRESHOLD,
+ COUNTER_MCHP_EVCHN_RC);
+ if (sr & ATMEL_TC_COVFS)
+ counter_push_event(counter, COUNTER_EVENT_OVERFLOW,
+ COUNTER_MCHP_EVCHN_CV);
+
+ return IRQ_HANDLED;
+}
+
+static void mchp_tc_irq_remove(void *ptr)
+{
+ struct mchp_tc_data *priv = ptr;
+
+ regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IDR), ATMEL_TC_DEF_IRQS);
+}
+
+static int mchp_tc_irq_enable(struct counter_device *const counter, int irq)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, 0,
+ dev_name(counter->parent), counter);
+
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IER), ATMEL_TC_DEF_IRQS);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(counter->parent, mchp_tc_irq_remove, priv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void mchp_tc_clk_remove(void *ptr)
{
clk_disable_unprepare((struct clk *)ptr);
@@ -368,6 +542,25 @@ static int mchp_tc_probe(struct platform_device *pdev)
channel);
}
+ /* Disable Quadrature Decoder and position measurement */
+ ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0);
+ if (ret)
+ return ret;
+
+ /* Set up the period capture mode */
+ ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR),
+ ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK |
+ ATMEL_TC_TCCLKS,
+ ATMEL_TC_CMR_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable clock and trigger counter */
+ ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR),
+ ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
+ if (ret)
+ return ret;
+
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
counter->name = dev_name(&pdev->dev);
@@ -378,6 +571,15 @@ static int mchp_tc_probe(struct platform_device *pdev)
counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
counter->signals = mchp_tc_count_signals;
+ i = of_irq_get(np->parent, 0);
+ if (i == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (i > 0) {
+ ret = mchp_tc_irq_enable(counter, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to set up IRQ");
+ }
+
ret = devm_counter_add(&pdev->dev, counter);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
@@ -403,4 +605,4 @@ module_platform_driver(mchp_tc_driver);
MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
MODULE_DESCRIPTION("Microchip TCB Capture driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/rz-mtu3-cnt.c b/drivers/counter/rz-mtu3-cnt.c
index ee821493b166..e755d54dfece 100644
--- a/drivers/counter/rz-mtu3-cnt.c
+++ b/drivers/counter/rz-mtu3-cnt.c
@@ -903,4 +903,4 @@ MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_ALIAS("platform:rz-mtu3-counter");
MODULE_DESCRIPTION("Renesas RZ/G2L MTU3a counter driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 8439755559b2..b249c8647639 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
return 0;
}
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto disable_cnt;
+
/* LP timer must be enabled before writing CMP & ARR */
ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
if (ret)
- return ret;
+ goto disable_clk;
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
STM32_LPTIM_CMPOKCF_ARROKCF);
if (ret)
- return ret;
+ goto disable_clk;
- ret = clk_enable(priv->clk);
- if (ret) {
- regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
- return ret;
- }
priv->enabled = true;
/* Start LP timer in continuous mode */
return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+
+disable_clk:
+ clk_disable(priv->clk);
+disable_cnt:
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+
+ return ret;
}
static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
@@ -520,4 +526,4 @@ MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
MODULE_ALIAS("platform:stm32-lptimer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 186e73d6ccb4..3d3384cbea87 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -214,11 +214,17 @@ static int stm32_count_enable_write(struct counter_device *counter,
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
+ int ret;
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ if (!(cr1 & TIM_CR1_CEN)) {
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_err(counter->parent, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
+ }
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
@@ -663,12 +669,14 @@ static void stm32_timer_cnt_detect_channels(struct device *dev,
dev_dbg(dev, "has %d cc channels\n", priv->nchannels);
}
-/* encoder supported on TIM1 TIM2 TIM3 TIM4 TIM5 TIM8 */
-#define STM32_TIM_ENCODER_SUPPORTED (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(7))
+/* encoder supported on TIM1 TIM2 TIM3 TIM4 TIM5 TIM8 TIM20 */
+#define STM32_TIM_ENCODER_SUPPORTED (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(7) | \
+ BIT(19))
static const char * const stm32_timer_trigger_compat[] = {
"st,stm32-timer-trigger",
"st,stm32h7-timer-trigger",
+ "st,stm32mp25-timer-trigger",
};
static int stm32_timer_cnt_probe_encoder(struct device *dev,
@@ -694,6 +702,7 @@ static int stm32_timer_cnt_probe_encoder(struct device *dev,
}
ret = of_property_read_u32(tnode, "reg", &idx);
+ of_node_put(tnode);
if (ret) {
dev_err(dev, "Can't get index (%d)\n", ret);
return ret;
@@ -816,7 +825,11 @@ static int __maybe_unused stm32_timer_cnt_resume(struct device *dev)
return ret;
if (priv->enabled) {
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
/* Restore registers that may have been lost */
regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
@@ -835,6 +848,7 @@ static SIMPLE_DEV_PM_OPS(stm32_timer_cnt_pm_ops, stm32_timer_cnt_suspend,
static const struct of_device_id stm32_timer_cnt_of_match[] = {
{ .compatible = "st,stm32-timer-counter", },
+ { .compatible = "st,stm32mp25-timer-counter", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_timer_cnt_of_match);
@@ -853,4 +867,4 @@ MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_ALIAS("platform:stm32-timer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 675447315caf..3586a7ab9887 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -465,11 +465,6 @@ static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ecap_cnt_pm_disable(void *dev)
-{
- pm_runtime_disable(dev);
-}
-
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -523,12 +518,9 @@ static int ecap_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, counter_dev);
- pm_runtime_enable(dev);
-
- /* Register a cleanup callback to care for disabling PM */
- ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+ return ret;
ret = devm_counter_add(dev, counter_dev);
if (ret)
@@ -574,8 +566,13 @@ static int ecap_cnt_resume(struct device *dev)
{
struct counter_device *counter_dev = dev_get_drvdata(dev);
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+ int ret;
- clk_enable(ecap_dev->clk);
+ ret = clk_enable(ecap_dev->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
@@ -598,7 +595,7 @@ MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
static struct platform_driver ecap_cnt_driver = {
.probe = ecap_cnt_probe,
- .remove_new = ecap_cnt_remove,
+ .remove = ecap_cnt_remove,
.driver = {
.name = "ecap-capture",
.of_match_table = ecap_cnt_of_match,
@@ -610,4 +607,4 @@ module_platform_driver(ecap_cnt_driver);
MODULE_DESCRIPTION("ECAP Capture driver");
MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 313b91456f26..d21c157e531a 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -107,6 +107,15 @@
#define QCLR_PCE BIT(1)
#define QCLR_INT BIT(0)
+#define QEPSTS_UPEVNT BIT(7)
+#define QEPSTS_FDF BIT(6)
+#define QEPSTS_QDF BIT(5)
+#define QEPSTS_QDLF BIT(4)
+#define QEPSTS_COEF BIT(3)
+#define QEPSTS_CDEF BIT(2)
+#define QEPSTS_FIMF BIT(1)
+#define QEPSTS_PCEF BIT(0)
+
/* EQEP Inputs */
enum {
TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
@@ -286,6 +295,9 @@ static int ti_eqep_events_configure(struct counter_device *counter)
case COUNTER_EVENT_UNDERFLOW:
qeint |= QEINT_PCU;
break;
+ case COUNTER_EVENT_DIRECTION_CHANGE:
+ qeint |= QEINT_QDC;
+ break;
}
}
@@ -298,6 +310,7 @@ static int ti_eqep_watch_validate(struct counter_device *counter,
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
case COUNTER_EVENT_UNDERFLOW:
+ case COUNTER_EVENT_DIRECTION_CHANGE:
if (watch->channel != 0)
return -EINVAL;
@@ -368,11 +381,27 @@ static int ti_eqep_position_enable_write(struct counter_device *counter,
return 0;
}
+static int ti_eqep_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
+{
+ struct ti_eqep_cnt *priv = counter_priv(counter);
+ u32 qepsts;
+
+ regmap_read(priv->regmap16, QEPSTS, &qepsts);
+
+ *direction = (qepsts & QEPSTS_QDF) ? COUNTER_COUNT_DIRECTION_FORWARD
+ : COUNTER_COUNT_DIRECTION_BACKWARD;
+
+ return 0;
+}
+
static struct counter_comp ti_eqep_position_ext[] = {
COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
ti_eqep_position_ceiling_write),
COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
ti_eqep_position_enable_write),
+ COUNTER_COMP_DIRECTION(ti_eqep_direction_read),
};
static struct counter_signal ti_eqep_signals[] = {
@@ -439,6 +468,9 @@ static irqreturn_t ti_eqep_irq_handler(int irq, void *dev_id)
if (qflg & QFLG_PCU)
counter_push_event(counter, COUNTER_EVENT_UNDERFLOW, 0);
+ if (qflg & QFLG_QDC)
+ counter_push_event(counter, COUNTER_EVENT_DIRECTION_CHANGE, 0);
+
regmap_write(priv->regmap16, QCLR, qflg);
return IRQ_HANDLED;
@@ -548,7 +580,7 @@ MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
static struct platform_driver ti_eqep_driver = {
.probe = ti_eqep_probe,
- .remove_new = ti_eqep_remove,
+ .remove = ti_eqep_remove,
.driver = {
.name = "ti-eqep-cnt",
.of_match_table = ti_eqep_of_match,
@@ -559,4 +591,4 @@ module_platform_driver(ti_eqep_driver);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("TI eQEP counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2561b215432a..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,8 +217,34 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+ tristate "Rust based Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+ This adds a Rust-based generic DT cpufreq driver for frequency
+ management. It supports both uniprocessor (UP) and symmetric
+ multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
+config CPUFREQ_VIRT
+ tristate "Virtual cpufreq driver"
+ depends on GENERIC_ARCH_TOPOLOGY
+ help
+ This adds a virtualized cpufreq driver for guest kernels that
+ reads/writes an MMIO region of a virtualized cpufreq device to
+ communicate with the host. It sends performance requests to the
+ host, which uses them as a hint when scheduling vCPU threads and
+ selecting CPU frequency. If the VM does not support a virtualized
+ FIE (such as AMUs), the driver updates the frequency scaling factor
+ by polling the host CPU frequency, enabling accurate Per-Entity
+ Load Tracking for tasks running in the guest.
+
+ If in doubt, say N.
+
config CPUFREQ_DT_PLATDEV
- tristate "Generic DT based cpufreq platdev driver"
+ bool "Generic DT based cpufreq platdev driver"
depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
@@ -311,8 +337,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -341,4 +365,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f7e13e60c80..9be0503df55a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -15,11 +15,19 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_AIROHA_SOC_CPUFREQ
+ tristate "Airoha EN7581 SoC CPUFreq support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
+ select PM_OPP
+ default ARCH_AIROHA
+ help
+ This adds the CPUFreq driver for Airoha EN7581 SoCs.
+
config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
select PM_OPP
- default ARCH_APPLE
help
This adds the CPUFreq driver for Apple Silicon machines
(e.g. Apple M1).
@@ -67,7 +75,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
- default y
+ default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -79,7 +87,7 @@ config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK || COMPILE_TEST
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
- default m
+ default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -124,7 +132,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default m
+ default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -172,7 +180,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410 || COMPILE_TEST
- default y
+ default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -181,7 +189,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210 || COMPILE_TEST
- default y
+ default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -205,7 +213,7 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -224,15 +232,15 @@ config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
- bool "Tegra124 CPUFreq support"
+ tristate "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -245,16 +253,16 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
- default y
+ default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
- default y
+ default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 58151ca56695..551e65d35a1d 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,29 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CPU_FREQ_CBE
- tristate "CBE frequency scaling"
- depends on CBE_RAS && PPC_CELL
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CPU_FREQ_CBE_PMI
- bool "CBE frequency scaling using PMI interface"
- depends on CPU_FREQ_CBE
- default n
- help
- Select this, if you want to use the PMI interface to switch
- frequencies. Using PMI, the processor will not only be able to run at
- lower speed, but also at lower core voltage.
-
-config CPU_FREQ_MAPLE
- bool "Support for Maple 970FX Evaluation Board"
- depends on PPC_MAPLE
- help
- This adds support for frequency switching on Maple 970FX
- Evaluation Board and compatible boards (IBM JS2x blades).
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 97c2d4f15d76..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -340,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on hardware
+ provided feedback (such as architected counters).
+ Since a more precise frequency can now be provided via the
+ cpuinfo_avg_freq attribute, enabling this option keeps scaling_cur_freq
+ reporting a counter-based frequency, for compatibility reasons.
+
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0f184031dd12..681d687b5a18 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,10 +15,13 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
+obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
+CFLAGS_powernv-cpufreq.o := -I$(src)
amd_pstate-y := amd-pstate.o amd-pstate-trace.o
##################################################################################
@@ -52,6 +55,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
@@ -89,10 +93,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
-obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
-ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
-obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
-obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0f04feb6cafa..083d8369a591 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -113,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
@@ -321,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -338,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
-
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
@@ -626,7 +616,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -655,6 +652,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq * 1000;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -667,8 +667,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -678,9 +682,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -830,16 +834,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in the
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
@@ -895,8 +903,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
if (acpi_cpufreq_driver.set_boost) {
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
- policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
}
return result;
@@ -939,7 +956,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
@@ -1028,7 +1044,7 @@ static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove_new = acpi_cpufreq_remove,
+ .remove = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
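
The drv_write() simplification relies on on_each_cpu_mask(), which also runs the callback on the calling CPU when it is in the mask, so the get_cpu()/smp_call_function_many()/put_cpu() sequence collapses into one call. A minimal sketch (the callback body is illustrative):

#include <linux/cpumask.h>
#include <linux/smp.h>

static void example_per_cpu_write(void *info)
{
	/* Runs on every CPU in the mask, with interrupts disabled */
}

static void example_broadcast(const struct cpumask *mask)
{
	/* Final argument 'true' waits until all CPUs have run the callback */
	on_each_cpu_mask(mask, example_per_cpu_write, NULL, true);
}
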
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
new file mode 100644
index 000000000000..b6b1cdc4d11d
--- /dev/null
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+struct airoha_cpufreq_priv {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
+ struct platform_device *cpufreq_dt;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+/* NOP function to disable OPP from setting clock */
+static int airoha_cpufreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp,
+ void *data, bool scaling_down)
+{
+ return 0;
+}
+
+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
+static const char * const airoha_cpufreq_pd_names[] = { "perf" };
+
+static int airoha_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = airoha_cpufreq_pd_names,
+ .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names),
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
+ };
+ struct dev_pm_opp_config config = {
+ .clk_names = airoha_cpufreq_clk_names,
+ .config_clks = airoha_cpufreq_config_clks_nop,
+ };
+ struct platform_device *cpufreq_dt;
+ struct airoha_cpufreq_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev;
+ int ret;
+
+ /* CPUs refer to the same OPP table */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set OPP table conf with NOP config_clks */
+ priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (priv->opp_token < 0)
+ return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
+
+ /* Attach PM for OPP */
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &priv->pd_list);
+ if (ret)
+ goto clear_opp_config;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
+ goto detach_pm;
+ }
+
+ priv->cpufreq_dt = cpufreq_dt;
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+detach_pm:
+ dev_pm_domain_detach_list(priv->pd_list);
+clear_opp_config:
+ dev_pm_opp_clear_config(priv->opp_token);
+
+ return ret;
+}
+
+static void airoha_cpufreq_remove(struct platform_device *pdev)
+{
+ struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt);
+
+ dev_pm_domain_detach_list(priv->pd_list);
+
+ dev_pm_opp_clear_config(priv->opp_token);
+}
+
+static struct platform_driver airoha_cpufreq_driver = {
+ .probe = airoha_cpufreq_probe,
+ .remove = airoha_cpufreq_remove,
+ .driver = {
+ .name = "airoha-cpufreq",
+ },
+};
+
+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
+ { .compatible = "airoha,en7581" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
+
+static int __init airoha_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(airoha_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&airoha_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (ret)
+ platform_driver_unregister(&airoha_cpufreq_driver);
+
+ return ret;
+}
+module_init(airoha_cpufreq_init);
+
+static void __exit airoha_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&airoha_cpufreq_driver);
+}
+module_exit(airoha_cpufreq_exit);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..32e1bdc588c5 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,15 +24,14 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf,
- unsigned long target_perf,
- unsigned long capacity,
+ TP_PROTO(u8 min_perf,
+ u8 target_perf,
+ u8 capacity,
u64 freq,
u64 mperf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,20 +43,18 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
TP_STRUCT__entry(
- __field(unsigned long, min_perf)
- __field(unsigned long, target_perf)
- __field(unsigned long, capacity)
+ __field(u8, min_perf)
+ __field(u8, target_perf)
+ __field(u8, capacity)
__field(unsigned long long, freq)
__field(unsigned long long, mperf)
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,24 +67,72 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
- (unsigned long)__entry->min_perf,
- (unsigned long)__entry->target_perf,
- (unsigned long)__entry->capacity,
+ TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+ (u8)__entry->min_perf,
+ (u8)__entry->target_perf,
+ (u8)__entry->capacity,
(unsigned long long)__entry->freq,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ u8 highest_perf,
+ u8 epp,
+ u8 min_perf,
+ u8 max_perf,
+ bool boost,
+ bool changed
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost,
+ changed),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(u8, highest_perf)
+ __field(u8, epp)
+ __field(u8, min_perf)
+ __field(u8, max_perf)
+ __field(bool, boost)
+ __field(bool, changed)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ __entry->changed = changed;
+ ),
+
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
+ (unsigned int)__entry->cpu_id,
+ (u8)__entry->min_perf,
+ (u8)__entry->max_perf,
+ (u8)__entry->highest_perf,
+ (u8)__entry->epp,
+ (bool)__entry->boost,
+ (bool)__entry->changed
+ )
+);
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
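
The new amd_pstate_epp_perf event is meant to be fired behind the auto-generated trace_amd_pstate_epp_perf_enabled() static-branch check, so the argument gathering costs nothing while tracing is off; the amd-pstate.c hunks below use exactly this shape. A stripped-down sketch of the call site, assuming the amd-pstate.c context (variable names illustrative):

#include "amd-pstate.h"
#include "amd-pstate-trace.h"

static void example_trace(struct cpufreq_policy *policy, u8 highest_perf,
			  u8 epp, u8 min_perf, u8 max_perf,
			  u64 value, u64 prev)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	/* The _enabled() helper is a static branch: near-zero cost when off */
	if (trace_amd_pstate_epp_perf_enabled())
		trace_amd_pstate_epp_perf(cpudata->cpu, highest_perf, epp,
					  min_perf, max_perf,
					  policy->boost_enabled,
					  value != prev);
}
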
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f66701514d90..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -22,39 +22,33 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
+#include <linux/cleanup.h>
#include <acpi/cppc_acpi.h>
+#include <asm/msr.h>
+
#include "amd-pstate.h"
-/*
- * Abbreviations:
- * amd_pstate_ut: used as a shortform for AMD P-State unit test.
- * It helps to keep variable names smaller, simpler
- */
-enum amd_pstate_ut_result {
- AMD_PSTATE_UT_RESULT_PASS,
- AMD_PSTATE_UT_RESULT_FAIL,
-};
struct amd_pstate_ut_struct {
const char *name;
- void (*func)(u32 index);
- enum amd_pstate_ut_result result;
+ int (*func)(u32 index);
};
/*
* Kernel module for testing the AMD P-State unit test
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
-static void amd_pstate_ut_check_driver(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
@@ -77,71 +71,67 @@ static bool get_shared_mem(void)
/*
* check the _CPC object is present in SBIOS.
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
- if (acpi_cpc_valid())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!acpi_cpc_valid()) {
pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ return -EINVAL;
}
+
+ return 0;
}
-static void amd_pstate_ut_pstate_enable(u32 index)
+/*
+ * check if amd pstate is enabled
+ */
+static int amd_pstate_ut_check_enabled(u32 index)
{
- int ret = 0;
u64 cppc_enable = 0;
+ int ret;
- ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ if (get_shared_mem())
+ return 0;
+
+ ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
- return;
+ pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ return ret;
}
- if (cppc_enable)
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+
+ if (!cppc_enable) {
pr_err("%s amd pstate must be enabled!\n", __func__);
+ return -EINVAL;
}
-}
-/*
- * check if amd pstate is enabled
- */
-static void amd_pstate_ut_check_enabled(u32 index)
-{
- if (get_shared_mem())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else
- amd_pstate_ut_pstate_enable(index);
+ return 0;
}
/*
* check if performance values are reasonable.
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
*/
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
{
int cpu = 0, ret = 0;
u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
u64 cap1 = 0;
struct cppc_perf_caps cppc_perf;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
+ union perf_cached cur_perf;
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
- for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
if (get_shared_mem()) {
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
highest_perf = cppc_perf.highest_perf;
@@ -149,52 +139,46 @@ static void amd_pstate_ut_check_perf(u32 index)
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
} else {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
- nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
- lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
- lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+ nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
}
- if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ cur_perf = READ_ONCE(cpudata->perf);
+ if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
- __func__, cpu, highest_perf, cpudata->highest_perf);
- goto skip_test;
+ __func__, cpu, highest_perf, cur_perf.highest_perf);
+ return -EINVAL;
}
- if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
- (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
- (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (nominal_perf != cur_perf.nominal_perf ||
+ (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+ (lowest_perf != cur_perf.lowest_perf)) {
pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, nominal_perf, cpudata->nominal_perf,
- lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
- lowest_perf, cpudata->lowest_perf);
- goto skip_test;
+ __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+ lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+ lowest_perf, cur_perf.lowest_perf);
+ return -EINVAL;
}
if (!((highest_perf >= nominal_perf) &&
(nominal_perf > lowest_nonlinear_perf) &&
- (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_nonlinear_perf >= lowest_perf) &&
(lowest_perf > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
/*
@@ -202,61 +186,50 @@ skip_test:
* max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
* check max freq when set support boost mode.
*/
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
{
int cpu = 0;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
- u32 nominal_freq_khz;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
- nominal_freq_khz = cpudata->nominal_freq*1000;
- if (!((cpudata->max_freq >= nominal_freq_khz) &&
- (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
- (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
- (cpudata->min_freq > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+ (policy->cpuinfo.min_freq > 0))) {
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, nominal_freq_khz,
- cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- goto skip_test;
+ __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+ return -EINVAL;
}
- if (cpudata->min_freq != policy->min) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
- __func__, cpu, cpudata->min_freq, policy->min);
- goto skip_test;
+ if (cpudata->lowest_nonlinear_freq != policy->min) {
+ pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
+ return -EINVAL;
}
if (cpudata->boost_supported) {
- if ((policy->max == cpudata->max_freq) ||
- (policy->max == nominal_freq_khz))
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if ((policy->max != policy->cpuinfo.max_freq) &&
+ (policy->max != cpudata->nominal_freq)) {
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
- __func__, cpu, policy->max, cpudata->max_freq,
- nominal_freq_khz);
- goto skip_test;
+ __func__, cpu, policy->max, policy->cpuinfo.max_freq,
+ cpudata->nominal_freq);
+ return -EINVAL;
}
} else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
static int amd_pstate_set_mode(enum amd_pstate_mode mode)
@@ -268,15 +241,16 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
return amd_pstate_update_status(mode_str, strlen(mode_str));
}
-static void amd_pstate_ut_check_driver(u32 index)
+static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ enum amd_pstate_mode orig_mode = amd_pstate_get_status();
int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
ret = amd_pstate_set_mode(mode1);
if (ret)
- goto out;
+ return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
@@ -285,15 +259,15 @@ static void amd_pstate_ut_check_driver(u32 index)
goto out;
}
}
+
out:
if (ret)
pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
amd_pstate_get_mode_string(mode1),
amd_pstate_get_mode_string(mode2), ret);
- amd_pstate_ut_cases[index].result = ret ?
- AMD_PSTATE_UT_RESULT_FAIL :
- AMD_PSTATE_UT_RESULT_PASS;
+ amd_pstate_set_mode(orig_mode);
+ return ret;
}
static int __init amd_pstate_ut_init(void)
@@ -301,16 +275,12 @@ static int __init amd_pstate_ut_init(void)
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
for (i = 0; i < arr_size; i++) {
- amd_pstate_ut_cases[i].func(i);
- switch (amd_pstate_ut_cases[i].result) {
- case AMD_PSTATE_UT_RESULT_PASS:
+ int ret = amd_pstate_ut_cases[i].func(i);
+
+ if (ret)
+ pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+ else
pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- case AMD_PSTATE_UT_RESULT_FAIL:
- default:
- pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- }
}
return 0;
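
The reworked tests return directly from inside the loop because the policy reference is bound with __free(put_cpufreq_policy), another <linux/cleanup.h> facility: the release hook fires automatically whenever the variable goes out of scope. A minimal sketch of the idiom, assuming the put_cpufreq_policy cleanup helper from <linux/cpufreq.h>:

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

static int example_check(unsigned int cpu)
{
	/* Reference dropped automatically on every exit path */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return -ENODEV;

	return policy->min <= policy->max ? 0 : -EINVAL;
}
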
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b63863f77c67..298e92d8cc03 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -84,7 +85,6 @@ static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
-static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
@@ -136,6 +136,19 @@ static struct quirk_entry quirk_amd_7k62 = {
.lowest_freq = 550,
};
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
+
static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
/**
@@ -177,235 +190,273 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
- u64 epp;
+ u64 value;
int ret;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
- }
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
-{
- s16 epp;
- int index = -EINVAL;
-
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
-
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
- }
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
- return index;
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
+{
+ return static_call(amd_pstate_get_epp)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
+ u64 epp;
+ int ret;
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
+ }
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
- int ret;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ if (value == prev)
+ return 0;
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
+ if (fast_switch) {
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
+ return 0;
} else {
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
+ if (ret)
return ret;
- }
- cpudata->epp_cached = epp;
}
- return ret;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
+
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
+ bool fast_switch)
+{
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
+ max_perf, epp, fast_switch);
}
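/*
 * Sketch of the CPPC_REQ packing used by msr_update_perf() above. The field
 * positions are inferred from the open-coded masks deleted in this patch
 * (max_perf bits 7:0, min_perf bits 15:8, des_perf bits 23:16, epp bits
 * 31:24); treat the exact mask definitions as an assumption here:
 *
 *	u64 req = FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf) |
 *		  FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf) |
 *		  FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf) |
 *		  FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 *	u8 epp_out = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, req);
 *
 * FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> shift and mask against a
 * GENMASK-defined field, replacing the manual "value &= ~mask; value |=
 * x << shift" sequences that the old code used.
 */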
-static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
- int pref_index)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- int epp = -EINVAL;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
int ret;
- if (!pref_index)
- epp = cpudata->epp_default;
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
- if (epp == -EINVAL)
- epp = epp_values[pref_index];
+ if (value == prev)
+ return 0;
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("EPP cannot be set under performance policy\n");
- return -EBUSY;
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
- ret = amd_pstate_set_epp(cpudata, epp);
+	/* update the cached value so that msr_update_perf() can check against it */
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
}
-static inline int pstate_enable(bool enable)
-{
- int ret, cpu;
- unsigned long logical_proc_id_mask = 0;
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
- if (enable == cppc_enabled)
- return 0;
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ return static_call(amd_pstate_set_epp)(policy, epp);
+}
- for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_package_id(cpu);
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
- if (test_bit(logical_id, &logical_proc_id_mask))
- continue;
- set_bit(logical_id, &logical_proc_id_mask);
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
- enable);
- if (ret)
- return ret;
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
}
- cppc_enabled = enable;
- return 0;
-}
-
-static int cppc_enable(bool enable)
-{
- int cpu, ret = 0;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (enable == cppc_enabled)
+ if (epp == epp_cached)
return 0;
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
- if (ret)
- return ret;
-
- /* Enable autonomous mode for EPP */
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- /* Set desired perf as zero to allow EPP firmware control */
- perf_ctrls.desired_perf = 0;
- ret = cppc_set_perf(cpu, &perf_ctrls);
- if (ret)
- return ret;
- }
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
- cppc_enabled = enable;
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
+{
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+}
+
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
+{
+ return cppc_set_enable(policy->cpu, 1);
+}
-static inline int amd_pstate_enable(bool enable)
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
+
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
- return static_call(amd_pstate_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(policy);
}
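/*
 * The DEFINE_STATIC_CALL()/static_call() pairs above implement the MSR vs.
 * shared-memory dispatch: the call site is patched once at init time rather
 * than taking an indirect branch on every invocation. Minimal sketch of the
 * pattern (hypothetical names, editorial example):
 *
 *	static int msr_op(int x)   { return x; }
 *	static int shmem_op(int x) { return -x; }
 *	DEFINE_STATIC_CALL(my_op, msr_op);	/\* default: MSR flavour *\/
 *
 *	if (!cpu_feature_enabled(X86_FEATURE_CPPC))
 *		static_call_update(my_op, shmem_op); /\* retarget once at init *\/
 *	ret = static_call(my_op)(42);		/\* direct call after patching *\/
 *
 * amd_pstate_init() below performs exactly this retargeting for the
 * shared-memory (shmem_*) implementations.
 */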
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int msr_init_perf(struct amd_cpudata *cpudata)
{
- u64 cap1;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
- WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+ /*
+	 * Clear out the min_perf part to check whether the rest of the MSR is 0; if so, the
+	 * min_perf value is the one specified through the BIOS option.
+ */
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
+
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
+
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
@@ -420,24 +471,63 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void cppc_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(policy, epp);
+
+ if (ret)
+ return ret;
+ }
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -446,8 +536,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -473,117 +563,106 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- unsigned long max_freq;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
- u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- u64 value = prev;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ if (!policy)
+ return;
- max_freq = READ_ONCE(cpudata->max_limit_freq);
- policy->cur = div_u64(des_perf * max_freq, max_perf);
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
+
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
- /* limit the max perf when core performance boost feature is disabled */
- if (!cpudata->boost_supported)
- max_perf = min_t(unsigned long, nominal_perf, max_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- goto cpufreq_policy_put;
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
+}
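/*
 * The "__free(put_cpufreq_policy)" annotation above uses the scope-based
 * cleanup machinery from <linux/cleanup.h>: the named cleanup helper runs
 * automatically when the variable goes out of scope, which is why the old
 * "goto cpufreq_policy_put / cpufreq_cpu_put()" tail could be deleted.
 * Rough equivalence (sketch, assuming the usual DEFINE_FREE() NULL check):
 *
 *	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
 *		cpufreq_cpu_get(cpudata->cpu);
 *	if (!policy)
 *		return;		(cleanup helper skips a NULL pointer)
 *	...			(every later return drops the reference)
 *
 * guard(mutex)(&lock), used later in this patch, applies the same idea to
 * mutex lock/unlock pairs.
 */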
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
+{
+ /*
+	 * Initialize the lower frequency limit (i.e. policy->min) with the
+	 * lowest_nonlinear_freq or the min frequency (if specified) in BIOS,
+	 * overriding the initial value set by the cpufreq core and the
+	 * amd-pstate qos_requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ if (!policy)
+ return -EINVAL;
-cpufreq_policy_put:
- cpufreq_cpu_put(policy);
-}
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ }
+
+ cpufreq_verify_within_cpu_limits(policy_data);
return 0;
}
-static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf;
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
-
- lowest_perf = READ_ONCE(cpudata->lowest_perf);
- if (min_limit_perf < lowest_perf)
- min_limit_perf = lowest_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
- WRITE_ONCE(cpudata->min_limit_freq, policy->min);
- return 0;
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+ } else {
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+ }
+
+ WRITE_ONCE(cpudata->perf, perf);
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
@@ -594,8 +673,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, fast_switch, policy->governor->flags);
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
@@ -623,10 +703,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return;
@@ -636,67 +716,43 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
-
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cap_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
- cpufreq_cpu_put(policy);
}
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- u32 highest_perf, nominal_perf, nominal_freq, max_freq;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u32 nominal_freq, max_freq;
int ret = 0;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
nominal_freq = READ_ONCE(cpudata->nominal_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~GENMASK_ULL(7, 0);
- value |= on ? highest_perf : nominal_perf;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
- ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
- if (ret) {
- cpufreq_cpu_release(policy);
- pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
- cpudata->cpu, ret);
- return ret;
- }
- }
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
if (on)
policy->cpuinfo.max_freq = max_freq;
- else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
- policy->cpuinfo.max_freq = nominal_freq * 1000;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -718,12 +774,9 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state);
- WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
@@ -743,10 +796,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- /* at least one CPU supports CPB, even if others fail later on to set up */
- current_pstate_driver->boost_enabled = true;
-
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
ret = -EIO;
@@ -765,19 +815,9 @@ exit_err:
static void amd_perf_ctl_reset(unsigned int cpu)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
-/*
- * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
- * due to locking, so queue the work for later.
- */
-static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
-{
- sched_set_itmt_support();
-}
-static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-
#define CPPC_MAX_PERF U8_MAX
static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
@@ -786,55 +826,44 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
if (!amd_pstate_prefcore)
return;
- cpudata->hw_prefcore = true;
+ /* should use amd-hfi instead */
+ if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS) &&
+ IS_ENABLED(CONFIG_AMD_HFI)) {
+ amd_pstate_prefcore = false;
+ return;
+ }
- /*
- * The priorities can be set regardless of whether or not
- * sched_set_itmt_support(true) has been called and it is valid to
- * update them at any time after it has been called.
- */
- sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
+ cpudata->hw_prefcore = true;
- schedule_work(&sched_prefcore_work);
+ /* Priorities must be initialized before ITMT support can be toggled on. */
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
-static void amd_pstate_update_limits(unsigned int cpu)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
- int ret;
bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
- if (!policy)
+ if (!amd_pstate_prefcore)
return;
- cpudata = policy->driver_data;
-
- if (!amd_pstate_prefcore)
+ if (amd_get_highest_perf(cpu, &cur_high))
return;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ cpudata = policy->driver_data;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- if (cur_high < CPPC_MAX_PERF)
+ if (cur_high < CPPC_MAX_PERF) {
sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
}
-
-free_cpufreq_put:
- cpufreq_cpu_put(policy);
-
- if (!highest_perf_changed)
- cpufreq_update_policy(cpu);
-
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -843,11 +872,11 @@ free_cpufreq_put:
*/
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 transition_delay_ns;
+ int transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL) {
- if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ if (transition_delay_ns < 0) {
+ if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
return AMD_PSTATE_TRANSITION_DELAY;
@@ -862,67 +891,55 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
*/
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
- u32 transition_latency;
+ int transition_latency;
transition_latency = cppc_get_transition_latency(cpu);
- if (transition_latency == CPUFREQ_ETERNAL)
+ if (transition_latency < 0)
return AMD_PSTATE_TRANSITION_LATENCY;
return transition_latency;
}
/*
- * amd_pstate_init_freq: Initialize the max_freq, min_freq,
- * nominal_freq and lowest_nonlinear_freq for
- * the @cpudata object.
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
*
- * Requires: highest_perf, lowest_perf, nominal_perf and
- * lowest_nonlinear_perf members of @cpudata to be
- * initialized.
+ * Requires: all perf members of @cpudata to be initialized.
*
- * Returns 0 on success, non-zero value on failure.
+ * Returns 0 on success, non-zero value on failure.
*/
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- int ret;
- u32 min_freq, max_freq;
- u64 numerator;
- u32 nominal_perf, nominal_freq;
- u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
- u32 boost_ratio, lowest_nonlinear_ratio;
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
+ int ret;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
-
- if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq * 1000;
- else
- min_freq = cppc_perf.lowest_freq * 1000;
+ perf = READ_ONCE(cpudata->perf);
if (quirks && quirks->nominal_freq)
- nominal_freq = quirks->nominal_freq ;
+ nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ nominal_freq *= 1000;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
- ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
- if (ret)
- return ret;
- boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
- max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ min_freq *= 1000;
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
- lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->min_freq, min_freq);
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->max_freq, max_freq);
/**
 * Below values need to be initialized correctly, otherwise the driver will fail to load
@@ -932,13 +949,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
*/
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
- min_freq, max_freq, nominal_freq * 1000);
+ min_freq, max_freq, nominal_freq);
return -EINVAL;
}
- if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
- lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ lowest_nonlinear_freq, min_freq, nominal_freq);
return -EINVAL;
}
@@ -947,9 +964,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -980,19 +998,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
- policy->min = min_freq;
- policy->max = max_freq;
+ perf = READ_ONCE(cpudata->perf);
+
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
@@ -1001,7 +1023,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
@@ -1014,9 +1036,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- cpudata->max_limit_freq = max_freq;
- cpudata->min_limit_freq = min_freq;
-
policy->driver_data = cpudata;
if (!current_pstate_driver->adjust_perf)
@@ -1027,6 +1046,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1034,6 +1054,10 @@ free_cpudata1:
static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -1041,28 +1065,6 @@ static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
kfree(cpudata);
}
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
-}
-
/* Sysfs attributes */
/*
@@ -1073,27 +1075,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = READ_ONCE(cpudata->max_freq);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -1103,18 +1105,17 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
- perf = READ_ONCE(cpudata->highest_perf);
+ cpudata = policy->driver_data;
- return sysfs_emit(buf, "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
}
static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ u8 perf;
struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->prefcore_ranking);
@@ -1158,6 +1159,7 @@ static ssize_t store_energy_performance_preference(
struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
+ u8 epp;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
@@ -1167,53 +1169,89 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
+
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
- return ret ?: count;
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int preference;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_enable(false);
+ if (amd_pstate_prefcore)
+ sched_clear_itmt_support();
+
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int amd_pstate_register_driver(int mode)
{
int ret;
- if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
- else if (mode == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
- else
- return -EINVAL;
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
cppc_state = mode;
- ret = amd_pstate_enable(true);
- if (ret) {
- pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
- ret);
- amd_pstate_driver_cleanup();
- return ret;
- }
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
@@ -1221,6 +1259,10 @@ static int amd_pstate_register_driver(int mode)
return ret;
}
+ /* Enable ITMT support once all CPUs have initialized their asym priorities. */
+ if (amd_pstate_prefcore)
+ sched_set_itmt_support();
+
return 0;
}
@@ -1297,6 +1339,12 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1309,8 +1357,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
@@ -1319,13 +1369,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1334,9 +1381,7 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1411,10 +1456,10 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
struct device *dev;
- u64 value;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -1430,7 +1475,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
@@ -1446,48 +1490,49 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
- /* It will be updated by governor */
- policy->cur = policy->cpuinfo.min_freq;
+ perf = READ_ONCE(cpudata->perf);
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
+ if (ret)
+ return ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_cap1_cached, value);
- }
+ current_pstate_driver->adjust_perf = NULL;
return 0;
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1497,6 +1542,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
kfree(cpudata);
policy->driver_data = NULL;
}
@@ -1504,73 +1554,26 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
- u64 value;
- s16 epp;
-
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
-
- if (min_limit_perf < min_perf)
- min_limit_perf = min_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = max_perf;
-
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
+ union perf_cached perf;
+ u8 epp;
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
-
- cpudata->epp_policy = cpudata->policy;
-
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- return epp;
- }
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
+ else
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- /* Set initial EPP value */
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- }
+ perf = READ_ONCE(cpudata->perf);
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1581,12 +1584,9 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
- policy->cpuinfo.max_freq, policy->max);
-
cpudata->policy = policy->policy;
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, true);
if (ret)
return ret;
@@ -1599,111 +1599,58 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
- int ret;
-
- ret = amd_pstate_enable(true);
- if (ret)
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
-
- value = READ_ONCE(cpudata->cppc_req_cached);
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- }
-}
-
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
-
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
-
- return 0;
+ return amd_pstate_cppc_enable(policy);
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
-
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.max_perf = min_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- }
- mutex_unlock(&amd_pstate_limits_lock);
+ /*
+	 * Reset CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is onlined normally afterwards, the
+	 * limits, EPP and desired perf will be reset to the cached values in the cpudata struct.
+ */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
}
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
-
- if (cpudata->suspended)
- return 0;
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ /*
+	 * Reset CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is simply resumed without kexec,
+	 * the limits, EPP and desired perf will be reset to the cached values in the cpudata struct.
+ */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
+ if (ret)
+ return ret;
- return 0;
-}
+	/* set this flag to avoid setting the core offline */
+ cpudata->suspended = true;
-static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
- pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
return 0;
}
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
-
- /* avoid suspending when EPP is not enabled */
- if (cppc_state != AMD_PSTATE_ACTIVE)
- return 0;
-
- /* set this flag to avoid setting core offline*/
- cpudata->suspended = true;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
- /* disable CPPC in lowlevel firmware */
- ret = amd_pstate_enable(false);
- if (ret)
- pr_err("failed to suspend, return %d\n", ret);
-
- return 0;
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
}
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
@@ -1711,12 +1658,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ int ret;
/* enable amd pstate from suspend state */
- amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ ret = amd_pstate_epp_update_limit(policy, false);
+ if (ret)
+ return ret;
cpudata->suspended = false;
}
@@ -1731,8 +1678,10 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1741,13 +1690,13 @@ static struct cpufreq_driver amd_pstate_driver = {
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
- .verify = amd_pstate_epp_verify_policy,
+ .verify = amd_pstate_verify,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
- .offline = amd_pstate_epp_cpu_offline,
- .online = amd_pstate_epp_cpu_online,
- .suspend = amd_pstate_epp_suspend,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
.set_boost = amd_pstate_set_boost,
@@ -1755,26 +1704,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.attr = amd_pstate_epp_attr,
};
-static int __init amd_pstate_set_driver(int mode_idx)
-{
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-/**
+/*
* CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
 * Show a debug message that helps to check whether the CPU has CPPC support, when
 * diagnosing driver load issues.
*/
@@ -1864,10 +1794,10 @@ static int __init amd_pstate_init(void)
if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
- * 2. Server platforms
+ * 2. Server platforms with CPUs older than Family 0x1A.
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server()) {
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -1875,31 +1805,21 @@ static int __init amd_pstate_init(void)
cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
}
- switch (cppc_state) {
- case AMD_PSTATE_DISABLE:
+ if (cppc_state == AMD_PSTATE_DISABLE) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
- case AMD_PSTATE_PASSIVE:
- case AMD_PSTATE_ACTIVE:
- case AMD_PSTATE_GUIDED:
- ret = amd_pstate_set_driver(cppc_state);
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
}
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- if (cppc_state != AMD_PSTATE_ACTIVE)
- current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
if (amd_pstate_prefcore) {
@@ -1908,17 +1828,10 @@ static int __init amd_pstate_init(void)
return ret;
}
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
- if (ret) {
- pr_err("failed to enable driver mode(%d)\n", cppc_state);
- return ret;
- }
-
- ret = cpufreq_register_driver(current_pstate_driver);
+ ret = amd_pstate_register_driver(cppc_state);
if (ret) {
pr_err("failed to register with return %d\n", ret);
- goto disable_driver;
+ return ret;
}
dev_root = bus_get_dev_root(&cpu_subsys);
@@ -1935,8 +1848,6 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
-disable_driver:
- amd_pstate_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cd573bc6b6db..cb45fdca27a6 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -13,6 +13,38 @@
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that support the preferred core feature, the highest_perf value maybe
+ * configured to any value in the range 166-255 by the firmware (because the preferred
+ * core ranking is encoded in the highest_perf value). To maintain consistency across
+ * all platforms, we split the highest_perf and preferred core ranking values into
+ * cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
+ */
+union perf_cached {
+ struct {
+ u8 highest_perf;
+ u8 nominal_perf;
+ u8 lowest_nonlinear_perf;
+ u8 lowest_perf;
+ u8 min_limit_perf;
+ u8 max_limit_perf;
+ u8 bios_min_perf;
+ };
+ u64 val;
+};
+
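/*
 * All seven u8 members of the bitfield struct overlay the single u64 'val',
 * so a whole snapshot can be read and published atomically with
 * READ_ONCE()/WRITE_ONCE() on the containing member, as the .c file does
 * (sketch of the intent):
 *
 *	union perf_cached perf = READ_ONCE(cpudata->perf);  (one 64-bit load)
 *	perf.max_limit_perf = freq_to_perf(perf, nominal_freq, policy->max);
 *	WRITE_ONCE(cpudata->perf, perf);		    (one 64-bit store)
 *
 * Concurrent readers therefore never observe a half-updated mix of limit
 * fields.
 */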
/**
* struct amd_aperf_mperf
* @aperf: actual performance frequency clock count
@@ -30,24 +62,11 @@ struct amd_aperf_mperf {
* @cpu: CPU number
* @req: constraint request to apply
* @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
+ * @perf: cached performance-related data
* @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
* priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
* @min_limit_freq: Cached value of policy->min (in khz)
* @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
 * @nominal_freq: the frequency (in khz) that maps to nominal_perf
 * @lowest_nonlinear_freq: the frequency (in khz) that maps to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
@@ -57,10 +76,8 @@ struct amd_aperf_mperf {
 * @hw_prefcore: check whether HW supports preferred core feature.
 * Only when hw_prefcore and early prefcore param are true,
 * AMD P-State driver supports preferred core feature.
- * @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
*
 * The amd_cpudata is the key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
@@ -71,18 +88,11 @@ struct amd_cpudata {
struct freq_qos_request req[2];
u64 cppc_req_cached;
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
- u32 prefcore_ranking;
- u32 min_limit_perf;
- u32 max_limit_perf;
- u32 min_limit_freq;
- u32 max_limit_freq;
+ union perf_cached perf;
- u32 max_freq;
- u32 min_freq;
+ u8 prefcore_ranking;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 nominal_freq;
u32 lowest_nonlinear_freq;
@@ -94,13 +104,9 @@ struct amd_cpudata {
bool hw_prefcore;
 /* EPP feature related attributes */
- s16 epp_policy;
- s16 epp_cached;
u32 policy;
- u64 cppc_cap1_cached;
bool suspended;
- s16 epp_default;
- bool boost_state;
+ u8 epp_default;
};
/*
@@ -115,6 +121,7 @@ enum amd_pstate_mode {
AMD_PSTATE_MAX,
};
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
int amd_pstate_update_status(const char *buf, size_t size);
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 59b19b9975e8..13fed4b9e02b 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
pci_dev_put(pcidev);
}
- if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 4dcacab9b4bf..b1d29b7af232 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -22,11 +22,14 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define APPLE_DVFS_CMD 0x20
-#define APPLE_DVFS_CMD_BUSY BIT(31)
-#define APPLE_DVFS_CMD_SET BIT(25)
-#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
-#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22)
+#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22
+#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD_PS1_SHIFT 0
/* Same timebase as CPU counter (24MHz) */
#define APPLE_DVFS_LAST_CHG_TIME 0x38
@@ -35,6 +38,9 @@
* Apple ran out of bits and had to shift this in T8112...
*/
#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3
+#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0)
#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
@@ -52,12 +58,15 @@
#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
-#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+#define APPLE_DVFS_TRANSITION_TIMEOUT 400
struct apple_soc_cpufreq_info {
+ bool has_ps2;
u64 max_pstate;
u64 cur_pstate_mask;
u64 cur_pstate_shift;
+ u64 ps1_mask;
+ u64 ps1_shift;
};
struct apple_cpu_priv {
@@ -68,25 +77,47 @@ struct apple_cpu_priv {
static struct cpufreq_driver apple_soc_cpufreq_driver;
+static const struct apple_soc_cpufreq_info soc_s5l8960x_info = {
+ .has_ps2 = false,
+ .max_pstate = 7,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X,
+ .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT,
+};
+
static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .has_ps2 = true,
.max_pstate = 15,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .has_ps2 = false,
.max_pstate = 31,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_default_info = {
+ .has_ps2 = false,
.max_pstate = 15,
.cur_pstate_mask = 0, /* fallback */
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
+ .compatible = "apple,s5l8960x-cluster-cpufreq",
+ .data = &soc_s5l8960x_info,
+ },
+ {
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
},
@@ -103,13 +134,19 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
if (priv->info->cur_pstate_mask) {
- u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
} else {
@@ -148,9 +185,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
return -EIO;
}
- reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg &= ~priv->info->ps1_mask;
+ reg |= pstate << priv->info->ps1_shift;
+ if (priv->info->has_ps2) {
+ reg &= ~APPLE_DVFS_CMD_PS2;
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ }
reg |= APPLE_DVFS_CMD_SET;
writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
@@ -195,12 +235,6 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
return 0;
}
-static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Filled in below if boost is enabled */
- NULL,
-};
-
static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, i;
@@ -275,23 +309,13 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
policy->fast_switch_possible = true;
policy->suspend_freq = freq_table[0].frequency;
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- } else {
- apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- apple_soc_cpufreq_driver.boost_enabled = true;
- }
- }
-
return 0;
out_free_cpufreq_table:
@@ -326,7 +350,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.target_index = apple_soc_cpufreq_set_target,
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
- .attr = apple_soc_cpufreq_hw_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index bea41ccabf1f..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -102,11 +102,7 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- /*
- * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
- * unstable because we do not know how to configure it properly.
- */
- /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
@@ -269,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -277,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 7a979db81f09..d96c1718f7f8 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
{
int opps_index, nb_cpus = num_possible_cpus();
- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+ for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
int i;
/* If cpu_dev is NULL then we reached the end of the array */
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- static struct cpumask cpus;
+ static struct cpumask cpus, shared_cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
@@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void)
* divisions of it).
*/
for_each_cpu(cpu, &cpus) {
- struct cpumask shared_cpus;
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 17a4c174553d..36051880640b 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -150,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.get = bmips_cpufreq_get,
.init = bmips_cpufreq_init,
.exit = bmips_cpufreq_exit,
- .attr = cpufreq_generic_attr,
.name = BMIPS_CPUFREQ_PREFIX,
};
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ea8438550b49..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -474,13 +474,13 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
- return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
- (rc != -EINVAL));
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+ (rc != -EINVAL);
}
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
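
/*
 * A sketch under stated assumptions, not shown in this hunk: the
 * __free(put_cpufreq_policy) annotation is a scope-based cleanup helper
 * in the style of <linux/cleanup.h>, presumably declared roughly as:
 *
 *	DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
 *		    if (_T) cpufreq_cpu_put(_T))
 *
 * The reference taken by cpufreq_cpu_get() is then dropped automatically
 * when the pointer goes out of scope, which is why the explicit
 * cpufreq_cpu_put() call can be deleted below.
 */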
@@ -720,7 +718,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
@@ -766,7 +763,7 @@ static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
- { .compatible = BRCM_AVS_CPU_DATA },
+ { .compatible = "brcm,avs-cpu-data-mem" },
{ }
};
MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
@@ -777,7 +774,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = {
.of_match_table = brcm_avs_cpufreq_match,
},
.probe = brcm_avs_cpufreq_probe,
- .remove_new = brcm_avs_cpufreq_remove,
+ .remove = brcm_avs_cpufreq_remove,
};
module_platform_driver(brcm_avs_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2b8708475ac7..e23d9abea135 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -26,43 +26,15 @@
#include <acpi/cppc_acpi.h>
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
-static bool boost_supported;
-
-struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE + 1];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
- u32 oem_revision;
-};
-
-static struct cppc_workaround_oem_info wa_info[] = {
- {
- .oem_id = "HISI ",
- .oem_table_id = "HIP07 ",
- .oem_revision = 0,
- }, {
- .oem_id = "HISI ",
- .oem_table_id = "HIP08 ",
- .oem_revision = 0,
- }
-};
-
static struct cpufreq_driver cppc_cpufreq_driver;
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
static enum {
FIE_UNSET = -1,
FIE_ENABLED,
FIE_DISABLED
} fie_disabled = FIE_UNSET;
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
module_param(fie_disabled, int, 0444);
MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
@@ -78,9 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -116,8 +86,10 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
+ if (!perf)
+ return;
+
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
perf <<= SCHED_CAPACITY_SHIFT;
@@ -241,7 +213,7 @@ static void __init cppc_freq_invariance_init(void)
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
@@ -336,6 +308,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns < 0)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
+
/*
 * The PCC subspace describes the rate at which the platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -358,19 +340,18 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
@@ -420,6 +401,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
struct cppc_cpudata *cpu_data;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -487,6 +471,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
int step;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -500,7 +487,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
return 0;
}
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data;
+ struct em_data_callback em_cb =
+ EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+ cpu_data = policy->driver_data;
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+ get_perf_level_count(policy), &em_cb,
+ cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
@@ -515,7 +514,7 @@ static int populate_efficiency_class(void)
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
- return -EINVAL;
+ return;
}
/*
@@ -532,26 +531,11 @@ static int populate_efficiency_class(void)
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
- return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
- struct cppc_cpudata *cpu_data;
- struct em_data_callback em_cb =
- EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
- cpu_data = policy->driver_data;
- em_dev_register_perf_domain(get_cpu_device(policy->cpu),
- get_perf_level_count(policy), &em_cb,
- cpu_data->shared_cpu_map, 0);
}
#else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
{
- return 0;
}
#endif
@@ -579,8 +563,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- list_add(&cpu_data->node, &cpu_data_list);
-
return cpu_data;
free_mask:
@@ -595,7 +577,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
- list_del(&cpu_data->node);
free_cpumask_var(cpu_data->shared_cpu_map);
kfree(cpu_data);
policy->driver_data = NULL;
@@ -621,7 +602,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
- policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+ caps->highest_perf : caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -629,7 +611,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -662,7 +644,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
- boost_supported = true;
+ policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
@@ -710,8 +692,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -724,40 +705,69 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1->delivered,
fb_ctrs_t0->delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
+ /*
+ * Avoid divide-by-zero and unchanged feedback counters.
+ * Leave it for callers to handle.
+ */
if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
+ return 0;
return (reference_perf * delta_delivered) / delta_reference;
}
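
/*
 * Worked example with made-up numbers: if reference_perf = 100 and,
 * between the two samples, the reference counter advanced by 10000
 * ticks while the delivered counter advanced by 8000, then
 * delivered perf = 100 * 8000 / 10000 = 80, i.e. the CPU averaged 80%
 * of its reference performance over the sampling window.
 */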
+static int cppc_get_perf_ctrs_sample(int cpu,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
+}
+
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
if (!policy)
- return -ENODEV;
+ return 0;
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
- if (ret)
- return 0;
+ ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
+ if (ret) {
+ if (ret == -EFAULT)
+ /* Any of the associated CPPC regs is 0. */
+ goto out_invalid_counters;
+ else
+ return 0;
+ }
- udelay(2); /* 2usec delay between sampling */
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
+ if (!delivered_perf)
+ goto out_invalid_counters;
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
- if (ret)
- return 0;
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+out_invalid_counters:
+ /*
+ * Feedback counters could be unchanged or 0 when a CPU enters a
+ * low-power idle state, e.g. clock-gated or power-gated.
+ * Use the desired perf to reflect the frequency. Get the latest
+ * register value first, as some platforms may update the actual
+ * delivered perf there; if that fails, resort to the cached desired
+ * perf.
+ */
+ if (cppc_get_desired_perf(cpu, &delivered_perf))
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
@@ -768,11 +778,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
- if (!boost_supported) {
- pr_err("BOOST not supported by CPU or firmware\n");
- return -EINVAL;
- }
-
if (state)
policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
@@ -792,15 +797,124 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+ bool val;
+ int ret;
+
+ ret = cppc_get_auto_sel(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_sel(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 usec;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &usec);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_act_window(policy->cpu, usec);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_epp_perf(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_epp(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
+ &auto_select,
+ &auto_act_window,
+ &energy_performance_preference_val,
NULL,
};
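
/*
 * Illustrative usage, paths assumed rather than taken from this patch:
 * once the driver registers, these attributes appear per policy in
 * sysfs, e.g.
 *
 *	/sys/devices/system/cpu/cpufreq/policy0/auto_select
 *	/sys/devices/system/cpu/cpufreq/policy0/auto_act_window
 *	/sys/devices/system/cpu/cpufreq/policy0/energy_performance_preference_val
 *
 * Writing "1" to auto_select hands P-state selection to the platform;
 * reads return "<unsupported>" when the underlying _CPC register is
 * absent, per the -EOPNOTSUPP handling above.
 */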
static struct cpufreq_driver cppc_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
@@ -812,57 +926,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.name = "cppc_cpufreq",
};
-/*
- * HISI platform does not support delivered performance counter and
- * reference performance counter. It can calculate the performance using the
- * platform specific mechanism. We reuse the desired performance register to
- * store the real performance calculated by the platform.
- */
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data;
- u64 desired_perf;
- int ret;
-
- if (!policy)
- return -ENODEV;
-
- cpu_data = policy->driver_data;
-
- cpufreq_cpu_put(policy);
-
- ret = cppc_get_desired_perf(cpu, &desired_perf);
- if (ret < 0)
- return -EIO;
-
- return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
-}
-
-static void cppc_check_hisi_workaround(void)
-{
- struct acpi_table_header *tbl;
- acpi_status status = AE_OK;
- int i;
-
- status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
- if (ACPI_FAILURE(status) || !tbl)
- return;
-
- for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
- if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
- !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision) {
- /* Overwrite the get() callback */
- cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
- fie_disabled = FIE_DISABLED;
- break;
- }
- }
-
- acpi_put_table(tbl);
-}
-
static int __init cppc_cpufreq_init(void)
{
int ret;
@@ -870,7 +933,6 @@ static int __init cppc_cpufreq_init(void)
if (!acpi_cpc_valid())
return -ENODEV;
- cppc_check_hisi_workaround();
cppc_freq_invariance_init();
populate_efficiency_class();
@@ -881,24 +943,10 @@ static int __init cppc_cpufreq_init(void)
return ret;
}
-static inline void free_cpu_data(void)
-{
- struct cppc_cpudata *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
- free_cpumask_var(iter->shared_cpu_map);
- list_del(&iter->node);
- kfree(iter);
- }
-
-}
-
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
cppc_freq_invariance_exit();
-
- free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 18942bfe9c95..cd1816a12bb9 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,10 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
+ { .compatible = "airoha,en7581", },
+
+ { .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h6", },
{ .compatible = "allwinner,sun50i-h616", },
{ .compatible = "allwinner,sun50i-h618", },
@@ -140,6 +144,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra20", },
{ .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "nvidia,tegra234", },
@@ -172,6 +177,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm8350", },
{ .compatible = "qcom,sm8450", },
{ .compatible = "qcom,sm8550", },
+ { .compatible = "qcom,sm8650", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
@@ -183,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
@@ -234,5 +242,3 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 983443396f8f..7d5079fd1688 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,12 +36,6 @@ struct private_data {
static LIST_HEAD(priv_list);
-static struct freq_attr *cpufreq_dt_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Extra space for boost-attr if required */
- NULL,
-};
-
static struct private_data *cpufreq_dt_find_data(int cpu)
{
struct private_data *priv;
@@ -110,7 +104,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
@@ -120,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- /* Support turbo/boost mode */
- if (policy_has_boost_freq(policy)) {
- /* This gets disabled by core on driver unregister */
- ret = cpufreq_enable_boost_support();
- if (ret)
- goto out_clk_put;
- cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- }
-
return 0;
-
-out_clk_put:
- clk_put(cpu_clk);
-
- return ret;
}
static int cpufreq_online(struct cpufreq_policy *policy)
@@ -169,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
- .attr = cpufreq_dt_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -303,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
if (ret)
goto err;
@@ -345,10 +325,21 @@ static struct platform_driver dt_cpufreq_platdrv = {
.name = "cpufreq-dt",
},
.probe = dt_cpufreq_probe,
- .remove_new = dt_cpufreq_remove,
+ .remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev)
+{
+ struct platform_device_info cpufreq_dt_devinfo = {};
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = dev;
+
+ return platform_device_register_full(&cpufreq_dt_devinfo);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dt_pdev_register);
+
MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index 28c8af7ec5ef..fc1889aeb4f1 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -22,4 +22,6 @@ struct cpufreq_dt_platform_data {
int (*resume)(struct cpufreq_policy *policy);
};
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev);
+
#endif /* __CPUFREQ_DT_H__ */
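
/*
 * A minimal sketch with a hypothetical caller (my_soc_clk_probe is
 * invented for illustration): the new helper lets SoC platform code
 * spawn the "cpufreq-dt" device with itself as parent instead of
 * open-coding platform_device_register_full(), e.g.:
 *
 *	static int my_soc_clk_probe(struct platform_device *pdev)
 *	{
 *		struct platform_device *cpufreq_pdev;
 *
 *		cpufreq_pdev = cpufreq_dt_pdev_register(&pdev->dev);
 *		if (IS_ERR(cpufreq_pdev))
 *			return PTR_ERR(cpufreq_pdev);
 *		// ...stash cpufreq_pdev for platform_device_unregister()
 *		return 0;
 *	}
 */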
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f98c9438760c..852e024facc3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -87,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -107,6 +109,8 @@ void disable_cpufreq(void)
{
off = 1;
}
+EXPORT_SYMBOL_GPL(disable_cpufreq);
+
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
@@ -253,51 +257,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
- if (WARN_ON(!policy))
- return;
-
- lockdep_assert_held(&policy->rwsem);
-
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return NULL;
-
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy)) {
- cpufreq_cpu_release(policy);
- return NULL;
- }
-
- return policy;
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -534,16 +493,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -563,7 +524,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
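
/*
 * Illustrative race, values invented: if a concurrent
 * cpufreq_set_policy() moves the limits from (min 1000, max 2000) to
 * (min 400, max 800), an update running in parallel here may observe
 * min = 1000 (old) and max = 800 (new). The min > max check above
 * collapses that to min = max = 800, so a thermal or user-set cap is
 * never exceeded even when the reads are torn.
 */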
@@ -602,12 +577,12 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
@@ -618,6 +593,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+ int ret;
+
+ if (policy->boost_enabled == enable)
+ return 0;
+
+ policy->boost_enabled = enable;
+
+ ret = cpufreq_driver->set_boost(policy, enable);
+ if (ret)
+ policy->boost_enabled = !policy->boost_enabled;
+
+ return ret;
+}
+
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
@@ -630,21 +621,14 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
- if (policy->boost_enabled == enable)
- return count;
-
- policy->boost_enabled = enable;
-
- cpus_read_lock();
- ret = cpufreq_driver->set_boost(policy, enable);
- cpus_read_unlock();
+ if (!policy->boost_supported)
+ return -EINVAL;
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
- return ret;
- }
+ ret = policy_set_boost(policy, enable);
+ if (!ret)
+ return count;
- return count;
+ return ret;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -680,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -728,18 +712,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
+
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
@@ -784,6 +776,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -803,7 +808,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- char str_governor[16];
+ char str_governor[CPUFREQ_NAME_LEN];
int ret;
ret = sscanf(buf, "%15s", str_governor);
@@ -909,14 +914,14 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
- ret = sscanf(buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &freq);
+ if (ret)
+ return ret;
policy->governor->store_setspeed(policy, freq);
@@ -945,6 +950,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -963,6 +969,7 @@ static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
+ &scaling_cur_freq.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
@@ -982,17 +989,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
- down_read(&policy->rwsem);
+ guard(cpufreq_policy_read)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->show(policy, buf);
- up_read(&policy->rwsem);
+ return fattr->show(policy, buf);
- return ret;
+ return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1000,17 +1006,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
+ return fattr->store(policy, buf, count);
- return ret;
+ return -EBUSY;
}
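
/*
 * A sketch under stated assumptions, not shown in this diff: the
 * cpufreq_policy_read/write guard classes are presumably built on the
 * <linux/cleanup.h> machinery around policy->rwsem, along the lines of:
 *
 *	DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
 *		     down_read(&_T->rwsem), up_read(&_T->rwsem))
 *	DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
 *		     down_write(&_T->rwsem), up_write(&_T->rwsem))
 *
 * guard(...)(policy) then releases the lock automatically at end of
 * scope, which is what lets the early returns above drop the explicit
 * up_read()/up_write() calls.
 */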
static void cpufreq_sysfs_release(struct kobject *kobj)
@@ -1058,6 +1063,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1072,9 +1092,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
- if (ret)
- return ret;
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
@@ -1099,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1147,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (has_target())
cpufreq_stop_governor(policy);
@@ -1158,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- up_write(&policy->rwsem);
+
return ret;
}
@@ -1178,9 +1202,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
- down_write(&policy->rwsem);
+
+ guard(cpufreq_policy_write)(policy);
+
refresh_frequency_limits(policy);
- up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1206,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
- down_write(&policy->rwsem);
- cpufreq_stats_free_table(policy);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ }
kobject_put(kobj);
/*
@@ -1259,6 +1284,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}
+ init_rwsem(&policy->rwsem);
+
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1281,12 +1308,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
}
INIT_LIST_HEAD(&policy->policy_list);
- init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1355,35 +1380,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+ unsigned int cpu, bool new_policy)
{
- struct cpufreq_policy *policy;
- bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
- pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+ guard(cpufreq_policy_write)(policy);
- /* Check if this CPU already has a policy to manage it */
- policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy) {
- WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- if (!policy_is_inactive(policy))
- return cpufreq_add_policy_cpu(policy, cpu);
-
- /* This is the only online CPU for the policy. Start over. */
- new_policy = false;
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- policy->governor = NULL;
- } else {
- new_policy = true;
- policy = cpufreq_policy_alloc(cpu);
- if (!policy)
- return -ENOMEM;
- down_write(&policy->rwsem);
- }
+ policy->cpu = cpu;
+ policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1406,13 +1413,9 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
- goto out_free_policy;
+ goto out_clear_policy;
}
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
- policy->boost_enabled = true;
-
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1475,6 +1478,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1520,7 +1527,7 @@ static int cpufreq_online(unsigned int cpu)
* frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
- pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
__func__, policy->cpu, old_freq, policy->cur);
}
}
@@ -1538,7 +1545,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sugov_eas_rebuild_sd() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -1557,20 +1564,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
- up_write(&policy->rwsem);
-
- kobject_uevent(&policy->kobj, KOBJ_ADD);
-
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-
- /* Register cpufreq cooling only for a new policy */
- if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
- policy->cdev = of_cpufreq_cooling_register(policy);
-
- pr_debug("initialization complete\n");
-
return 0;
out_destroy_policy:
@@ -1585,14 +1578,72 @@ out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-out_free_policy:
+out_clear_policy:
cpumask_clear(policy->cpus);
- up_write(&policy->rwsem);
- cpufreq_policy_free(policy);
return ret;
}
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ }
+
+ ret = cpufreq_policy_online(policy, cpu, new_policy);
+ if (ret) {
+ cpufreq_policy_free(policy);
+ return ret;
+ }
+
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
+
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
+ /*
+ * Let the per-policy boost flag mirror the cpufreq_driver boost during
+ * initialization for a new policy. For an existing policy, maintain the
+ * previous boost value unless global boost is disabled.
+ */
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
+ (new_policy || !cpufreq_boost_enabled())) {
+ ret = policy_set_boost(policy, cpufreq_boost_enabled());
+ if (ret) {
+ /* If set_boost fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ str_enable_disable(cpufreq_boost_enabled()));
+ }
+ }
+
+ pr_debug("initialization complete\n");
+
+ return 0;
+}
+
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
* @dev: CPU device.
@@ -1644,14 +1695,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
return;
}
- if (has_target())
+ if (has_target()) {
strscpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
- else
- policy->last_policy = policy->policy;
-
- if (has_target())
cpufreq_exit_governor(policy);
+ } else {
+ policy->last_policy = policy->policy;
+ }
/*
* Perform the ->offline() during light-weight tear-down, as
@@ -1680,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
- up_write(&policy->rwsem);
return 0;
}
@@ -1701,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- down_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
- if (cpu_online(cpu))
- __cpufreq_offline(cpu, policy);
+ remove_cpu_dev_symlink(policy, cpu, dev);
- remove_cpu_dev_symlink(policy, cpu, dev);
+ if (!cpumask_empty(policy->real_cpus))
+ return;
- if (!cpumask_empty(policy->real_cpus)) {
- up_write(&policy->rwsem);
- return;
- }
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy
+ * are removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
- /*
- * Unregister cpufreq cooling once all the CPUs of the policy are
- * removed.
- */
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
}
- /* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline && cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- up_write(&policy->rwsem);
-
cpufreq_policy_free(policy);
}
@@ -1758,6 +1803,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
{
unsigned int new_freq;
+ if (!cpufreq_driver->get)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1797,27 +1845,25 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy;
- unsigned int ret_freq = 0;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
- ret_freq = cpufreq_driver->get(cpu);
+ unsigned int ret_freq = cpufreq_driver->get(cpu);
+
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret_freq = policy->cur;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cur;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -1829,15 +1875,11 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- ret_freq = policy->max;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->max;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1849,15 +1891,11 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- ret_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cpuinfo.max_freq;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1877,19 +1915,13 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- down_read(&policy->rwsem);
- if (cpufreq_driver->get)
- ret_freq = __cpufreq_get(policy);
- up_read(&policy->rwsem);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
- cpufreq_cpu_put(policy);
- }
+ guard(cpufreq_policy_read)(policy);
- return ret_freq;
+ return __cpufreq_get(policy);
}
EXPORT_SYMBOL(cpufreq_get);
@@ -1948,9 +1980,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
- down_write(&policy->rwsem);
- cpufreq_stop_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stop_governor(policy);
+ }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -1991,9 +2023,9 @@ void cpufreq_resume(void)
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
- down_write(&policy->rwsem);
- ret = cpufreq_start_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ ret = cpufreq_start_governor(policy);
+ }
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2323,7 +2355,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2360,15 +2393,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
-
- down_write(&policy->rwsem);
-
- ret = __cpufreq_driver_target(policy, target_freq, relation);
-
- up_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
- return ret;
+ return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
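
guard() and scoped_guard() come from the same cleanup infrastructure: the lock is taken when the guard is declared and dropped when its scope ends, which is what lets the down_write()/up_write() pairs above vanish. A hedged sketch of how such a lock class is typically declared, assuming cpufreq_policy_write wraps policy->rwsem as the removed code suggests:

	#include <linux/cleanup.h>
	#include <linux/rwsem.h>

	/* Assumed definition of the write-lock guard class: */
	DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
		     down_write(&_T->rwsem), up_write(&_T->rwsem))

	static int example_set_target(struct cpufreq_policy *policy,
				      unsigned int freq)
	{
		guard(cpufreq_policy_write)(policy);	/* released on return */

		return __cpufreq_driver_target(policy, freq,
					       CPUFREQ_RELATION_L);
	}

scoped_guard(class, arg) { ... } is the block-scoped variant used in cpufreq_remove_dev() and the suspend/resume paths above, where only part of the function needs the lock.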
@@ -2448,8 +2475,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy)
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (cpufreq_driver->get)
- cpufreq_verify_current_freq(policy, false);
+ cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
@@ -2540,31 +2566,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
-/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- * is written
- * @cpu: CPU to find the policy for
- *
- * Reads the current cpufreq policy.
- */
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
-{
- struct cpufreq_policy *cpu_policy;
- if (!policy)
- return -EINVAL;
-
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
-
- memcpy(policy, cpu_policy, sizeof(*policy));
-
- cpufreq_cpu_put(cpu_policy);
- return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_policy);
-
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
/**
@@ -2647,11 +2648,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 * Resolve policy min/max to available frequencies. This ensures that
 * no resolved frequency will overshoot the requested maximum or
 * undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min, and
+ * prevent compiler optimizations around them, because they may be
+ * accessed concurrently by cpufreq_driver_resolve_freq() during the
+ * update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);
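
The WRITE_ONCE() stores matter because a lockless reader such as cpufreq_driver_resolve_freq() may load policy->min and policy->max while this update runs; publishing only fully resolved values keeps the compiler from emitting intermediate stores that a reader could observe. A sketch of the matching reader side, assuming it snapshots the limits with READ_ONCE():

	/* Illustrative lockless reader: snapshot once, then clamp with the
	 * snapshot so that min <= max holds even mid-update. */
	unsigned int min = READ_ONCE(policy->min);
	unsigned int max = READ_ONCE(policy->max);

	if (min > max)
		min = max;

	target_freq = clamp_val(target_freq, min, max);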
@@ -2699,15 +2707,32 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_init_governor(policy))
+ if (cpufreq_init_governor(policy)) {
policy->governor = NULL;
- else
- cpufreq_start_governor(policy);
+ } else if (cpufreq_start_governor(policy)) {
+ cpufreq_exit_governor(policy);
+ policy->governor = NULL;
+ }
}
return ret;
}
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+ guard(cpufreq_policy_write)(policy);
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ return;
+
+ refresh_frequency_limits(policy);
+}
+
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2719,23 +2744,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
- /*
- * BIOS might change freq behind our back
- * -> ask driver for current freq and notify governors about a change
- */
- if (cpufreq_driver->get && has_target() &&
- (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
- goto unlock;
-
- refresh_frequency_limits(policy);
-
-unlock:
- cpufreq_cpu_release(policy);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2744,28 +2757,32 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
if (cpufreq_driver->update_limits)
- cpufreq_driver->update_limits(cpu);
+ cpufreq_driver->update_limits(policy);
else
- cpufreq_update_policy(cpu);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2777,15 +2794,18 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
int ret = 0;
- if (cpufreq_driver->boost_enabled == state)
- return 0;
+ /*
+ * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here, so
+ * that all policies are kept in sync with the global boost flag.
+ */
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2793,12 +2813,12 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
- policy->boost_enabled = state;
- ret = cpufreq_driver->set_boost(policy, state);
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
+ if (!policy->boost_supported)
+ continue;
+
+ ret = policy_set_boost(policy, state);
+ if (ret)
goto err_reset_state;
- }
}
cpus_read_unlock();
@@ -2812,7 +2832,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
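
str_enable_disable() is a helper from <linux/string_choices.h> that maps a boolean to the literal "enable"/"disable", replacing the open-coded ternary while keeping the format string greppable:

	#include <linux/string_choices.h>

	pr_err("%s: Cannot %s BOOST\n", __func__, str_enable_disable(state));
	/* state != 0 -> "Cannot enable BOOST"; 0 -> "Cannot disable BOOST" */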
@@ -2840,21 +2860,6 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
@@ -2906,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
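
The reworked check is denser than the old chain but encodes the same rule: a driver must supply exactly one of ->setpolicy or a targeting interface, and never both ->target and ->target_index at once. The exclusivity clause spelled out (illustrative truth-table rows; the whole expression being true means -EINVAL):

	/*
	 * (!!setpolicy == (target_index || target)):
	 *   setpolicy = NULL, target* = NULL  ->  0 == 0  -> reject
	 *   setpolicy = set,  target* = set   ->  1 == 1  -> reject
	 *   setpolicy = set,  target* = NULL  ->  1 == 0  -> accept
	 *   setpolicy = NULL, target* = set   ->  0 == 1  -> accept
	 */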
@@ -2929,15 +2932,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -2947,6 +2941,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2974,6 +2977,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -3038,6 +3043,34 @@ static int __init cpufreq_core_init(void)
return 0;
}
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+ return false;
+ }
+
+ return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+ unsigned int cpu;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ for_each_cpu(cpu, cpu_mask) {
+ if (!cpufreq_policy_is_good_for_eas(cpu)) {
+ pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+ cpumask_pr_args(cpu_mask));
+ return false;
+ }
+ }
+
+ return true;
+}
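
cpufreq_ready_for_eas() gives the scheduler a single query for "is schedutil governing every CPU of this mask". A hedged usage sketch from the caller's side (the surrounding function is an assumption, not part of this diff):

	/* Hypothetical EAS setup path for a root domain: */
	if (!cpufreq_ready_for_eas(cpu_map))
		return;	/* skip building EAS data for this root domain */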
+
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
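
Switching from sscanf() to kstrtouint() makes these sysfs stores strict: kstrtouint() returns 0 only if the entire string (modulo a trailing newline) parses as an unsigned int, and it reports overflow instead of silently truncating. For example:

	unsigned int v;
	int ret;

	/* "42abc":       sscanf("%u") returns 1 with v == 42;
	 *                kstrtouint() returns -EINVAL.
	 * "99999999999": kstrtouint() returns -ERANGE. */
	ret = kstrtouint(buf, 0, &v);	/* base 0 also accepts 0x.. and 0.. */
	if (ret)
		return ret;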
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index af44ee6a6430..1a7fcaf39cc9 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ /*
+ * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+ * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+ * off, where idle_time is calculated as the difference between
+ * the time elapsed in jiffies and the "busy time" obtained from
+ * CPU statistics. If a CPU is 100% busy, the time elapsed and the
+ * busy time should grow by the same amount in two consecutive
+ * samples, but in practice there can be a tiny difference,
+ * making the accumulated idle time decrease sometimes. Hence,
+ * in this case, idle_time should be regarded as 0 to keep the
+ * subsequent calculations correct.
+ */
+ if (cur_idle_time > j_cdbs->prev_cpu_idle)
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ else
+ idle_time = 0;
+
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ } else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- if (time_elapsed >= idle_time) {
+ if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
- } else {
- /*
- * That can happen if idle_time is returned by
- * get_cpu_idle_time_jiffy(). In that case
- * idle_time is roughly equal to the difference
- * between time_elapsed and "busy time" obtained
- * from CPU statistics. Then, the "busy time"
- * can end up being greater than time_elapsed
- * (for example, if jiffies_64 and the CPU
- * statistics are updated by different CPUs),
- * so idle_time may in fact be negative. That
- * means, though, that the CPU was busy all
- * the time (on the rough average) during the
- * last sampling interval and 100 can be
- * returned as the load.
- */
- load = (int)idle_time < 0 ? 100 : 0;
- }
+ else
+ load = 0;
+
j_cdbs->prev_load = load;
}
- if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
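
The clamp added in dbs_update() matters because idle_time is unsigned: if cur_idle_time ever drops below j_cdbs->prev_cpu_idle, the plain subtraction wrapped around to a huge value, which the old code papered over with (int) casts further down. A minimal illustration:

	u64 prev = 1000, cur = 998;		/* "busy time" outpaced jiffies */
	unsigned int idle_time = cur - prev;	/* wraps to ~4.29e9, not -2 */

	/* hence the fix: */
	idle_time = cur > prev ? cur - prev : 0;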
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a7c38b8b3e78..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -76,7 +53,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
@@ -376,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
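
boot_cpu_data.x86_vfm packs vendor, family, and model into a single integer, so one >= comparison stands in for the old family/model pair of tests. Assuming the usual layout from <asm/cpu_device_id.h> (vendor in the high bits, then family, then model), ordering by vfm within one vendor orders by (family, model):

	/* Assumed encoding: x86_vfm = (vendor << 16) | (family << 8) | model,
	 * and INTEL_PENTIUM_PRO is the first Family 6 part, so this
	 * selects "Family 6 and later" Intel CPUs in one compare. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO)
		return true;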
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2c42fee76daa..77d62152cd38 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -134,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7d2754411d8c..2c277eb3795a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
- .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
@@ -145,7 +144,7 @@ static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
},
- .remove_new = __exit_p(davinci_cpufreq_remove),
+ .remove = __exit_p(davinci_cpufreq_remove),
};
int __init davinci_cpufreq_init(void)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 6e958b09e1b5..320a0af2266a 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
- wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ wrmsrq(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
@@ -376,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 4ce5eb35dc46..fc5a58088b35 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -21,7 +21,6 @@
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
@@ -194,7 +193,6 @@ static struct cpufreq_driver elanfreq_driver = {
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e80d912b8d..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,7 +14,7 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
-bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
@@ -27,24 +27,22 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-EXPORT_SYMBOL_GPL(policy_has_boost_freq);
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -66,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -111,13 +108,13 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -129,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -148,7 +145,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if ((freq < policy->min) || (freq > policy->max))
+ if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;
@@ -276,7 +273,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
@@ -288,13 +284,6 @@ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
-
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
@@ -363,10 +352,14 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
+ /* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 577bb9e2f112..1492c92ffc1a 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -183,7 +183,7 @@ static void imx_cpufreq_dt_remove(struct platform_device *pdev)
static struct platform_driver imx_cpufreq_dt_driver = {
.probe = imx_cpufreq_dt_probe,
- .remove_new = imx_cpufreq_dt_remove,
+ .remove = imx_cpufreq_dt_remove,
.driver = {
.name = "imx-cpufreq-dt",
},
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c20d3ecc5a81..e93697d3edfd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -207,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
- .attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
@@ -443,7 +442,7 @@ soc_opp_out:
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/*
* Calculate the ramp time for max voltage change in the
@@ -522,7 +521,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
.name = "imx6q-cpufreq",
},
.probe = imx6q_cpufreq_probe,
- .remove_new = imx6q_cpufreq_remove,
+ .remove = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b0018f371ea3..38897bb14a2c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -28,6 +28,7 @@
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/units.h>
#include <asm/cpu.h>
#include <asm/div64.h>
@@ -220,6 +221,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @pd_registered: Set when a perf domain is registered for this CPU.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
@@ -259,6 +261,9 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+ bool pd_registered;
+#endif
struct delayed_work hwp_notify_work;
};
@@ -302,15 +307,16 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define INTEL_PSTATE_CORE_SCALING 100000
+#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
-static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
+static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
- return 100000;
+ return INTEL_PSTATE_CORE_SCALING;
}
#ifdef CONFIG_ACPI
@@ -414,18 +420,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
- int ret;
-
- ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
- * If the nominal frequency and the nominal performance are not
- * zero and the ratio between them is not 100, return the hybrid
- * scaling factor.
+ * Compute the perf-to-frequency scaling factor for the given CPU if
+ * possible, unless it would be 0.
*/
- if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
- cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return hybrid_scaling_factor;
+ if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
+ cppc_perf.nominal_perf && cppc_perf.nominal_freq)
+ return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
+ cppc_perf.nominal_perf);
return core_get_scaling();
}
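
Instead of falling back to one fixed hybrid constant, the reworked intel_pstate_cppc_get_scaling() derives the factor directly: scaling = nominal_freq[MHz] * 1000 / nominal_perf, i.e. kHz per unit of HWP performance. Worked with illustrative numbers:

	/* nominal_freq = 3400 MHz, nominal_perf = 34:
	 *   div_u64(3400 * KHZ_PER_MHZ, 34) == 100000 (classic core scaling)
	 * nominal_freq = 3150 MHz, nominal_perf = 40:
	 *   div_u64(3150 * KHZ_PER_MHZ, 40) == 78750  (close to the ADL
	 *   P-core constant HYBRID_SCALING_FACTOR_ADL == 78741)           */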
@@ -600,7 +603,10 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ if (!cpu_feature_enabled(X86_FEATURE_IDA))
+ return true;
+
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
@@ -614,24 +620,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -639,40 +630,19 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -764,7 +734,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
@@ -776,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -796,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -918,7 +884,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -931,13 +897,23 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
+static bool no_cas __ro_after_init;
+
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
/*
* Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -945,12 +921,124 @@ static struct cpudata *hybrid_max_perf_cpu __read_mostly;
*/
static DEFINE_MUTEX(hybrid_capacity_lock);
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT 4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+ unsigned long *freq)
+{
+ /*
+ * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
+ * of the maximum capacity such that two CPUs of the same type will be
+ * regarded as equally attractive if the utilization of each of them
+ * falls into the same bin, which should prevent tasks from being
+ * migrated between them too often.
+ *
+ * For this purpose, return the "frequency" of 2 for the first
+ * performance level and otherwise leave the value set by the caller.
+ */
+ if (!*freq)
+ *freq = 2;
+
+ /* No power information. */
+ *power = EM_MAX_POWER;
+
+ return 0;
+}
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
+
+ /*
+ * The smaller the perf-to-frequency scaling factor, the larger the IPC
+ * ratio between the given CPU and the least capable CPU in the system.
+ * Regard that IPC ratio as the primary cost component and assume that
+ * the scaling factors for different CPU types will differ by at least
+ * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ *
+ * Add the freq value to the cost, so that the cost of running on CPUs
+ * of the same type in different "utilization bins" is different.
+ */
+ *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
+ /*
+ * Increase the cost slightly for CPUs able to access L3 to avoid
+ * touching it in case some other CPUs of the same type can do the work
+ * without it.
+ */
+ if (cacheinfo) {
+ unsigned int i;
+
+ /* Check if L3 cache is there. */
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3) {
+ *cost += 2;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
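
Plugging numbers into hybrid_get_cost() shows how it ranks CPU types: the IPC term grows as the scaling factor shrinks, so higher-IPC cores look more expensive and, other things being equal, the energy model prefers E-cores:

	/* E-core: 100 * INTEL_PSTATE_CORE_SCALING / 100000 = 100 (+ freq)
	 * P-core: 100 * INTEL_PSTATE_CORE_SCALING /  78741 = 126 (+ freq)
	 * The +2 L3 bump then breaks ties between otherwise equal CPUs. */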
+
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+ static const struct em_data_callback cb
+ = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ struct device *cpu_dev;
+
+ /*
+ * Registering EM perf domains without enabling asymmetric CPU capacity
+ * support is not really useful and one domain should not be registered
+ * more than once.
+ */
+ if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+ return false;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return false;
+
+ if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
+ return false;
+
+ cpudata->pd_registered = true;
+
+ return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+ if (cpu->pd_registered)
+ em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
{
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
hybrid_max_perf_cpu->capacity_perf,
cpu->capacity_perf,
cpu->pstate.max_pstate_physical);
+ hybrid_update_perf_domain(cpu);
+
+ topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
@@ -1028,26 +1116,38 @@ static void hybrid_update_cpu_capacity_scaling(void)
}
}
-static void __hybrid_init_cpu_capacity_scaling(void)
+static void __hybrid_refresh_cpu_capacity_scaling(void)
{
hybrid_max_perf_cpu = NULL;
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_refresh_cpu_capacity_scaling(void)
{
- bool disable_itmt = false;
+ guard(mutex)(&hybrid_capacity_lock);
- mutex_lock(&hybrid_capacity_lock);
+ __hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_register_all_perf_domains();
+}
+
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
+{
+ /* Bail out if enabling capacity-aware scheduling is prohibited. */
+ if (no_cas)
+ return;
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
* operation mode.
*/
- if (hybrid_max_perf_cpu) {
- __hybrid_init_cpu_capacity_scaling();
- goto unlock;
+ if (refresh) {
+ hybrid_refresh_cpu_capacity_scaling();
+ return;
}
/*
@@ -1056,26 +1156,32 @@ static void hybrid_init_cpu_capacity_scaling(void)
* do not do that when SMT is in use.
*/
if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
- __hybrid_init_cpu_capacity_scaling();
- disable_itmt = true;
+ hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity and EAS.
+ */
+ sched_clear_itmt_support();
}
+}
-unlock:
- mutex_unlock(&hybrid_capacity_lock);
+static bool hybrid_clear_max_perf_cpu(void)
+{
+ bool ret;
- /*
- * Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
- */
- if (disable_itmt)
- sched_clear_itmt_support();
+ guard(mutex)(&hybrid_capacity_lock);
+
+ ret = !!hybrid_max_perf_cpu;
+ hybrid_max_perf_cpu = NULL;
+
+ return ret;
}
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -1131,6 +1237,14 @@ static void hybrid_update_capacity(struct cpudata *cpu)
}
hybrid_set_cpu_capacity(cpu);
+ /*
+ * If the CPU was offline to start with and it is going online for the
+ * first time, a perf domain needs to be registered for it if hybrid
+ * capacity scaling has been enabled already. In that case, sched
+ * domains need to be rebuilt to take the new perf domain into account.
+ */
+ if (hybrid_register_perf_domain(cpu->cpu))
+ em_rebuild_sched_domains();
unlock:
mutex_unlock(&hybrid_capacity_lock);
@@ -1149,7 +1263,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -1191,12 +1305,11 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1243,7 +1356,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
mutex_lock(&hybrid_capacity_lock);
@@ -1265,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
@@ -1272,7 +1388,7 @@ static void set_power_ctl_ee_state(bool input)
u64 power_ctl;
mutex_lock(&intel_pstate_driver_lock);
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1280,7 +1396,7 @@ static void set_power_ctl_ee_state(bool input)
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
- wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
mutex_unlock(&intel_pstate_driver_lock);
}
@@ -1289,7 +1405,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1340,9 +1456,11 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
- struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+ struct cpudata *cpudata)
{
+ guard(cpufreq_policy_write)(policy);
+
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
@@ -1352,47 +1470,37 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
-
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
- return;
+ return false;
- cpudata = all_cpu_data[cpu];
+ __intel_pstate_update_max_freq(policy, cpudata);
- __intel_pstate_update_max_freq(cpudata, policy);
+ return true;
+}
- /* Prevent the driver from being unregistered now. */
- mutex_lock(&intel_pstate_driver_lock);
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpudata = all_cpu_data[policy->cpu];
- cpufreq_cpu_release(policy);
+ __intel_pstate_update_max_freq(policy, cpudata);
hybrid_update_capacity(cpudata);
-
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu) {
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
- if (!policy)
- continue;
-
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
-
- cpufreq_cpu_release(policy);
- }
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(all_cpu_data[cpu]);
mutex_lock(&hybrid_capacity_lock);
if (hybrid_max_perf_cpu)
- __hybrid_init_cpu_capacity_scaling();
+ __hybrid_refresh_cpu_capacity_scaling();
mutex_unlock(&hybrid_capacity_lock);
}
@@ -1555,41 +1663,40 @@ unlock_driver:
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
-
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1618,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
+ update_qos_requests(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1652,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
+ update_qos_requests(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1690,7 +1797,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
u64 power_ctl;
int enable;
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
@@ -1827,13 +1934,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
- if (policy) {
- __intel_pstate_update_max_freq(cpudata, policy);
-
- cpufreq_cpu_release(policy);
+ if (intel_pstate_update_max_freq(cpudata)) {
/*
* The driver will not be unregistered while this function is
* running, so update the capacity without acquiring the driver
@@ -1842,7 +1944,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
hybrid_update_capacity(cpudata);
}
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
@@ -1864,7 +1966,7 @@ void notify_hwp_interrupt(void)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
- rdmsrl_safe(MSR_HWP_STATUS, &value);
+ rdmsrq_safe(MSR_HWP_STATUS, &value);
if (!(value & status_mask))
return;
@@ -1881,7 +1983,7 @@ void notify_hwp_interrupt(void)
return;
ack_intr:
- wrmsrl_safe(MSR_HWP_STATUS, 0);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
@@ -1892,8 +1994,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
@@ -1920,9 +2022,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1961,9 +2063,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
@@ -1977,7 +2079,7 @@ static int atom_get_min_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -1985,7 +2087,7 @@ static int atom_get_max_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -1993,7 +2095,7 @@ static int atom_get_turbo_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -2028,7 +2130,7 @@ static int silvermont_get_scaling(void)
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
@@ -2044,7 +2146,7 @@ static int airmont_get_scaling(void)
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
@@ -2055,7 +2157,7 @@ static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
@@ -2063,7 +2165,7 @@ static void atom_get_vid(struct cpudata *cpudata)
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
- rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
@@ -2071,7 +2173,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -2079,7 +2181,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -2093,13 +2195,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
- err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
- err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
@@ -2124,7 +2226,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -2136,7 +2238,7 @@ static int core_get_max_pstate(int cpu)
return tdp_ratio;
}
- err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
@@ -2156,7 +2258,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -2185,7 +2287,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -2193,33 +2295,31 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
- u8 cpu_type = 0;
+ if (hybrid_scaling_factor) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u8 cpu_type = c->topo.intel_type;
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* P-cores have a smaller perf level-to-freqency scaling factor. */
- if (cpu_type == 0x40)
- return hybrid_scaling_factor;
+ /*
+ * Return the hybrid scaling factor for P-cores and use the
+ * default core scaling for E-cores.
+ */
+ if (cpu_type == INTEL_CPU_TYPE_CORE)
+ return hybrid_scaling_factor;
- /* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ if (cpu_type == INTEL_CPU_TYPE_ATOM)
+ return core_get_scaling();
+ }
+
+ /* Use core scaling on non-hybrid systems. */
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return core_get_scaling();
/*
- * If reached here, this system is either non-hybrid (like Tiger
- * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
- * no E cores (in which case CPUID for hybrid support is 0).
- *
- * The CPPC nominal_frequency field is 0 for non-hybrid systems,
- * so the default core scaling will be used for them.
+ * The system is hybrid, but the hybrid scaling factor is not known or
+ * the CPU type is not one of the above, so use CPPC to compute the
+ * scaling factor for this CPU.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
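
After this rework, the per-CPU scaling factor comes from a fixed decision chain rather than an IPI-based CPU-type probe. Condensed:

	/*
	 * hwp_get_cpu_scaling(), in order:
	 *   1. known hybrid platform + P-core -> hybrid_scaling_factor
	 *   2. known hybrid platform + E-core -> core scaling (100000)
	 *   3. non-hybrid CPU                 -> core scaling (100000)
	 *   4. otherwise                      -> derive from CPPC nominal values
	 */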
@@ -2233,7 +2333,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
@@ -2263,6 +2363,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
+ /*
+ * If the CPU is going online for the first time and it was
+ * offline initially, asym capacity scaling needs to be updated.
+ */
+ hybrid_update_capacity(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -2335,7 +2440,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
- wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ wrmsrq(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
@@ -2348,7 +2453,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
- wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
@@ -2409,8 +2514,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
u64 tsc;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
@@ -2437,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -2504,7 +2609,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+ wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
@@ -2637,6 +2742,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(INTEL_TIGERLAKE, core_funcs),
X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -2653,6 +2760,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif
@@ -2695,7 +2803,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
}
cpu->epp_powersave = -EINVAL;
- cpu->epp_policy = 0;
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
intel_pstate_get_cpu_pstates(cpu);
@@ -3084,19 +3192,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
- wrmsrl(MSR_HWP_REQUEST, value);
+ wrmsrq(MSR_HWP_REQUEST, value);
else
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
- wrmsrl(MSR_IA32_PERF_CTL,
+ wrmsrq(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
@@ -3111,8 +3219,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
- fast_switch);
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+ target_pstate, fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
@@ -3240,7 +3348,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
@@ -3307,7 +3415,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
@@ -3352,6 +3460,7 @@ static void intel_pstate_driver_cleanup(void)
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
+ bool refresh_cpu_cap_scaling;
int ret;
if (driver == &intel_pstate)
@@ -3364,6 +3473,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
arch_set_max_freq_ratio(global.turbo_disabled);
+ refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
@@ -3373,7 +3484,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- hybrid_init_cpu_capacity_scaling();
+ hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
return 0;
}
@@ -3554,7 +3665,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
- rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3610,7 +3721,7 @@ static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
@@ -3638,6 +3749,8 @@ static const struct x86_cpu_id intel_epp_default[] = {
X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
179, 64, 16)),
X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
@@ -3646,12 +3759,36 @@ static const struct x86_cpu_id intel_epp_default[] = {
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
- X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3661,25 +3798,43 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable
+ * bit (MSR_IA32_POWER_CTL, bit 27) is set because that
+ * means incomplete HWP implementation which is a corner
+ * case and supporting it is generally problematic.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3716,15 +3871,6 @@ static int __init intel_pstate_init(void)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists()) {
- pr_info("P-states controlled by the platform\n");
- return -ENODEV;
- }
-
if (!hwp_active && hwp_only)
return -ENOTSUPP;
@@ -3808,6 +3954,9 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "no_cas"))
+ no_cas = true;
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index fd20b986d1f2..24b285cbeb8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
@@ -189,7 +188,7 @@ static void kirkwood_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
- .remove_new = kirkwood_cpufreq_remove,
+ .remove = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
},
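The .remove_new to .remove rename seen here (and in several drivers below) completes the platform-bus conversion to a void-returning remove callback: .remove_new was the transitional name while struct platform_driver still carried the old int-returning .remove. A minimal sketch of the final form, with hypothetical my_probe()/my_remove() callbacks:

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources */
}

/* Remove callbacks return void now; failures cannot be reported. */
static void my_remove(struct platform_device *pdev)
{
	/* release resources */
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.remove	= my_remove,	/* was .remove_new during the transition */
	.driver	= {
		.name = "my-driver",
	},
};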
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index bd6fe8638d39..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index)
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
union msr_longhaul longhaul;
u32 t;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
@@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
}
@@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
@@ -906,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = {
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
@@ -954,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
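The rdmsrl/wrmsrl to rdmsrq/wrmsrq renames that dominate this series are mechanical: the 64-bit MSR accessors now carry a 'q' (quadword) suffix instead of the misleading 'l' suffix, and the _safe and _on_cpu variants follow the same pattern. A minimal sketch, assuming <asm/msr.h>; the read-modify-write is purely illustrative:

#include <asm/msr.h>

/* Sketch: same semantics, new names for the 64-bit MSR accessors. */
static void msr_rename_sketch(unsigned int cpu)
{
	u64 val;

	rdmsrq(MSR_HWP_REQUEST, val);			/* was rdmsrl() */
	wrmsrq(MSR_HWP_REQUEST, val);			/* was wrmsrl() */
	rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &val);	/* was rdmsrl_on_cpu() */
}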
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 6a8e97896d38..39a6c4315a60 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -91,7 +91,6 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
@@ -148,7 +147,9 @@ static int __init cpufreq_init(void)
ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
- if (!ret && !nowait) {
+ if (ret) {
+ platform_driver_unregister(&platform_driver);
+ } else if (!nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 6b5e6798d9a2..1e8715ea1b77 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -299,15 +299,6 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret < 0) {
- pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
- return ret;
- }
- loongson3_cpufreq_driver.boost_enabled = true;
- }
-
return 0;
}
@@ -337,8 +328,8 @@ static struct cpufreq_driver loongson3_cpufreq_driver = {
.offline = loongson3_cpufreq_cpu_offline,
.get = loongson3_cpufreq_get,
.target_index = loongson3_cpufreq_target,
- .attr = cpufreq_generic_attr,
.verify = cpufreq_generic_frequency_table_verify,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -346,8 +337,11 @@ static int loongson3_cpufreq_probe(struct platform_device *pdev)
{
int i, ret;
- for (i = 0; i < MAX_PACKAGES; i++)
- devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+ for (i = 0; i < MAX_PACKAGES; i++) {
+ ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+ if (ret)
+ return ret;
+ }
ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
if (ret <= 0)
@@ -386,7 +380,7 @@ static struct platform_driver loongson3_platform_driver = {
},
.id_table = cpufreq_id_table,
.probe = loongson3_cpufreq_probe,
- .remove_new = loongson3_cpufreq_remove,
+ .remove = loongson3_cpufreq_remove,
};
module_platform_driver(loongson3_platform_driver);
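The boost rework here (and in powernv below) follows a cpufreq-core change: instead of each driver calling cpufreq_enable_boost_support() from its ->init() path, a driver with software-managed boost frequencies now installs the generic cpufreq_boost_set_sw helper as its .set_boost callback and lets the core handle enablement. A sketch of the resulting driver shape, with the unrelated callbacks elided:

#include <linux/cpufreq.h>

static struct cpufreq_driver boost_sketch_driver = {
	.name		= "boost-sketch",	/* hypothetical */
	.verify		= cpufreq_generic_frequency_table_verify,
	/* core-managed software boost replaces manual enable in ->init() */
	.set_boost	= cpufreq_boost_set_sw,
};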
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
deleted file mode 100644
index 690da85c4865..000000000000
--- a/drivers/cpufreq/maple-cpufreq.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Dmitry Eremin-Solenikov
- * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
- *
- * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
- * that is iMac G5 and latest single CPU desktop.
- */
-
-#undef DEBUG
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-#include <linux/of.h>
-
-#define DBG(fmt...) pr_debug(fmt)
-
-/* see 970FX user manual */
-
-#define SCOM_PCR 0x0aa001 /* PCR scom addr */
-
-#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
-#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
-#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
-#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
-#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
-#define PCR_SPEED_SHIFT 17
-#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
-#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
-#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
-#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
-#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
-#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
-
-#define SCOM_PSR 0x408001 /* PSR scom addr */
-/* warning: PSR is a 64 bits register */
-#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
-#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
-#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
-#define PSR_CUR_SPEED_SHIFT (56)
-
-/*
- * The G5 only supports two frequencies (Quarter speed is not supported)
- */
-#define CPUFREQ_HIGH 0
-#define CPUFREQ_LOW 1
-
-static struct cpufreq_frequency_table maple_cpu_freqs[] = {
- {0, CPUFREQ_HIGH, 0},
- {0, CPUFREQ_LOW, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/* Power mode data is an array of the 32 bits PCR values to use for
- * the various frequencies, retrieved from the device-tree
- */
-static int maple_pmode_cur;
-
-static const u32 *maple_pmode_data;
-static int maple_pmode_max;
-
-/*
- * SCOM based frequency switching for 970FX rev3
- */
-static int maple_scom_switch_freq(int speed_mode)
-{
- unsigned long flags;
- int to;
-
- local_irq_save(flags);
-
- /* Clear PCR high */
- scom970_write(SCOM_PCR, 0);
- /* Clear PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
- /* Set PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT |
- maple_pmode_data[speed_mode]);
-
- /* Wait for completion */
- for (to = 0; to < 10; to++) {
- unsigned long psr = scom970_read(SCOM_PSR);
-
- if ((psr & PSR_CMD_RECEIVED) == 0 &&
- (((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
- == 0)
- break;
- if (psr & PSR_CMD_COMPLETED)
- break;
- udelay(100);
- }
-
- local_irq_restore(flags);
-
- maple_pmode_cur = speed_mode;
- ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
-
- return 0;
-}
-
-static int maple_scom_query_freq(void)
-{
- unsigned long psr = scom970_read(SCOM_PSR);
- int i;
-
- for (i = 0; i <= maple_pmode_max; i++)
- if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
- break;
- return i;
-}
-
-/*
- * Common interface to the cpufreq core
- */
-
-static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- return maple_scom_switch_freq(index);
-}
-
-static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
-{
- return maple_cpu_freqs[maple_pmode_cur].frequency;
-}
-
-static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
- return 0;
-}
-
-static struct cpufreq_driver maple_cpufreq_driver = {
- .name = "maple",
- .flags = CPUFREQ_CONST_LOOPS,
- .init = maple_cpufreq_cpu_init,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = maple_cpufreq_target,
- .get = maple_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
-};
-
-static int __init maple_cpufreq_init(void)
-{
- struct device_node *cpunode;
- unsigned int psize;
- unsigned long max_freq;
- const u32 *valp;
- u32 pvr_hi;
- int rc = -ENODEV;
-
- /*
- * Behave here like powermac driver which checks machine compatibility
- * to ease merging of two drivers in future.
- */
- if (!of_machine_is_compatible("Momentum,Maple") &&
- !of_machine_is_compatible("Momentum,Apache"))
- return 0;
-
- /* Get first CPU node */
- cpunode = of_cpu_device_node_get(0);
- if (cpunode == NULL) {
- pr_err("Can't find any CPU 0 node\n");
- goto bail_noprops;
- }
-
- /* Check 970FX for now */
- /* we actually don't care on which CPU to access PVR */
- pvr_hi = PVR_VER(mfspr(SPRN_PVR));
- if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- pr_err("Unsupported CPU version (%x)\n", pvr_hi);
- goto bail_noprops;
- }
-
- /* Look for the powertune data in the device-tree */
- /*
- * On Maple this property is provided by PIBS in dual-processor config,
- * not provided by PIBS in CPU0 config and also not provided by SLOF,
- * so YMMV
- */
- maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
- if (!maple_pmode_data) {
- DBG("No power-mode-data !\n");
- goto bail_noprops;
- }
- maple_pmode_max = psize / sizeof(u32) - 1;
-
- /*
- * From what I see, clock-frequency is always the maximal frequency.
- * The current driver can not slew sysclk yet, so we really only deal
- * with powertune steps for now. We also only implement full freq and
- * half freq in this version. So far, I haven't yet seen a machine
- * supporting anything else.
- */
- valp = of_get_property(cpunode, "clock-frequency", NULL);
- if (!valp)
- goto bail_noprops;
- max_freq = (*valp)/1000;
- maple_cpu_freqs[0].frequency = max_freq;
- maple_cpu_freqs[1].frequency = max_freq/2;
-
- /* Force apply current frequency to make sure everything is in
- * sync (voltage is right for example). Firmware may leave us with
- * a strange setting ...
- */
- msleep(10);
- maple_pmode_cur = -1;
- maple_scom_switch_freq(maple_scom_query_freq());
-
- pr_info("Registering Maple CPU frequency driver\n");
- pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
- maple_cpu_freqs[1].frequency/1000,
- maple_cpu_freqs[0].frequency/1000,
- maple_cpu_freqs[maple_pmode_cur].frequency/1000);
-
- rc = cpufreq_register_driver(&maple_cpufreq_driver);
-
-bail_noprops:
- of_node_put(cpunode);
-
- return rc;
-}
-
-module_init(maple_cpufreq_init);
-
-
-MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8925e096d5b9..ae4500ab4891 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,26 +52,57 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
if (!policy)
- return 0;
+ return -EINVAL;
data = policy->driver_data;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+ * In a cpufreq with hybrid DVFS, such as the MT8196, the first declared
+ * register range is for FDVFS, followed by the frequency domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -238,7 +309,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -293,18 +364,18 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
struct regulator *cpu_reg;
/* Make sure that all CPU supplies are available before proceeding. */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
@@ -321,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -337,14 +421,15 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
- .remove_new = mtk_cpufreq_hw_driver_remove,
+ .remove = mtk_cpufreq_hw_driver_remove,
.driver = {
.name = "mtk-cpufreq-hw",
.of_match_table = mtk_cpufreq_hw_match,
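On the MT8196 hybrid-DVFS path added above, frequency requests bypass the per-domain LUT index register: the requested kHz value is converted into multiples of the 26 MHz reference (FDVFS_FDIV_HZ is 26 * 1000 in kHz terms) and written into one 32-bit FDVFS slot per CPU. A self-contained sketch of that conversion, mirroring mtk_cpufreq_hw_fdvfs_switch():

#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/math.h>

/* Sketch: encode a kHz request as a 26 MHz multiple, one slot per CPU. */
static void fdvfs_switch_sketch(void __iomem *fdvfs, unsigned int target_khz,
				const struct cpumask *cpus)
{
	unsigned int cpu, code = DIV_ROUND_UP(target_khz, 26 * 1000);

	for_each_cpu(cpu, cpus)
		writel_relaxed(code, fdvfs + cpu * 4);
}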
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 663f61565cf7..5d50a231f944 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
@@ -404,9 +403,11 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk))
- return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
- "cpu%d: failed to get cpu clk\n", cpu);
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
+ goto out_put_cci_dev;
+ }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -552,6 +553,10 @@ out_free_inter_clock:
out_free_mux_clock:
clk_put(info->cpu_clk);
+out_put_cci_dev:
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
+
return ret;
}
@@ -569,6 +574,8 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -618,7 +625,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_probe(struct platform_device *pdev)
@@ -632,7 +638,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
continue;
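The opp-notifier change above switches to scope-based cleanup: declaring the policy pointer with __free(put_cpufreq_policy) makes cpufreq_cpu_put() run automatically when the variable leaves scope, removing the manual put and its early-return hazards. A minimal sketch, assuming the put_cpufreq_policy cleanup class from <linux/cpufreq.h>:

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/* Sketch: the policy reference is dropped automatically at scope exit. */
static void retarget_cpu_sketch(unsigned int cpu, unsigned int freq_khz)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy)
		= cpufreq_cpu_get(cpu);

	if (!policy)
		return;	/* nothing to put: the cleanup handles NULL */

	cpufreq_driver_target(policy, freq_khz, CPUFREQ_RELATION_L);
}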
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 7f3cfe668f30..2aad4c04673c 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index de8be0a8932d..bbb01d93b54b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -147,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
- .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
@@ -188,7 +187,7 @@ static struct platform_driver omap_cpufreq_platdrv = {
.name = "omap-cpufreq",
},
.probe = omap_cpufreq_probe,
- .remove_new = omap_cpufreq_remove,
+ .remove = omap_cpufreq_remove,
};
module_platform_driver(omap_cpufreq_platdrv);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index ef0a3216a386..69c19233fcd4 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 5fc9cb480516..a3931349360f 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -245,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 771efbf51a48..ac2e90a65f0c 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -615,7 +615,7 @@ static struct platform_driver pcc_cpufreq_platdrv = {
.driver = {
.name = "pcc-cpufreq",
},
- .remove_new = pcc_cpufreq_remove,
+ .remove = pcc_cpufreq_remove,
};
static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 6c9f0888a2a7..a22c22bd693a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 74ff6c47df29..80897ec8f00e 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f0a4a6c31204..99d2244e03b0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -253,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 4271446c8725..31039330a3ba 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -219,13 +219,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -234,13 +234,13 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
@@ -667,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index a01170f7d01c..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
@@ -1143,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8de759247771..7d9a5f656de8 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -18,9 +18,9 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
-#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -29,6 +29,9 @@
#include <asm/opal.h>
#include <linux/timer.h>
+#define CREATE_TRACE_POINTS
+#include "powernv-trace.h"
+
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
@@ -281,7 +284,7 @@ next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
- (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+ str_enabled_disabled(powernv_pstate_info.wof_enabled));
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
@@ -385,12 +388,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
-#define SCALING_BOOST_FREQS_ATTR_INDEX 2
-
static struct freq_attr *powernv_cpu_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
- &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -670,7 +669,8 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
*/
static void gpstate_timer_handler(struct timer_list *t)
{
- struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
+ struct global_pstate_info *gpstates = timer_container_of(gpstates, t,
+ timer);
struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
@@ -805,7 +805,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
- del_timer_sync(&gpstates->timer);
+ timer_delete_sync(&gpstates->timer);
gpstates_done:
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
@@ -883,7 +883,7 @@ static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
if (gpstates)
- del_timer_sync(&gpstates->timer);
+ timer_delete_sync(&gpstates->timer);
kfree(policy->driver_data);
}
@@ -1127,9 +1127,7 @@ static int __init powernv_cpufreq_init(void)
goto out;
if (powernv_pstate_info.wof_enabled)
- powernv_cpufreq_driver.boost_enabled = true;
- else
- powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+ powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
@@ -1137,9 +1135,6 @@ static int __init powernv_cpufreq_init(void)
goto cleanup;
}
- if (powernv_pstate_info.wof_enabled)
- cpufreq_enable_boost_support();
-
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
diff --git a/drivers/cpufreq/powernv-trace.h b/drivers/cpufreq/powernv-trace.h
new file mode 100644
index 000000000000..8cadb7c9427b
--- /dev/null
+++ b/drivers/cpufreq/powernv-trace.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_POWERNV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _POWERNV_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+TRACE_EVENT(powernv_throttle,
+
+ TP_PROTO(int chip_id, const char *reason, int pmax),
+
+ TP_ARGS(chip_id, reason, pmax),
+
+ TP_STRUCT__entry(
+ __field(int, chip_id)
+ __string(reason, reason)
+ __field(int, pmax)
+ ),
+
+ TP_fast_assign(
+ __entry->chip_id = chip_id;
+ __assign_str(reason);
+ __entry->pmax = pmax;
+ ),
+
+ TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+ __entry->pmax, __get_str(reason))
+);
+
+#endif /* _POWERNV_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE powernv-trace
+
+#include <trace/define_trace.h>
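A trace header written this way is consumed in two modes: any interested file can include it normally, but exactly one translation unit must define CREATE_TRACE_POINTS first so the tracepoint bodies get emitted, which is what the powernv-cpufreq.c hunk above does. A sketch of both sides (the reason string here is a hypothetical placeholder; the driver passes entries from its throttle-reason table):

/* In exactly one .c file -- here powernv-cpufreq.c: */
#define CREATE_TRACE_POINTS
#include "powernv-trace.h"

/* Any call site then just fires the event: */
static void report_throttle_sketch(int chip_id, int pmax)
{
	trace_powernv_throttle(chip_id, "Pmax reduced", pmax);
}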
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
deleted file mode 100644
index 98595b3ea13f..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {0, 1, 0},
- {0, 2, 0},
- {0, 3, 0},
- {0, 4, 0},
- {0, 5, 0},
- {0, 6, 0},
- {0, 8, 0},
- {0, 10, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pos;
- const u32 *max_freqp;
- u32 max_freq;
- int cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- of_node_put(cpu);
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- cpufreq_for_each_entry(pos, cbe_freqs) {
- pos->frequency = max_freq / pos->driver_data;
- pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- policy->freq_table = cbe_freqs;
- cbe_cpufreq_pmi_policy_init(policy);
- return 0;
-}
-
-static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cbe_cpufreq_pmi_policy_exit(policy);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int cbe_pmode_new)
-{
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].driver_data);
-
- return set_pmode(policy->cpu, cbe_pmode_new);
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destroy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- int ret;
-
- if (!machine_is(cell))
- return -ENODEV;
-
- cbe_cpufreq_pmi_init();
-
- ret = cpufreq_register_driver(&cbe_cpufreq_driver);
- if (ret)
- cbe_cpufreq_pmi_exit();
-
- return ret;
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
- cbe_cpufreq_pmi_exit();
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
deleted file mode 100644
index 00cd8633b0d9..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ppc_cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
-extern bool cbe_cpufreq_has_pmi;
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_init(void);
-void cbe_cpufreq_pmi_exit(void);
-#else
-#define cbe_cpufreq_has_pmi (0)
-static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_init(void) {}
-static inline void cbe_cpufreq_pmi_exit(void) {}
-#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
deleted file mode 100644
index 04830cd95333..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
deleted file mode 100644
index 6f0c32592416..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "ppc_cbe_cpufreq.h"
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- struct cpufreq_policy *policy;
- struct freq_qos_request *req;
- u8 node, slow_mode;
- int cpu, ret;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- cpu = cbe_node_to_cpu(node);
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- pr_warn("cpufreq policy not found cpu%d\n", cpu);
- return;
- }
-
- req = policy->driver_data;
-
- ret = freq_qos_update_request(req,
- policy->freq_table[slow_mode].frequency);
- if (ret < 0)
- pr_warn("Failed to update freq constraint: %d\n", ret);
- else
- pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
-
- cpufreq_cpu_put(policy);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req;
- int ret;
-
- if (!cbe_cpufreq_has_pmi)
- return;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return;
-
- ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
- policy->freq_table[0].frequency);
- if (ret < 0) {
- pr_err("Failed to add freq constraint (%d)\n", ret);
- kfree(req);
- return;
- }
-
- policy->driver_data = req;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req = policy->driver_data;
-
- if (cbe_cpufreq_has_pmi) {
- freq_qos_remove_request(req);
- kfree(req);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
-
-void cbe_cpufreq_pmi_init(void)
-{
- if (!pmi_register_handler(&cbe_pmi_handler))
- cbe_cpufreq_has_pmi = true;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
-
-void cbe_cpufreq_pmi_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- cbe_cpufreq_has_pmi = false;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 900d6844c43d..8422704a3b10 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}
/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
unsigned int index;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return qcom_cpufreq_get_freq(cpu);
+ return qcom_cpufreq_get_freq(policy);
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -305,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
struct of_phandle_args args;
int cpu, ret;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
continue;
@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return data->throttle_irq;
data->cancel_throttle = false;
- data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
+ data->policy = policy;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -565,12 +566,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret)
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- }
-
return qcom_cpufreq_hw_lmh_init(policy, index);
}
@@ -594,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
enable_irq(data->throttle_irq);
}
-static struct freq_attr *qcom_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- &cpufreq_freq_attr_scaling_boost_freqs,
- NULL
-};
-
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -614,19 +603,32 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
- .attr = qcom_cpufreq_hw_attr,
.ready = qcom_cpufreq_ready,
+ .set_boost = cpufreq_boost_set_sw,
};
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
- return qcom_lmh_get_throttle_freq(data);
+ return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
+}
+
+/*
+ * Since we cannot determine the rate closest to the target rate, just
+ * return the actual rate at which the clock is running. This is needed to
+ * make the clk_set_rate() API work properly.
+ */
+static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
+
+ return 0;
}
static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
.recalc_rate = qcom_cpufreq_hw_recalc_rate,
+ .determine_rate = qcom_cpufreq_hw_determine_rate,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -736,7 +738,7 @@ static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
static struct platform_driver qcom_cpufreq_hw_driver = {
.probe = qcom_cpufreq_hw_driver_probe,
- .remove_new = qcom_cpufreq_hw_driver_remove,
+ .remove = qcom_cpufreq_hw_driver_remove,
.driver = {
.name = "qcom-cpufreq-hw",
.of_match_table = qcom_cpufreq_hw_match,
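
For reference, a minimal sketch of the recalc_rate/determine_rate pairing the hunk above adds: when hardware or firmware picks the final frequency, determine_rate can only report the running rate so that clk_set_rate() round-trips cleanly. All names below are hypothetical, not part of the patch.

    #include <linux/clk-provider.h>

    /* Sketch: a clock whose rate is picked by firmware, so the closest
     * achievable rate cannot be computed up front. */
    static unsigned long hyp_read_hw_rate(struct clk_hw *hw)
    {
            return 1000000000UL;    /* placeholder for a register read */
    }

    static unsigned long hyp_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
    {
            return hyp_read_hw_rate(hw);
    }

    static int hyp_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
    {
            /* Report the running rate so clk_set_rate() round-trips cleanly. */
            req->rate = hyp_recalc_rate(hw, 0);
            return 0;
    }

    static const struct clk_ops hyp_clk_ops = {
            .recalc_rate    = hyp_recalc_rate,
            .determine_rate = hyp_determine_rate,
    };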
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 703308fb891a..765a5bb81829 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -52,12 +52,13 @@ struct qcom_cpufreq_match_data {
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv);
- const char **genpd_names;
+ const char **pd_names;
+ unsigned int num_pd_names;
};
struct qcom_cpufreq_drv_cpu {
int opp_token;
- struct device **virt_devs;
+ struct dev_pm_domain_list *pd_list;
};
struct qcom_cpufreq_drv {
@@ -199,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -395,8 +400,6 @@ static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
return 0;
}
-static const char *generic_genpd_names[] = { "perf", NULL };
-
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
@@ -407,13 +410,13 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
static const struct qcom_cpufreq_match_data match_data_msm8909 = {
.get_version = qcom_cpufreq_simple_get_version,
- .genpd_names = generic_genpd_names,
+ .pd_names = (const char *[]) { "perf" },
+ .num_pd_names = 1,
};
-static const char *qcs404_genpd_names[] = { "cpr", NULL };
-
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
- .genpd_names = qcs404_genpd_names,
+ .pd_names = (const char *[]) { "cpr" },
+ .num_pd_names = 1,
};
static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
@@ -428,28 +431,16 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
-static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
- const char * const *name = drv->data->genpd_names;
+ struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;
int i;
- if (!drv->cpus[cpu].virt_devs)
+ if (!pd_list)
return;
- for (i = 0; *name; i++, name++)
- device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
-}
-
-static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
-{
- const char * const *name = drv->data->genpd_names;
- int i;
-
- if (!drv->cpus[cpu].virt_devs)
- return;
-
- for (i = 0; *name; i++, name++)
- pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
+ for (i = 0; i < pd_list->num_pds; i++)
+ device_set_awake_path(pd_list->pd_devs[i]);
}
static int qcom_cpufreq_probe(struct platform_device *pdev)
@@ -502,8 +493,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
nvmem_cell_put(speedbin_nvmem);
}
- for_each_possible_cpu(cpu) {
- struct device **virt_devs = NULL;
+ for_each_present_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -522,12 +512,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
config.prop_name = pvs_name;
}
- if (drv->data->genpd_names) {
- config.genpd_names = drv->data->genpd_names;
- config.virt_devs = &virt_devs;
- }
-
- if (config.supported_hw || config.genpd_names) {
+ if (config.supported_hw) {
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
if (drv->cpus[cpu].opp_token < 0) {
ret = drv->cpus[cpu].opp_token;
@@ -536,25 +521,18 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
}
- if (virt_devs) {
- const char * const *name = config.genpd_names;
- int i, j;
-
- for (i = 0; *name; i++, name++) {
- ret = pm_runtime_resume_and_get(virt_devs[i]);
- if (ret) {
- dev_err(cpu_dev, "failed to resume %s: %d\n",
- *name, ret);
-
- /* Rollback previous PM runtime calls */
- name = config.genpd_names;
- for (j = 0; *name && j < i; j++, name++)
- pm_runtime_put(virt_devs[j]);
-
- goto free_opp;
- }
- }
- drv->cpus[cpu].virt_devs = virt_devs;
+ if (drv->data->pd_names) {
+ struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = drv->data->pd_names,
+ .num_pd_names = drv->data->num_pd_names,
+ .pd_flags = PD_FLAG_DEV_LINK_ON |
+ PD_FLAG_REQUIRED_OPP,
+ };
+
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &drv->cpus[cpu].pd_list);
+ if (ret < 0)
+ goto free_opp;
}
}
@@ -569,8 +547,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ for_each_present_cpu(cpu) {
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
return ret;
@@ -583,8 +561,8 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ for_each_present_cpu(cpu) {
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
}
@@ -594,8 +572,8 @@ static int qcom_cpufreq_suspend(struct device *dev)
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
- for_each_possible_cpu(cpu)
- qcom_cpufreq_suspend_virt_devs(drv, cpu);
+ for_each_present_cpu(cpu)
+ qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
}
@@ -604,7 +582,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL)
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
- .remove_new = qcom_cpufreq_remove,
+ .remove = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
@@ -617,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3519bf34d397..8d1f5ac59132 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -254,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
@@ -296,7 +295,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = {
.name = "qoriq-cpufreq",
},
.probe = qoriq_cpufreq_probe,
- .remove_new = qoriq_cpufreq_remove,
+ .remove = qoriq_cpufreq_remove,
};
module_platform_driver(qoriq_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index e0705cc9a57d..5050932954e3 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -85,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = {
.name = "raspberrypi-cpufreq",
},
.probe = raspberrypi_cpufreq_probe,
- .remove_new = raspberrypi_cpufreq_remove,
+ .remove = raspberrypi_cpufreq_remove,
};
module_platform_driver(raspberrypi_cpufreq_driver);
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..53923b8ef7a1
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust-based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{name}-supply")).ok()?;
+ dev.fwnode()?
+ .property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ (cpu.as_u32() == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))
+ .and_then(|name| kernel::kvec![name].ok())
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+ // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device
+ // once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+ // set sharing CPUs, fallback to all CPUs share the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, don't fail for error here unless
+ // it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+ // The table might have been added dynamically at runtime.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::DEFAULT_TRANSITION_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ authors: ["Viresh Kumar <viresh.kumar@linaro.org>"],
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
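
The find_supply_name_exact() helper above probes the device tree for a "<name>-supply" property, mirroring logic that already exists in the C cpufreq-dt driver. For comparison, a rough C sketch of the same probe (helper name hypothetical):

    #include <linux/of.h>
    #include <linux/slab.h>

    /* Sketch: return "cpu0" or "cpu" if the CPU node carries a matching
     * "<name>-supply" property, NULL otherwise. Caller frees the result. */
    static const char *hyp_find_supply_name(struct device_node *np, int cpu)
    {
            if (cpu == 0 && of_property_present(np, "cpu0-supply"))
                    return kstrdup("cpu0", GFP_KERNEL);
            if (of_property_present(np, "cpu-supply"))
                    return kstrdup("cpu", GFP_KERNEL);
            return NULL;
    }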
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c6bdfc308e99..9cef71528076 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
unsigned int vddarm_max;
};
+#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
+#endif
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct s3c64xx_dvfs *dvfs;
- unsigned int old_freq, new_freq;
+ unsigned int new_freq = s3c64xx_freq_table[index].frequency;
int ret;
+#ifdef CONFIG_REGULATOR
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq;
+
old_freq = clk_get_rate(policy->clk) / 1000;
- new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
-#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
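
The s3c64xx restructuring above moves the DVFS voltage table and its lookup inside the CONFIG_REGULATOR guard, so builds without regulator support see neither the table nor the now-unused variables. A condensed sketch of the pattern (all names hypothetical):

    #include <linux/kernel.h>

    /* Sketch: keep regulator-only data and the code that indexes it under
     * the same #ifdef, so !CONFIG_REGULATOR builds see neither. */
    #ifdef CONFIG_REGULATOR
    static const unsigned int hyp_uv_for_index[] = { 1000000, 1050000, 1200000 };
    #endif

    static int hyp_set_voltage_for_index(unsigned int index)
    {
    #ifdef CONFIG_REGULATOR
            unsigned int uv = hyp_uv_for_index[index];      /* used only here */

            pr_debug("requesting %u uV\n", uv);     /* stand-in for regulator call */
    #endif
            return 0;
    }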
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..4215621deb3f 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -554,17 +554,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
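
The s5pv210 hunk above switches to scope-based cleanup: `__free(put_cpufreq_policy)` (defined via DEFINE_FREE() in the cpufreq headers) arranges for cpufreq_cpu_put() to run automatically when the variable leaves scope, which is why the explicit put disappears. A minimal sketch of the same pattern:

    #include <linux/cleanup.h>
    #include <linux/cpufreq.h>

    /* Sketch: the policy reference is dropped automatically on every return
     * path, so no explicit cpufreq_cpu_put() or goto-based unwind is needed. */
    static unsigned int hyp_policy_max(unsigned int cpu)
    {
            struct cpufreq_policy *policy __free(put_cpufreq_policy) =
                    cpufreq_cpu_get(cpu);

            if (!policy)
                    return 0;

            return policy->max;     /* put happens here, at scope exit */
    }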
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 330c8d6cf93c..b360f03a116f 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
@@ -92,7 +91,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 5892c73e129d..d2a110079f5f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,7 +15,9 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
@@ -26,6 +28,8 @@ struct scmi_data {
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
+ struct notifier_block limit_notify_nb;
+ struct freq_qos_request limits_freq_req;
};
static struct scmi_protocol_handle *ph;
@@ -34,11 +38,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct scmi_data *priv;
unsigned long rate;
int ret;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
@@ -101,7 +111,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
int cpu, tdomain;
struct device *tcpu_dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -168,11 +178,21 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
-static struct freq_attr *scmi_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
- NULL,
-};
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+ struct scmi_perf_limits_report *limit_notify = data;
+ unsigned int limit_freq_khz;
+ int ret;
+
+ limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+ ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+ if (ret < 0)
+ pr_warn("failed to update freq constraint: %d\n", ret);
+
+ return NOTIFY_OK;
+}
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -181,6 +201,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -273,7 +294,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -283,19 +304,27 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- goto out_free_opp;
- } else {
- scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- scmi_cpufreq_driver.boost_enabled = true;
- }
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
}
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
+
return 0;
+out_free_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
@@ -311,7 +340,13 @@ out_free_priv:
static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
+ sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ freq_qos_remove_request(&priv->limits_freq_req);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
@@ -350,15 +385,58 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
+ .set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+ * that did not match the SCMI performance protocol node. If we get
+ * here, it means we have such an older Device Tree; return true to
+ * preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -367,9 +445,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
+ scmi_cpufreq_driver.driver_data = sdev;
+
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
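
The scmi changes above replace driver-internal boost wiring with a max-frequency QoS request that a firmware limits notifier updates at runtime. A minimal sketch of that constraint flow (function names hypothetical, API calls as used in the hunk):

    #include <linux/cpufreq.h>
    #include <linux/pm_qos.h>

    /* Sketch: add an "unlimited" max-frequency constraint at init, then
     * lower or raise it whenever the platform reports a new limit. */
    static int hyp_add_limit_request(struct cpufreq_policy *policy,
                                     struct freq_qos_request *req)
    {
            return freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
                                        FREQ_QOS_MAX_DEFAULT_VALUE);
    }

    static void hyp_platform_limit_changed(struct freq_qos_request *req,
                                           unsigned int limit_khz)
    {
            /* The governor re-evaluates policy->max after this call. */
            if (freq_qos_update_request(req, limit_khz) < 0)
                    pr_warn("failed to update freq constraint\n");
    }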
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 8d73e6e8be2a..e530345baddf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scpi_data *priv = policy->driver_data;
- unsigned long rate = clk_get_rate(priv->clk);
+ struct cpufreq_policy *policy;
+ struct scpi_data *priv;
+ unsigned long rate;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+ rate = clk_get_rate(priv->clk);
return rate / 1000;
}
@@ -39,8 +46,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- u64 rate = policy->freq_table[index].frequency * 1000;
+ unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -48,7 +56,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
- if (clk_get_rate(priv->clk) != rate)
+ if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
@@ -64,7 +72,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (domain < 0)
return domain;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -149,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -183,7 +191,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
@@ -217,7 +224,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
.name = "scpi-cpufreq",
},
.probe = scpi_cpufreq_probe,
- .remove_new = scpi_cpufreq_remove,
+ .remove = scpi_cpufreq_remove,
};
module_platform_driver(scpi_cpufreq_platdrv);
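
The scpi target hunk above compares the resulting rate at kHz granularity because the clk framework may round the requested Hz value; an exact Hz comparison could fail spuriously. A sketch with a hypothetical helper:

    #include <linux/clk.h>

    /* Sketch: set a CPU rate given in kHz and verify it tolerantly. */
    static int hyp_set_rate_khz(struct clk *clk, unsigned long freq_khz)
    {
            int ret = clk_set_rate(clk, freq_khz * 1000);

            if (ret)
                    return ret;

            /* Compare in kHz: the clk core may legally round the Hz value. */
            return (clk_get_rate(clk) / 1000 == freq_khz) ? 0 : -EIO;
    }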
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index aa74036d0420..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
@@ -151,7 +149,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 8a0cd5312a59..15899dd77c08 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -323,7 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index b50f9d13e6d2..de50a2f3b124 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -171,7 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
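
Both sparc hunks above convert open-coded `kzalloc(n * size)` into `kcalloc(n, size)`, which zeroes the buffer and fails cleanly on multiplication overflow instead of returning a truncated allocation. A minimal sketch:

    #include <linux/cpufreq.h>
    #include <linux/slab.h>

    /* Sketch: overflow-safe zeroed array allocation. */
    static struct cpufreq_frequency_table *hyp_alloc_table(unsigned int nr)
    {
            /* kcalloc(nr, size) returns NULL if nr * size would overflow. */
            return kcalloc(nr, sizeof(struct cpufreq_frequency_table),
                           GFP_KERNEL);
    }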
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d8ab5b01d46d..2a1550e1aa21 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -165,7 +165,6 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
@@ -183,7 +182,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
- spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+ spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
if (cnt <= 0) {
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3fafedb983b5..3e6e85a92212 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -507,7 +507,6 @@ static struct cpufreq_driver centrino_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index f2076d72bf39..262cfbde9ca7 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = {
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0ce9d4b6dfcc..39265884c3f1 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 293921acec93..744312a44279 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -22,6 +22,9 @@
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
+#define SUN50I_A100_NVMEM_MASK 0xf
+#define SUN50I_A100_NVMEM_SHIFT 12
+
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
struct sunxi_cpufreq_data {
@@ -45,6 +48,23 @@ static u32 sun50i_h6_efuse_xlate(u32 speedbin)
return 0;
}
+static u32 sun50i_a100_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> SUN50I_A100_NVMEM_SHIFT) &
+ SUN50I_A100_NVMEM_MASK;
+
+ switch (efuse_value) {
+ case 0b100:
+ return 2;
+ case 0b010:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static int get_soc_id_revision(void)
{
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
@@ -108,6 +128,10 @@ static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
.efuse_xlate = sun50i_h6_efuse_xlate,
};
+static struct sunxi_cpufreq_data sun50i_a100_cpufreq_data = {
+ .efuse_xlate = sun50i_a100_efuse_xlate,
+};
+
static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
.efuse_xlate = sun50i_h616_efuse_xlate,
};
@@ -116,6 +140,9 @@ static const struct of_device_id cpu_opp_match_list[] = {
{ .compatible = "allwinner,sun50i-h6-operating-points",
.data = &sun50i_h6_cpufreq_data,
},
+ { .compatible = "allwinner,sun50i-a100-operating-points",
+ .data = &sun50i_a100_cpufreq_data,
+ },
{ .compatible = "allwinner,sun50i-h616-operating-points",
.data = &sun50i_h616_cpufreq_data,
},
@@ -167,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void)
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
struct device *cpu_dev;
- u32 *speedbin;
+ void *speedbin_ptr;
+ u32 speedbin = 0;
+ size_t len;
int ret;
cpu_dev = get_cpu_device(0);
@@ -190,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
+ if (IS_ERR(speedbin_ptr))
+ return PTR_ERR(speedbin_ptr);
+
+ if (len <= 4)
+ memcpy(&speedbin, speedbin_ptr, len);
+ speedbin = le32_to_cpu(speedbin);
- ret = opp_data->efuse_xlate(*speedbin);
+ ret = opp_data->efuse_xlate(speedbin);
- kfree(speedbin);
+ kfree(speedbin_ptr);
return ret;
};
@@ -235,7 +268,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "speed%d", speed);
config.prop_name = name;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
@@ -261,7 +294,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
@@ -275,7 +308,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
@@ -283,7 +316,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
static struct platform_driver sun50i_cpufreq_driver = {
.probe = sun50i_cpufreq_nvmem_probe,
- .remove_new = sun50i_cpufreq_nvmem_remove,
+ .remove = sun50i_cpufreq_nvmem_remove,
.driver = {
.name = "sun50i-cpufreq-nvmem",
},
@@ -291,6 +324,7 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h616" },
{ .compatible = "allwinner,sun50i-h618" },
{ .compatible = "allwinner,sun50i-h700" },
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 514146d98bca..f8a76bbecef9 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -16,6 +16,10 @@
#include <linux/pm_opp.h>
#include <linux/types.h>
+#include "cpufreq-dt.h"
+
+static struct platform_device *tegra124_cpufreq_pdev;
+
struct tegra124_cpufreq_priv {
struct clk *cpu_clk;
struct clk *pllp_clk;
@@ -55,7 +59,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
struct device *cpu_dev;
- struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
if (!np)
@@ -95,11 +98,7 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto out_put_pllp_clk;
- cpufreq_dt_devinfo.name = "cpufreq-dt";
- cpufreq_dt_devinfo.parent = &pdev->dev;
-
- priv->cpufreq_dt_pdev =
- platform_device_register_full(&cpufreq_dt_devinfo);
+ priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
goto out_put_pllp_clk;
@@ -173,6 +172,21 @@ disable_cpufreq:
return err;
}
+static void tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!IS_ERR(priv->cpufreq_dt_pdev)) {
+ platform_device_unregister(priv->cpufreq_dt_pdev);
+ priv->cpufreq_dt_pdev = ERR_PTR(-ENODEV);
+ }
+
+ clk_put(priv->pllp_clk);
+ clk_put(priv->pllx_clk);
+ clk_put(priv->dfll_clk);
+ clk_put(priv->cpu_clk);
+}
+
static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
tegra124_cpufreq_resume)
@@ -182,15 +196,16 @@ static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.driver.pm = &tegra124_cpufreq_pm_ops,
.probe = tegra124_cpufreq_probe,
+ .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
{
int ret;
- struct platform_device *pdev;
- if (!(of_machine_is_compatible("nvidia,tegra124") ||
- of_machine_is_compatible("nvidia,tegra210")))
+ if (!(of_machine_is_compatible("nvidia,tegra114") ||
+ of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
@@ -201,15 +216,25 @@ static int __init tegra_cpufreq_init(void)
if (ret)
return ret;
- pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ tegra124_cpufreq_pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(tegra124_cpufreq_pdev)) {
platform_driver_unregister(&tegra124_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(tegra124_cpufreq_pdev);
}
return 0;
}
module_init(tegra_cpufreq_init);
+static void __exit tegra_cpufreq_module_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tegra124_cpufreq_pdev))
+ platform_device_unregister(tegra124_cpufreq_pdev);
+
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+}
+module_exit(tegra_cpufreq_module_exit);
+
MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 7b8fcfa55038..136ab102f636 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+ /* Set the same policy for all CPUs in a cluster. */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
return 0;
}
@@ -86,23 +93,26 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
- unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ unsigned int edvd_offset;
u32 edvd_val = tbl->driver_data;
+ u32 cpu;
- writel(edvd_val, data->regs + edvd_offset);
+ for_each_cpu(cpu, policy->cpus) {
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
return 0;
}
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -110,7 +120,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
@@ -123,18 +132,18 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static struct cpufreq_frequency_table *init_vhint_table(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
+ int *num_rates)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
- int err, i, j, num_rates = 0;
+ int err, i, j;
dma_addr_t phys;
void *virt;
@@ -164,6 +173,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ *num_rates = 0;
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
@@ -174,10 +184,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
- num_rates++;
+ (*num_rates)++;
}
- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
@@ -219,7 +229,9 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- unsigned int i = 0, err;
+ unsigned int i = 0, err, edvd_offset;
+ int num_rates = 0;
+ u32 edvd_val, cpu;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
@@ -242,10 +254,21 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
+ cluster->table = init_vhint_table(pdev, bpmp, cluster, i, &num_rates);
if (IS_ERR(cluster->table)) {
err = PTR_ERR(cluster->table);
goto put_bpmp;
+ } else if (!num_rates) {
+ err = -EINVAL;
+ goto put_bpmp;
+ }
+
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == i) {
+ edvd_val = cluster->table[num_rates - 1].driver_data;
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
}
}
@@ -276,7 +299,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = {
.of_match_table = tegra186_cpufreq_of_match,
},
.probe = tegra186_cpufreq_probe,
- .remove_new = tegra186_cpufreq_remove,
+ .remove = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);
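
The tegra186 changes above make one policy span every CPU in a cluster, so a frequency request must be written to each CPU's EDVD register rather than only the policy owner's. A condensed sketch of that fan-out (names hypothetical):

    #include <linux/cpufreq.h>

    static void hyp_write_edvd(unsigned int cpu, u32 val)
    {
            /* placeholder for writel(val, regs + per-CPU EDVD offset) */
    }

    /* Sketch: fan one frequency request out to every CPU in the policy. */
    static int hyp_set_target(struct cpufreq_policy *policy, u32 edvd_val)
    {
            unsigned int cpu;

            for_each_cpu(cpu, policy->cpus)
                    hyp_write_edvd(cpu, edvd_val);

            return 0;
    }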
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 07ea7ed61b68..9b4f516f313e 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -589,7 +589,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.exit = tegra194_cpufreq_exit,
.online = tegra194_cpufreq_online,
.offline = tegra194_cpufreq_offline,
- .attr = cpufreq_generic_attr,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
@@ -818,7 +817,7 @@ static struct platform_driver tegra194_ccplex_driver = {
.of_match_table = tegra194_cpufreq_of_match,
},
.probe = tegra194_cpufreq_probe,
- .remove_new = tegra194_cpufreq_remove,
+ .remove = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index ba621ce1cdda..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -93,6 +95,8 @@ struct ti_cpufreq_soc_data {
bool multi_regulator;
/* Backward compatibility hack: Might have missing syscon */
#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+/* Backward compatibility hack: new syscon size is 1 register wide */
+#define TI_QUIRK_SYSCON_IS_SINGLE_REG 0x2
u8 quirks;
};
@@ -151,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -305,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -316,8 +323,8 @@ static struct ti_cpufreq_soc_data am625_soc_data = {
.efuse_offset = 0x0018,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_IS_SINGLE_REG,
};
static struct ti_cpufreq_soc_data am62a7_soc_data = {
@@ -325,7 +332,6 @@ static struct ti_cpufreq_soc_data am62a7_soc_data = {
.efuse_offset = 0x0,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
};
@@ -334,7 +340,6 @@ static struct ti_cpufreq_soc_data am62p5_soc_data = {
.efuse_offset = 0x0,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
};
@@ -354,6 +359,10 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
+
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_IS_SINGLE_REG && ret == -EIO)
+ ret = regmap_read(opp_data->syscon, 0x0, &efuse);
+
if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
@@ -453,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 3fadf536c429..65fea47b82e6 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -471,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
- .attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
@@ -565,7 +564,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
- .remove_new = ve_spc_cpufreq_remove,
+ .remove = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
new file mode 100644
index 000000000000..6ffa16d239b2
--- /dev/null
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <linux/arch_topology.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * CPU0..CPUn
+ * +-------------+-------------------------------+--------+-------+
+ * | Register | Description | Offset | Len |
+ * +-------------+-------------------------------+--------+-------+
+ * | cur_perf | read this register to get | 0x0 | 0x4 |
+ * | | the current perf (integer val | | |
+ * | | representing perf relative to | | |
+ * | | max performance) | | |
+ * | | that vCPU is running at | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | set_perf | write to this register to set | 0x4 | 0x4 |
+ * | | perf value of the vCPU | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_len | number of entries in perf | 0x8 | 0x4 |
+ * | | table. A single entry in the | | |
+ * | | perf table denotes no table | | |
+ * | | and the entry contains | | |
+ * | | the maximum perf value | | |
+ * | | that this vCPU supports. | | |
+ * | | The guest can request any | | |
+ * | | value between 1 and max perf | | |
+ * | | when perftbls are not used. | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_sel | write to this register to | 0xc | 0x4 |
+ * | | select perf table entry to | | |
+ * | | read from | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_rd | read this register to get | 0x10 | 0x4 |
+ * | | perf value of the selected | | |
+ * | | entry based on perftbl_sel | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perf_domain | performance domain number | 0x14 | 0x4 |
+ * | | that this vCPU belongs to. | | |
+ * | | vCPUs sharing the same perf | | |
+ * | | domain number are part of the | | |
+ * | | same performance domain. | | |
+ * +-------------+-------------------------------+--------+-------+
+ */
+
+#define REG_CUR_PERF_STATE_OFFSET 0x0
+#define REG_SET_PERF_STATE_OFFSET 0x4
+#define REG_PERFTBL_LEN_OFFSET 0x8
+#define REG_PERFTBL_SEL_OFFSET 0xc
+#define REG_PERFTBL_RD_OFFSET 0x10
+#define REG_PERF_DOMAIN_OFFSET 0x14
+#define PER_CPU_OFFSET 0x1000
+
+#define PERFTBL_MAX_ENTRIES 64U
+
+static void __iomem *base;
+static DEFINE_PER_CPU(u32, perftbl_num_entries);
+
+static void virt_scale_freq_tick(void)
+{
+ int cpu = smp_processor_id();
+ u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu);
+ u64 cur_freq;
+ unsigned long scale;
+
+ cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET
+ + REG_CUR_PERF_STATE_OFFSET);
+
+ cur_freq <<= SCHED_CAPACITY_SHIFT;
+ scale = (unsigned long)div_u64(cur_freq, max_freq);
+ scale = min(scale, SCHED_CAPACITY_SCALE);
+
+ this_cpu_write(arch_freq_scale, scale);
+}
+
+static struct scale_freq_data virt_sfd = {
+ .source = SCALE_FREQ_SOURCE_VIRT,
+ .set_freq_scale = virt_scale_freq_tick,
+};
+
+static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ writel_relaxed(target_freq,
+ base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET);
+ return 0;
+}
+
+static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ virt_cpufreq_set_perf(policy, target_freq);
+ return target_freq;
+}
+
+static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx)
+{
+ writel_relaxed(idx, base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_SEL_OFFSET);
+ return readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_RD_OFFSET);
+}
+
+static int virt_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int ret = 0;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = virt_cpufreq_set_perf(policy, target_freq);
+ cpufreq_freq_transition_end(policy, &freqs, ret != 0);
+
+ return ret;
+}
+
+static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
+{
+ u32 cur_perf_domain, perf_domain;
+ struct device *cpu_dev;
+ int cpu;
+
+ cur_perf_domain = readl_relaxed(base + policy->cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ for_each_present_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ perf_domain = readl_relaxed(base + cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ if (perf_domain == cur_perf_domain)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
+ return 0;
+}
+
+static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ u32 num_perftbl_entries, idx;
+
+ num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);
+
+ if (num_perftbl_entries == 1) {
+ policy->cpuinfo.min_freq = 1;
+ policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cur = policy->max;
+ return 0;
+ }
+
+ table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (idx = 0; idx < num_perftbl_entries; idx++)
+ table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);
+
+ table[idx].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static int virt_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ ret = virt_cpufreq_get_freq_info(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get cpufreq info\n");
+ return ret;
+ }
+
+ ret = virt_cpufreq_get_sharing_cpus(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ /*
+ * To simplify handling of frequency requests on the host side and to
+ * reduce their latency, this ensures that the vCPU thread triggering
+ * the MMIO abort is the same thread whose performance constraints
+ * (e.g. uclamp settings) need to be updated. This spares the VMM
+ * (Virtual Machine Manager) from having to find the correct vCPU
+ * thread and/or from permission issues when configuring other threads.
+ */
+ policy->dvfs_possible_from_any_cpu = false;
+ policy->fast_switch_possible = true;
+
+ /*
+ * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since
+ * the actual physical CPU frequency may not match the requested
+ * frequency from the vCPU thread, due to frequency update latencies
+ * or other inputs to the physical CPU frequency selection. This extra
+ * FIE source allows more accurate freq_scale updates and only takes
+ * effect if another FIE source such as the AMUs has not been registered.
+ */
+ topology_set_scale_freq_source(&virt_sfd, policy->cpus);
+
+ return 0;
+}
+
+static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus);
+ kfree(policy->freq_table);
+}
+
+static int virt_cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* Nothing to restore. */
+ return 0;
+}
+
+static int virt_cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /* Dummy offline() to avoid exit() being called and freeing resources. */
+ return 0;
+}
+
+static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static struct cpufreq_driver cpufreq_virt_driver = {
+ .name = "virt-cpufreq",
+ .init = virt_cpufreq_cpu_init,
+ .exit = virt_cpufreq_cpu_exit,
+ .online = virt_cpufreq_online,
+ .offline = virt_cpufreq_offline,
+ .verify = virt_cpufreq_verify_policy,
+ .target = virt_cpufreq_target,
+ .fast_switch = virt_cpufreq_fast_switch,
+};
+
+static int virt_cpufreq_driver_probe(struct platform_device *pdev)
+{
+ u32 num_perftbl_entries;
+ int ret, cpu;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for_each_possible_cpu(cpu) {
+ num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_LEN_OFFSET);
+
+ if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES)
+ return -ENODEV;
+
+ per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
+ }
+
+ ret = cpufreq_register_driver(&cpufreq_virt_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n");
+ return 0;
+}
+
+static void virt_cpufreq_driver_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpufreq_virt_driver);
+}
+
+static const struct of_device_id virt_cpufreq_match[] = {
+ { .compatible = "qemu,virtual-cpufreq", .data = NULL},
+ {}
+};
+MODULE_DEVICE_TABLE(of, virt_cpufreq_match);
+
+static struct platform_driver virt_cpufreq_driver = {
+ .probe = virt_cpufreq_driver_probe,
+ .remove = virt_cpufreq_driver_remove,
+ .driver = {
+ .name = "virt-cpufreq",
+ .of_match_table = virt_cpufreq_match,
+ },
+};
+
+static int __init virt_cpufreq_init(void)
+{
+ return platform_driver_register(&virt_cpufreq_driver);
+}
+postcore_initcall(virt_cpufreq_init);
+
+static void __exit virt_cpufreq_exit(void)
+{
+ platform_driver_unregister(&virt_cpufreq_driver);
+}
+module_exit(virt_cpufreq_exit);
+
+MODULE_DESCRIPTION("Virtual cpufreq driver");
+MODULE_LICENSE("GPL");
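For orientation, the per-vCPU register block described in the table at the top of this file can be pictured as a C overlay. This is an illustration only: the driver accesses each register individually through readl_relaxed()/writel_relaxed(), and this struct is not part of the patch.

/* Hypothetical overlay of one vCPU's register block (illustration only). */
struct virt_cpufreq_vcpu_regs {
	u32 cur_perf;		/* 0x00: current perf level, read-only */
	u32 set_perf;		/* 0x04: requested perf level, write-only */
	u32 perftbl_len;	/* 0x08: number of perf table entries */
	u32 perftbl_sel;	/* 0x0c: selects the perf table entry to read */
	u32 perftbl_rd;		/* 0x10: perf value of the selected entry */
	u32 perf_domain;	/* 0x14: performance domain id */
};

/*
 * vCPU n's block starts at base + n * PER_CPU_OFFSET, so for example the
 * set_perf register of vCPU 2 lives at base + 0x2004. The scale computed
 * in virt_scale_freq_tick() is cur_freq << SCHED_CAPACITY_SHIFT / max_freq,
 * clamped to SCHED_CAPACITY_SCALE (1024): with cur_perf reading 1500 and a
 * maximum of 3000, the result is (1500 * 1024) / 3000 = 512, i.e. the vCPU
 * is credited with half of its maximum capacity.
 */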
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d103342b7cfc..1de9e92c5b0f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,6 +3,9 @@
# Makefile for cpuidle.
#
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7cfb980a357d..e044fefdb816 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -137,9 +137,9 @@ out_kfree_drv:
/*
* arm_idle_init - Initializes arm cpuidle driver
*
- * Initializes arm cpuidle driver for all CPUs, if any CPU fails
- * to register cpuidle driver then rollback to cancel all CPUs
- * registeration.
+ * Initializes the arm cpuidle driver for all present CPUs. If any
+ * CPU fails to register the cpuidle driver, roll back and cancel
+ * the registration of all CPUs.
*/
static int __init arm_idle_init(void)
{
@@ -147,7 +147,7 @@ static int __init arm_idle_init(void)
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = arm_idle_init_cpu(cpu);
if (ret)
goto out_fail;
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 74972deda0ea..4abba42fcc31 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -148,7 +148,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
if (!cpumask)
return -ENOMEM;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
if (smp_cpuid_part(cpu) == part_id)
cpumask_set_cpu(cpu, cpumask);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 602c4dfdd7e2..5235e6e8f360 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -66,7 +66,7 @@ static void kirkwood_cpuidle_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_cpuidle_driver = {
.probe = kirkwood_cpuidle_probe,
- .remove_new = kirkwood_cpuidle_remove,
+ .remove = kirkwood_cpuidle_remove,
.driver = {
.name = "kirkwood_cpuidle",
},
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 146f97068022..37c41209eaf9 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -28,7 +28,6 @@ struct psci_pd_provider {
};
static LIST_HEAD(psci_pd_providers);
-static bool psci_pd_allow_domain_state;
static int psci_pd_power_off(struct generic_pm_domain *pd)
{
@@ -38,12 +37,9 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
if (!state->data)
return 0;
- if (!psci_pd_allow_domain_state)
- return -EBUSY;
-
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
- psci_set_domain_state(*pd_state);
+ psci_set_domain_state(pd, pd->state_idx, *pd_state);
return 0;
}
@@ -72,6 +68,7 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
*/
if (use_osi) {
pd->power_off = psci_pd_power_off;
+ pd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
@@ -125,15 +122,6 @@ static void psci_pd_remove(void)
}
}
-static void psci_cpuidle_domain_sync_state(struct device *dev)
-{
- /*
- * All devices have now been attached/probed to the PM domain topology,
- * hence it's fine to allow domain states to be picked.
- */
- psci_pd_allow_domain_state = true;
-}
-
static const struct of_device_id psci_of_match[] = {
{ .compatible = "arm,psci-1.0" },
{}
@@ -194,7 +182,6 @@ static struct platform_driver psci_cpuidle_domain_driver = {
.driver = {
.name = "psci-cpuidle-domain",
.of_match_table = psci_of_match,
- .sync_state = psci_cpuidle_domain_sync_state,
},
};
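The reworked psci_set_domain_state() call above feeds the cpuidle-psci.c changes below, which replace the bare per-CPU u32 with a record that also captures the domain and the state index. For orientation, the record (it appears in the next file's hunks) with its fields annotated:

/*
 * Per-CPU record written by the genpd ->power_off() path and consumed by
 * __psci_enter_domain_idle_state(): keeping pd and state_idx next to the
 * state value is what lets the idle path call pm_genpd_inc_rejected() for
 * the right domain state when psci_cpu_suspend_enter() fails.
 */
struct psci_cpuidle_domain_state {
	struct generic_pm_domain *pd;	/* domain that selected the state */
	unsigned int state_idx;		/* index of the selected domain state */
	u32 state;			/* PSCI composite state value, 0 = none */
};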
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 2562dc001fc1..b19bc60cc627 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -25,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <asm/cpuidle.h>
+#include <trace/events/power.h>
#include "cpuidle-psci.h"
#include "dt_idle_states.h"
@@ -35,19 +36,29 @@ struct psci_cpuidle_data {
struct device *dev;
};
+struct psci_cpuidle_domain_state {
+ struct generic_pm_domain *pd;
+ unsigned int state_idx;
+ u32 state;
+};
+
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
-static DEFINE_PER_CPU(u32, domain_state);
+static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
-static bool psci_cpuidle_use_cpuhp;
-void psci_set_domain_state(u32 state)
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state)
{
- __this_cpu_write(domain_state, state);
+ struct psci_cpuidle_domain_state *ds = this_cpu_ptr(&psci_domain_state);
+
+ ds->pd = pd;
+ ds->state_idx = state_idx;
+ ds->state = state;
}
-static inline u32 psci_get_domain_state(void)
+static inline void psci_clear_domain_state(void)
{
- return __this_cpu_read(domain_state);
+ __this_cpu_write(psci_domain_state.state, 0);
}
static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
@@ -57,7 +68,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
struct device *pd_dev = data->dev;
- u32 state;
+ struct psci_cpuidle_domain_state *ds;
+ u32 state = states[idx];
int ret;
ret = cpu_pm_enter();
@@ -70,11 +82,13 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
else
pm_runtime_put_sync_suspend(pd_dev);
- state = psci_get_domain_state();
- if (!state)
- state = states[idx];
+ ds = this_cpu_ptr(&psci_domain_state);
+ if (ds->state)
+ state = ds->state;
+ trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
+ trace_psci_domain_idle_exit(dev->cpu, state, s2idle);
if (s2idle)
dev_pm_genpd_resume(pd_dev);
@@ -83,8 +97,12 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
cpu_pm_exit();
+ /* Correct the domain idle state statistics if we failed to enter. */
+ if (ret == -1 && ds->state)
+ pm_genpd_inc_rejected(ds->pd, ds->state_idx);
+
/* Clear the domain state to start fresh when back from idle. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
return ret;
}
@@ -105,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu)
{
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
- if (pd_dev)
- pm_runtime_get_sync(pd_dev);
+ if (pd_dev) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_get_sync(pd_dev);
+ else
+ dev_pm_genpd_resume(pd_dev);
+ }
return 0;
}
@@ -116,9 +138,13 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
if (pd_dev) {
- pm_runtime_put_sync(pd_dev);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_put_sync(pd_dev);
+ else
+ dev_pm_genpd_suspend(pd_dev);
+
/* Clear domain state to start fresh at next online. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
}
return 0;
@@ -144,7 +170,7 @@ static void psci_idle_syscore_switch(bool suspend)
/* Clear domain state to re-start fresh. */
if (!cleared) {
- psci_set_domain_state(0);
+ psci_clear_domain_state();
cleared = true;
}
}
@@ -177,9 +203,6 @@ static void psci_idle_init_cpuhp(void)
{
int err;
- if (!psci_cpuidle_use_cpuhp)
- return;
-
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@@ -240,10 +263,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* s2ram and s2idle.
*/
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
- if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
return 0;
}
@@ -320,7 +341,6 @@ static void psci_cpu_deinit_idle(int cpu)
dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_syscore = false;
- psci_cpuidle_use_cpuhp = false;
}
static int psci_idle_init_cpu(struct device *dev, int cpu)
@@ -400,18 +420,18 @@ deinit:
/*
* psci_idle_probe - Initializes PSCI cpuidle driver
*
- * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * Initializes the PSCI cpuidle driver for all present CPUs. If any CPU
 * fails to register the cpuidle driver, roll back and cancel the
 * registration of all CPUs.
*/
-static int psci_cpuidle_probe(struct platform_device *pdev)
+static int psci_cpuidle_probe(struct faux_device *fdev)
{
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
- ret = psci_idle_init_cpu(&pdev->dev, cpu);
+ for_each_present_cpu(cpu) {
+ ret = psci_idle_init_cpu(&fdev->dev, cpu);
if (ret)
goto out_fail;
}
@@ -431,26 +451,36 @@ out_fail:
return ret;
}
-static struct platform_driver psci_cpuidle_driver = {
+static struct faux_device_ops psci_cpuidle_ops = {
.probe = psci_cpuidle_probe,
- .driver = {
- .name = "psci-cpuidle",
- },
};
+static bool __init dt_idle_state_present(void)
+{
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ if (!cpu_node)
+ return false;
+
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
+ if (!state_node)
+ return false;
+
+ return !!of_match_node(psci_idle_state_match, state_node);
+}
+
static int __init psci_idle_init(void)
{
- struct platform_device *pdev;
- int ret;
+ struct faux_device *fdev;
- ret = platform_driver_register(&psci_cpuidle_driver);
- if (ret)
- return ret;
+ if (!dt_idle_state_present())
+ return 0;
- pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&psci_cpuidle_driver);
- return PTR_ERR(pdev);
+ fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops);
+ if (!fdev) {
+ pr_err("Failed to create psci-cpuidle device\n");
+ return -ENODEV;
}
return 0;
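A note on the cleanup idiom used in dt_idle_state_present() above: the __free(device_node) annotation, built on <linux/cleanup.h>, ties of_node_put() to the variable's scope. A minimal sketch of the pattern, assuming the usual OF helpers (the function and property names here are illustrative, not from the patch):

#include <linux/cleanup.h>
#include <linux/of.h>
#include <linux/of_device.h>

/* Sketch: the reference taken by of_cpu_device_node_get() is dropped
 * automatically when np goes out of scope, on every return path.
 */
static bool example_cpu_has_idle_states(int cpu)
{
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

	if (!np)
		return false;

	return of_property_present(np, "cpu-idle-states");
	/* of_node_put(np) runs here implicitly */
}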
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index ef004ec7a7c5..d29cbd796cd5 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -4,8 +4,10 @@
#define __CPUIDLE_PSCI_H
struct device_node;
+struct generic_pm_domain;
-void psci_set_domain_state(u32 state);
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 14db9b7d985d..f68c65f1d023 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -22,6 +22,7 @@
#include <asm/idle.h>
#include <asm/plpar_wrappers.h>
#include <asm/rtas.h>
+#include <asm/time.h>
static struct cpuidle_driver pseries_idle_driver = {
.name = "pseries_idle",
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 1fc9968eae19..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -48,7 +48,7 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
ret = cpu_suspend(0, qcom_pm_collapse);
/*
* ARM common code executes WFI without calling into our driver and
- * if the SPM mode is not reset, then we may accidently power down the
+ * if the SPM mode is not reset, then we may accidentally power down the
* cpu when we intended only to gate the cpu clock.
* Ensure the state is set to standby before returning.
*/
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
@@ -135,7 +138,7 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = spm_cpuidle_register(&pdev->dev, cpu);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev,
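The qcom-spm hunk above also fixes a device reference leak: of_find_device_by_node() returns its platform device with an elevated reference count, which must be dropped with put_device() on every path, including the allocation-failure one. A minimal sketch of the corrected discipline (the function name is illustrative):

#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Sketch: balance the reference taken by of_find_device_by_node() once
 * the borrowed drvdata pointer has been copied out.
 */
static void *example_get_spm_drvdata(struct device_node *saw_node)
{
	struct platform_device *pdev = of_find_device_by_node(saw_node);
	void *spm;

	if (!pdev)
		return NULL;

	spm = dev_get_drvdata(&pdev->dev);
	put_device(&pdev->dev);		/* drop the lookup's reference */

	return spm;
}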
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index d228b4d18d56..a360bc4d20b7 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -26,6 +26,7 @@
#include <asm/smp.h>
#include <asm/suspend.h>
+#include "cpuidle.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"
@@ -43,7 +44,6 @@ static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
-static bool sbi_cpuidle_pd_allow_domain_state;
static inline void sbi_set_domain_state(u32 state)
{
@@ -329,6 +329,9 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
return ret;
}
+ if (cpuidle_disabled())
+ return 0;
+
ret = cpuidle_register(drv, NULL);
if (ret)
goto deinit;
@@ -341,15 +344,6 @@ deinit:
return ret;
}
-static void sbi_cpuidle_domain_sync_state(struct device *dev)
-{
- /*
- * All devices have now been attached/probed to the PM domain
- * topology, hence it's fine to allow domain states to be picked.
- */
- sbi_cpuidle_pd_allow_domain_state = true;
-}
-
#ifdef CONFIG_DT_IDLE_GENPD
static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
@@ -360,9 +354,6 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
if (!state->data)
return 0;
- if (!sbi_cpuidle_pd_allow_domain_state)
- return -EBUSY;
-
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
sbi_set_domain_state(*pd_state);
@@ -500,12 +491,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- struct device_node *np, *pds_node;
+ struct device_node *pds_node;
/* Detect OSI support based on CPU DT nodes */
sbi_cpuidle_use_osi = true;
for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (np &&
of_property_present(np, "power-domains") &&
of_property_present(np, "power-domain-names")) {
@@ -525,8 +516,8 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
return ret;
}
- /* Initialize CPU idle driver for each CPU */
- for_each_possible_cpu(cpu) {
+ /* Initialize CPU idle driver for each present CPU */
+ for_each_present_cpu(cpu) {
ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
if (ret) {
pr_debug("HART%ld: idle driver init failed\n",
@@ -538,7 +529,10 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
 /* Set up CPU hotplug notifiers */
sbi_idle_init_cpuhp();
- pr_info("idle driver registered for all CPUs\n");
+ if (cpuidle_disabled())
+ pr_info("cpuidle is disabled\n");
+ else
+ pr_info("idle driver registered for all CPUs\n");
return 0;
@@ -557,7 +551,6 @@ static struct platform_driver sbi_cpuidle_driver = {
.probe = sbi_cpuidle_probe,
.driver = {
.name = "sbi-cpuidle",
- .sync_state = sbi_cpuidle_domain_sync_state,
},
};
@@ -582,4 +575,4 @@ static int __init sbi_cpuidle_init(void)
return 0;
}
-device_initcall(sbi_cpuidle_init);
+arch_initcall(sbi_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9e418aec1755..56132e843c99 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -69,11 +69,15 @@ int cpuidle_play_dead(void)
if (!drv)
return -ENODEV;
- /* Find lowest-power state that supports long-term idle */
- for (i = drv->state_count - 1; i >= 0; i--)
+ for (i = drv->state_count - 1; i >= 0; i--) {
if (drv->states[i].enter_dead)
- return drv->states[i].enter_dead(dev, i);
+ drv->states[i].enter_dead(dev, i);
+ }
+ /*
+ * If ->enter_dead() is successful, it will never return, so reaching
+ * here means that all of them failed above or were not present.
+ */
return -ENODEV;
}
@@ -406,7 +410,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* Min polling interval of 10usec is a guess. It is assuming that
* for most users, the time for a single ping-pong workload like
* perf bench pipe would generally complete within 10usec but
- * this is hardware dependant. Actual time can be estimated with
+ * this is hardware dependent. Actual time can be estimated with
*
* perf bench sched pipe -l 10000
*
@@ -631,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -644,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index cf5873cc45dc..9bbfa594c442 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -261,7 +261,7 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
* @drv: a pointer to a valid struct cpuidle_driver
*
* Register the driver under a lock to prevent concurrent attempts to
- * [un]register the driver from occuring at the same time.
+ * [un]register the driver from occurring at the same time.
*
* Returns 0 on success, a negative error code (returned by
* __cpuidle_register_driver()) otherwise.
@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
* @drv: a pointer to a valid struct cpuidle_driver
*
* Unregisters the cpuidle driver under a lock to prevent concurrent attempts
- * to [un]register the driver from occuring at the same time. @drv has to
+ * to [un]register the driver from occurring at the same time. @drv has to
* match the currently registered driver.
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 97feb7d8fb23..558d49838990 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -98,7 +98,6 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
{
int cpu;
struct device_node *cpu_node, *curr_state_node;
- bool valid = true;
/*
* Compare idle state phandles for index idx on all CPUs in the
@@ -107,20 +106,17 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
* retrieved from. If a mismatch is found bail out straight
* away since we certainly hit a firmware misconfiguration.
*/
- for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
- cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
+ cpu = cpumask_first(cpumask) + 1;
+ for_each_cpu_from(cpu, cpumask) {
cpu_node = of_cpu_device_node_get(cpu);
curr_state_node = of_get_cpu_state_node(cpu_node, idx);
- if (state_node != curr_state_node)
- valid = false;
-
of_node_put(curr_state_node);
of_node_put(cpu_node);
- if (!valid)
- break;
+ if (state_node != curr_state_node)
+ return false;
}
- return valid;
+ return true;
}
/**
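The rewritten loop in idle_state_valid() above relies on for_each_cpu_from(), which resumes iteration at the current value of its cursor. A minimal sketch of the idiom (the loop body is illustrative only):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Sketch: visit every CPU in the mask except the first by seeding the
 * cursor one past the first set bit; for_each_cpu_from() finds the rest.
 */
static void example_walk_all_but_first(const struct cpumask *mask)
{
	unsigned int cpu = cpumask_first(mask) + 1;

	for_each_cpu_from(cpu, mask)
		pr_info("validating idle states of CPU%u\n", cpu);
}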
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f3c9d49f0f2a..4d9aa5ce31f0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,7 @@
#include "gov.h"
-#define BUCKETS 12
+#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
@@ -29,12 +29,11 @@
/*
* Concepts and ideas behind the menu governor
*
- * For the menu governor, there are 3 decision factors for picking a C
+ * For the menu governor, there are 2 decision factors for picking a C
* state:
* 1) Energy break even point
- * 2) Performance impact
- * 3) Latency tolerance (from pmqos infrastructure)
- * These three factors are treated independently.
+ * 2) Latency tolerance (from pmqos infrastructure)
+ * These two factors are treated independently.
*
* Energy break even point
* -----------------------
@@ -42,7 +41,7 @@
* the C state is required to actually break even on this cost. CPUIDLE
* provides us this duration in the "target_residency" field. So all that we
* need is a good prediction of how long we'll be idle. Like the traditional
- * menu governor, we start with the actual known "next timer event" time.
+ * menu governor, we take the actual known "next timer event" time.
*
 * Since there are other sources of wakeups (interrupts for example) than
* the next timer event, this estimation is rather optimistic. To get a
@@ -51,54 +50,21 @@
* duration always was 50% of the next timer tick, the correction factor will
* be 0.5.
*
- * menu uses a running average for this correction factor, however it uses a
- * set of factors, not just a single factor. This stems from the realization
- * that the ratio is dependent on the order of magnitude of the expected
- * duration; if we expect 500 milliseconds of idle time the likelihood of
- * getting an interrupt very early is much higher than if we expect 50 micro
- * seconds of idle time. A second independent factor that has big impact on
- * the actual factor is if there is (disk) IO outstanding or not.
- * (as a special twist, we consider every sleep longer than 50 milliseconds
- * as perfect; there are no power gains for sleeping longer than this)
- *
- * For these two reasons we keep an array of 12 independent factors, that gets
- * indexed based on the magnitude of the expected duration as well as the
- * "is IO outstanding" property.
+ * menu uses a running average for this correction factor, but it uses a set of
+ * factors, not just a single factor. This stems from the realization that the
+ * ratio is dependent on the order of magnitude of the expected duration; if we
+ * expect 500 milliseconds of idle time, the likelihood of getting an interrupt
+ * very early is much higher than if we expect 50 microseconds of idle time.
+ * For this reason, menu keeps an array of 6 independent factors that gets
+ * indexed based on the magnitude of the expected duration.
*
* Repeatable-interval-detector
* ----------------------------
* There are some cases where "next timer" is a completely unusable predictor:
* Those cases where the interval is fixed, for example due to hardware
- * interrupt mitigation, but also due to fixed transfer rate devices such as
- * mice.
+ * interrupt mitigation, but also due to fixed transfer rate devices like mice.
* For this, we use a different predictor: We track the duration of the last 8
- * intervals and if the stand deviation of these 8 intervals is below a
- * threshold value, we use the average of these intervals as prediction.
- *
- * Limiting Performance Impact
- * ---------------------------
- * C states, especially those with large exit latencies, can have a real
- * noticeable impact on workloads, which is not acceptable for most sysadmins,
- * and in addition, less performance has a power price of its own.
- *
- * As a general rule of thumb, menu assumes that the following heuristic
- * holds:
- * The busier the system, the less impact of C states is acceptable
- *
- * This rule-of-thumb is implemented using a performance-multiplier:
- * If the exit latency times the performance multiplier is longer than
- * the predicted duration, the C state is not considered a candidate
- * for selection due to a too high performance impact. So the higher
- * this multiplier is, the longer we need to be idle to pick a deep C
- * state, and thus the less likely a busy CPU will hit such a deep
- * C state.
- *
- * Currently there is only one value determining the factor:
- * 10 points are added for each process that is waiting for IO on this CPU.
- * (This value was experimentally determined.)
- * Utilization is no longer a factor as it was shown that it never contributed
- * significantly to the performance multiplier in the first place.
- *
+ * intervals and use them to estimate the duration of the next one.
*/
struct menu_device {
@@ -112,19 +78,10 @@ struct menu_device {
int interval_ptr;
};
-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+static inline int which_bucket(u64 duration_ns)
{
int bucket = 0;
- /*
- * We keep two groups of stats; one with no
- * IO pending, one without.
- * This allows us to calculate
- * E(duration)|iowait
- */
- if (nr_iowaiters)
- bucket = BUCKETS/2;
-
if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
if (duration_ns < 100ULL * NSEC_PER_USEC)
@@ -138,21 +95,16 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
return bucket + 5;
}
-/*
- * Return a multiplier for the exit latency that is intended
- * to take performance requirements into account.
- * The more performance critical we estimate the system
- * to be, the higher this multiplier, and thus the higher
- * the barrier to go to an expensive C state.
- */
-static inline int performance_multiplier(unsigned int nr_iowaiters)
+static DEFINE_PER_CPU(struct menu_device, menu_devices);
+
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
- /* for IO wait tasks (per cpu!) we add 10x each */
- return 1 + 10 * nr_iowaiters;
+ /* Update the repeating-pattern data. */
+ data->intervals[data->interval_ptr++] = interval_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
}
-static DEFINE_PER_CPU(struct menu_device, menu_devices);
-
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
@@ -163,53 +115,52 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
*/
static unsigned int get_typical_interval(struct menu_device *data)
{
- int i, divisor;
- unsigned int min, max, thresh, avg;
- uint64_t sum, variance;
-
- thresh = INT_MAX; /* Discard outliers above this value */
+ s64 value, min_thresh = -1, max_thresh = UINT_MAX;
+ unsigned int max, min, divisor;
+ u64 avg, variance, avg_sq;
+ int i;
again:
-
- /* First calculate the average of past intervals */
- min = UINT_MAX;
+ /* Compute the average and variance of past intervals. */
max = 0;
- sum = 0;
+ min = UINT_MAX;
+ avg = 0;
+ variance = 0;
divisor = 0;
for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- sum += value;
- divisor++;
- if (value > max)
- max = value;
-
- if (value < min)
- min = value;
- }
+ value = data->intervals[i];
+ /*
+ * Discard the samples outside the interval between the min and
+ * max thresholds.
+ */
+ if (value <= min_thresh || value >= max_thresh)
+ continue;
+
+ divisor++;
+
+ avg += value;
+ variance += value * value;
+
+ if (value > max)
+ max = value;
+
+ if (value < min)
+ min = value;
}
if (!max)
return UINT_MAX;
- if (divisor == INTERVALS)
- avg = sum >> INTERVAL_SHIFT;
- else
- avg = div_u64(sum, divisor);
-
- /* Then try to determine variance */
- variance = 0;
- for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- int64_t diff = (int64_t)value - avg;
- variance += diff * diff;
- }
- }
- if (divisor == INTERVALS)
+ if (divisor == INTERVALS) {
+ avg >>= INTERVAL_SHIFT;
variance >>= INTERVAL_SHIFT;
- else
+ } else {
+ do_div(avg, divisor);
do_div(variance, divisor);
+ }
+
+ avg_sq = avg * avg;
+ variance -= avg_sq;
/*
* The typical interval is obtained when standard deviation is
@@ -224,25 +175,40 @@ again:
* Use this result only if there is no timer to wake us up sooner.
*/
if (likely(variance <= U64_MAX/36)) {
- if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
- || variance <= 400) {
+ if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
+ variance <= 400)
return avg;
- }
}
/*
- * If we have outliers to the upside in our distribution, discard
- * those by setting the threshold to exclude these outliers, then
+ * If there are outliers, discard them by setting thresholds to exclude
+ * data points at a large enough distance from the average, then
* calculate the average and standard deviation again. Once we get
- * down to the bottom 3/4 of our samples, stop excluding samples.
+ * down to the last 3/4 of our samples, stop excluding samples.
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
- if ((divisor * 4) <= INTERVALS * 3)
+ if (divisor * 4 <= INTERVALS * 3) {
+ /*
+ * If there are sufficiently many data points still under
+ * consideration after the outliers have been eliminated,
+ * returning without a prediction would be a mistake because it
+ * is likely that the next interval will not exceed the current
+ * maximum, so return the latter in that case.
+ */
+ if (divisor >= INTERVALS / 2)
+ return max;
+
return UINT_MAX;
+ }
+
+ /* Update the thresholds for the next round. */
+ if (avg - min > max - avg)
+ min_thresh = min;
+ else
+ max_thresh = max;
- thresh = max - 1;
goto again;
}
@@ -258,18 +224,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct menu_device *data = this_cpu_ptr(&menu_devices);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
u64 predicted_ns;
- u64 interactivity_req;
- unsigned int nr_iowaiters;
ktime_t delta, delta_tick;
int i, idx;
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
+ } else if (!dev->last_residency_ns) {
+ /*
+ * This happens when the driver rejects the previously selected
+ * idle state and returns an error, so update the recent
+ * intervals table to prevent invalid information from being
+ * used going forward.
+ */
+ menu_update_intervals(data, UINT_MAX);
}
- nr_iowaiters = nr_iowait_cpu(dev->cpu);
-
/* Find the shortest expected idle interval. */
predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
@@ -283,7 +253,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
data->next_timer_ns = delta;
- data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns);
/* Round up the result for half microseconds. */
timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
@@ -301,7 +271,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+ data->bucket = BUCKETS - 1;
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -317,27 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- if (tick_nohz_tick_stopped()) {
- /*
- * If the tick is already stopped, the cost of possible short
- * idle duration misprediction is much higher, because the CPU
- * may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and use
- * the known time till the closest timer event for the idle
- * state selection.
- */
- if (predicted_ns < TICK_NSEC)
- predicted_ns = data->next_timer_ns;
- } else {
- /*
- * Use the performance multiplier and the user-configurable
- * latency_req to determine the maximum exit latency.
- */
- interactivity_req = div64_u64(predicted_ns,
- performance_multiplier(nr_iowaiters));
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
- }
+ /*
+ * If the tick is already stopped, the cost of possible short idle
+ * duration misprediction is much higher, because the CPU may be stuck
+ * in a shallow idle state for a long time as a result of it. In that
+ * case, say we might mispredict and use the known time till the closest
+ * timer event for the idle state selection.
+ */
+ if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+ predicted_ns = data->next_timer_ns;
/*
* Find the idle state with the lowest power while satisfying
@@ -353,48 +311,50 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency_ns <= latency_req &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->exit_latency_ns > latency_req)
+ break;
- /*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
- */
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
- return idx;
+ /*
+ * Use a physical idle state, not busy polling, unless a timer
+ * is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
}
- if (s->exit_latency_ns > latency_req)
+
+ if (predicted_ns < TICK_NSEC)
+ break;
+
+ if (!tick_nohz_tick_stopped()) {
+ /*
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
+ */
+ predicted_ns = drv->states[idx].target_residency_ns;
break;
+ }
- idx = i;
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
@@ -535,10 +495,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
- if (data->interval_ptr >= INTERVALS)
- data->interval_ptr = 0;
+ menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**
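A note on the arithmetic in the reworked get_typical_interval() above: it computes the mean and variance in a single pass using Var(X) = E[X^2] - (E[X])^2, and the "standard deviation no more than avg/6" acceptance test is done without a square root by checking avg^2 > 36 * variance. A self-contained sketch of the same math (plain C, not the kernel code):

#include <stdint.h>

/*
 * One-pass mean/variance with the avg/6 standard deviation check used
 * above. Example: intervals {100, 98, 102, 100} us give avg = 100 and
 * variance = 2; since 100 * 100 > 36 * 2, 100 us is accepted as the
 * typical interval.
 */
static uint32_t typical_interval(const uint32_t *v, int n)
{
	uint64_t avg = 0, var = 0;
	int i;

	for (i = 0; i < n; i++) {
		avg += v[i];
		var += (uint64_t)v[i] * v[i];
	}
	avg /= n;		/* E[X]   (integer division) */
	var /= n;		/* E[X^2] */
	var -= avg * avg;	/* Var(X) = E[X^2] - (E[X])^2 */

	return (avg * avg > 36 * var) ? (uint32_t)avg : UINT32_MAX;
}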
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..bfa55c1eab5b 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -49,47 +51,50 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
+ *
+ * The governor also counts "intercepts" with the measured idle duration below
+ * the tick period length and uses this information when deciding whether or not
+ * to stop the scheduler tick.
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
+ *
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
*
- * 3. By default, select the candidate state.
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
@@ -101,6 +106,12 @@
#include "gov.h"
/*
+ * Idle state exit latency threshold used for deciding whether or not to check
+ * the time till the closest expected timer event.
+ */
+#define LATENCY_THRESHOLD_NS (RESIDENCY_THRESHOLD_NS / 2)
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -119,18 +130,20 @@ struct teo_bin {
/**
* struct teo_cpu - CPU data used by the TEO cpuidle governor.
- * @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @tick_hits: Number of "hits" after TICK_NSEC.
+ * @tick_intercepts: "Intercepts" before TICK_NSEC.
+ * @short_idles: Wakeups after short idle periods.
+ * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
*/
struct teo_cpu {
- s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- unsigned int tick_hits;
+ unsigned int tick_intercepts;
+ unsigned int short_idles;
+ bool artificial_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -147,23 +160,17 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
s64 target_residency_ns;
u64 measured_ns;
- if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+
+ if (cpu_data->artificial_wakeup) {
/*
- * One of the safety nets has triggered or the wakeup was close
- * enough to the closest timer event expected at the idle state
- * selection time to be discarded.
+ * If one of the safety nets has triggered, assume that this
+ * might have been a long sleep.
*/
measured_ns = U64_MAX;
} else {
u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- /*
- * The computations below are to determine whether or not the
- * (saved) time till the next timer event and the measured idle
- * duration fall into the same "bin", so use last_residency_ns
- * for that instead of time_span_ns which includes the cpuidle
- * overhead.
- */
measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
@@ -171,14 +178,16 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_ns >= lat_ns)
+ if (measured_ns >= lat_ns) {
measured_ns -= lat_ns / 2;
- else
+ if (measured_ns < RESIDENCY_THRESHOLD_NS)
+ cpu_data->short_idles += PULSE;
+ } else {
measured_ns /= 2;
+ cpu_data->short_idles += PULSE;
+ }
}
- cpu_data->total = 0;
-
/*
* Decay the "hits" and "intercepts" metrics for all of the bins and
* find the bins that the sleep length and the measured idle duration
@@ -190,8 +199,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
bin->hits -= bin->hits >> DECAY_SHIFT;
bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
- cpu_data->total += bin->hits + bin->intercepts;
-
target_residency_ns = drv->states[i].target_residency_ns;
if (target_residency_ns <= cpu_data->sleep_length_ns) {
@@ -201,38 +208,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- /*
- * If the deepest state's target residency is below the tick length,
- * make a record of it to help teo_select() decide whether or not
- * to stop the tick. This effectively adds an extra hits-only bin
- * beyond the last state-related one.
- */
- if (target_residency_ns < TICK_NSEC) {
- cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;
-
- cpu_data->total += cpu_data->tick_hits;
-
- if (TICK_NSEC <= cpu_data->sleep_length_ns) {
- idx_timer = drv->state_count;
- if (TICK_NSEC <= measured_ns) {
- cpu_data->tick_hits += PULSE;
- goto end;
- }
- }
- }
-
+ cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration)
+ if (idx_timer == idx_duration) {
cpu_data->state_bins[idx_timer].hits += PULSE;
- else
+ } else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
+ if (TICK_NSEC <= measured_ns)
+ cpu_data->tick_intercepts += PULSE;
+ }
-end:
+ cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
cpu_data->total += PULSE;
}
@@ -280,14 +271,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
- unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -296,10 +285,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
dev->last_state_idx = -1;
}
- cpu_data->time_span_ns = local_clock();
/*
- * Set the expected sleep length to infinity in case of an early
- * return.
+ * Set the sleep length to infinity in case the invocation of
+ * tick_nohz_get_sleep_length() below is skipped, in which case it won't
+ * be known whether or not the subsequent wakeup is caused by a timer.
+ * It is generally fine to count the wakeup as an intercept then, except
+ * for the cases when the CPU is mostly woken up by timers, where doing so
+ * may cause opportunities to ask for a deeper idle state (when no
+ * imminent timers are scheduled) to be missed.
*/
cpu_data->sleep_length_ns = KTIME_MAX;
@@ -318,7 +311,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct cpuidle_state *s = &drv->states[i];
/*
- * Update the sums of idle state mertics for all of the states
+ * Update the sums of idle state metrics for all of the states
* shallower than the current one.
*/
intercept_sum += prev_bin->intercepts;
@@ -355,17 +348,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
- tick_intercept_sum = intercept_sum +
- cpu_data->state_bins[drv->state_count-1].intercepts;
-
/*
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states a shallower idle state is likely to be a
+ * all of the deeper states, a shallower idle state is likely to be a
* better choice.
*/
- prev_intercept_idx = idx;
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
@@ -391,41 +380,38 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* first enabled state that is deep enough.
*/
if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable)
+ !dev->states_usage[i].disable) {
idx = i;
- else
- idx = first_suitable_idx;
-
+ break;
+ }
+ idx = first_suitable_idx;
break;
}
if (dev->states_usage[i].disable)
continue;
- if (!teo_state_ok(i, drv)) {
+ if (teo_state_ok(i, drv)) {
/*
- * The current state is too shallow, but if an
- * alternative candidate state has been found,
- * it may still turn out to be a better choice.
+ * The current state is deep enough, but still
+ * there may be a better one.
*/
- if (first_suitable_idx != idx)
- continue;
-
- break;
+ first_suitable_idx = i;
+ continue;
}
- first_suitable_idx = i;
+ /*
+ * The current state is too shallow, so if no suitable
+ * states other than the initial candidate have been
+ * found, give up (the remaining states to check are
+ * shallower still), but otherwise the first suitable
+ * state other than the initial candidate may turn out
+ * to be preferable.
+ */
+ if (first_suitable_idx == idx)
+ break;
}
}
- if (!idx && prev_intercept_idx) {
- /*
- * We have to query the sleep length here otherwise we don't
- * know after wakeup if our guess was correct.
- */
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- cpu_data->sleep_length_ns = duration_ns;
- goto out_tick;
- }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -435,24 +421,39 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * Skip the timers check if state 0 is the current candidate one,
- * because an immediate non-timer wakeup is expected in that case.
- */
- if (!idx)
- goto out_tick;
-
- /*
- * If state 0 is a polling one, check if the target residency of
- * the current candidate state is low enough and skip the timers
- * check in that case too.
+ * If either the candidate state is state 0 or its target residency is
+ * low enough, there is basically nothing more to do, but if the sleep
+ * length is not updated, the subsequent wakeup will be counted as an
+ * "intercept" which may be problematic in the cases when timer wakeups
+ * are dominant. Namely, it may effectively prevent deeper idle states
+ * from being selected at one point even if no imminent timers are
+ * scheduled.
+ *
+ * However, frequent timers in the RESIDENCY_THRESHOLD_NS range on one
+ * CPU are unlikely (user space has a default 50 us slack value for
+ * hrtimers and there are relatively few timers with a lower deadline
+ * value in the kernel), and even if they did happen, the potential
+ * benefit from using a deep idle state in that case would be
+ * questionable anyway for latency reasons. Thus if the measured idle
+ * duration falls into that range in the majority of cases, assume
+ * non-timer wakeups to be dominant and skip updating the sleep length
+ * to reduce latency.
+ *
+ * Also, if the latency constraint is sufficiently low, it will force
+ * shallow idle states regardless of the wakeup type, so the sleep
+ * length need not be known in that case.
*/
- if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
+ if ((!idx || drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS) &&
+ (2 * cpu_data->short_idles >= cpu_data->total ||
+ latency_req < LATENCY_THRESHOLD_NS))
goto out_tick;
duration_ns = tick_nohz_get_sleep_length(&delta_tick);
cpu_data->sleep_length_ns = duration_ns;
+ if (!idx)
+ goto out_tick;
+
/*
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
@@ -469,7 +470,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* total wakeup events, do not stop the tick.
*/
if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
+ cpu_data->tick_intercepts > cpu_data->total / 2 + cpu_data->total / 8)
duration_ns = TICK_NSEC / 2;
end:
@@ -506,17 +507,16 @@ static void teo_reflect(struct cpuidle_device *dev, int state)
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
dev->last_state_idx = state;
- /*
- * If the wakeup was not "natural", but triggered by one of the safety
- * nets, assume that the CPU might have been idle for the entire sleep
- * length time.
- */
if (dev->poll_time_limit ||
(tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ /*
+ * The wakeup was not "genuine", but triggered by one of the
+ * safety nets.
+ */
dev->poll_time_limit = false;
- cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ cpu_data->artificial_wakeup = true;
} else {
- cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ cpu_data->artificial_wakeup = false;
}
}
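
The net effect of the teo_select() changes above is a single gating predicate for
the tick_nohz_get_sleep_length() call. A minimal restatement, assuming the
governor's existing RESIDENCY_THRESHOLD_NS and LATENCY_THRESHOLD_NS constants;
teo_skip_sleep_length() is a hypothetical helper, not part of the patch:

static bool teo_skip_sleep_length(int idx, s64 target_residency_ns,
				  s64 latency_req,
				  unsigned int short_idles, unsigned int total)
{
	/* Deep candidate states always need the sleep length. */
	if (idx && target_residency_ns >= RESIDENCY_THRESHOLD_NS)
		return false;

	/*
	 * Skip the query when short idle durations dominate the wakeup
	 * history or the latency constraint forces shallow states anyway.
	 */
	return 2 * short_idles >= total || latency_req < LATENCY_THRESHOLD_NS;
}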
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
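
All of the sprintf() conversions above follow the same shape. A minimal sketch of
a show() callback using sysfs_emit(), which checks that buf is the page-sized
sysfs buffer and clamps the output at PAGE_SIZE (the attribute and its name are
hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() never writes past PAGE_SIZE and returns the length. */
	return sysfs_emit(buf, "%s\n", "example");
}
static DEVICE_ATTR_RO(example);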
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 08b1238bcd7b..a6688d54984c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -95,6 +95,9 @@ config PKEY
loaded when a CEX crypto card is available.
- A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
loaded when a CEX crypto card is available.
+ - A pkey UV kernel module (pkey-uv.ko) which is automatically
+ loaded when the Ultravisor feature is available within a
+ protected execution environment.
Select this option if you want to enable the kernel and userspace
API for protected key handling.
@@ -152,6 +155,24 @@ config PKEY_PCKMO
this option unless you are sure you never need to derive protected
keys from clear key values directly via PCKMO.
+config PKEY_UV
+ tristate "PKEY UV support handler"
+ depends on PKEY
+ depends on S390_UV_UAPI
+ help
+ This is the PKEY Ultravisor support handler for deriving protected
+ keys from secrets stored within the Ultravisor (UV).
+
+ This module works together with the UV device and supports the
+ retrieval of protected keys from secrets stored within the
+ UV firmware layer. This service is only available within
+ a protected execution guest and thus this module will fail upon
+ modprobe if no protected execution environment is detected.
+
+	  Enable this option if you intend to run this kernel in a KVM
+	  guest with protected execution and you want to use UV retrievable
+	  secrets via the PKEY API.
+
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"
depends on S390
@@ -159,6 +180,7 @@ config CRYPTO_PAES_S390
depends on PKEY
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -166,6 +188,19 @@ config CRYPTO_PAES_S390
Select this option if you want to use the paes cipher
for example to use protected key encrypted devices.
+config CRYPTO_PHMAC_S390
+ tristate "PHMAC cipher algorithms"
+ depends on S390
+ depends on PKEY
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This is the s390 hardware accelerated implementation of the
+ protected key HMAC support for SHA224, SHA256, SHA384 and SHA512.
+
+	  Select this option if you want to use the phmac digests,
+	  for example to use dm-integrity with secure/protected keys.
+
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
@@ -179,23 +214,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
@@ -421,7 +439,7 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
@@ -526,13 +544,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
-config CRYPTO_DEV_CAVIUM_ZIP
- tristate "Cavium ZIP driver"
- depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- help
- Select this option if you want to enable compression/decompression
- acceleration on Cavium's ARM based SoCs
-
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
@@ -714,6 +725,18 @@ config CRYPTO_DEV_TEGRA
Select this to enable Tegra Security Engine which accelerates various
AES encryption/decryption and HASH algorithms.
+config CRYPTO_DEV_XILINX_TRNG
+ tristate "Support for Xilinx True Random Generator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+	  The Xilinx Versal SoC driver provides kernel-side support for the True
+	  Random Number Generator and for pseudo-random number generation in
+	  CTR_DRBG mode as defined in NIST SP800-90A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xilinx-trng.
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
@@ -829,6 +852,7 @@ config CRYPTO_DEV_CCREE
If unsure say Y.
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/loongson/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
@@ -851,5 +875,7 @@ config CRYPTO_DEV_SA2UL
source "drivers/crypto/aspeed/Kconfig"
source "drivers/crypto/starfive/Kconfig"
+source "drivers/crypto/inside-secure/eip93/Kconfig"
+source "drivers/crypto/ti/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..322ae8854e3e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
-obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
-obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -21,8 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
@@ -45,10 +40,13 @@ obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
-obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
+obj-y += loongson/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
+obj-y += cavium/
+obj-y += ti/
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 890664bd5f0f..58a76e2ba64e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -542,7 +542,7 @@ MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
static struct platform_driver sun4i_ss_driver = {
.probe = sun4i_ss_probe,
- .remove_new = sun4i_ss_remove,
+ .remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
.pm = &sun4i_ss_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..021614b65e39 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
- algt->stat_fb_leniv++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_leniv++;
+
return true;
}
if (areq->cryptlen == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
if (areq->cryptlen % 16) {
- algt->stat_fb_mod16++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_mod16++;
+
return true;
}
@@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->src;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
len -= todo;
@@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->dst;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_dstali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_dstlen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstlen++;
+
return true;
}
len -= todo;
@@ -95,14 +111,12 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt->stat_fb++;
-#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -117,21 +131,19 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
+static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
u32 common, sym;
- int flow, i;
+ int i;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -146,18 +158,12 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
op->keylen);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
-
- flow = rctx->flow;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
- chan = &ce->chanlist[flow];
-
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_cipher[algt->ce_algo_id];
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -193,15 +199,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
+ if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(chan->backup_iv, areq->src,
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -264,9 +269,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
goto theend_sgs;
}
- chan->timeout = areq->cryptlen;
- rctx->nr_sgs = nr_sgs;
- rctx->nr_sgd = nr_sgd;
+ rctx->nr_sgs = ns;
+ rctx->nr_sgd = nd;
return 0;
theend_sgs:
@@ -275,22 +279,26 @@ theend_sgs:
} else {
if (nr_sgs > 0)
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+ if (nr_sgd > 0)
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
if (areq->iv && ivsize > 0) {
- if (rctx->addr_iv)
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ if (!dma_mapping_error(ce->dev, rctx->addr_iv))
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
+
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -299,24 +307,17 @@ theend:
return err;
}
-static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
- void *async_req)
+static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
unsigned int ivsize, offset;
int nr_sgs = rctx->nr_sgs;
int nr_sgd = rctx->nr_sgd;
- int flow;
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
- cet = chan->tl;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->src == areq->dst) {
@@ -329,46 +330,47 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
if (areq->iv && ivsize > 0) {
if (cet->t_iv)
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- sun8i_ce_cipher_unprepare(engine, areq);
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
- int err = sun8i_ce_cipher_prepare(engine, areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+ err = sun8i_ce_cipher_prepare(req, chan->tl);
if (err)
return err;
- sun8i_ce_cipher_run(engine, areq);
+ err = sun8i_ce_run_task(ce, rctx->flow,
+ crypto_tfm_alg_name(req->base.tfm));
+
+ sun8i_ce_cipher_unprepare(req, chan->tl);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+
return 0;
}
@@ -434,17 +436,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- memcpy(algt->fbname,
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_skcipher_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_skcipher(op->fallback_tfm);
return err;
}
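
The cipher rework above folds the old prepare/run/unprepare crypto_engine
callbacks into one do_one_request() handler. A sketch of the resulting control
flow, with example_prepare()/example_run()/example_unprepare() standing in for
the driver-specific steps (hypothetical helpers):

static int example_do_one(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	int err;

	err = example_prepare(req);	/* map DMA buffers, fill the task */
	if (err)
		return err;

	err = example_run(req);		/* start the hardware and wait */

	example_unprepare(req);		/* always unmap, even on error */

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();

	return 0;
}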
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index e55e58e164db..c16bb6ce6ee3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -169,6 +169,12 @@ static const struct ce_variant ce_r40_variant = {
.trng = CE_ID_NOTSUPP,
};
+static void sun8i_ce_dump_task_descriptors(struct sun8i_ce_flow *chan)
+{
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ chan->tl, sizeof(struct ce_task), false);
+}
+
/*
* sun8i_ce_get_engine_number() get the next channel slot
* This is a simple round-robin way of getting the next channel
@@ -183,7 +189,6 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
- struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -210,11 +215,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
mutex_unlock(&ce->mlock);
wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
- msecs_to_jiffies(ce->chanlist[flow].timeout));
+ msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
- ce->chanlist[flow].timeout, flow);
+ dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
@@ -226,9 +230,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Sadly, the error bit is not per flow */
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -245,9 +248,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -261,9 +263,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -758,18 +759,6 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
- ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!ce->chanlist[i].bounce_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
- ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL);
- if (!ce->chanlist[i].backup_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
}
return 0;
error_engine:
@@ -832,13 +821,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
- pm_runtime_enable(ce->dev);
- return err;
-}
-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
- pm_runtime_disable(ce->dev);
+ err = devm_pm_runtime_enable(ce->dev);
+ if (err)
+ return err;
+
+ return 0;
}
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1029,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
"sun8i-ce-ns", ce);
if (err) {
dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
- goto error_irq;
+ goto error_pm;
}
err = sun8i_ce_register_algs(ce);
@@ -1064,7 +1052,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
@@ -1082,8 +1070,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return 0;
error_alg:
sun8i_ce_unregister_algs(ce);
-error_irq:
- sun8i_ce_pm_exit(ce);
error_pm:
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
@@ -1104,8 +1090,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
#endif
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
- sun8i_ce_pm_exit(ce);
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
@@ -1129,7 +1113,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
static struct platform_driver sun8i_ce_driver = {
.probe = sun8i_ce_probe,
- .remove_new = sun8i_ce_remove,
+ .remove = sun8i_ce_remove,
.driver = {
.name = "sun8i-ce",
.pm = &sun8i_ce_pm_ops,
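
Dropping sun8i_ce_pm_exit() and the error_irq label works because
devm_pm_runtime_enable() registers the matching pm_runtime_disable() as a
device-managed action. A minimal sketch of the pattern (example_pm_init() is
hypothetical):

#include <linux/pm_runtime.h>

static int example_pm_init(struct device *dev)
{
	int err = pm_runtime_set_suspended(dev);

	if (err)
		return err;

	/* The matching pm_runtime_disable() runs automatically on unbind. */
	return devm_pm_runtime_enable(dev);
}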
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -23,6 +23,18 @@
#include <linux/string.h>
#include "sun8i-ce.h"
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
+ algt->stat_fb++;
+ }
+}
+
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
@@ -46,17 +58,19 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
- crypto_ahash_reqsize(op->fallback_tfm));
+ crypto_ahash_reqsize(op->fallback_tfm) +
+ CRYPTO_DMA_PADDING);
- memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_ahash(op->fallback_tfm);
return err;
}
@@ -71,134 +85,112 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
int sun8i_ce_hash_init(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
int sun8i_ce_hash_final(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
int sun8i_ce_hash_update(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -213,22 +205,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
/* we need to reserve one SG for padding one */
if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
sg = areq->src;
while (sg) {
if (sg->length % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
sg = sg_next(sg);
@@ -239,29 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
- struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_dev *ce;
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ce_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ce_hash_digest_fb(areq);
- }
-
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
- ce = algt->ce;
-
e = sun8i_ce_get_engine_number(ce);
rctx->flow = e;
engine = ce->chanlist[e].engine;
@@ -327,66 +313,43 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
return j;
}
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
{
- struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
- int nr_sgs, flow, err;
+ int nr_sgs, err;
unsigned int len;
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
- dma_addr_t addr_res, addr_pad;
- int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.base.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.base.halg.digestsize;
+ bs = crypto_ahash_blocksize(tfm);
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
digestsize = SHA512_DIGEST_SIZE;
- /* the padding could be up to two block. */
- buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto theend;
- }
- bf = (__le32 *)buf;
+ bf = (__le32 *)rctx->pad;
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- err = -ENOMEM;
- goto theend;
- }
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
-
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_hash[algt->ce_algo_id];
common |= CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -394,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_out;
}
len = areq->nbytes;
@@ -411,15 +375,18 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
- addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
- cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
- cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
- if (dma_mapping_error(ce->dev, addr_res)) {
+
+ rctx->result_len = digestsize;
+ rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+ DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+ cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+ if (dma_mapping_error(ce->dev, rctx->addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,16 +408,18 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
- addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
- cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+ rctx->pad_len = j * 4;
+ rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+ DMA_TO_DEVICE);
+ cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
- if (dma_mapping_error(ce->dev, addr_pad)) {
+ if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -458,21 +427,60 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
else
cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
- chan->timeout = areq->nbytes;
+ return 0;
+
+err_unmap_result:
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
- err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
+err_out:
+ return err;
+}
- dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+ struct ce_task *cet)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+ struct ahash_request *areq = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+
+ err = sun8i_ce_hash_prepare(areq, chan->tl);
+ if (err)
+ return err;
+
+ err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+ sun8i_ce_hash_unprepare(areq, chan->tl);
+
+ if (!err)
+ memcpy(areq->result, rctx->result,
+ crypto_ahash_digestsize(tfm));
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
- kfree(result);
local_bh_disable();
- crypto_finalize_hash_request(engine, breq, err);
+ crypto_finalize_hash_request(engine, async_req, err);
local_bh_enable();
+
return 0;
}
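
Every fallback path above now goes through the ahash request helpers instead of
writing to the request fields directly, which also forwards the completion
callback. The canonical delegation looks like this (sketch; subreq is assumed to
be embedded in the driver's request context and sized via
crypto_ahash_set_reqsize()):

#include <crypto/hash.h>

static int example_digest_fallback(struct ahash_request *req,
				   struct crypto_ahash *fallback,
				   struct ahash_request *subreq)
{
	ahash_request_set_tfm(subreq, fallback);
	ahash_request_set_callback(subreq,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(subreq, req->src, req->result, req->nbytes);

	return crypto_ahash_digest(subreq);
}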
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 762459867b6c..d0a1ac66738b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = 2000;
err = sun8i_ce_run_task(ce, 3, "PRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e1e8bc15202e..244529bf0616 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = todo;
err = sun8i_ce_run_task(ce, 3, "TRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..71f5a0cd3d45 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -106,9 +106,13 @@
#define MAX_SG 8
#define CE_MAX_CLOCKS 4
+#define CE_DMA_TIMEOUT_MS 3000
#define MAXFLOW 4
+#define CE_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CE_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
/*
* struct ce_clock - Describe clocks used by sun8i-ce
* @name: Name of clock needed by this variant
@@ -187,8 +191,6 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -196,10 +198,7 @@ struct sun8i_ce_flow {
struct completion complete;
int status;
dma_addr_t t_phy;
- int timeout;
struct ce_task *tl;
- void *backup_iv;
- void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -260,21 +259,23 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
* @addr_key: The key addr returned by dma_map_single, need to unmap later
+ * @bounce_iv: Current IV buffer
+ * @backup_iv: Next IV buffer
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- unsigned int ivlen;
int nr_sgs;
int nr_sgd;
dma_addr_t addr_iv;
dma_addr_t addr_key;
+ u8 bounce_iv[AES_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
@@ -306,10 +307,24 @@ struct sun8i_ce_hash_tfm_ctx {
* struct sun8i_ce_hash_reqctx - context for an ahash request
* @fallback_req: pre-allocated fallback request
* @flow: the flow to use for this request
+ * @nr_sgs: number of entries in the source scatterlist
+ * @result_len: result length in bytes
+ * @pad_len: padding length in bytes
+ * @addr_res: DMA address of the result buffer, returned by dma_map_single()
+ * @addr_pad: DMA address of the padding buffer, returned by dma_map_single()
+ * @result: per-request result buffer
+ * @pad: per-request padding buffer (up to 2 blocks)
*/
struct sun8i_ce_hash_reqctx {
- struct ahash_request fallback_req;
int flow;
+ int nr_sgs;
+ size_t result_len;
+ size_t pad_len;
+ dma_addr_t addr_res;
+ dma_addr_t addr_pad;
+ u8 result[CE_MAX_HASH_DIGEST_SIZE] __aligned(CRYPTO_DMA_ALIGN);
+ u8 pad[2 * CE_MAX_HASH_BLOCK_SIZE];
+ struct ahash_request fallback_req; // keep at the end
};
/*
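
The "keep at the end" comments on fallback_req are load-bearing: the fallback
tfm's own request context is carved out of the same allocation directly behind
it, so the member must stay last. A sketch of the corresponding reqsize
computation, matching the init path above:

#include <crypto/internal/hash.h>

static void example_set_reqsize(struct crypto_ahash *tfm,
				struct crypto_ahash *fallback)
{
	/* Our context, then the fallback's context, then DMA padding. */
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(fallback) +
				 CRYPTO_DMA_PADDING);
}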
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
	/* we need to copy all IVs from source in case DMA is bidirectional */
while (sg && len) {
- if (sg_dma_len(sg) == 0) {
+ if (sg->length == 0) {
sg = sg_next(sg);
continue;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 0dbc0220146c..f45685707e0d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -929,7 +929,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
static struct platform_driver sun8i_ss_driver = {
.probe = sun8i_ss_probe,
- .remove_new = sun8i_ss_remove,
+ .remove = sun8i_ss_remove,
.driver = {
.name = "sun8i-ss",
.pm = &sun8i_ss_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 753f67a36dc5..8bc08089f044 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -227,12 +234,11 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index e0af611a95d8..38e8a61e9166 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -12,9 +12,6 @@
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <linux/hash.h>
-#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
@@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[AES_IV_SIZE];
+ __le32 iv[AES_IV_SIZE / 4];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
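The two IV fixes above correct an element-count/byte-count mix-up: the arrays hold 32-bit __le32 words, so dimensioning them by the byte size over-allocated the stack buffer fourfold. In sketch form (AES_IV_SIZE is 16 bytes):

	__le32 iv[AES_IV_SIZE];     /* 16 words == 64 bytes: 4x too large */
	__le32 iv[AES_IV_SIZE / 4]; /*  4 words == 16 bytes: one AES IV   */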
@@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- __le32 iv[16];
+ __le32 iv[4];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
@@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, true);
}
-
-/*
- * HASH SHA1 Functions
- */
-static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
- unsigned int sa_len,
- unsigned char ha,
- unsigned char hm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_hash160 *sa;
- int rc;
-
- my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
- alg.u.hash);
- ctx->dev = my_alg->dev;
-
- /* Create SA */
- if (ctx->sa_in || ctx->sa_out)
- crypto4xx_free_sa(ctx);
-
- rc = crypto4xx_alloc_sa(ctx, sa_len);
- if (rc)
- return rc;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
- set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
- SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
- SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
- SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
- SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
- CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
- SA_SEQ_MASK_OFF, SA_MC_ENABLE,
- SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
- SA_NOT_COPY_HDR);
- /* Need to zero hash digest in SA */
- memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
- memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
-
- return 0;
-}
-
-int crypto4xx_hash_init(struct ahash_request *req)
-{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ds;
- struct dynamic_sa_ctl *sa;
-
- sa = ctx->sa_in;
- ds = crypto_ahash_digestsize(
- __crypto_ahash_cast(req->base.tfm));
- sa->sa_command_0.bf.digest_len = ds >> 2;
- sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
-
- return 0;
-}
-
-int crypto4xx_hash_update(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-int crypto4xx_hash_final(struct ahash_request *req)
-{
- return 0;
-}
-
-int crypto4xx_hash_digest(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-/*
- * SHA1 Algorithm
- */
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
-{
- return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
- SA_HASH_MODE_HASH);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6006703fb6d7..8cdc66d520c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
}
}
-static void crypto4xx_copy_digest_to_dst(void *dst,
- struct pd_uinfo *pd_uinfo,
- struct crypto4xx_ctx *ctx)
-{
- struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy(dst, pd_uinfo->sr_va->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
- }
-}
-
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
@@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
skcipher_request_complete(req, 0);
}
-static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
-{
- struct crypto4xx_ctx *ctx;
- struct ahash_request *ahash_req;
-
- ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
-
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
- crypto4xx_ret_sg_desc(dev, pd_uinfo);
-
- if (pd_uinfo->state & PD_ENTRY_BUSY)
- ahash_request_complete(ahash_req, -EINPROGRESS);
- ahash_request_complete(ahash_req, 0);
-}
-
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto4xx_ahash_done(dev, pd_uinfo);
- break;
}
}
@@ -653,9 +621,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- iounmap(core_dev->dev->ce_base);
- kfree(core_dev->dev);
- kfree(core_dev);
}
static u32 get_next_gd(u32 current)
@@ -679,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
@@ -915,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
- (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
@@ -1022,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_aead(&alg->alg.u.aead);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- rc = crypto_register_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_RNG:
rc = crypto_register_rng(&alg->alg.u.rng);
break;
@@ -1051,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
switch (alg->alg.type) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_unregister_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&alg->alg.u.aead);
break;
@@ -1333,17 +1289,12 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
- struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
struct device_node *np;
u32 pvr;
bool is_revb = true;
- rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
- if (rc)
- return -ENODEV;
-
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
@@ -1374,16 +1325,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
of_node_put(np);
- core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+ core_dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
- core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
- rc = -ENOMEM;
+ core_dev->dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_device), GFP_KERNEL);
if (!core_dev->dev)
- goto err_alloc_dev;
+ return -ENOMEM;
/*
* Older versions of the 460EX/GT have a hardware bug.
@@ -1402,7 +1354,9 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
- mutex_init(&core_dev->rng_lock);
+ rc = devm_mutex_init(&ofdev->dev, &core_dev->rng_lock);
+ if (rc)
+ return rc;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1421,21 +1375,21 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
- if (!core_dev->dev->ce_base) {
- dev_err(dev, "failed to of_iomap\n");
- rc = -ENOMEM;
- goto err_iomap;
+ core_dev->dev->ce_base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(core_dev->dev->ce_base)) {
+ dev_err(&ofdev->dev, "failed to ioremap resource\n");
+ rc = PTR_ERR(core_dev->dev->ce_base);
+ goto err_build_sdr;
}
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, is_revb ?
- crypto4xx_ce_interrupt_handler_revb :
- crypto4xx_ce_interrupt_handler, 0,
- KBUILD_MODNAME, dev);
+ rc = devm_request_irq(&ofdev->dev, core_dev->irq,
+ is_revb ? crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler,
+ 0, KBUILD_MODNAME, dev);
if (rc)
- goto err_request_irq;
+ goto err_iomap;
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1444,26 +1398,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
ARRAY_SIZE(crypto4xx_alg));
if (rc)
- goto err_start_dev;
+ goto err_iomap;
ppc4xx_trng_probe(core_dev);
return 0;
-err_start_dev:
- free_irq(core_dev->irq, dev);
-err_request_irq:
- irq_dispose_mapping(core_dev->irq);
- iounmap(core_dev->dev->ce_base);
err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_pdr(core_dev->dev);
- kfree(core_dev->dev);
-err_alloc_dev:
- kfree(core_dev);
-
return rc;
}
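The probe() rework above is a routine devm conversion: every manually released resource becomes a device-managed one, so the matching error-path labels and remove()-time teardown disappear. A condensed sketch of the resulting shape (example_* names are illustrative, not this driver):

	static int example_probe(struct platform_device *pdev)
	{
		struct example_dev *ed;
		int irq, rc;

		ed = devm_kzalloc(&pdev->dev, sizeof(*ed), GFP_KERNEL);
		if (!ed)
			return -ENOMEM;

		ed->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(ed->base))
			return PTR_ERR(ed->base);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		rc = devm_request_irq(&pdev->dev, irq, example_isr, 0,
				      KBUILD_MODNAME, ed);
		if (rc)
			return rc;

		/* No kfree()/iounmap()/free_irq() on any path: devres
		 * releases everything in reverse order at unbind. */
		return 0;
	}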
@@ -1474,13 +1419,9 @@ static void crypto4xx_remove(struct platform_device *ofdev)
ppc4xx_trng_remove(core_dev);
- free_irq(core_dev->irq, dev);
- irq_dispose_mapping(core_dev->irq);
-
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
- mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
}
@@ -1497,7 +1438,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove_new = crypto4xx_remove,
+ .remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
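The .remove_new -> .remove renames repeated throughout this series are mechanical: .remove_new was the transitional member name while platform-driver remove callbacks migrated from returning int to returning void; with the migration finished, the void-returning callback binds to plain .remove again. Sketch:

	static void example_remove(struct platform_device *pdev)
	{
		/* teardown only; there is no return value to get wrong */
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,	/* formerly .remove_new */
	};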
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 3adcc5e65694..ee36630c670f 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -16,7 +16,6 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
@@ -135,7 +134,6 @@ struct crypto4xx_alg_common {
u32 type;
union {
struct skcipher_alg cipher;
- struct ahash_alg hash;
struct aead_alg aead;
struct rng_alg rng;
} u;
@@ -147,6 +145,12 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000
+#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7)))
+#else
+#define BUILD_PD_ACCESS
+#endif
+
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
@@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen,
- struct scatterlist *dst_tmp);
+ struct scatterlist *dst_tmp) BUILD_PD_ACCESS;
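BUILD_PD_ACCESS annotates crypto4xx_build_pd() for GCC 12+: parameter 6 (iv) is only read, and parameter 7 (iv_len) bounds it, preserving bounds diagnostics now that iv has been relaxed to a plain const void *. A standalone illustration of the attribute (hypothetical function, same attribute form):

	/* GCC reads this as "buf points to at least len readable bytes". */
	__attribute__((access(read_only, 1, 2)))
	void consume(const void *buf, unsigned int len);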
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-int crypto4xx_hash_digest(struct ahash_request *req);
-int crypto4xx_hash_final(struct ahash_request *req);
-int crypto4xx_hash_update(struct ahash_request *req);
-int crypto4xx_hash_init(struct ahash_request *req);
/*
* Note: Only use this function to copy items that is word aligned.
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index f54ab0d0b1e8..1c18a5b8470e 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -240,11 +240,9 @@ static int meson_crypto_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
mc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mc->base)) {
- err = PTR_ERR(mc->base);
- dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
- return err;
- }
+ if (IS_ERR(mc->base))
+ return PTR_ERR(mc->base);
+
mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
if (IS_ERR(mc->busclk)) {
err = PTR_ERR(mc->busclk);
@@ -322,7 +320,7 @@ MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
- .remove_new = meson_crypto_remove,
+ .remove = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index b4613bd4ad96..8d1c79aaca07 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -601,8 +601,6 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
- .sign = aspeed_acry_rsa_dec,
- .verify = aspeed_acry_rsa_enc,
.set_pub_key = aspeed_acry_rsa_set_pub_key,
.set_priv_key = aspeed_acry_rsa_set_priv_key,
.max_size = aspeed_acry_rsa_max_size,
@@ -808,7 +806,7 @@ MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
static struct platform_driver aspeed_acry_driver = {
.probe = aspeed_acry_probe,
- .remove_new = aspeed_acry_remove,
+ .remove = aspeed_acry_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_acry_of_matches,
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index a72dfebc53ff..fa201dae1f81 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -346,7 +346,7 @@ free_req:
} else {
dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
DMA_TO_DEVICE);
}
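This one-liner fixes a map/unmap direction mismatch: req->dst is mapped for the device to write into, so the unmap must use the same direction, or the CPU can see stale data on non-coherent systems. The invariant, in sketch form:

	int nents = dma_map_sg(dev, dst_sg, dst_nents, DMA_FROM_DEVICE);

	if (!nents)
		return -EIO;
	/* ... hardware writes the result ... */
	dma_unmap_sg(dev, dst_sg, dst_nents, DMA_FROM_DEVICE); /* must match */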
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0b6e49c06eff..f8f37c9d5f3c 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -5,7 +5,6 @@
#include "aspeed-hace.h"
#include <crypto/engine.h>
-#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
@@ -14,6 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
@@ -59,6 +59,46 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
+static int aspeed_sham_init(struct ahash_request *req);
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+
+ memcpy(out, rctx->digest, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ put_unaligned(rctx->digcnt[0], p.u64++);
+ if (rctx->ivsize == 64)
+ put_unaligned(rctx->digcnt[1], p.u64);
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int err;
+
+ err = aspeed_sham_init(req);
+ if (err)
+ return err;
+
+ memcpy(rctx->digest, in, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ rctx->digcnt[0] = get_unaligned(p.u64++);
+ if (rctx->ivsize == 64)
+ rctx->digcnt[1] = get_unaligned(p.u64);
+ return 0;
+}
+
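The new export/import pair serializes exactly what the partial-block API needs: the running digest followed by the 64-bit byte counters, accessed with put_unaligned()/get_unaligned() because the caller's state buffer carries no alignment guarantee. The resulting layout, under the patch's own convention (ivsize is 32 for the SHA-1/224/256 family, 64 for SHA-384/512):

	/* Exported state layout (sketch):
	 *   bytes [0, ivsize)              running digest / internal state
	 *   bytes [ivsize, ivsize + 8)     digcnt[0], native-endian, unaligned
	 *   bytes [ivsize + 8, ivsize + 16) digcnt[1], SHA-384/512 only
	 */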
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -74,10 +114,10 @@ static const __be64 sha512_iv[8] = {
* - if message length < 112 bytes then padlen = 112 - message length
* - else padlen = 128 + 112 - message length
*/
-static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
- struct aspeed_sham_reqctx *rctx)
+static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int index, padlen, bitslen;
__be64 bits[2];
AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
@@ -87,25 +127,32 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
case SHA_FLAGS_SHA224:
case SHA_FLAGS_SHA256:
bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
- index = rctx->bufcnt & 0x3f;
+ index = rctx->digcnt[0] & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
- rctx->bufcnt += padlen + 8;
+ bitslen = 8;
break;
default:
bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
rctx->digcnt[0] >> 61);
- index = rctx->bufcnt & 0x7f;
+ index = rctx->digcnt[0] & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
- rctx->bufcnt += padlen + 16;
+ bitslen = 16;
break;
}
+ buf[0] = 0x80;
+ memset(buf + 1, 0, padlen - 1);
+ memcpy(buf + padlen, bits, bitslen);
+ return padlen + bitslen;
+}
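As a worked instance of the rule in the comment block above: hashing 70 bytes with SHA-256 gives index = 70 & 0x3f = 6, so padlen = 56 - 6 = 50, and 50 pad bytes plus 8 length bytes bring the message to 128 bytes, an exact multiple of the 64-byte block. The same arithmetic as a sketch:

	/* SHA-256 padding length for a 70-byte message. */
	unsigned int msglen = 70;
	unsigned int index  = msglen & 0x3f;		/* 6 */
	unsigned int padlen = (index < 56) ?
			      56 - index :		/* 50: 0x80 + 49 zeros */
			      (64 + 56) - index;
	unsigned int total  = padlen + 8;		/* 58; 70 + 58 == 128 */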
+
+static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
+ unsigned int len)
+{
+ rctx->offset += len;
+ rctx->digcnt[0] += len;
+ if (rctx->digcnt[0] < len)
+ rctx->digcnt[1]++;
}
/*
@@ -117,31 +164,31 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int length, remain;
+ unsigned int length, remain;
+ bool final = false;
- length = rctx->total + rctx->bufcnt;
- remain = length % rctx->block_size;
+ length = rctx->total - rctx->offset;
+ remain = length - round_down(length, rctx->block_size);
AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
- if (rctx->bufcnt)
- memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
-
- if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
- rctx->bufcnt, rctx->src_sg,
- rctx->offset, rctx->total - remain, 0);
- rctx->offset += rctx->total - remain;
-
- } else {
- dev_warn(hace_dev->dev, "Hash data length is too large\n");
- return -EINVAL;
- }
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
- rctx->offset, remain, 0);
+ if (length > ASPEED_HASH_SRC_DMA_BUF_LEN) {
+ length = ASPEED_HASH_SRC_DMA_BUF_LEN;
+ } else if (rctx->flags & SHA_FLAGS_FINUP) {
+ if (round_up(length, rctx->block_size) + rctx->block_size >
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
+ length = round_down(length - 1, rctx->block_size);
+ else
+ final = true;
+ } else {
+ length -= remain;
+ }
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
+ rctx->offset, length, 0);
+ aspeed_ahash_update_counter(rctx, length);
+ if (final)
+ length += aspeed_ahash_fill_padding(
+ hace_dev, rctx, hash_engine->ahash_src_addr + length);
- rctx->bufcnt = remain;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
DMA_BIDIRECTIONAL);
@@ -150,7 +197,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
return -ENOMEM;
}
- hash_engine->src_length = length - remain;
+ hash_engine->src_length = length;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
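The rewritten dma_prepare() chooses how many bytes to feed the engine per pass: cap at the DMA buffer size, keep only whole blocks on a plain update, and on FINUP either finish now if the padding fits or hold back at least one byte so a final pass remains. The decision, condensed into a self-contained sketch (names illustrative, not the literal driver code):

	/* Sketch: choose the per-pass DMA length. */
	static unsigned int pick_len(unsigned int total, unsigned int offset,
				     unsigned int bs, unsigned int bufsz,
				     bool finup, bool *final)
	{
		unsigned int len = total - offset;

		*final = false;
		if (len > bufsz)
			len = bufsz;			/* more passes follow */
		else if (finup) {
			if (round_up(len, bs) + bs > bufsz)
				len = round_down(len - 1, bs); /* hold >=1 byte back */
			else
				*final = true;		/* padding fits: finish now */
		} else {
			len -= len % bs;		/* whole blocks only */
		}
		return len;
	}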
@@ -166,16 +213,20 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ bool final = rctx->flags & SHA_FLAGS_FINUP;
+ int remain, sg_len, i, max_sg_nents;
+ unsigned int length, offset, total;
struct aspeed_sg_list *src_list;
struct scatterlist *s;
- int length, remain, sg_len, i;
int rc = 0;
- remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
- length = rctx->total + rctx->bufcnt - remain;
+ offset = rctx->offset;
+ length = rctx->total - offset;
+ remain = final ? 0 : length - round_down(length, rctx->block_size);
+ length -= remain;
- AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
- "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total,
"length", length, "remain", remain);
sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
@@ -186,6 +237,8 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto end;
}
+ max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
+ sg_len = min(sg_len, max_sg_nents);
src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
@@ -196,68 +249,66 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto free_src_sg;
}
- if (rctx->bufcnt != 0) {
- u32 phy_addr;
- u32 len;
+ total = 0;
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
+ if (len <= offset) {
+ offset -= len;
+ continue;
}
- phy_addr = rctx->buffer_dma_addr;
- len = rctx->bufcnt;
- length -= len;
+ len -= offset;
+ phy_addr += offset;
+ offset = 0;
- /* Last sg list */
- if (length == 0)
- len |= HASH_SG_LAST_LIST;
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ length = 0;
+ }
- src_list[0].phy_addr = cpu_to_le32(phy_addr);
- src_list[0].len = cpu_to_le32(len);
- src_list++;
+ total += len;
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
}
if (length != 0) {
- for_each_sg(rctx->src_sg, s, sg_len, i) {
- u32 phy_addr = sg_dma_address(s);
- u32 len = sg_dma_len(s);
-
- if (length > len)
- length -= len;
- else {
- /* Last sg list */
- len = length;
- len |= HASH_SG_LAST_LIST;
- length = 0;
- }
+ total = round_down(total, rctx->block_size);
+ final = false;
+ }
+
+ aspeed_ahash_update_counter(rctx, total);
+ if (final) {
+ int len = aspeed_ahash_fill_padding(hace_dev, rctx,
+ rctx->buffer);
- src_list[i].phy_addr = cpu_to_le32(phy_addr);
- src_list[i].len = cpu_to_le32(len);
+ total += len;
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ sizeof(rctx->buffer),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
}
- }
- if (length != 0) {
- rc = -EINVAL;
- goto free_rctx_buffer;
+ src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
+ src_list[i].len = cpu_to_le32(len);
+ i++;
}
+ src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
- rctx->offset = rctx->total - remain;
- hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_length = total;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
return 0;
-free_rctx_buffer:
- if (rctx->bufcnt != 0)
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
free_rctx_digest:
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
@@ -272,24 +323,6 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
-
- AHASH_DBG(hace_dev, "\n");
-
- hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
-
- crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
-
- return 0;
-}
-
-/*
- * Copy digest to the corresponding request result.
- * This function will be called at final() stage.
- */
-static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
AHASH_DBG(hace_dev, "\n");
@@ -297,12 +330,19 @@ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
+ if (rctx->total - rctx->offset >= rctx->block_size ||
+ (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+ return aspeed_ahash_req_update(hace_dev);
- memcpy(req->result, rctx->digest, rctx->digsize);
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
- return aspeed_ahash_complete(hace_dev);
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
+ rctx->total - rctx->offset);
+
+ return 0;
}
/*
@@ -338,118 +378,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
return -EINPROGRESS;
}
-/*
- * HMAC resume aims to do the second pass produces
- * the final HMAC code derived from the inner hash
- * result and the outer key.
- */
-static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
-
- /* o key pad + hash sum 1 */
- memcpy(rctx->buffer, bctx->opad, rctx->block_size);
- memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
-
- rctx->bufcnt = rctx->block_size + rctx->digsize;
- rctx->digcnt[0] = rctx->block_size + rctx->digsize;
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
- memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
-static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
- rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- if (rctx->flags & SHA_FLAGS_HMAC)
- return aspeed_hace_ahash_trigger(hace_dev,
- aspeed_ahash_hmac_resume);
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -461,40 +389,12 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
DMA_TO_DEVICE);
- if (rctx->bufcnt != 0)
+ if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
+ sizeof(rctx->buffer), DMA_TO_DEVICE);
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
- rctx->total - rctx->offset, 0);
-
- rctx->bufcnt = rctx->total - rctx->offset;
rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
- return aspeed_ahash_complete(hace_dev);
-}
-
-static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
return aspeed_ahash_complete(hace_dev);
}
@@ -513,7 +413,7 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
resume = aspeed_ahash_update_resume_sg;
} else {
- resume = aspeed_ahash_update_resume;
+ resume = aspeed_ahash_complete;
}
ret = hash_engine->dma_prepare(hace_dev);
@@ -530,26 +430,47 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
hace_dev->crypt_engine_hash, req);
}
+static noinline int aspeed_ahash_fallback(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ HASH_FBREQ_ON_STACK(fbreq, req);
+ u8 *state = rctx->buffer;
+ struct scatterlist sg[2];
+ struct scatterlist *ssg;
+ int ret;
+
+ ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
+ ahash_request_set_crypt(fbreq, ssg, req->result,
+ rctx->total - rctx->offset);
+
+ ret = aspeed_sham_export(req, state) ?:
+ crypto_ahash_import_core(fbreq, state);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ ret = ret ?: crypto_ahash_finup(fbreq);
+ else
+ ret = ret ?: crypto_ahash_update(fbreq) ?:
+ crypto_ahash_export_core(fbreq, state) ?:
+ aspeed_sham_import(req, state);
+ HASH_REQUEST_ZERO(fbreq);
+ return ret;
+}
+
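aspeed_ahash_fallback() leans on the new block-only helpers: export the hardware state, import it into an on-stack software request, run the remaining bytes there, and for plain updates pull the software state back into the driver. The helper pairing, as a sketch (resume_in_software is illustrative):

	static int resume_in_software(struct ahash_request *req,
				      unsigned int done, const u8 *state)
	{
		struct scatterlist sg[2], *ssg;
		HASH_FBREQ_ON_STACK(fbreq, req);
		int err;

		ssg = scatterwalk_ffwd(sg, req->src, done);
		ahash_request_set_crypt(fbreq, ssg, req->result,
					req->nbytes - done);
		err = crypto_ahash_import_core(fbreq, state) ?:
		      crypto_ahash_finup(fbreq);
		HASH_REQUEST_ZERO(fbreq);	/* wipe the on-stack request */
		return err;
	}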
static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
struct aspeed_engine_hash *hash_engine;
- int ret = 0;
+ int ret;
hash_engine = &hace_dev->hash_engine;
hash_engine->flags |= CRYPTO_FLAGS_BUSY;
- if (rctx->op == SHA_OP_UPDATE)
- ret = aspeed_ahash_req_update(hace_dev);
- else if (rctx->op == SHA_OP_FINAL)
- ret = aspeed_ahash_req_final(hace_dev);
-
+ ret = aspeed_ahash_req_update(hace_dev);
if (ret != -EINPROGRESS)
- return ret;
+ return aspeed_ahash_fallback(req);
return 0;
}
@@ -590,45 +511,7 @@ static int aspeed_sham_update(struct ahash_request *req)
rctx->total = req->nbytes;
rctx->src_sg = req->src;
rctx->offset = 0;
- rctx->src_nents = sg_nents(req->src);
- rctx->op = SHA_OP_UPDATE;
-
- rctx->digcnt[0] += rctx->total;
- if (rctx->digcnt[0] < rctx->total)
- rctx->digcnt[1]++;
-
- if (rctx->bufcnt + rctx->total < rctx->block_size) {
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
- rctx->src_sg, rctx->offset,
- rctx->total, 0);
- rctx->bufcnt += rctx->total;
-
- return 0;
- }
-
- return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
-static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-static int aspeed_sham_final(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
- req->nbytes, rctx->total);
- rctx->op = SHA_OP_FINAL;
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
return aspeed_hace_hash_handle_queue(hace_dev, req);
}
@@ -639,23 +522,12 @@ static int aspeed_sham_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- int rc1, rc2;
AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
rctx->flags |= SHA_FLAGS_FINUP;
- rc1 = aspeed_sham_update(req);
- if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
- return rc1;
-
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- rc2 = aspeed_sham_final(req);
-
- return rc1 ? : rc2;
+ return aspeed_sham_update(req);
}
static int aspeed_sham_init(struct ahash_request *req)
@@ -664,7 +536,6 @@ static int aspeed_sham_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
AHASH_DBG(hace_dev, "%s: digest size:%d\n",
crypto_tfm_alg_name(&tfm->base),
@@ -679,7 +550,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA1;
rctx->digsize = SHA1_DIGEST_SIZE;
rctx->block_size = SHA1_BLOCK_SIZE;
- rctx->sha_iv = sha1_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha1_iv, rctx->ivsize);
break;
@@ -688,7 +558,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA224;
rctx->digsize = SHA224_DIGEST_SIZE;
rctx->block_size = SHA224_BLOCK_SIZE;
- rctx->sha_iv = sha224_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha224_iv, rctx->ivsize);
break;
@@ -697,7 +566,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA256;
rctx->digsize = SHA256_DIGEST_SIZE;
rctx->block_size = SHA256_BLOCK_SIZE;
- rctx->sha_iv = sha256_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha256_iv, rctx->ivsize);
break;
@@ -707,7 +575,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA384;
rctx->digsize = SHA384_DIGEST_SIZE;
rctx->block_size = SHA384_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha384_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha384_iv, rctx->ivsize);
break;
@@ -717,7 +584,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA512;
rctx->digsize = SHA512_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha512_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_iv, rctx->ivsize);
break;
@@ -727,19 +593,10 @@ static int aspeed_sham_init(struct ahash_request *req)
return -EINVAL;
}
- rctx->bufcnt = 0;
rctx->total = 0;
rctx->digcnt[0] = 0;
rctx->digcnt[1] = 0;
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
return 0;
}
@@ -748,102 +605,14 @@ static int aspeed_sham_digest(struct ahash_request *req)
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
}
-static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int ds = crypto_shash_digestsize(bctx->shash);
- int bs = crypto_shash_blocksize(bctx->shash);
- int err = 0;
- int i;
-
- AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
- keylen);
-
- if (keylen > bs) {
- err = aspeed_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
- if (err)
- return err;
- keylen = ds;
-
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return err;
-}
-
-static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
-{
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
- tctx->flags = 0;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct aspeed_sham_reqctx));
-
- if (ast_alg->alg_base) {
- /* hmac related */
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- dev_warn(ast_alg->hace_dev->dev,
- "base driver '%s' could not be loaded.\n",
- ast_alg->alg_base);
- return PTR_ERR(bctx->shash);
- }
- }
-
- return 0;
-}
-
-static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
-{
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static int aspeed_sham_export(struct ahash_request *req, void *out)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(out, rctx, sizeof(*rctx));
-
- return 0;
-}
-
-static int aspeed_sham_import(struct ahash_request *req, const void *in)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(rctx, in, sizeof(*rctx));
return 0;
}
@@ -853,11 +622,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -867,13 +636,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -885,11 +654,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -899,13 +668,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -917,11 +686,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -931,118 +700,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha1",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "aspeed-hmac-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha224",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "aspeed-hmac-sha224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha256",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "aspeed-hmac-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1057,11 +721,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1071,13 +735,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1089,11 +753,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1103,83 +767,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha384",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "aspeed-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha512",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "aspeed-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 062f2a66dd23..3fe644bfe037 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -266,7 +266,7 @@ MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
- .remove_new = aspeed_hace_remove,
+ .remove = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 68f70e01fccb..b1d07730d543 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -119,7 +119,6 @@
#define SHA_FLAGS_SHA512 BIT(4)
#define SHA_FLAGS_SHA512_224 BIT(5)
#define SHA_FLAGS_SHA512_256 BIT(6)
-#define SHA_FLAGS_HMAC BIT(8)
#define SHA_FLAGS_FINUP BIT(9)
#define SHA_FLAGS_MASK (0xff)
@@ -161,22 +160,18 @@ struct aspeed_engine_hash {
aspeed_hace_fn_t dma_prepare;
};
-struct aspeed_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE];
- u8 opad[SHA512_BLOCK_SIZE];
-};
-
struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
- unsigned long flags; /* hmac flag */
-
- struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
+ /* DMA buffer written by hardware */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+
+ /* Software state sorted by size. */
+ u64 digcnt[2];
+
unsigned long flags; /* algorithm and FINUP flags */
- unsigned long op; /* final or update */
u32 cmd; /* trigger cmd */
/* walk state */
@@ -188,17 +183,12 @@ struct aspeed_sham_reqctx {
size_t digsize;
size_t block_size;
size_t ivsize;
- const __be32 *sha_iv;
- /* remain data buffer */
- u8 buffer[SHA512_BLOCK_SIZE * 2];
dma_addr_t buffer_dma_addr;
- size_t bufcnt; /* buffer counter */
-
- /* output buffer */
- u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
dma_addr_t digest_dma_addr;
- u64 digcnt[2];
+
+ /* This is DMA too but read-only for hardware. */
+ u8 buffer[SHA512_BLOCK_SIZE + 16];
};
struct aspeed_engine_crypto {
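The reqctx reshuffle groups fields by their DMA role: the 64-byte-aligned digest the engine writes, then CPU-only bookkeeping, then the padding buffer the engine only reads; buffer also shrinks, since it now holds at most one block of padding plus the 128-bit length. Roughly:

	/* DMA contract encoded by the new field order (sketch):
	 *   digest[]	device-writable; __aligned(64) keeps it off cache
	 *		lines shared with the CPU-only fields below
	 *   digcnt[]	CPU-only state, never mapped
	 *   buffer[]	device-readable padding; SHA512_BLOCK_SIZE + 16
	 *		covers one pad block plus the 128-bit length
	 */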
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 0dd90785db9a..3a2684208dda 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = {
.base.cra_driver_name = "atmel-xts-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
- alg->cra_flags |= CRYPTO_ALG_ASYNC;
+ alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_alignmask = 0xf;
alg->cra_priority = ATMEL_AES_PRIORITY;
alg->cra_module = THIS_MODULE;
@@ -2296,6 +2297,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x500:
@@ -2453,7 +2455,7 @@ static void atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove_new = atmel_aes_remove,
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = atmel_aes_dt_ids,
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 590ea984c622..0d48e64d28b1 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -379,7 +379,7 @@ MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
#endif
static const struct i2c_device_id atmel_ecc_id[] = {
- { "atecc508a", 0 },
+ { "atecc508a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8cc57df25778..3d7573c7bd1c 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
static void atmel_sha_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_cra_init;
@@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
@@ -2532,6 +2534,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x510:
@@ -2691,7 +2694,7 @@ static void atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove_new = atmel_sha_remove,
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = atmel_sha_dt_ids,
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a02d496f4c41..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
@@ -202,8 +208,8 @@ static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
static const struct i2c_device_id atmel_sha204a_id[] = {
- { "atsha204", 0 },
- { "atsha204a", 0 },
+ { "atsha204" },
+ { "atsha204a" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index dcc2380a5889..3b2a92029b16 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
}
return err;
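The unmap direction must match the direction passed to the corresponding dma_map_sg() call; the output scatterlist carries device-to-memory data, hence DMA_FROM_DEVICE. A minimal sketch with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_unmap(struct device *dev, struct scatterlist *in_sg,
                          struct scatterlist *out_sg)
{
        /* each direction mirrors the dma_map_sg() call that mapped it */
        dma_unmap_sg(dev, in_sg, 1, DMA_TO_DEVICE);
        dma_unmap_sg(dev, out_sg, 1, DMA_FROM_DEVICE);
}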
@@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
@@ -872,7 +872,7 @@ static void atmel_tdes_done_task(unsigned long data)
if (!err)
err = atmel_tdes_crypt_start(dd);
if (!err)
- return; /* DMA started. Not fininishing. */
+ return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@@ -1074,7 +1074,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove_new = atmel_tdes_remove,
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = atmel_tdes_dt_ids,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75440ea6206e..75ee065da1ec 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2067,12 +2067,12 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
if (ac->pending_count)
mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
else
- del_timer(&ac->timer);
+ timer_delete(&ac->timer);
}
static void artpec6_crypto_timeout(struct timer_list *t)
{
- struct artpec6_crypto *ac = from_timer(ac, t, timer);
+ struct artpec6_crypto *ac = timer_container_of(ac, t, timer);
dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
@@ -2897,13 +2897,13 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
tasklet_init(&ac->task, artpec6_crypto_task,
(unsigned long)ac);
- ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ ac->pad_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
GFP_KERNEL);
if (!ac->pad_buffer)
return -ENOMEM;
ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
- ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ ac->zero_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
GFP_KERNEL);
if (!ac->zero_buffer)
return -ENOMEM;
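devm_kcalloc(dev, 2, size, ...) is preferred over devm_kzalloc(dev, 2 * size, ...) because the element-count multiplication is overflow-checked. A hypothetical sketch (EXAMPLE_ALIGN stands in for the driver's real constant):

#include <linux/device.h>
#include <linux/slab.h>

#define EXAMPLE_ALIGN 128

static void *example_alloc_pad(struct device *dev)
{
        /* 2 * EXAMPLE_ALIGN, but with the multiply checked for overflow */
        return devm_kcalloc(dev, 2, EXAMPLE_ALIGN, GFP_KERNEL);
}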
@@ -2963,7 +2963,7 @@ static void artpec6_crypto_remove(struct platform_device *pdev)
tasklet_disable(&ac->task);
devm_free_irq(&pdev->dev, irq, ac);
tasklet_kill(&ac->task);
- del_timer_sync(&ac->timer);
+ timer_delete_sync(&ac->timer);
artpec6_crypto_disable_hw(ac);
@@ -2975,7 +2975,7 @@ static void artpec6_crypto_remove(struct platform_device *pdev)
static struct platform_driver artpec6_crypto_driver = {
.probe = artpec6_crypto_probe,
- .remove_new = artpec6_crypto_remove,
+ .remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
.of_match_table = artpec6_crypto_of_match,
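These are the renamed timer helpers: timer_delete()/timer_delete_sync() replace del_timer()/del_timer_sync(), and timer_container_of() replaces from_timer(). A minimal sketch, names hypothetical:

#include <linux/timer.h>

struct example {
        struct timer_list timer;
};

static void example_timeout(struct timer_list *t)
{
        /* timer_container_of() is the renamed from_timer() */
        struct example *e = timer_container_of(e, t, timer);

        (void)e;
}

static void example_teardown(struct example *e)
{
        /* timer_delete_sync() is the renamed del_timer_sync() */
        timer_delete_sync(&e->timer);
}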
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1a3ecd44cbaf..6b80d033648e 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -15,6 +15,7 @@
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -140,8 +141,8 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_ctx_s *ctx = rctx->ctx;
u32 datalen; /* Number of bytes of response data expected */
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -204,8 +205,8 @@ spu_skcipher_tx_sg_create(struct brcm_message *mssg,
u32 datalen; /* Number of bytes of response data expected */
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (unlikely(!mssg->spu.src))
return -ENOMEM;
@@ -531,8 +532,8 @@ spu_ahash_rx_sg_create(struct brcm_message *mssg,
struct scatterlist *sg; /* used to build sgs in mbox message */
struct iproc_ctx_s *ctx = rctx->ctx;
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -586,8 +587,8 @@ spu_ahash_tx_sg_create(struct brcm_message *mssg,
u32 datalen; /* Number of bytes of response data expected */
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.src)
return -ENOMEM;
@@ -1076,8 +1077,8 @@ static int spu_aead_rx_sg_create(struct brcm_message *mssg,
/* have to catch gcm pad in separate buffer */
rx_frag_num++;
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -1178,8 +1179,8 @@ static int spu_aead_tx_sg_create(struct brcm_message *mssg,
u32 assoc_offset = 0;
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.src)
return -ENOMEM;
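Switching kcalloc() to kmalloc_array() keeps the overflow-checked multiplication but skips the zeroing, presumably because the scatterlist array is fully initialized (e.g. via sg_init_table()) before use. A sketch under that assumption, names hypothetical:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *example_alloc_sgl(unsigned int nents, gfp_t gfp)
{
        struct scatterlist *sg;

        sg = kmalloc_array(nents, sizeof(*sg), gfp);
        if (!sg)
                return NULL;
        /* initializes every entry, making kcalloc()'s memset redundant */
        sg_init_table(sg, nents);
        return sg;
}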
@@ -2415,6 +2416,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
static int ahash_hmac_init(struct ahash_request *req)
{
+ int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@@ -2424,7 +2426,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n");
/* init the context as a hash */
- ahash_init(req);
+ ret = ahash_init(req);
+ if (ret)
+ return ret;
if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */
@@ -2684,7 +2688,7 @@ static int aead_enqueue(struct aead_request *req, bool is_encrypt)
flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
flow_log(" authkeylen:%u\n", ctx->authkeylen);
- flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
+ flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp));
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
flow_log(" max_payload infinite");
@@ -4704,7 +4708,7 @@ static struct platform_driver bcm_spu_pdriver = {
.of_match_table = of_match_ptr(bcm_spu_dt_ids),
},
.probe = bcm_spu_probe,
- .remove_new = bcm_spu_remove,
+ .remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 3fdc64b5a65e..ce322cf1baa5 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include "util.h"
#include "spu.h"
@@ -999,7 +1000,7 @@ u32 spu2_create_request(u8 *spu_hdr,
req_opts->is_inbound, req_opts->auth_first);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
cipher_parms->mode, cipher_parms->type);
- flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
+ flow_log(" is_esp: %s\n", str_yes_no(req_opts->is_esp));
flow_log(" key: %d\n", cipher_parms->key_len);
flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
flow_log(" iv: %d\n", cipher_parms->iv_len);
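The <linux/string_choices.h> helpers return fixed strings and replace open-coded ternaries; str_yes_no() and str_false_true() are the two used in this series. A hypothetical sketch:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void example_log(bool is_esp, bool enabled)
{
        pr_debug("is_esp: %s\n", str_yes_no(is_esp));       /* "yes" / "no" */
        pr_debug("disable: %s\n", str_false_true(enabled)); /* "false" / "true" */
}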
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index acf1b197eb84..d2eaf5205b1c 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -25,10 +25,6 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
-ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-endif
-
caam-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
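With the Makefile-injected -DCONFIG_CAAM_QI alias gone, code guards directly on the Kconfig-generated symbol, as the #ifdef sites below show. A hypothetical sketch of the pattern:

#include <linux/platform_device.h>

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int example_qi_init(struct platform_device *pdev);
#else
static inline int example_qi_init(struct platform_device *pdev)
{
        return 0;       /* QI backend compiled out */
}
#endif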
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index e809d030ab11..107ccb2ade42 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
+#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
@@ -5175,7 +5176,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
return err;
}
- dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+ dev_dbg(dev, "disable: %s\n", str_false_true(enabled));
for (i = 0; i < priv->num_pairs; i++) {
ppriv = per_cpu_ptr(priv->ppriv, i);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 887a5f2fb927..cb001aa1de66 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -984,7 +984,7 @@ err:
return -ENOMEM;
}
-static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
@@ -994,7 +994,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
- return;
+ return -ENOMEM;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
@@ -1029,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->priv_form = FORM3;
- return;
+ return 0;
free_dq:
kfree_sensitive(rsa_key->dq);
@@ -1043,6 +1043,7 @@ free_q:
kfree_sensitive(rsa_key->q);
free_p:
kfree_sensitive(rsa_key->p);
+ return -ENOMEM;
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
@@ -1088,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- caam_rsa_set_priv_key_form(ctx, &raw_key);
+ ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
+ if (ret)
+ goto err;
return 0;
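The conversion from void to int lets allocation failures propagate to the caller instead of leaving the key half-built; the goto labels unwind already-allocated components in reverse order. A minimal, self-contained sketch of the pattern (names hypothetical):

#include <linux/slab.h>
#include <linux/types.h>

struct example_key {
        u8 *p;
        u8 *q;
};

static int example_set_form(struct example_key *key, size_t len)
{
        key->p = kzalloc(len, GFP_KERNEL);
        if (!key->p)
                return -ENOMEM;

        key->q = kzalloc(len, GFP_KERNEL);
        if (!key->q)
                goto free_p;

        return 0;

free_p:
        kfree_sensitive(key->p);        /* zeroes before freeing */
        return -ENOMEM;
}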
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d4b39184dbdb..320be5d77737 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -24,7 +24,7 @@
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
#include "qi.h"
#endif
@@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8Q*", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -591,9 +592,9 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
int ret;
ctrlpriv->num_clks = data->num_clks;
- ctrlpriv->clks = devm_kmemdup(dev, data->clks,
- data->num_clks * sizeof(data->clks[0]),
- GFP_KERNEL);
+ ctrlpriv->clks = devm_kmemdup_array(dev, data->clks,
+ data->num_clks, sizeof(*data->clks),
+ GFP_KERNEL);
if (!ctrlpriv->clks)
return -ENOMEM;
@@ -702,12 +703,12 @@ static int caam_ctrl_rng_init(struct device *dev)
*/
if (needs_entropy_delay_adjustment())
ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ if (!inst_handles) {
dev_info(dev,
"Entropy delay = %u\n",
ent_delay);
kick_trng(dev, ent_delay);
- ent_delay += 400;
+ ent_delay = ent_delay * 2;
}
/*
* if instantiate_rng(...) fails, the loop will rerun
@@ -830,7 +831,7 @@ static int caam_ctrl_suspend(struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
caam_state_save(dev);
return 0;
@@ -841,7 +842,7 @@ static int caam_ctrl_resume(struct device *dev)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
int ret = 0;
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
caam_state_restore(dev);
/* HW and rng will be reset so deinstantiation can be removed */
@@ -907,6 +908,7 @@ static int caam_probe(struct platform_device *pdev)
imx_soc_data = imx_soc_match->data;
reg_access = reg_access && imx_soc_data->page0_access;
+ ctrlpriv->no_page0 = !reg_access;
/*
* CAAM clocks cannot be controlled from kernel.
*/
@@ -966,7 +968,7 @@ iomap_ctrl:
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/* If (DPAA 1.x) QI present, check whether dependencies are available */
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
@@ -1097,7 +1099,7 @@ set_dma_mask:
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 6358d3cabf57..718352b7afb5 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -22,7 +22,7 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/*
* This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 8b5d1acd21a7..ef238c71f92a 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -18,7 +18,7 @@ static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
{}
#endif
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
void caam_debugfs_qi_congested(void);
void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
#else
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e51320150872..a88da0d31b23 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -115,6 +115,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ u8 no_page0; /* Nonzero if register page 0 is not controlled by Linux */
bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -226,7 +227,7 @@ static inline int caam_prng_register(struct device *dev)
static inline void caam_prng_unregister(void *data) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);
@@ -242,7 +243,7 @@ static inline void caam_qi_algapi_exit(void)
{
}
-#endif /* CONFIG_CAAM_QI */
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */
static inline u64 caam_get_dma_mask(struct device *dev)
{
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 26eba7de3fb0..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
@@ -819,7 +818,7 @@ static struct platform_driver caam_jr_driver = {
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
- .remove_new = caam_jr_remove,
+ .remove = caam_jr_remove,
.shutdown = caam_jr_remove,
};
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f6111ee9ed34..1e731ed8702b 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);
do {
+ refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret)) {
- refcount_inc(&req->drv_ctx->refcnt);
+ if (likely(!ret))
return 0;
- }
+ refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;
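Taking the reference before qman_enqueue() closes a race: once the enqueue succeeds, the completion path may run immediately and drop a count that had not yet been raised. The failure path now rolls the increment back instead. A sketch of the ordering, names hypothetical:

#include <linux/errno.h>
#include <linux/refcount.h>

struct example_ctx {
        refcount_t refcnt;
};

static int example_submit(struct example_ctx *ctx,
                          int (*enqueue)(struct example_ctx *))
{
        /* take the reference before submission: the completion side may
         * run (and drop it) as soon as the enqueue succeeds */
        refcount_inc(&ctx->refcnt);
        if (!enqueue(ctx))
                return 0;
        refcount_dec(&ctx->refcnt);     /* enqueue failed, roll back */
        return -EBUSY;
}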
@@ -442,11 +442,8 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
- *pcpu = cpumask_next(*pcpu, cpus);
- if (*pcpu >= nr_cpu_ids)
- *pcpu = cpumask_first(cpus);
+ *pcpu = cpumask_next_wrap(*pcpu, cpus);
*cpu = *pcpu;
-
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
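cpumask_next_wrap() folds the open-coded "advance, then fall back to the first set CPU" sequence into one call (assuming the two-argument form used in the hunk above). A hypothetical sketch:

#include <linux/cpumask.h>

/* returns the next set CPU after "prev", wrapping to the first set CPU
 * once the end of the mask is reached */
static int example_next_cpu(int prev, const struct cpumask *cpus)
{
        return cpumask_next_wrap(prev, cpus);
}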
@@ -733,7 +730,7 @@ static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct device *qidev = &caam_pdev->dev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
cpumask_var_t clean_mask;
@@ -742,8 +739,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto fail_cpumask;
- ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = ctrldev;
+ ctrlpriv = dev_get_drvdata(qidev);
/* Initialize the congestion detection */
err = init_cgr(qidev);
@@ -794,7 +790,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
caam_debugfs_qi_init(ctrlpriv);
- err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
if (err)
goto fail2;
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
index 4679c06b611f..75227c587ed0 100644
--- a/drivers/crypto/cavium/Makefile
+++ b/drivers/crypto/cavium/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for Cavium crypto device drivers
#
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 6872ac344001..54de869e5374 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
@@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
+ dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
+ mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release;
}
@@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
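Both hunks fix an inverted timeout test: "if (timeout--)" is true on the very first pass, so the loop gave up immediately; "if (!timeout--)" exits only once the budget is exhausted. (The companion change above also frees the coherent buffer when do_cpt_init() fails, closing a leak on that error path.) A minimal sketch, names hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_wait_idle(u64 (*busy)(void))
{
        int timeout = 100;

        while (busy()) {
                /* exit once the retry budget is exhausted, not before */
                if (!timeout--)
                        return -ETIMEDOUT;
                udelay(10);
        }
        return 0;
}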
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 153004bdfb5c..fb59bb282455 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -238,7 +238,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
qinfo = &cptvf->cqinfo;
queue = &qinfo->queue[qno];
- /* lock commad queue */
+ /* lock command queue */
spin_lock(&queue->lock);
ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
memcpy(ent, (void *)cmd, qinfo->cmd_size);
@@ -510,7 +510,7 @@ get_pending_entry:
info->time_in = jiffies;
info->req = req;
- /* Create the CPT_INST_S type command for HW intrepretation */
+ /* Create the CPT_INST_S type command for HW interpretation */
cptinst.s.doneint = true;
cptinst.s.res_addr = (u64)info->comp_baddr;
cptinst.s.tag = 0;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index a5cdc2b48bd6..068265207ddd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,7 +17,7 @@
#define CRYPTO_CTX_SIZE 256
-/* packet inuput ring alignments */
+/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
deleted file mode 100644
index 020d189d793d..000000000000
--- a/drivers/crypto/cavium/zip/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Cavium's ZIP Driver.
-#
-
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
-thunderx_zip-y := zip_main.o \
- zip_device.o \
- zip_crypto.o \
- zip_mem.o \
- zip_deflate.o \
- zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
deleted file mode 100644
index 54f6fb054119..000000000000
--- a/drivers/crypto/cavium/zip/common.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-/* Device specific zlib function definitions */
-#include "zip_device.h"
-
-/* ZIP device definitions */
-#include "zip_main.h"
-
-/* ZIP memory allocation/deallocation related definitions */
-#include "zip_mem.h"
-
-/* Device specific structure definitions */
-#include "zip_regs.h"
-
-#define ZIP_ERROR -1
-
-#define ZIP_FLUSH_FINISH 4
-
-#define RAW_FORMAT 0 /* for rawpipe */
-#define ZLIB_FORMAT 1 /* for zpipe */
-#define GZIP_FORMAT 2 /* for gzpipe */
-#define LZS_FORMAT 3 /* for lzspipe */
-
-/* Max number of ZIP devices supported */
-#define MAX_ZIP_DEVICES 2
-
-/* Configures the number of zip queues to be used */
-#define ZIP_NUM_QUEUES 2
-
-#define DYNAMIC_STOP_EXCESS 1024
-
-/* Maximum buffer sizes in direct mode */
-#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
-#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
-
-/**
- * struct zip_operation - common data structure for comp and decomp operations
- * @input: Next input byte is read from here
- * @output: Next output byte written here
- * @ctx_addr: Inflate context buffer address
- * @history: Pointer to the history buffer
- * @input_len: Number of bytes available at next_in
- * @input_total_len: Total number of input bytes read
- * @output_len: Remaining free space at next_out
- * @output_total_len: Total number of bytes output so far
- * @csum: Checksum value of the uncompressed data
- * @flush: Flush flag
- * @format: Format (depends on stream's wrap)
- * @speed: Speed depends on stream's level
- * @ccode: Compression code ( stream's strategy)
- * @lzs_flag: Flag for LZS support
- * @begin_file: Beginning of file indication for inflate
- * @history_len: Size of the history data
- * @end_file: Ending of the file indication for inflate
- * @compcode: Completion status of the ZIP invocation
- * @bytes_read: Input bytes read in current instruction
- * @bits_processed: Total bits processed for entire file
- * @sizeofptr: To distinguish between ILP32 and LP64
- * @sizeofzops: Optional just for padding
- *
- * This structure is used to maintain the required meta data for the
- * comp and decomp operations.
- */
-struct zip_operation {
- u8 *input;
- u8 *output;
- u64 ctx_addr;
- u64 history;
-
- u32 input_len;
- u32 input_total_len;
-
- u32 output_len;
- u32 output_total_len;
-
- u32 csum;
- u32 flush;
-
- u32 format;
- u32 speed;
- u32 ccode;
- u32 lzs_flag;
-
- u32 begin_file;
- u32 history_len;
-
- u32 end_file;
- u32 compcode;
- u32 bytes_read;
- u32 bits_processed;
-
- u32 sizeofptr;
- u32 sizeofzops;
-};
-
-static inline int zip_poll_result(union zip_zres_s *result)
-{
- int retries = 1000;
-
- while (!result->s.compcode) {
- if (!--retries) {
- pr_err("ZIP ERR: request timed out");
- return -ETIMEDOUT;
- }
- udelay(10);
- /*
- * Force re-reading of compcode which is updated
- * by the ZIP coprocessor.
- */
- rmb();
- }
- return 0;
-}
-
-/* error messages */
-#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#ifdef MSG_ENABLE
-/* Enable all messages */
-#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
-#else
-#define zip_msg(fmt, args...)
-#endif
-
-#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
-
-#ifdef DEBUG_LEVEL
-
-#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
- strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
-
-#if DEBUG_LEVEL >= 4
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 3
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 2
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG LEVEL >=4 */
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG_LEVEL */
-#else
-
-#define zip_dbg(fmt, args...)
-
-#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
deleted file mode 100644
index 1046a746d36f..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "zip_crypto.h"
-
-static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
- int lzs_flag)
-{
- zip_ops->flush = ZIP_FLUSH_FINISH;
-
- /* equivalent to level 6 of opensource zlib */
- zip_ops->speed = 1;
-
- if (!lzs_flag) {
- zip_ops->ccode = 0; /* Auto Huffman */
- zip_ops->lzs_flag = 0;
- zip_ops->format = ZLIB_FORMAT;
- } else {
- zip_ops->ccode = 3; /* LZS Encoding */
- zip_ops->lzs_flag = 1;
- zip_ops->format = LZS_FORMAT;
- }
- zip_ops->begin_file = 1;
- zip_ops->history_len = 0;
- zip_ops->end_file = 1;
- zip_ops->compcode = 0;
- zip_ops->csum = 1; /* Adler checksum desired */
-}
-
-static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
-
- zip_static_init_zip_ops(comp_ctx, lzs_flag);
- zip_static_init_zip_ops(decomp_ctx, lzs_flag);
-
- comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!comp_ctx->input)
- return -ENOMEM;
-
- comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!comp_ctx->output)
- goto err_comp_input;
-
- decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!decomp_ctx->input)
- goto err_comp_output;
-
- decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!decomp_ctx->output)
- goto err_decomp_input;
-
- return 0;
-
-err_decomp_input:
- zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
-err_comp_output:
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
-err_comp_input:
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
- return -ENOMEM;
-}
-
-static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
-
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
- zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-}
-
-static int zip_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_comp;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
- memcpy(zip_ops->input, src, slen);
-
- ret = zip_deflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-static int zip_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_decomp;
- memcpy(zip_ops->input, src, slen);
-
- /* Workaround for a bug in zlib which sometimes needs an extra byte */
- if (zip_ops->ccode != 3) /* Not LZS Encoding */
- zip_ops->input[slen++] = 0;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
-
- ret = zip_inflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-/* Legacy Compress framework start */
-int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_ctx_init(zip_ctx, 0);
-}
-
-int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_ctx_init(zip_ctx, 1);
-}
-
-void zip_free_comp_ctx(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- zip_ctx_exit(zip_ctx);
-}
-
-int zip_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* Legacy compress framework end */
-
-/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 0);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 1);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- zip_ctx_exit(zip_ctx);
- kfree_sensitive(zip_ctx);
-}
-
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
deleted file mode 100644
index b59ddfcacd34..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_CRYPTO_H__
-#define __ZIP_CRYPTO_H__
-
-#include <linux/crypto.h>
-#include <crypto/internal/scompress.h>
-#include "common.h"
-#include "zip_deflate.h"
-#include "zip_inflate.h"
-
-struct zip_kernel_ctx {
- struct zip_operation zip_comp;
- struct zip_operation zip_decomp;
-};
-
-int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
-int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
-void zip_free_comp_ctx(struct crypto_tfm *tfm);
-int zip_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-int zip_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
-void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
-void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
-void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
deleted file mode 100644
index d7133f857d67..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/* Prepares the deflate zip command */
-static int prepare_zip_command(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD #0 */
- /* History gather */
- zip_cmd->s.hg = 0;
- /* compression enable = 1 for deflate */
- zip_cmd->s.ce = 1;
- /* sf (sync flush) */
- zip_cmd->s.sf = 1;
- /* ef (end of file) */
- if (zip_ops->flush == ZIP_FLUSH_FINISH) {
- zip_cmd->s.ef = 1;
- zip_cmd->s.sf = 0;
- }
-
- zip_cmd->s.cc = zip_ops->ccode;
- /* ss (compression speed/storage) */
- zip_cmd->s.ss = zip_ops->speed;
-
- /* IWORD #1 */
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
- zip_cmd->s.historylength = zip_ops->history_len;
- zip_cmd->s.dg = 0;
-
- /* IWORD # 6 and 7 - compression input/history pointer */
- zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
- zip_ops->history_len);
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
- /* maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- return 0;
-}
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepares zip command based on the input parameters */
- prepare_zip_command(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
- /* Loads zip command into command queues and rings door bell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Stats update for compression requests submitted */
- atomic64_inc(&zip_dev->stats.comp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Stats update for compression requests completed */
- atomic64_inc(&zip_dev->stats.comp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip instruction not yet completed");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip instruction completed successfully");
- zip_update_cmd_bufs(zip_dev, queue);
- break;
-
- case ZIP_CMD_DTRUNC:
- zip_dbg("Output Truncate error");
- /* Returning ZIP_ERROR to avoid copy to user */
- return ZIP_ERROR;
-
- default:
- zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
- return ZIP_ERROR;
- }
-
- /* Update the CRC depending on the format */
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine, need to feed it again */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Unknown Format:%d\n", zip_ops->format);
- }
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.comp_out_bytes);
-
- /* Update output_len */
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
- zip_err("output_len (%d) < total bytes written(%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
-
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- return 0;
-}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
deleted file mode 100644
index 1d32e76edc4d..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEFLATE_H__
-#define __ZIP_DEFLATE_H__
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine by ringing the doorbell.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
deleted file mode 100644
index f174ec29ed69..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/**
- * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
- *
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- *
- * Return: Bytes consumed in the command queue buffer.
- */
-static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
-{
- return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
- sizeof(u64 *));
-}
-
-/**
- * zip_load_instr - Submits the instruction into the ZIP command queue
- * @instr: Pointer to the instruction to be submitted
- * @zip_dev: Pointer to ZIP device structure to which the instruction is to
- * be submitted
- *
- * This function copies the ZIP instruction to the command queue and rings the
- * doorbell to notify the engine of the instruction submission. The command
- * queue is maintained in a circular fashion. When there is space for exactly
- * one instruction in the queue, next chunk pointer of the queue is made to
- * point to the head of the queue, thus maintaining a circular queue.
- *
- * Return: Queue number to which the instruction was submitted
- */
-u32 zip_load_instr(union zip_inst_s *instr,
- struct zip_device *zip_dev)
-{
- union zip_quex_doorbell dbell;
- u32 queue = 0;
- u32 consumed = 0;
- u64 *ncb_ptr = NULL;
- union zip_nptr_s ncp;
-
- /*
- * Distribute the instructions between the enabled queues based on
- * the CPU id.
- */
- if (raw_smp_processor_id() % 2 == 0)
- queue = 0;
- else
- queue = 1;
-
- zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
-
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /*
- * Command Queue implementation
- * 1. If there is place for new instructions, push the cmd at sw_head.
- * 2. If there is place for exactly one instruction, push the new cmd
- * at the sw_head. Make sw_head point to the sw_tail to make it
- * circular. Write sw_head's physical address to the "Next-Chunk
- * Buffer Ptr" to make it cmd_hw_tail.
- * 3. Ring the door bell.
- */
- zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
- zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
-
- consumed = zip_cmd_queue_consumed(zip_dev, queue);
- /* Check if there is space to push just one cmd */
- if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
- zip_dbg("Cmd queue space available for single command");
-		/* Space for one cmd; push it and make the queue circular */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
- ncb_ptr = zip_dev->iq[queue].sw_head;
-
- zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
- ncb_ptr, zip_dev->iq[queue].sw_head - 16);
-
- /* Using Circular command queue */
- zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
- /* Mark this buffer for free */
- zip_dev->iq[queue].free_flag = 1;
-
- /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
- ncp.u_reg64 = 0ull;
- ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
- *ncb_ptr = ncp.u_reg64;
- zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
- *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
-
- zip_dev->iq[queue].pend_cnt++;
-
- } else {
- zip_dbg("Enough space is available for commands");
- /* Push this cmd to cmd queue buffer */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- zip_dev->iq[queue].pend_cnt++;
- }
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
-
- zip_dbg(" Pushed the new cmd : pend_cnt : %d",
- zip_dev->iq[queue].pend_cnt);
-
- /* Ring the doorbell */
- dbell.u_reg64 = 0ull;
- dbell.s.dbell_cnt = 1;
- zip_reg_write(dbell.u_reg64,
- (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
-
- /* Unlock cmd buffer lock */
- spin_unlock(&zip_dev->iq[queue].lock);
-
- return queue;
-}
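
The wrap threshold above follows from the queue layout: ZIP_CMD_QBUF_SIZE is 8064 + 8 bytes, with the final 8 bytes reserved for the next-chunk pointer (NPTR) word, so the data area holds exactly 63 instructions of 128 bytes. Once 62 are consumed, there is room for precisely one more instruction plus the NPTR, which is the case the first branch handles. A compile-time sketch of that arithmetic (ZIP_INST_SIZE is an assumed name for sizeof(union zip_inst_s)):

#include <linux/build_bug.h>

#define ZIP_INST_SIZE	128	/* sizeof(union zip_inst_s) = 16 u64 words */

static inline void zip_qbuf_layout_check(void)
{
	/* 8064 data bytes hold exactly 63 instruction slots */
	BUILD_BUG_ON((ZIP_CMD_QBUF_SIZE - 8) / ZIP_INST_SIZE != 63);
	/* wrap fires at 62 consumed: one slot plus the NPTR word remain */
	BUILD_BUG_ON(62 * ZIP_INST_SIZE + ZIP_INST_SIZE !=
		     ZIP_CMD_QBUF_SIZE - 8);
}
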
-
-/**
- * zip_update_cmd_bufs - Updates the queue statistics after posting the
- * instruction
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- */
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
-{
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /* Check if the previous buffer can be freed */
- if (zip_dev->iq[queue].free_flag == 1) {
- zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
- /* Reset the free flag */
- zip_dev->iq[queue].free_flag = 0;
-
- /* Point the hw_tail to start of the new chunk buffer */
- zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
- } else {
- zip_dbg("Free flag not set. increment hw tail");
- zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
- }
-
- zip_dev->iq[queue].done_cnt++;
- zip_dev->iq[queue].pend_cnt--;
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
- zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
-
- spin_unlock(&zip_dev->iq[queue].lock);
-}
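
Taken together, the two functions above define the queue lifecycle: every zip_load_instr() must eventually be matched by zip_update_cmd_bufs() on the same queue so that hw_tail tracks the engine. A minimal sketch of that pairing, assuming zip_poll_result() from this driver's common.h:

#include <linux/errno.h>

static int zip_submit_and_reap(union zip_inst_s *instr,
			       struct zip_device *zip_dev,
			       union zip_zres_s *res)
{
	u32 queue = zip_load_instr(instr, zip_dev);

	/* busy-wait until hardware writes a completion code */
	zip_poll_result(res);
	zip_update_cmd_bufs(zip_dev, queue);

	return res->s.compcode == ZIP_CMD_SUCCESS ? 0 : -EIO;
}
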
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
deleted file mode 100644
index 9e18b3b93d38..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#ifndef __ZIP_DEVICE_H__
-#define __ZIP_DEVICE_H__
-
-#include <linux/types.h>
-#include "zip_main.h"
-
-struct sg_info {
- /*
- * Pointer to the input data when scatter_gather == 0 and
- * pointer to the input gather list buffer when scatter_gather == 1
- */
- union zip_zptr_s *gather;
-
- /*
- * Pointer to the output data when scatter_gather == 0 and
- * pointer to the output scatter list buffer when scatter_gather == 1
- */
- union zip_zptr_s *scatter;
-
- /*
- * Holds size of the output buffer pointed by scatter list
- * when scatter_gather == 1
- */
- u64 scatter_buf_size;
-
- /* for gather data */
- u64 gather_enable;
-
- /* for scatter data */
- u64 scatter_enable;
-
- /* Number of gather list pointers for gather data */
- u32 gbuf_cnt;
-
- /* Number of scatter list pointers for scatter data */
- u32 sbuf_cnt;
-
- /* Buffers allocation state */
- u8 alloc_state;
-};
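
For illustration, here is how the structure above could be filled for the direct-data case (scatter_gather == 0), following the field comments; a sketch under those assumptions, not code from the driver:

static void sg_info_init_direct(struct sg_info *sg, union zip_zptr_s *in,
				union zip_zptr_s *out, u64 out_size)
{
	sg->gather = in;		/* the input data itself, no gather list */
	sg->scatter = out;		/* the output buffer itself */
	sg->scatter_buf_size = out_size;
	sg->gather_enable = 0;		/* direct mode on both sides */
	sg->scatter_enable = 0;
	sg->gbuf_cnt = 0;
	sg->sbuf_cnt = 0;
	sg->alloc_state = 0;		/* nothing allocated yet */
}
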
-
-/**
- * struct zip_state - Structure representing the required information related
- * to a command
- * @zip_cmd: Pointer to zip instruction structure
- * @result: Pointer to zip result structure
- * @ctx: Context pointer for inflate
- * @history: Decompression history pointer
- * @sginfo: Scatter-gather info structure
- */
-struct zip_state {
- union zip_inst_s zip_cmd;
- union zip_zres_s result;
- union zip_zptr_s *ctx;
- union zip_zptr_s *history;
- struct sg_info sginfo;
-};
-
-#define ZIP_CONTEXT_SIZE 2048
-#define ZIP_INFLATE_HISTORY_SIZE 32768
-#define ZIP_DEFLATE_HISTORY_SIZE 32768
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
deleted file mode 100644
index 7e0d73e2f89e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_inflate.h"
-
-static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD#0 */
-
- /* Decompression History Gather list - no gather list */
- zip_cmd->s.hg = 0;
- /* For decompression, CE must be 0x0. */
- zip_cmd->s.ce = 0;
- /* For decompression, SS must be 0x0. */
- zip_cmd->s.ss = 0;
- /* For decompression, SF should always be set. */
- zip_cmd->s.sf = 1;
-
- /* Begin File */
- if (zip_ops->begin_file == 0)
- zip_cmd->s.bf = 0;
- else
- zip_cmd->s.bf = 1;
-
- zip_cmd->s.ef = 1;
- /* 0: for Deflate decompression, 3: for LZS decompression */
- zip_cmd->s.cc = zip_ops->ccode;
-
-	/* IWORD #1 */
-
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
-
- /*
- * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
- * History data is added to a decompression operation via IWORD3.
- */
- zip_cmd->s.historylength = 0;
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
-
- /* Maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- zip_dbg("Data Direct Input case ");
-
- /* IWORD # 6 and 7 - input pointer */
- zip_cmd->s.dg = 0;
- zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
-
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
-	/* Always returns 0 for the time being. */
- return 0;
-}
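
Gathering in one place the zip_operation fields that prepare_inflate_zcmd() reads above (a sketch; struct zip_operation is defined in common.h, and the Adler-32 seed of 1 is an assumption borrowed from the zlib convention):

static void zip_ops_fill_inflate(struct zip_operation *ops, u8 *in,
				 u32 in_len, u8 *out, u32 out_len)
{
	ops->input = in;
	ops->input_len = in_len;
	ops->output = out;
	ops->output_len = out_len;
	ops->begin_file = 1;	/* first chunk of the stream */
	ops->ccode = 0;		/* 0 = DEFLATE, 3 = LZS (see comment above) */
	ops->csum = 1;		/* Adler-32 seed (assumed) */
}
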
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepare inflate zip command */
- prepare_inflate_zcmd(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
-
- /* Load inflate command to zip queue and ring the doorbell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Decompression requests submitted stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Decompression requests completed stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip Instruction not yet completed\n");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip Instruction completed successfully\n");
- break;
-
- case ZIP_CMD_DYNAMIC_STOP:
-		zip_dbg("Dynamic stop initiated\n");
- break;
-
- default:
- zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
- atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
- zip_update_cmd_bufs(zip_dev, queue);
- return ZIP_ERROR;
- }
-
- zip_update_cmd_bufs(zip_dev, queue);
-
- if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
- (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
- result_ptr->s.ef = 1;
-
- zip_ops->csum = result_ptr->s.adler32;
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.decomp_out_bytes);
-
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- zip_err("output_len (%d) < total bytes written (%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- zip_ops->bytes_read = result_ptr->s.totalbytesread;
- zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
- zip_ops->end_file = result_ptr->s.ef;
- if (zip_ops->end_file) {
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Format error:%d\n", zip_ops->format);
- }
- }
-
- return 0;
-}
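
A hedged sketch of a caller, loosely in the spirit of the driver's zip_crypto.c path (the allocation scheme here is assumed):

#include <linux/slab.h>

static int zip_decompress_once(struct zip_operation *ops,
			       struct zip_device *zip_dev)
{
	struct zip_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;

	ret = zip_inflate(ops, state, zip_dev);	/* 0 or ZIP_ERROR */
	kfree(state);
	return ret;
}
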
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
deleted file mode 100644
index 6b20f179978e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#ifndef __ZIP_INFLATE_H__
-#define __ZIP_INFLATE_H__
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
deleted file mode 100644
index dc5b7bf7e1fd..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ /dev/null
@@ -1,651 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#include "common.h"
-#include "zip_crypto.h"
-
-#define DRV_NAME "ThunderX-ZIP"
-
-static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
-
-static const struct pci_device_id zip_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
- { 0, }
-};
-
-static void zip_debugfs_init(void);
-static void zip_debugfs_exit(void);
-static int zip_register_compression_device(void);
-static void zip_unregister_compression_device(void);
-
-void zip_reg_write(u64 val, u64 __iomem *addr)
-{
- writeq(val, addr);
-}
-
-u64 zip_reg_read(u64 __iomem *addr)
-{
- return readq(addr);
-}
-
-/*
- * Allocates a new ZIP device structure.
- * Returns a zip_device pointer, or NULL when no free device slot is left
- * or memory allocation fails.
- */
-static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
-{
- struct zip_device *zip = NULL;
- int idx;
-
- for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
- if (!zip_dev[idx])
- break;
- }
-
-	/* Allocate only if a free slot was found within the limit */
- if (idx < MAX_ZIP_DEVICES)
- zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
-
- if (!zip)
- return NULL;
-
- zip_dev[idx] = zip;
- zip->index = idx;
- return zip;
-}
-
-/**
- * zip_get_device - Get ZIP device based on node id of cpu
- *
- * @node: Node id of the current cpu
- * Return: Pointer to Zip device structure
- */
-struct zip_device *zip_get_device(int node)
-{
- if ((node < MAX_ZIP_DEVICES) && (node >= 0))
- return zip_dev[node];
-
- zip_err("ZIP device not found for node id %d\n", node);
- return NULL;
-}
-
-/**
- * zip_get_node_id - Get the node id of the current cpu
- *
- * Return: Node id of the current cpu
- */
-int zip_get_node_id(void)
-{
- return cpu_to_node(raw_smp_processor_id());
-}
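
The two helpers above are meant to be used together: resolve the NUMA node of the submitting CPU, then pick the ZIP device on that node. For illustration:

static struct zip_device *zip_current_device(void)
{
	/* may return NULL when the node has no probed ZIP device */
	return zip_get_device(zip_get_node_id());
}
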
-
-/* Initializes the ZIP h/w sub-system */
-static int zip_init_hw(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
- union zip_constants constants;
- union zip_que_ena que_ena;
- union zip_quex_map que_map;
- union zip_que_pri que_pri;
-
- union zip_quex_sbuf_addr que_sbuf_addr;
- union zip_quex_sbuf_ctl que_sbuf_ctl;
-
- int q = 0;
-
- /* Enable the ZIP Engine(Core) Clock */
- cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
- cmd_ctl.s.forceclk = 1;
- zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
-
- zip_msg("ZIP_CMD_CTL : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
-
- constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
- zip->depth = constants.s.depth;
- zip->onfsize = constants.s.onfsize;
- zip->ctxsize = constants.s.ctxsize;
-
- zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
- zip->depth, zip->onfsize, zip->ctxsize);
-
- /*
- * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
- * have the correct buffer pointer and size configured for each
- * instruction queue.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_sbuf_ctl.u_reg64 = 0ull;
- que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
- que_sbuf_ctl.s.inst_be = 0;
- que_sbuf_ctl.s.stream_id = 0;
- zip_reg_write(que_sbuf_ctl.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
-
- zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
- }
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
-
- spin_lock_init(&zip->iq[q].lock);
-
- if (zip_cmd_qbuf_alloc(zip, q)) {
- while (q != 0) {
- q--;
- zip_cmd_qbuf_free(zip, q);
- }
- return -ENOMEM;
- }
-
- /* Initialize tail ptr to head */
- zip->iq[q].sw_tail = zip->iq[q].sw_head;
- zip->iq[q].hw_tail = zip->iq[q].sw_head;
-
- /* Write the physical addr to register */
- que_sbuf_addr.u_reg64 = 0ull;
- que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
- ZIP_128B_ALIGN);
-
- zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
- (u64)que_sbuf_addr.s.ptr);
-
- zip_reg_write(que_sbuf_addr.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip->iq[q].sw_head, zip->iq[q].sw_tail,
- zip->iq[q].hw_tail);
- zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
- }
-
- /*
- * Queue-to-ZIP core mapping
- * If a queue is not mapped to a particular core, it is equivalent to
- * the ZIP core being disabled.
- */
- que_ena.u_reg64 = 0x0ull;
- /* Enabling queues based on ZIP_NUM_QUEUES */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_ena.s.ena |= (0x1 << q);
- zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
-
- zip_msg("QUE_ENA : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_map.u_reg64 = 0ull;
- /* Mapping each queue to two ZIP cores */
- que_map.s.zce = 0x3;
- zip_reg_write(que_map.u_reg64,
- (zip->reg_base + ZIP_QUEX_MAP(q)));
-
- zip_msg("QUE_MAP(%d) : 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
- }
-
- que_pri.u_reg64 = 0ull;
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
- zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
-
- zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
-
- return 0;
-}
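
Two magic-looking encodings above deserve a note: the SBUF size field is programmed in 64-bit words ((8064 + 8) / 8 = 1009), and the SBUF address is stored as a 128-byte-aligned physical address, hence the right shift by ZIP_128B_ALIGN (= 7, i.e. log2(128)). A compile-time sketch:

#include <linux/build_bug.h>

static inline void zip_sbuf_encoding_check(void)
{
	BUILD_BUG_ON(ZIP_CMD_QBUF_SIZE / sizeof(u64) != 1009);
	BUILD_BUG_ON(ZIP_128B_ALIGN != 7);	/* 128-byte alignment */
}
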
-
-static void zip_reset(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
-
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
-}
-
-static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct device *dev = &pdev->dev;
- struct zip_device *zip = NULL;
- int err;
-
- zip = zip_alloc_device(pdev);
- if (!zip)
- return -ENOMEM;
-
- dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
- pdev->vendor, pdev->device, dev_to_node(dev));
-
- pci_set_drvdata(pdev, zip);
- zip->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device");
- goto err_free_device;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(dev, "PCI request regions failed 0x%x", err);
- goto err_disable_device;
- }
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
- goto err_release_regions;
- }
-
- /* MAP configuration registers */
- zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
- if (!zip->reg_base) {
- dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- /* Initialize ZIP Hardware */
- err = zip_init_hw(zip);
- if (err)
- goto err_release_regions;
-
- /* Register with the Kernel Crypto Interface */
- err = zip_register_compression_device();
- if (err < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_register;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return 0;
-
-err_register:
- zip_reset(zip);
-
-err_release_regions:
- if (zip->reg_base)
- iounmap(zip->reg_base);
- pci_release_regions(pdev);
-
-err_disable_device:
- pci_disable_device(pdev);
-
-err_free_device:
- pci_set_drvdata(pdev, NULL);
-
- /* Remove zip_dev from zip_device list, free the zip_device memory */
- zip_dev[zip->index] = NULL;
- devm_kfree(dev, zip);
-
- return err;
-}
-
-static void zip_remove(struct pci_dev *pdev)
-{
- struct zip_device *zip = pci_get_drvdata(pdev);
- int q = 0;
-
- if (!zip)
- return;
-
- zip_debugfs_exit();
-
- zip_unregister_compression_device();
-
- if (zip->reg_base) {
- zip_reset(zip);
- iounmap(zip->reg_base);
- }
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- /*
- * Free Command Queue buffers. This free should be called for all
- * the enabled Queues.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- zip_cmd_qbuf_free(zip, q);
-
- pci_set_drvdata(pdev, NULL);
- /* remove zip device from zip device list */
- zip_dev[zip->index] = NULL;
-}
-
-/* PCI Sub-System Interface */
-static struct pci_driver zip_driver = {
- .name = DRV_NAME,
- .id_table = zip_id_table,
- .probe = zip_probe,
- .remove = zip_remove,
-};
-
-/* Kernel Crypto Subsystem Interface */
-
-static struct crypto_alg zip_comp_deflate = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-cavium",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zip_kernel_ctx),
- .cra_priority = 300,
- .cra_module = THIS_MODULE,
- .cra_init = zip_alloc_comp_ctx_deflate,
- .cra_exit = zip_free_comp_ctx,
- .cra_u = { .compress = {
- .coa_compress = zip_comp_compress,
- .coa_decompress = zip_comp_decompress
- } }
-};
-
-static struct crypto_alg zip_comp_lzs = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-cavium",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zip_kernel_ctx),
- .cra_priority = 300,
- .cra_module = THIS_MODULE,
- .cra_init = zip_alloc_comp_ctx_lzs,
- .cra_exit = zip_free_comp_ctx,
- .cra_u = { .compress = {
- .coa_compress = zip_comp_compress,
- .coa_decompress = zip_comp_decompress
- } }
-};
-
-static struct scomp_alg zip_scomp_deflate = {
- .alloc_ctx = zip_alloc_scomp_ctx_deflate,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static struct scomp_alg zip_scomp_lzs = {
- .alloc_ctx = zip_alloc_scomp_ctx_lzs,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static int zip_register_compression_device(void)
-{
- int ret;
-
- ret = crypto_register_alg(&zip_comp_deflate);
- if (ret < 0) {
- zip_err("Deflate algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_alg(&zip_comp_lzs);
- if (ret < 0) {
- zip_err("LZS algorithm registration failed\n");
- goto err_unregister_alg_deflate;
- }
-
- ret = crypto_register_scomp(&zip_scomp_deflate);
- if (ret < 0) {
- zip_err("Deflate scomp algorithm registration failed\n");
- goto err_unregister_alg_lzs;
- }
-
- ret = crypto_register_scomp(&zip_scomp_lzs);
- if (ret < 0) {
- zip_err("LZS scomp algorithm registration failed\n");
- goto err_unregister_scomp_deflate;
- }
-
- return ret;
-
-err_unregister_scomp_deflate:
- crypto_unregister_scomp(&zip_scomp_deflate);
-err_unregister_alg_lzs:
- crypto_unregister_alg(&zip_comp_lzs);
-err_unregister_alg_deflate:
- crypto_unregister_alg(&zip_comp_deflate);
-
- return ret;
-}
-
-static void zip_unregister_compression_device(void)
-{
- crypto_unregister_alg(&zip_comp_deflate);
- crypto_unregister_alg(&zip_comp_lzs);
- crypto_unregister_scomp(&zip_scomp_deflate);
- crypto_unregister_scomp(&zip_scomp_lzs);
-}
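
From a consumer's point of view, none of the symbols above are called directly; users reach the driver through the algorithm names registered here. A sketch using the legacy crypto_comp interface (assuming that API is still present in the target tree):

#include <linux/crypto.h>
#include <linux/err.h>

static int deflate_via_crypto_api(const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	/* "deflate" resolves to deflate-cavium when its priority (300) wins */
	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
	crypto_free_comp(tfm);
	return ret;
}
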
-
-/*
- * debugfs functions
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-/* Displays ZIP device statistics */
-static int zip_stats_show(struct seq_file *s, void *unused)
-{
- u64 val = 0ull;
- u64 avg_chunk = 0ull, avg_cr = 0ull;
- u32 q = 0;
-
- int index = 0;
- struct zip_device *zip;
- struct zip_stats *st;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- u64 pending = 0;
-
- if (zip_dev[index]) {
- zip = zip_dev[index];
- st = &zip->stats;
-
- /* Get all the pending requests */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- val = zip_reg_read((zip->reg_base +
- ZIP_DBG_QUEX_STA(q)));
- pending += val >> 32 & 0xffffff;
- }
-
- val = atomic64_read(&st->comp_req_complete);
- avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
-
- val = atomic64_read(&st->comp_out_bytes);
- avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
- seq_printf(s, " ZIP Device %d Stats\n"
- "-----------------------------------\n"
- "Comp Req Submitted : \t%lld\n"
- "Comp Req Completed : \t%lld\n"
- "Compress In Bytes : \t%lld\n"
- "Compressed Out Bytes : \t%lld\n"
- "Average Chunk size : \t%llu\n"
- "Average Compression ratio : \t%llu\n"
- "Decomp Req Submitted : \t%lld\n"
- "Decomp Req Completed : \t%lld\n"
- "Decompress In Bytes : \t%lld\n"
- "Decompressed Out Bytes : \t%lld\n"
- "Decompress Bad requests : \t%lld\n"
- "Pending Req : \t%lld\n"
- "---------------------------------\n",
- index,
- (u64)atomic64_read(&st->comp_req_submit),
- (u64)atomic64_read(&st->comp_req_complete),
- (u64)atomic64_read(&st->comp_in_bytes),
- (u64)atomic64_read(&st->comp_out_bytes),
- avg_chunk,
- avg_cr,
- (u64)atomic64_read(&st->decomp_req_submit),
- (u64)atomic64_read(&st->decomp_req_complete),
- (u64)atomic64_read(&st->decomp_in_bytes),
- (u64)atomic64_read(&st->decomp_out_bytes),
- (u64)atomic64_read(&st->decomp_bad_reqs),
- pending);
- }
- }
- return 0;
-}
-
-/* Clears stats data */
-static int zip_clear_show(struct seq_file *s, void *unused)
-{
- int index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- memset(&zip_dev[index]->stats, 0,
- sizeof(struct zip_stats));
- seq_printf(s, "Cleared stats for zip %d\n", index);
- }
- }
-
- return 0;
-}
-
-static struct zip_registers zipregs[64] = {
- {"ZIP_CMD_CTL ", 0x0000ull},
- {"ZIP_THROTTLE ", 0x0010ull},
- {"ZIP_CONSTANTS ", 0x00A0ull},
- {"ZIP_QUE0_MAP ", 0x1400ull},
- {"ZIP_QUE1_MAP ", 0x1408ull},
- {"ZIP_QUE_ENA ", 0x0500ull},
- {"ZIP_QUE_PRI ", 0x0508ull},
- {"ZIP_QUE0_DONE ", 0x2000ull},
- {"ZIP_QUE1_DONE ", 0x2008ull},
- {"ZIP_QUE0_DOORBELL ", 0x4000ull},
- {"ZIP_QUE1_DOORBELL ", 0x4008ull},
- {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
- {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
- {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
- {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
- { NULL, 0}
-};
-
-/* Prints registers' contents */
-static int zip_regs_show(struct seq_file *s, void *unused)
-{
- u64 val = 0;
- int i = 0, index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- seq_printf(s, "--------------------------------\n"
- " ZIP Device %d Registers\n"
- "--------------------------------\n",
- index);
-
- i = 0;
-
- while (zipregs[i].reg_name) {
- val = zip_reg_read((zip_dev[index]->reg_base +
- zipregs[i].reg_offset));
- seq_printf(s, "%s: 0x%016llx\n",
- zipregs[i].reg_name, val);
- i++;
- }
- }
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(zip_stats);
-DEFINE_SHOW_ATTRIBUTE(zip_clear);
-DEFINE_SHOW_ATTRIBUTE(zip_regs);
-
-/* Root directory for thunderx_zip debugfs entry */
-static struct dentry *zip_debugfs_root;
-
-static void zip_debugfs_init(void)
-{
- if (!debugfs_initialized())
- return;
-
- zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-
- /* Creating files for entries inside thunderx_zip directory */
- debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
- &zip_stats_fops);
-
- debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
- &zip_clear_fops);
-
- debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
- &zip_regs_fops);
-
-}
-
-static void zip_debugfs_exit(void)
-{
- debugfs_remove_recursive(zip_debugfs_root);
-}
-
-#else
-/* These run from the probe/remove paths, so they must not be __init/__exit */
-static void zip_debugfs_init(void) { }
-static void zip_debugfs_exit(void) { }
-#endif
-/* debugfs - end */
-
-module_pci_driver(zip_driver);
-
-MODULE_AUTHOR("Cavium Inc");
-MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
deleted file mode 100644
index e1e4fa92ce80..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#ifndef __ZIP_MAIN_H__
-#define __ZIP_MAIN_H__
-
-#include "zip_device.h"
-#include "zip_regs.h"
-
-/* PCI device IDs */
-#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
-
-/* ZIP device BARs */
-#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
-
-/* Maximum available zip queues */
-#define ZIP_MAX_NUM_QUEUES 8
-
-#define ZIP_128B_ALIGN 7
-
-/* Command queue buffer size */
-#define ZIP_CMD_QBUF_SIZE (8064 + 8)
-
-struct zip_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
-/* ZIP Compression - Decompression stats */
-struct zip_stats {
- atomic64_t comp_req_submit;
- atomic64_t comp_req_complete;
- atomic64_t decomp_req_submit;
- atomic64_t decomp_req_complete;
- atomic64_t comp_in_bytes;
- atomic64_t comp_out_bytes;
- atomic64_t decomp_in_bytes;
- atomic64_t decomp_out_bytes;
- atomic64_t decomp_bad_reqs;
-};
-
-/* ZIP Instruction Queue */
-struct zip_iq {
- u64 *sw_head;
- u64 *sw_tail;
- u64 *hw_tail;
- u64 done_cnt;
- u64 pend_cnt;
- u64 free_flag;
-
- /* ZIP IQ lock */
- spinlock_t lock;
-};
-
-/* ZIP Device */
-struct zip_device {
- u32 index;
- void __iomem *reg_base;
- struct pci_dev *pdev;
-
- /* Different ZIP Constants */
- u64 depth;
- u64 onfsize;
- u64 ctxsize;
-
- struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
- struct zip_stats stats;
-};
-
-/* Prototypes */
-struct zip_device *zip_get_device(int node_id);
-int zip_get_node_id(void);
-void zip_reg_write(u64 val, u64 __iomem *addr);
-u64 zip_reg_read(u64 __iomem *addr);
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
-u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
-
-#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
deleted file mode 100644
index b3e0843a9169..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-{
- zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(ZIP_CMD_QBUF_SIZE));
-
- if (!zip->iq[q].sw_head)
- return -ENOMEM;
-
- memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
-
- zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
- return 0;
-}
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-{
- zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
-
- free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
-}
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size)
-{
- u8 *ptr;
-
- ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(size));
-
- if (!ptr)
- return NULL;
-
- memset(ptr, 0, size);
-
- zip_dbg("Data buffer allocation success\n");
- return ptr;
-}
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size)
-{
- zip_dbg("Freeing data buffer 0x%lx\n", ptr);
-
- free_pages((u64)ptr, get_order(size));
-}
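
Both allocators above round the request up to whole pages via get_order(), so the free side must be handed the same size that was passed to the alloc. A usage sketch:

static int zip_with_bounce_buffer(u64 len)
{
	u8 *buf = zip_data_buf_alloc(len);

	if (!buf)
		return -ENOMEM;

	/* ... fill buf and hand it to the engine ... */

	zip_data_buf_free(buf, len);	/* same len as the alloc */
	return 0;
}
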
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
deleted file mode 100644
index f8f2f08c4a5c..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#ifndef __ZIP_MEM_H__
-#define __ZIP_MEM_H__
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q);
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size);
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size);
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
deleted file mode 100644
index 874e0236c87e..000000000000
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-/* Cavium dual-license header (identical to the first one above) */
-
-#ifndef __ZIP_REGS_H__
-#define __ZIP_REGS_H__
-
-/*
- * Configuration and status register (CSR) address and type definitions for
- * Cavium ZIP.
- */
-
-#include <linux/kern_levels.h>
-
-/* ZIP invocation result completion status codes */
-#define ZIP_CMD_NOTDONE 0x0
-
-/* Successful completion. */
-#define ZIP_CMD_SUCCESS 0x1
-
-/* Output truncated */
-#define ZIP_CMD_DTRUNC 0x2
-
-/* Dynamic Stop */
-#define ZIP_CMD_DYNAMIC_STOP 0x3
-
-/* Uncompress ran out of input data when IWORD0[EF] was set */
-#define ZIP_CMD_ITRUNC 0x4
-
-/* Uncompress found the reserved block type 3 */
-#define ZIP_CMD_RBLOCK 0x5
-
-/*
- * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
- */
-#define ZIP_CMD_NLEN 0x6
-
-/* Uncompress found a bad code in the main Huffman codes. */
-#define ZIP_CMD_BADCODE 0x7
-
-/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
-#define ZIP_CMD_BADCODE2 0x8
-
-/* Compress found a zero-length input. */
-#define ZIP_CMD_ZERO_LEN 0x9
-
-/* The compress or decompress encountered an internal parity error. */
-#define ZIP_CMD_PARITY 0xA
-
-/*
- * Uncompress found a string identifier that precedes the uncompressed data and
- * decompression history.
- */
-#define ZIP_CMD_FATAL 0xB
-
-/**
- * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X
- * interrupt vectors.
- */
-enum zip_int_vec_e {
- ZIP_INT_VEC_E_ECCE = 0x10,
- ZIP_INT_VEC_E_FIFE = 0x11,
- ZIP_INT_VEC_E_QUE0_DONE = 0x0,
- ZIP_INT_VEC_E_QUE0_ERR = 0x8,
- ZIP_INT_VEC_E_QUE1_DONE = 0x1,
- ZIP_INT_VEC_E_QUE1_ERR = 0x9,
- ZIP_INT_VEC_E_QUE2_DONE = 0x2,
- ZIP_INT_VEC_E_QUE2_ERR = 0xa,
- ZIP_INT_VEC_E_QUE3_DONE = 0x3,
- ZIP_INT_VEC_E_QUE3_ERR = 0xb,
- ZIP_INT_VEC_E_QUE4_DONE = 0x4,
- ZIP_INT_VEC_E_QUE4_ERR = 0xc,
- ZIP_INT_VEC_E_QUE5_DONE = 0x5,
- ZIP_INT_VEC_E_QUE5_ERR = 0xd,
- ZIP_INT_VEC_E_QUE6_DONE = 0x6,
- ZIP_INT_VEC_E_QUE6_ERR = 0xe,
- ZIP_INT_VEC_E_QUE7_DONE = 0x7,
- ZIP_INT_VEC_E_QUE7_ERR = 0xf,
- ZIP_INT_VEC_E_ENUM_LAST = 0x12,
-};
-
-/**
- * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_addr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-
-};
-
-/**
- * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_ctl_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_inst_s - ZIP Instruction Structure.
- * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
- * the structure).
- */
-union zip_inst_s {
- u64 u_reg64[16];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1;
- u64 reserved_56_62 : 7;
- u64 totaloutputlength : 24;
- u64 reserved_27_31 : 5;
- u64 exn : 3;
- u64 reserved_23_23 : 1;
- u64 exbits : 7;
- u64 reserved_12_15 : 4;
- u64 sf : 1;
- u64 ss : 2;
- u64 cc : 2;
- u64 ef : 1;
- u64 bf : 1;
- u64 ce : 1;
- u64 reserved_3_3 : 1;
- u64 ds : 1;
- u64 dg : 1;
- u64 hg : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 hg : 1;
- u64 dg : 1;
- u64 ds : 1;
- u64 reserved_3_3 : 1;
- u64 ce : 1;
- u64 bf : 1;
- u64 ef : 1;
- u64 cc : 2;
- u64 ss : 2;
- u64 sf : 1;
- u64 reserved_12_15 : 4;
- u64 exbits : 7;
- u64 reserved_23_23 : 1;
- u64 exn : 3;
- u64 reserved_27_31 : 5;
- u64 totaloutputlength : 24;
- u64 reserved_56_62 : 7;
- u64 doneint : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 historylength : 16;
- u64 reserved_96_111 : 16;
- u64 adlercrc32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adlercrc32 : 32;
- u64 reserved_96_111 : 16;
- u64 historylength : 16;
-#endif
- union zip_zptr_addr_s ctx_ptr_addr;
- union zip_zptr_ctl_s ctx_ptr_ctl;
- union zip_zptr_addr_s his_ptr_addr;
- union zip_zptr_ctl_s his_ptr_ctl;
- union zip_zptr_addr_s inp_ptr_addr;
- union zip_zptr_ctl_s inp_ptr_ctl;
- union zip_zptr_addr_s out_ptr_addr;
- union zip_zptr_ctl_s out_ptr_ctl;
- union zip_zptr_addr_s res_ptr_addr;
- union zip_zptr_ctl_s res_ptr_ctl;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_817_831 : 15;
- u64 wq_ptr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 wq_ptr : 49;
- u64 reserved_817_831 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_882_895 : 14;
- u64 tt : 2;
- u64 reserved_874_879 : 6;
- u64 grp : 10;
- u64 tag : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 tag : 32;
- u64 grp : 10;
- u64 reserved_874_879 : 6;
- u64 tt : 2;
- u64 reserved_882_895 : 14;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#endif
- } s;
-};
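
The comment above fixes the layout at 16 64-bit words (128 bytes), which is also what the "sw_head += 16" stride in zip_deflate.c relies on. A compile-time guard (sketch):

#include <linux/build_bug.h>

static inline void zip_inst_size_check(void)
{
	BUILD_BUG_ON(sizeof(union zip_inst_s) != 16 * sizeof(u64));
}
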
-
-/**
- * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
- * Structure
- *
- * ZIP_NPTR structure is used to chain all the zip instruction buffers
- * together. ZIP instruction buffers are managed (allocated and released) by
- * the software.
- */
-union zip_nptr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-/**
- * union zip_zptr_s - ZIP Generic Pointer Structure.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_s {
- u64 u_reg64[2];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_zres_s - ZIP Result Structure
- *
- * The ZIP coprocessor writes the result structure after it completes the
- * invocation. The result structure is exactly 24 bytes, and each invocation of
- * the ZIP coprocessor produces exactly one result structure.
- */
-union zip_zres_s {
- u64 u_reg64[3];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 crc32 : 32;
- u64 adler32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adler32 : 32;
- u64 crc32 : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbyteswritten : 32;
- u64 totalbytesread : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 totalbytesread : 32;
- u64 totalbyteswritten : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbitsprocessed : 32;
- u64 doneint : 1;
- u64 reserved_155_158 : 4;
- u64 exn : 3;
- u64 reserved_151_151 : 1;
- u64 exbits : 7;
- u64 reserved_137_143 : 7;
- u64 ef : 1;
-
- volatile u64 compcode : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-
- volatile u64 compcode : 8;
- u64 ef : 1;
- u64 reserved_137_143 : 7;
- u64 exbits : 7;
- u64 reserved_151_151 : 1;
- u64 exn : 3;
- u64 reserved_155_158 : 4;
- u64 doneint : 1;
- u64 totalbitsprocessed : 32;
-#endif
- } s;
-};
-
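A hedged polling sketch: the completion code is declared volatile above because hardware writes it last, so a busy-wait loop re-reads it from memory on each iteration; the surrounding submission logic is assumed and not shown.

static u8 zip_poll_compcode(union zip_zres_s *res)
{
	while (res->s.compcode == 0)	/* volatile: reloaded every pass */
		cpu_relax();

	return res->s.compcode;
}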
-/**
- * union zip_cmd_ctl - Structure representing the register that controls
- * clock and reset.
- */
-union zip_cmd_ctl {
- u64 u_reg64;
- struct zip_cmd_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 forceclk : 1;
- u64 reset : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reset : 1;
- u64 forceclk : 1;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-#define ZIP_CMD_CTL 0x0ull
-
-/**
- * union zip_constants - Data structure representing the register that contains
- * all of the current implementation-related parameters of the zip core in this
- * chip.
- */
-union zip_constants {
- u64 u_reg64;
- struct zip_constants_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 nexec : 8;
- u64 reserved_49_55 : 7;
- u64 syncflush_capable : 1;
- u64 depth : 16;
- u64 onfsize : 12;
- u64 ctxsize : 12;
- u64 reserved_1_7 : 7;
- u64 disabled : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 disabled : 1;
- u64 reserved_1_7 : 7;
- u64 ctxsize : 12;
- u64 onfsize : 12;
- u64 depth : 16;
- u64 syncflush_capable : 1;
- u64 reserved_49_55 : 7;
- u64 nexec : 8;
-#endif
- } s;
-};
-
-#define ZIP_CONSTANTS 0x00A0ull
-
-/**
- * union zip_corex_bist_status - Represents registers which have the BIST
- * status of memories in zip cores.
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_corex_bist_status {
- u64 u_reg64;
- struct zip_corex_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_53_63 : 11;
- u64 bstatus : 53;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 53;
- u64 reserved_53_63 : 11;
-#endif
- } s;
-};
-
-static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
-{
- if (param1 <= 1)
- return 0x0520ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
- return 0;
-}
-
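The accessor above validates the core index and returns a CSR offset, logging and returning 0 otherwise. A usage sketch, assuming reg_base is an ioremap()ed mapping of the device CSRs:

static void zip_check_core_bist(void __iomem *reg_base)
{
	union zip_corex_bist_status bist;
	int core;

	for (core = 0; core < 2; core++) {
		bist.u_reg64 = readq(reg_base + ZIP_COREX_BIST_STATUS(core));
		if (bist.s.bstatus)	/* per bit, 1 = a failed memory */
			pr_warn("zip: core %d BIST failure mask %#llx\n",
				core, (u64)bist.s.bstatus);
	}
}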
-/**
- * union zip_ctl_bist_status - Represents the register that has the BIST status
- * of memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
- * buffer, output data buffers).
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_ctl_bist_status {
- u64 u_reg64;
- struct zip_ctl_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_9_63 : 55;
- u64 bstatus : 9;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 9;
- u64 reserved_9_63 : 55;
-#endif
- } s;
-};
-
-#define ZIP_CTL_BIST_STATUS 0x0510ull
-
-/**
- * union zip_ctl_cfg - Represents the register that controls the behavior of
- * the ZIP DMA engines.
- *
- * It is recommended to keep default values for normal operation. Changing the
- * values of the fields may be useful for diagnostics.
- */
-union zip_ctl_cfg {
- u64 u_reg64;
- struct zip_ctl_cfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_52_63 : 12;
- u64 ildf : 4;
- u64 reserved_36_47 : 12;
- u64 drtf : 4;
- u64 reserved_27_31 : 5;
- u64 stcf : 3;
- u64 reserved_19_23 : 5;
- u64 ldf : 3;
- u64 reserved_2_15 : 14;
- u64 busy : 1;
- u64 reserved_0_0 : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_0_0 : 1;
- u64 busy : 1;
- u64 reserved_2_15 : 14;
- u64 ldf : 3;
- u64 reserved_19_23 : 5;
- u64 stcf : 3;
- u64 reserved_27_31 : 5;
- u64 drtf : 4;
- u64 reserved_36_47 : 12;
- u64 ildf : 4;
- u64 reserved_52_63 : 12;
-#endif
- } s;
-};
-
-#define ZIP_CTL_CFG 0x0560ull
-
-/**
- * union zip_dbg_corex_inst - Represents the registers that reflect the status
- * of the current instruction that the ZIP core is executing or has executed.
- *
- * These registers are only for debug use.
- */
-union zip_dbg_corex_inst {
- u64 u_reg64;
- struct zip_dbg_corex_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_35_62 : 28;
- u64 qid : 3;
- u64 iid : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iid : 32;
- u64 qid : 3;
- u64 reserved_35_62 : 28;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_INST(u64 param1)
-{
- if (param1 <= 1)
- return 0x0640ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_corex_sta - Represents registers that reflect the status of
- * the zip cores.
- *
- * They are for debug use only.
- */
-union zip_dbg_corex_sta {
- u64 u_reg64;
- struct zip_dbg_corex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_37_62 : 26;
- u64 ist : 5;
- u64 nie : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nie : 32;
- u64 ist : 5;
- u64 reserved_37_62 : 26;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_STA(u64 param1)
-{
- if (param1 <= 1)
- return 0x0680ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_quex_sta - Represents registers that reflect the status of
- * the zip instruction queues.
- *
- * They are for debug use only.
- */
-union zip_dbg_quex_sta {
- u64 u_reg64;
- struct zip_dbg_quex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_56_62 : 7;
- u64 rqwc : 24;
- u64 nii : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nii : 32;
- u64 rqwc : 24;
- u64 reserved_56_62 : 7;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
-{
- if (param1 <= 7)
- return 0x1800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ecc_ctl - Represents the register that enables ECC for each
- * individual internal memory that requires ECC.
- *
- * For debug purpose, it can also flip one or two bits in the ECC data.
- */
-union zip_ecc_ctl {
- u64 u_reg64;
- struct zip_ecc_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_19_63 : 45;
- u64 vmem_cdis : 1;
- u64 vmem_fs : 2;
- u64 reserved_15_15 : 1;
- u64 idf1_cdis : 1;
- u64 idf1_fs : 2;
- u64 reserved_11_11 : 1;
- u64 idf0_cdis : 1;
- u64 idf0_fs : 2;
- u64 reserved_7_7 : 1;
- u64 gspf_cdis : 1;
- u64 gspf_fs : 2;
- u64 reserved_3_3 : 1;
- u64 iqf_cdis : 1;
- u64 iqf_fs : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iqf_fs : 2;
- u64 iqf_cdis : 1;
- u64 reserved_3_3 : 1;
- u64 gspf_fs : 2;
- u64 gspf_cdis : 1;
- u64 reserved_7_7 : 1;
- u64 idf0_fs : 2;
- u64 idf0_cdis : 1;
- u64 reserved_11_11 : 1;
- u64 idf1_fs : 2;
- u64 idf1_cdis : 1;
- u64 reserved_15_15 : 1;
- u64 vmem_fs : 2;
- u64 vmem_cdis : 1;
- u64 reserved_19_63 : 45;
-#endif
- } s;
-};
-
-#define ZIP_ECC_CTL 0x0568ull
-
-/* NCB - zip_ecce_ena_w1c */
-union zip_ecce_ena_w1c {
- u64 u_reg64;
- struct zip_ecce_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1C 0x0598ull
-
-/* NCB - zip_ecce_ena_w1s */
-union zip_ecce_ena_w1s {
- u64 u_reg64;
- struct zip_ecce_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1S 0x0590ull
-
-/**
- * union zip_ecce_int - Represents the register that contains the status of the
- * ECC interrupt sources.
- */
-union zip_ecce_int {
- u64 u_reg64;
- struct zip_ecce_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT 0x0580ull
-
-/* NCB - zip_ecce_int_w1s */
-union zip_ecce_int_w1s {
- u64 u_reg64;
- struct zip_ecce_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT_W1S 0x0588ull
-
-/* NCB - zip_fife_ena_w1c */
-union zip_fife_ena_w1c {
- u64 u_reg64;
- struct zip_fife_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1C 0x0090ull
-
-/* NCB - zip_fife_ena_w1s */
-union zip_fife_ena_w1s {
- u64 u_reg64;
- struct zip_fife_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1S 0x0088ull
-
-/* NCB - zip_fife_int */
-union zip_fife_int {
- u64 u_reg64;
- struct zip_fife_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT 0x0078ull
-
-/* NCB - zip_fife_int_w1s */
-union zip_fife_int_w1s {
- u64 u_reg64;
- struct zip_fife_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT_W1S 0x0080ull
-
-/**
- * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
- *
- * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_pbax {
- u64 u_reg64;
- struct zip_msix_pbax_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 pend : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pend : 64;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_PBAX(u64 param1)
-{
- if (param1 == 0)
- return 0x0000838000FF0000ull;
- pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_addr {
- u64 u_reg64;
- struct zip_msix_vecx_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 47;
- u64 reserved_1_1 : 1;
- u64 secvec : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 secvec : 1;
- u64 reserved_1_1 : 1;
- u64 addr : 47;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_ctl {
- u64 u_reg64;
- struct zip_msix_vecx_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_33_63 : 31;
- u64 mask : 1;
- u64 reserved_20_31 : 12;
- u64 data : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data : 20;
- u64 reserved_20_31 : 12;
- u64 mask : 1;
- u64 reserved_33_63 : 31;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done - Represents the registers that contain the per-queue
- * instruction done count.
- */
-union zip_quex_done {
- u64 u_reg64;
- struct zip_quex_done_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE(u64 param1)
-{
- if (param1 <= 7)
- return 0x2000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ack - Represents the registers a write to which
- * decrements the per-queue instruction done count.
- */
-union zip_quex_done_ack {
- u64 u_reg64;
- struct zip_quex_done_ack_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done_ack : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ack : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
-{
- if (param1 <= 7)
- return 0x2200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1c - Represents the register that, when written
- * with 1, disables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1c {
- u64 u_reg64;
- struct zip_quex_done_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x2600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1s - Represents the register that, when written
- * with 1, enables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1s {
- u64 u_reg64;
- struct zip_quex_done_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x2400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_wait - Represents the register that specifies the
- * per-queue interrupt coalescing settings.
- */
-union zip_quex_done_wait {
- u64 u_reg64;
- struct zip_quex_done_wait_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_48_63 : 16;
- u64 time_wait : 16;
- u64 reserved_20_31 : 12;
- u64 num_wait : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 num_wait : 20;
- u64 reserved_20_31 : 12;
- u64 time_wait : 16;
- u64 reserved_48_63 : 16;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
-{
- if (param1 <= 7)
- return 0x2800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
- return 0;
-}
-
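A sketch of programming the coalescing thresholds; the reg_base mapping and the chosen values are illustrative assumptions. DONEINT is deferred until num_wait completions accumulate or the time_wait interval elapses, whichever comes first.

static void zip_set_done_coalescing(void __iomem *reg_base, u64 queue)
{
	union zip_quex_done_wait wait;

	wait.u_reg64 = 0;
	wait.s.num_wait = 8;	/* interrupt after 8 completed instructions */
	wait.s.time_wait = 64;	/* or after this much time, if sooner */
	writeq(wait.u_reg64, reg_base + ZIP_QUEX_DONE_WAIT(queue));
}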
-/**
- * union zip_quex_doorbell - Represents doorbell registers for the ZIP
- * instruction queues.
- */
-union zip_quex_doorbell {
- u64 u_reg64;
- struct zip_quex_doorbell_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 dbell_cnt : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dbell_cnt : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
-{
- if (param1 <= 7)
- return 0x4000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
- return 0;
-}
-
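A hedged sketch of ringing a doorbell after instructions have been appended to the queue buffer; reg_base and the caller's bookkeeping are assumptions:

static void zip_ring_doorbell(void __iomem *reg_base, u64 queue, u64 count)
{
	union zip_quex_doorbell dbell;

	dbell.u_reg64 = 0;
	dbell.s.dbell_cnt = count;	/* number of newly queued instructions */
	writeq(dbell.u_reg64, reg_base + ZIP_QUEX_DOORBELL(queue));
}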
-union zip_quex_err_ena_w1c {
- u64 u_reg64;
- struct zip_quex_err_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x3600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1s {
- u64 u_reg64;
- struct zip_quex_err_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_err_int - Represents registers that contain the per-queue
- * error interrupts.
- */
-union zip_quex_err_int {
- u64 u_reg64;
- struct zip_quex_err_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
-{
- if (param1 <= 7)
- return 0x3000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
- return 0;
-}
-
-/* NCB - zip_que#_err_int_w1s */
-union zip_quex_err_int_w1s {
- u64 u_reg64;
- struct zip_quex_err_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_gcfg - Represents the registers that reflect the status of
- * the zip instruction queues; debug use only.
- */
-union zip_quex_gcfg {
- u64 u_reg64;
- struct zip_quex_gcfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_4_63 : 60;
- u64 iqb_ldwb : 1;
- u64 cbw_sty : 1;
- u64 l2ld_cmd : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 l2ld_cmd : 2;
- u64 cbw_sty : 1;
- u64 iqb_ldwb : 1;
- u64 reserved_4_63 : 60;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_GCFG(u64 param1)
-{
- if (param1 <= 7)
- return 0x1A00ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_map - Represents the registers that control how each
- * instruction queue maps to zip cores.
- */
-union zip_quex_map {
- u64 u_reg64;
- struct zip_quex_map_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 zce : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 zce : 2;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_MAP(u64 param1)
-{
- if (param1 <= 7)
- return 0x1400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_MAP: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_addr - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed after SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_CTL.
- */
-union zip_quex_sbuf_addr {
- u64 u_reg64;
- struct zip_quex_sbuf_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 ptr : 42;
- u64 off : 7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 off : 7;
- u64 ptr : 42;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
-{
- if (param1 <= 7)
- return 0x1000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed before SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_ADDR.
- */
-union zip_quex_sbuf_ctl {
- u64 u_reg64;
- struct zip_quex_sbuf_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_45_63 : 19;
- u64 size : 13;
- u64 inst_be : 1;
- u64 reserved_24_30 : 7;
- u64 stream_id : 8;
- u64 reserved_12_15 : 4;
- u64 aura : 12;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 aura : 12;
- u64 reserved_12_15 : 4;
- u64 stream_id : 8;
- u64 reserved_24_30 : 7;
- u64 inst_be : 1;
- u64 size : 13;
- u64 reserved_45_63 : 19;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
-{
- if (param1 <= 7)
- return 0x1200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
- return 0;
-}
-
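The two comments above impose an ordering: program SBUF_CTL first, then SBUF_ADDR. A minimal sketch under that constraint; the field values and the interpretation of ptr as address bits <48:7> are assumptions for illustration.

static void zip_setup_queue_sbuf(void __iomem *reg_base, u64 queue,
				 u64 buf_pa, u64 size)
{
	union zip_quex_sbuf_ctl ctl;
	union zip_quex_sbuf_addr addr;

	ctl.u_reg64 = 0;
	ctl.s.size = size;		/* command-buffer size */
	writeq(ctl.u_reg64, reg_base + ZIP_QUEX_SBUF_CTL(queue));

	addr.u_reg64 = 0;
	addr.s.ptr = buf_pa >> 7;	/* assumed: address bits <48:7> */
	writeq(addr.u_reg64, reg_base + ZIP_QUEX_SBUF_ADDR(queue));
}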
-/**
- * union zip_que_ena - Represents the queue enable register.
- *
- * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
- */
-union zip_que_ena {
- u64 u_reg64;
- struct zip_que_ena_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 ena : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ena : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_ENA 0x0500ull
-
-/**
- * union zip_que_pri - Represents the register that defines the priority
- * between instruction queues.
- */
-union zip_que_pri {
- u64 u_reg64;
- struct zip_que_pri_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 pri : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pri : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_PRI 0x0508ull
-
-/**
- * union zip_throttle - Represents the register that controls the maximum
- * number of in-flight X2I data fetch transactions.
- *
- * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
- * accesses; it is not recommended for normal operation, but may be useful for
- * diagnostics.
- */
-union zip_throttle {
- u64 u_reg64;
- struct zip_throttle_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_6_63 : 58;
- u64 ld_infl : 6;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ld_infl : 6;
- u64 reserved_6_63 : 58;
-#endif
- } s;
-};
-
-#define ZIP_THROTTLE 0x0010ull
-
-#endif /* _CSRS_ZIP__ */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 394484929dae..a9626b30044a 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,7 +13,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
tee-dev.o \
platform-access.o \
dbc.o \
- hsti.o
+ hsti.o \
+ sfs.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index d11daaf47f06..685d42ec7ade 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -7,15 +7,16 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index afae30adb703..91b1189c47de 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -7,14 +7,15 @@
* Author: Gary R Hook <ghook@amd.com>
*/
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index ecd58b38c46e..bc90aba5162a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -7,14 +7,17 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/ccp.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/ccp.h>
+#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/akcipher.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
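This fix only works if the setup path takes the same ccp_debugfs_lock when it first creates the shared directory; a hedged reduction of that matching pattern follows (not the actual ccp5_debugfs_setup(), which also creates per-device files):

static void ccp_debugfs_setup_sketch(void)
{
	mutex_lock(&ccp_debugfs_lock);
	if (!ccp_debugfs_dir)		/* first device creates the dir */
		ccp_debugfs_dir = debugfs_create_dir("ccp", NULL);
	mutex_unlock(&ccp_debugfs_lock);
}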
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index cb8e99936abb..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,13 +8,14 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/ccp.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "ccp-dev.h"
@@ -632,10 +633,16 @@ static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
- struct ccp_dm_workarea key, ctx, final_wa, tag;
- struct ccp_data src, dst;
- struct ccp_data aad;
- struct ccp_op op;
+ struct {
+ struct ccp_dm_workarea key;
+ struct ccp_dm_workarea ctx;
+ struct ccp_dm_workarea final;
+ struct ccp_dm_workarea tag;
+ struct ccp_data src;
+ struct ccp_data dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+	} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
@@ -649,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
+ if (!wa)
+ return -ENOMEM;
+
if (!aes->iv)
return -EINVAL;
@@ -695,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
jobid = CCP_NEW_JOBID(cmd_q->ccp);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
/* Copy the key to the LSB */
- ret = ccp_init_dm_workarea(&key, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
- ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -725,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- op.init = 1;
+ wa->op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
- ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHAAD;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHAAD;
- while (aad.sg_wa.bytes_left) {
- ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+ while (wa->aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
- ccp_process_data(&aad, NULL, &op);
- op.init = 0;
+ ccp_process_data(&wa->aad, NULL, &wa->op);
+ wa->op.init = 0;
}
}
- op.u.aes.mode = CCP_AES_MODE_GCTR;
- op.u.aes.action = aes->action;
+ wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+ wa->op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
- ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
@@ -784,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_aad;
if (in_place) {
- dst = src;
+ wa->dst = wa->src;
} else {
- ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
- op.soc = 0;
- op.eom = 0;
- op.init = 1;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
- if (!src.sg_wa.bytes_left) {
+ wa->op.soc = 0;
+ wa->op.eom = 0;
+ wa->op.init = 1;
+ while (wa->src.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+ if (!wa->src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
- op.eom = 1;
- op.u.aes.size = (nbytes * 8) - 1;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = (nbytes * 8) - 1;
}
}
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ccp_process_data(&src, &dst, &op);
- op.init = 0;
+ ccp_process_data(&wa->src, &wa->dst, &wa->op);
+ wa->op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
- ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -839,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
- ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (__be64 *)final_wa.address;
+ final = (__be64 *)wa->final.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHFINAL;
- op.src.type = CCP_MEMTYPE_SYSTEM;
- op.src.u.dma.address = final_wa.dma.address;
- op.src.u.dma.length = AES_BLOCK_SIZE;
- op.dst.type = CCP_MEMTYPE_SYSTEM;
- op.dst.u.dma.address = final_wa.dma.address;
- op.dst.u.dma.length = AES_BLOCK_SIZE;
- op.eom = 1;
- op.u.aes.size = 0;
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+ wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.src.u.dma.address = wa->final.dma.address;
+ wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.dst.u.dma.address = wa->final.dma.address;
+ wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret)
goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_final_wa;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
if (ret) {
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
goto e_final_wa;
}
- ret = crypto_memneq(tag.address, final_wa.address,
+ ret = crypto_memneq(wa->tag.address, wa->final.address,
authsize) ? -EBADMSG : 0;
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
}
e_final_wa:
- ccp_dm_free(&final_wa);
+ ccp_dm_free(&wa->final);
e_dst:
if (ilen > 0 && !in_place)
- ccp_free_data(&dst, cmd_q);
+ ccp_free_data(&wa->dst, cmd_q);
e_src:
if (ilen > 0)
- ccp_free_data(&src, cmd_q);
+ ccp_free_data(&wa->src, cmd_q);
e_aad:
if (aes->aad_len)
- ccp_free_data(&aad, cmd_q);
+ ccp_free_data(&wa->aad, cmd_q);
e_ctx:
- ccp_dm_free(&ctx);
+ ccp_dm_free(&wa->ctx);
e_key:
- ccp_dm_free(&key);
+ ccp_dm_free(&wa->key);
return ret;
}
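The conversion above moves several large on-stack workareas into one heap allocation that is released automatically when wa goes out of scope. A self-contained sketch of that scope-based cleanup pattern; the struct and function names are illustrative:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct big_workarea {
	u8 buf[2048];
};

static int use_workarea(void)
{
	struct big_workarea *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

	if (!wa)
		return -ENOMEM;

	/* ... use wa->buf; kfree(wa) runs on every return path ... */
	return 0;
}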
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
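The guard(mutex)() conversion above removes the unlock label because the mutex is dropped automatically at every return. A minimal self-contained sketch of the same pattern; the names are illustrative:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_op(bool bad_input)
{
	guard(mutex)(&example_lock);	/* released at every scope exit */

	if (bad_input)
		return -EINVAL;		/* no explicit unlock needed */

	/* ... critical section ... */
	return 0;
}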
static const struct file_operations dbc_fops = {
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
index 1b39a4fb55c0..c29c6a9c0f3f 100644
--- a/drivers/crypto/ccp/hsti.c
+++ b/drivers/crypto/ccp/hsti.c
@@ -74,7 +74,7 @@ struct attribute_group psp_security_attr_group = {
.is_visible = psp_security_is_visible,
};
-static int psp_poulate_hsti(struct psp_device *psp)
+static int psp_populate_hsti(struct psp_device *psp)
{
struct hsti_request *req;
int ret;
@@ -84,11 +84,11 @@ static int psp_poulate_hsti(struct psp_device *psp)
return 0;
/* Allocate command-response buffer */
- req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO);
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->header.payload_size = sizeof(req);
+ req->header.payload_size = sizeof(*req);
ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
if (ret)
@@ -114,7 +114,7 @@ int psp_init_hsti(struct psp_device *psp)
int ret;
if (PSP_FEATURE(psp, HSTI)) {
- ret = psp_poulate_hsti(psp);
+ ret = psp_populate_hsti(psp);
if (ret)
return ret;
}
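The payload_size change above fixes a classic pitfall: applied to a pointer, sizeof yields the pointer size rather than the size of the pointed-to structure. An illustrative reduction with a hypothetical struct:

struct example_request {
	u32 payload_size;
	u8 body[56];
};

static void show_sizeof_pitfall(struct example_request *req)
{
	size_t wrong = sizeof(req);	/* 8 on 64-bit: size of the pointer */
	size_t right = sizeof(*req);	/* 60: size of the structure */

	req->payload_size = right;
	(void)wrong;
}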
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 1c5a7189631e..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -17,6 +17,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "sfs.h"
#include "platform-access.h"
#include "dbc.h"
#include "hsti.h"
@@ -182,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+ /* Check if device supports SFS feature */
+ if (!psp->capability.sfs) {
+ dev_dbg(psp->dev, "psp does not support SFS\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -198,6 +210,12 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (!psp_check_sfs_support(psp)) {
+ ret = sfs_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
if (psp->vdata->platform_access) {
ret = platform_access_dev_init(psp);
if (ret)
@@ -302,6 +320,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ sfs_dev_destroy(psp);
+
dbc_dev_destroy(psp);
platform_access_dev_destroy(psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index e43ce87ede76..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -32,7 +32,8 @@ union psp_cap_register {
unsigned int sev :1,
tee :1,
dbc_thru_ext :1,
- rsvd1 :4,
+ sfs :1,
+ rsvd1 :3,
security_reporting :1,
fused_part :1,
rsvd2 :1,
@@ -68,6 +69,7 @@ struct psp_device {
void *tee_data;
void *platform_access_data;
void *dbc_data;
+ void *sfs_data;
union psp_cap_register capability;
};
@@ -118,12 +120,16 @@ struct psp_ext_request {
* @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
* @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
* @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS: Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE: Command to load, verify and execute SFS package
*/
enum psp_sub_cmd {
PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+ PSP_SUB_CMD_SFS_GET_FW_VERS = PSP_SFS_GET_FW_VERSIONS,
+ PSP_SUB_CMD_SFS_UPDATE = PSP_SFS_UPDATE,
};
int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..0d13d47c164b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -28,11 +28,13 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/e820/types.h>
#include <asm/sev.h>
+#include <asm/msr.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -81,6 +83,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
+enum snp_hv_fixed_pages_state {
+ ALLOCATED,
+ HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+ struct list_head list;
+ struct page *page;
+ unsigned int order;
+ bool free;
+ enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
@@ -109,6 +126,15 @@ static void *sev_init_ex_buffer;
*/
static struct sev_data_range_list *snp_range_list;
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -223,6 +249,8 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
+ case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
+ case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
default: return 0;
}
@@ -249,7 +277,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
@@ -424,7 +452,7 @@ cleanup:
return rc;
}
-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
{
unsigned long npages = 1ul << order, paddr;
struct sev_device *sev;
@@ -443,7 +471,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
return page;
paddr = __pa((unsigned long)page_address(page));
- if (rmp_mark_pages_firmware(paddr, npages, false))
+ if (rmp_mark_pages_firmware(paddr, npages, locked))
return NULL;
return page;
@@ -453,7 +481,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
{
struct page *page;
- page = __snp_alloc_firmware_pages(gfp_mask, 0);
+ page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
return page ? page_address(page) : NULL;
}
@@ -488,7 +516,7 @@ static void *sev_fw_alloc(unsigned long len)
{
struct page *page;
- page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
if (!page)
return NULL;
@@ -836,9 +864,10 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
+ unsigned int reg;
void *cmd_buf;
int buf_len;
+ int ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -1060,7 +1089,248 @@ static inline int __sev_do_init_locked(int *psp_ret)
static void snp_set_hsave_pa(void *arg)
{
- wrmsrl(MSR_VM_HSAVE_PA, 0);
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
+}
+
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+ enum snp_hv_fixed_pages_state page_state)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole
+ * 2MB pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_device *sev;
+ unsigned int order;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ sev = psp_master->sev_data;
+
+ order = get_order(PMD_SIZE * num_2mb_pages);
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ /*
+ * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+ * page state, fail if SNP is already initialized.
+ */
+ if (sev->snp_initialized)
+ return NULL;
+
+ /* Re-use freed pages that match the request */
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ /* Hypervisor fixed page allocator implements exact fit policy */
+ if (entry->order == order && entry->free) {
+ entry->free = false;
+ memset(page_address(entry->page), 0,
+ (1 << entry->order) * PAGE_SIZE);
+ return entry->page;
+ }
+ }
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ entry->page = page;
+ entry->order = order;
+ list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+ return page;
+}
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+ if (entry->page != page)
+ continue;
+
+ /*
+		 * The HV_FIXED page state cannot be changed until reboot, and
+		 * such pages cannot be used by an SNP guest, so they cannot
+		 * be returned to the page allocator.
+ * Mark the pages as free internally to allow possible re-use.
+ */
+ if (entry->page_state == HV_FIXED) {
+ entry->free = true;
+ } else {
+ __free_pages(page, entry->order);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return;
+ }
+}
+
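A hedged usage sketch for this allocate/free pair, assuming a PSP sub-device that needs one 2MB HV_Fixed region before SNP initialization; the sfs_pages handle is illustrative:

static struct page *sfs_pages;	/* assumed per-driver handle */

static int example_reserve_hv_fixed(void)
{
	sfs_pages = snp_alloc_hv_fixed_pages(1);	/* one 2MB chunk */
	return sfs_pages ? 0 : -ENOMEM;
}

static void example_release_hv_fixed(void)
{
	/* pages already HV_FIXED are recycled, never freed */
	if (sfs_pages)
		snp_free_hv_fixed_pages(sfs_pages);
}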
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_data_range *range;
+ int num_elements;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+ range_list->num_elements;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+	 * does not exceed the page-sized argument buffer.
+ */
+ if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+ dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+ return;
+ }
+
+ range = &range_list->ranges[range_list->num_elements];
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+ range->page_count = 1 << entry->order;
+ range++;
+ }
+ range_list->num_elements = num_elements;
+}
+
+static void snp_leak_hv_fixed_pages(void)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ if (entry->page_state == HV_FIXED)
+ __snp_leak_pages(page_to_pfn(entry->page),
+ 1 << entry->order, false);
+}
+
+bool sev_is_snp_ciphertext_hiding_supported(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+
+ if (!psp || !psp->sev_data)
+ return false;
+
+ sev = psp->sev_data;
+
+ /*
+	 * The feature information indicates whether the CipherTextHiding
+	 * feature is supported by the SEV firmware, and the platform status
+	 * additionally indicates whether the feature is enabled in the
+	 * platform BIOS.
+ */
+ return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
+ sev->snp_plat_status.ciphertext_hiding_cap);
+}
+EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
+
+static int snp_get_platform_data(struct sev_device *sev, int *error)
+{
+ struct sev_data_snp_feature_info snp_feat_info;
+ struct snp_feature_info *feat_info;
+ struct sev_data_snp_addr buf;
+ struct page *page;
+ int rc;
+
+ /*
+ * This function is expected to be called before SNP is
+ * initialized.
+ */
+ if (sev->snp_initialized)
+ return -EINVAL;
+
+ buf.address = __psp_pa(&sev->snp_plat_status);
+ rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
+ if (rc) {
+ dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ sev->api_major = sev->snp_plat_status.api_major;
+ sev->api_minor = sev->snp_plat_status.api_minor;
+ sev->build = sev->snp_plat_status.build_id;
+
+ /*
+ * Do feature discovery of the currently loaded firmware,
+ * and cache feature information from CPUID 0x8000_0024,
+ * sub-function 0.
+ */
+ if (!sev->snp_plat_status.feature_info)
+ return 0;
+
+ /*
+	 * Use a dynamically allocated structure for the SNP_FEATURE_INFO
+	 * command to ensure the structure is 8-byte aligned and does not
+ * cross a page boundary.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ feat_info = page_address(page);
+ snp_feat_info.length = sizeof(snp_feat_info);
+ snp_feat_info.ecx_in = 0;
+ snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
+
+ rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
+ if (!rc)
+ sev->snp_feat_info_0 = *feat_info;
+ else
+ dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
+ rc, *error);
+
+ __free_page(page);
+
+ return rc;
}
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
@@ -1093,7 +1363,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
return 0;
}
-static int __sev_snp_init_locked(int *error)
+static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
@@ -1112,7 +1382,7 @@ static int __sev_snp_init_locked(int *error)
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
- return 0;
+ return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
@@ -1153,7 +1423,19 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ /*
+ * Add HV_Fixed pages from other PSP sub-devices, such as SFS to the
+ * HV_Fixed page list.
+ */
+ snp_add_hv_fixed_pages(sev, snp_range_list);
+
memset(&data, 0, sizeof(data));
+
+ if (max_snp_asid) {
+ data.ciphertext_hiding_en = 1;
+ data.max_snp_asid = max_snp_asid;
+ }
+
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
@@ -1176,21 +1458,35 @@ static int __sev_snp_init_locked(int *error)
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(cmd, arg, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
return rc;
+ }
/* Prepare for first SNP guest launch after INIT. */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
return rc;
+ }
+ snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
sev_es_tmr_size = SNP_TMR_SIZE;
- return rc;
+ return 0;
}
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
@@ -1253,15 +1549,17 @@ static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
static int __sev_platform_init_locked(int *error)
{
- int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ int rc, psp_ret, dfflush_error;
struct sev_device *sev;
+ psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
__sev_platform_init_handle_tmr(sev);
@@ -1287,16 +1585,22 @@ static int __sev_platform_init_locked(int *error)
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
- sev->state = SEV_STATE_INIT;
+ sev->sev_plat_status.state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ dfflush_error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -1314,24 +1618,23 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
- /*
- * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
- * so perform SEV-SNP initialization at probe time.
- */
- rc = __sev_snp_init_locked(&args->error);
- if (rc && rc != -ENODEV) {
- /*
- * Don't abort the probe if SNP INIT failed,
- * continue to initialize the legacy SEV firmware.
- */
- dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
- rc, args->error);
- }
+ rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
+ if (rc && rc != -ENODEV)
+ return rc;
/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
if (args->probe && !psp_init_on_probe)
@@ -1363,14 +1666,17 @@ static int __sev_platform_shutdown_locked(int *error)
sev = psp->sev_data;
- if (sev->state == SEV_STATE_UNINIT)
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
- sev->state = SEV_STATE_UNINIT;
+ sev->sev_plat_status.state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
return ret;
@@ -1389,6 +1695,37 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ struct sev_platform_init_args init_args = {0};
+ int rc;
+
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error, 0);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
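The two *_move_to_init_state() helpers give the ioctl paths a uniform way to bring the firmware into the INIT state on demand and to record whether a matching shutdown is owed afterwards. A minimal sketch of the calling pattern, reusing the helper and command names from this patch (the ioctl body itself is illustrative, not part of the patch):

	/* Illustrative only: the transient-INIT pattern used by the ioctl handlers. */
	static int example_sev_ioctl(struct sev_issue_cmd *argp)
	{
		struct sev_device *sev = psp_master->sev_data;
		bool shutdown_required = false;
		int rc;

		if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
			rc = sev_move_to_init_state(argp, &shutdown_required);
			if (rc)
				return rc;
		}

		rc = __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);

		/* Undo only an INIT that this path itself performed. */
		if (shutdown_required)
			__sev_firmware_shutdown(sev, false);

		return rc;
	}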
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
@@ -1441,24 +1778,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
- if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -1489,8 +1833,8 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
data.len = input.length;
cmd:
- if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -1511,6 +1855,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -1534,6 +1881,16 @@ static int sev_get_api_version(void)
struct sev_user_data_status status;
int error = 0, ret;
+ /*
+ * Cache SNP platform status and SNP feature information
+ * if SNP is available.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
+ ret = snp_get_platform_data(sev, &error);
+ if (ret)
+ return 1;
+ }
+
ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
@@ -1541,10 +1898,12 @@ static int sev_get_api_version(void)
return 1;
}
+ /* Cache SEV platform status */
+ sev->sev_plat_status = status;
+
sev->api_major = status.api_major;
sev->api_minor = status.api_minor;
sev->build = status.build;
- sev->state = status.state;
return 0;
}
@@ -1682,9 +2041,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
/* SHUTDOWN may require DF_FLUSH */
if (*error == SEV_RET_DFFLUSH_REQUIRED) {
- ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ int dfflush_error = SEV_RET_NO_FW_CALL;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
if (ret) {
- dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
return ret;
}
/* reissue the shutdown command */
@@ -1692,7 +2054,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
error);
}
if (ret) {
- dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
return ret;
}
@@ -1715,9 +2078,22 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
return ret;
}
+ snp_leak_hv_fixed_pages();
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+ /*
+	 * __sev_snp_shutdown_locked() would deadlock if it unregistered the
+	 * panic notifier during a panic: the notifier is invoked with the RCU
+	 * read lock held, while notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
return ret;
}
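The comment above is worth unpacking: atomic notifier chains are walked under rcu_read_lock(), and atomic_notifier_chain_unregister() waits for an RCU grace period, so a callback can never unregister from the chain it is currently running on. A self-contained sketch of the same pattern, with hypothetical names:

	#include <linux/notifier.h>
	#include <linux/panic_notifier.h>

	static int my_panic_cb(struct notifier_block *nb, unsigned long ev, void *p)
	{
		/* Runs with the RCU read lock held; must not sleep or sync RCU. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_panic_nb = {
		.notifier_call = my_panic_cb,
	};

	static void my_setup(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
	}

	static void my_teardown(bool in_panic)
	{
		/*
		 * Unregistration implies RCU synchronization, which deadlocks
		 * when called from the panic notifier itself, so skip it on
		 * the panic path -- mirroring __sev_snp_shutdown_locked().
		 */
		if (!in_panic)
			atomic_notifier_chain_unregister(&panic_notifier_list,
							 &my_panic_nb);
	}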
@@ -1726,6 +2102,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -1755,8 +2132,8 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
- if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -1764,6 +2141,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -1880,32 +2260,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1931,6 +2302,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1957,6 +2339,9 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
@@ -1966,12 +2351,13 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
+ int ret, error;
void *data;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -1980,6 +2366,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
@@ -2008,6 +2400,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
__free_pages(status_page, 0);
return ret;
}
@@ -2016,21 +2411,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized)
- return -EINVAL;
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
buf.len = sizeof(buf);
- return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2039,17 +2446,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
return -EFAULT;
- return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
void *blob;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2068,8 +2487,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
input.vlek_wrapped_address = __psp_pa(blob);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
kfree(blob);
return ret;
@@ -2296,7 +2725,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
@@ -2339,6 +2768,15 @@ static void sev_firmware_shutdown(struct sev_device *sev)
mutex_unlock(&sev_cmd_mutex);
}
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -2373,10 +2811,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block snp_panic_notifier = {
- .notifier_call = snp_shutdown_on_panic,
-};
-
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -2390,9 +2824,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
- int rc;
if (!sev)
return;
@@ -2415,18 +2847,6 @@ void sev_pci_init(void)
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
- /* Initialize the platform */
- args.probe = true;
- rc = sev_platform_init(&args);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- args.error, rc);
-
- dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
- "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &snp_panic_notifier);
return;
err:
@@ -2443,7 +2863,4 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
-
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 3e4e5574e88a..ac03bd0848f7 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -42,7 +42,6 @@ struct sev_device {
struct sev_vdata *vdata;
- int state;
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
@@ -57,6 +56,11 @@ struct sev_device {
bool cmd_buf_backup_active;
bool snp_initialized;
+
+ struct sev_user_data_status sev_plat_status;
+
+ struct sev_user_data_snp_status snp_plat_status;
+ struct snp_feature_info snp_feat_info_0;
};
int sev_dev_init(struct psp_device *psp);
@@ -65,4 +69,7 @@ void sev_dev_destroy(struct psp_device *psp);
void sev_pci_init(void);
void sev_pci_exit(void);
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE (2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+ int ret;
+
+ sfs_dev->command_buf->hdr.status = 0;
+ sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+ ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+ SFS_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)sfs_dev->command_buf);
+ if (ret == -EIO) {
+ dev_dbg(sfs_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+ msg, sfs_dev->command_buf->hdr.status,
+ *(u32 *)sfs_dev->command_buf->buf);
+ }
+
+ return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+ /*
+ * SFS_GET_FW_VERSIONS command needs the output buffer to be
+ * initialized to 0xC7 in every byte.
+ */
+ memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+ sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+ char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+ const struct firmware *firmware;
+ unsigned long package_size;
+ int ret;
+
+ /* Sanitize userspace provided payload name */
+ if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+ return -EINVAL;
+
+ snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+ ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+ if (ret < 0) {
+ dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+ payload_path, ret);
+ return -ENOENT;
+ }
+
+ /*
+ * SFS Update Package command's input buffer contains TEE_EXT_CMD_BUFFER
+	 * followed by the Update Package, and the total size must be 64KB aligned.
+ */
+ package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+ /*
+	 * The SFS command buffer is a pre-allocated 2MB buffer; fail the update
+	 * package if the SFS payload is larger than the pre-allocated buffer.
+ */
+ if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+ dev_warn_ratelimited(sfs_dev->dev,
+ "SFS payload size %ld larger than maximum supported payload size of %u\n",
+ package_size, SFS_MAX_PAYLOAD_SIZE);
+ release_firmware(firmware);
+ return -E2BIG;
+ }
+
+ /*
+	 * Copy the firmware data into an HV_Fixed memory region.
+ */
+ memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+ sfs_dev->command_buf->hdr.payload_size = package_size;
+
+ release_firmware(firmware);
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+ struct sfs_user_update_package __user *sfs_update_package;
+ struct psp_device *psp_master = psp_get_master_device();
+ char payload_name[PAYLOAD_NAME_SIZE];
+ struct sfs_device *sfs_dev;
+ int ret = 0;
+
+ if (!psp_master || !psp_master->sfs_data)
+ return -ENODEV;
+
+ sfs_dev = psp_master->sfs_data;
+
+ guard(mutex)(&sfs_ioctl_mutex);
+
+ switch (cmd) {
+ case SFSIOCFWVERS:
+ dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+ sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+ ret = send_sfs_get_fw_versions(sfs_dev);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+		 * Return the SFS status and extended status back to userspace
+		 * when the PSP status indicates success or a command error.
+ */
+ if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+ PAGE_SIZE))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_get_fw_versions->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_get_fw_versions->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ case SFSIOCUPDATEPKG:
+ dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+ sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+ if (copy_from_user(payload_name, sfs_update_package->payload_name,
+ PAYLOAD_NAME_SIZE))
+ return -EFAULT;
+
+ ret = send_sfs_update_package(sfs_dev, payload_name);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+		 * Return the SFS status and extended status back to userspace
+		 * when the PSP status indicates success or a command error.
+ */
+ if (copy_to_user(&sfs_update_package->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_update_package->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_update_package->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_update_package->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations sfs_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sfs_ioctl,
+};
+
+static void sfs_exit(struct kref *ref)
+{
+ misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+ struct sfs_device *sfs_dev = psp->sfs_data;
+
+ if (!sfs_dev)
+ return;
+
+ /*
+ * Change SFS command buffer back to the default "Write-Back" type.
+ */
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+ snp_free_hv_fixed_pages(sfs_dev->page);
+
+ if (sfs_dev->misc)
+ kref_put(&misc_dev->refcount, sfs_exit);
+
+ psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+ struct device *dev = sfs->dev;
+ int ret;
+
+ /*
+	 * SFS feature support can be detected on multiple devices, but the SFS
+	 * FW commands must be issued on the master. During probe we do not yet
+	 * know the master, hence we create /dev/sfs on the first device probe.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "sfs";
+ misc->fops = &sfs_fops;
+ misc->mode = 0600;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ sfs->misc = misc_dev;
+ dev_dbg(dev, "registered SFS device\n");
+
+ return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sfs_device *sfs_dev;
+ struct page *page;
+ int ret = -ENOMEM;
+
+ sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+ if (!sfs_dev)
+ return -ENOMEM;
+
+ /*
+	 * Pre-allocate a 2MB command buffer for all SFS commands using the
+	 * SNP HV_Fixed page allocator, which also transitions the command
+	 * buffer to the HV_Fixed page state if SNP is enabled.
+ */
+ page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+ if (!page) {
+ dev_dbg(dev, "Command Buffer HV-Fixed page allocation failed\n");
+ goto cleanup_dev;
+ }
+ sfs_dev->page = page;
+ sfs_dev->command_buf = page_address(page);
+
+ dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+ /*
+ * SFS command buffer must be mapped as non-cacheable.
+ */
+ ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+ if (ret) {
+ dev_dbg(dev, "Set memory uc failed\n");
+ goto cleanup_cmd_buf;
+ }
+
+ dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+ psp->sfs_data = sfs_dev;
+ sfs_dev->dev = dev;
+ sfs_dev->psp = psp;
+
+ ret = sfs_misc_init(sfs_dev);
+ if (ret)
+ goto cleanup_mem_attr;
+
+ dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+ return 0;
+
+cleanup_mem_attr:
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+ snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+ psp->sfs_data = NULL;
+ devm_kfree(dev, sfs_dev);
+
+ return ret;
+}
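For context, a hypothetical userspace probe of the /dev/sfs node created above. The sfs_user_get_fw_versions layout is inferred from the copy_to_user() calls in sfs_ioctl(), not quoted from the uapi header, so treat the field names here as assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/psp-sfs.h>

	int main(void)
	{
		struct sfs_user_get_fw_versions vers = { 0 };
		int fd = open("/dev/sfs", O_RDWR);

		if (fd < 0)
			return 1;
		/* SFSIOCFWVERS fills the blob plus status words on success. */
		if (ioctl(fd, SFSIOCFWVERS, &vers) == 0)
			printf("sfs_status=0x%x extended=0x%x\n",
			       vers.sfs_status, vers.sfs_extended_status);
		close(fd);
		return 0;
	}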
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware (SFS) Support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sfs_command {
+ struct psp_ext_req_buffer_hdr hdr;
+ u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+ u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct page *page;
+ struct sfs_command *command_buf;
+
+ struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
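One property of struct sfs_command above is load-bearing: hdr plus the buf pad fill exactly one page, so the flexible sfs_buffer[] payload starts at offset PAGE_SIZE, matching the PAGE_SIZE-based memset and payload_size arithmetic in sfs.c. A compile-time check expressing that invariant (a sketch, not part of the patch):

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static_assert(offsetof(struct sfs_command, sfs_buffer) == PAGE_SIZE,
		      "SFS payload must start on the second page of the command buffer");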
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 7eb3e4668286..3467f6db4f50 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/ccp.h>
+#include "sev-dev.h"
#include "ccp-dev.h"
#include "sp-dev.h"
@@ -253,8 +254,12 @@ unlock:
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
+ static bool initialized;
int ret;
+ if (initialized)
+ return 0;
+
ret = sp_pci_init();
if (ret)
return ret;
@@ -263,6 +268,8 @@ static int __init sp_mod_init(void)
psp_pci_init();
#endif
+ initialized = true;
+
return 0;
#endif
@@ -279,6 +286,13 @@ static int __init sp_mod_init(void)
return -ENODEV;
}
+#if IS_BUILTIN(CONFIG_KVM_AMD) && IS_ENABLED(CONFIG_KVM_AMD_SEV)
+int __init sev_module_init(void)
+{
+ return sp_mod_init();
+}
+#endif
+
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 248d98fd8c48..e7bb803912a6 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
pdev_new = to_pci_dev(dev_new);
pdev_cur = to_pci_dev(dev_cur);
- if (pdev_new->bus->number < pdev_cur->bus->number)
- return true;
+ if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
+ return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
- if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
- return true;
+ if (pdev_new->bus->number != pdev_cur->bus->number)
+ return pdev_new->bus->number < pdev_cur->bus->number;
- if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
- return true;
+ if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
+ return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
+
+ if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
+ return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
return false;
}
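The rewritten comparison makes sp_pci_is_master() a true lexicographic order over (domain, bus, slot, function); the old chain of bare less-than tests could declare a device the master even when an earlier, more significant field compared greater. The equivalent ordering as a stand-alone predicate (a sketch with hypothetical naming):

	#include <linux/pci.h>

	/* True if a precedes b in (domain, bus, slot, function) order. */
	static bool pci_dev_precedes(struct pci_dev *a, struct pci_dev *b)
	{
		int da = pci_domain_nr(a->bus), db = pci_domain_nr(b->bus);

		if (da != db)
			return da < db;
		if (a->bus->number != b->bus->number)
			return a->bus->number < b->bus->number;
		if (PCI_SLOT(a->devfn) != PCI_SLOT(b->devfn))
			return PCI_SLOT(a->devfn) < PCI_SLOT(b->devfn);
		return PCI_FUNC(a->devfn) < PCI_FUNC(b->devfn);
	}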
@@ -372,6 +375,7 @@ static const struct tee_vdata teev1 = {
static const struct tee_vdata teev2 = {
.ring_wptr_reg = 0x10950, /* C2PMSG_20 */
.ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct platform_access_vdata pa_v1 = {
@@ -437,6 +441,7 @@ static const struct psp_vdata pspv5 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -448,6 +453,7 @@ static const struct psp_vdata pspv6 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -529,8 +535,10 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index ff6ceb4feee0..3933cac1694d 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -210,7 +210,7 @@ static struct platform_driver sp_platform_driver = {
.of_match_table = sp_of_match,
},
.probe = sp_platform_probe,
- .remove_new = sp_platform_remove,
+ .remove = sp_platform_remove,
#ifdef CONFIG_PM
.suspend = sp_platform_suspend,
.resume = sp_platform_resume,
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 5ef39d682389..81533681f7fb 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2226,7 +2226,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not encryped with rfc4543
+ //plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
@@ -2277,7 +2277,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not decryped with rfc4543
+ //plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index bcca55bff910..3963bb91321f 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -224,7 +224,7 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
@@ -239,7 +239,7 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
{
unsigned int index = sgl_data->num_of_buffers;
- dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
@@ -298,7 +298,7 @@ cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
@@ -323,7 +323,7 @@ static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
@@ -359,11 +359,11 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
if (src != dst) {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
- dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
} else {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
}
}
@@ -391,11 +391,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
ivsize, info);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
@@ -506,7 +506,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
(areq_ctx->mlli_params.mlli_virt_addr)) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -514,13 +514,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
areq_ctx->mlli_params.mlli_dma_addr);
}
- dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
- dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
@@ -566,7 +566,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
hw_iv_size, req->iv);
kfree_sensitive(areq_ctx->gen_ctx.iv);
areq_ctx->gen_ctx.iv = NULL;
@@ -574,7 +574,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
goto chain_iv_exit;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
chain_iv_exit:
@@ -977,7 +977,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
@@ -991,7 +991,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
@@ -1009,7 +1009,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1019,7 +1019,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1030,7 +1030,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
@@ -1042,7 +1042,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
@@ -1152,7 +1152,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1236,7 +1236,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1246,7 +1246,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
areq_ctx->in_nents = 0;
if (total_in_len < block_size) {
- dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
@@ -1265,7 +1265,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
/* Copy the new residue to next buffer */
if (*next_buff_cnt) {
- dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
cc_copy_sg_portion(dev, next_buff, src,
@@ -1338,7 +1338,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1347,14 +1347,14 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
}
if (src && areq_ctx->in_nents) {
- dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len) {
- dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3fb667a17bbb..e2cbfdf7a0e4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -179,7 +179,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
}
max_key_buf_size <<= 1;
- /* Alloc fallabck tfm or essiv when key size != 256 bit */
+ /* Alloc fallback tfm or essiv when key size != 256 bit */
ctx_p->fallback_tfm =
crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
@@ -211,11 +211,11 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping Key %u B at va=%p for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
goto free_key;
}
- dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped key %u B at va=%p to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
return 0;
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 9177b54bb0f5..061e68a31c36 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -643,7 +643,7 @@ static struct platform_driver ccree_driver = {
#endif
},
.probe = ccree_probe,
- .remove_new = ccree_remove,
+ .remove = ccree_remove,
};
static int __init ccree_init(void)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index f418162932fe..c6d085c8ff79 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -125,7 +125,7 @@ static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
digestsize);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
digestsize, state->digest_result_buff,
&state->digest_result_dma_addr);
@@ -184,11 +184,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_buff,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n",
ctx->inter_digestsize, state->digest_buff);
return -EINVAL;
}
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
&state->digest_buff_dma_addr);
@@ -197,11 +197,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_bytes_len,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len);
goto unmap_digest_buf;
}
- dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
}
@@ -212,12 +212,12 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
goto unmap_digest_len;
}
- dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
}
@@ -272,7 +272,7 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
if (state->digest_result_dma_addr) {
dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
DMA_BIDIRECTIONAL);
- dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ dev_dbg(dev, "unmpa digest result buffer va (%p) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
memcpy(result, state->digest_result_buff, digestsize);
@@ -287,7 +287,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -1077,11 +1077,11 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
@@ -1090,12 +1090,12 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
ctx->opad_tmp_keys_buff);
goto fail;
}
- dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
&ctx->opad_tmp_keys_dma_addr);
@@ -1577,7 +1577,7 @@ struct cc_hash_template {
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
- //Asynchronize hash template
+ //Asynchronous hash template
{
.name = "sha1",
.driver_name = "sha1-ccree",
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6124fbbbed94..bbd118f8de0e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -77,6 +77,5 @@ int cc_pm_get(struct device *dev)
void cc_pm_put_suspend(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 5dd3f6a4781a..37294bb74003 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,9 +4,9 @@ config CRYPTO_DEV_CHELSIO
depends on CHELSIO_T4
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select CRYPTO_AUTHENC
help
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177428480c7d..22cbc343198a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -51,7 +51,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -277,88 +276,60 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
}
-static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
+ int digestsize, void *istate, void *ostate)
{
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
-
- switch (ds) {
+ __be32 *istate32 = istate, *ostate32 = ostate;
+ __be64 *istate64 = istate, *ostate64 = ostate;
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha224_key sha224;
+ struct hmac_sha256_key sha256;
+ struct hmac_sha384_key sha384;
+ struct hmac_sha512_key sha512;
+ } k;
+
+ switch (digestsize) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1", 0, 0);
+ hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
+ }
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224", 0, 0);
+ hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
+ }
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256", 0, 0);
+ hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
+ }
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384", 0, 0);
+ hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
+ }
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512", 0, 0);
+ hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
+ }
break;
+ default:
+ return -EINVAL;
}
-
- return base_hash;
-}
-
-static int chcr_compute_partial_hash(struct shash_desc *desc,
- char *iopad, char *result_hash,
- int digest_size)
-{
- struct sha1_state sha1_st;
- struct sha256_state sha256_st;
- struct sha512_state sha512_st;
- int error;
-
- if (digest_size == SHA1_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
- memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
- } else if (digest_size == SHA224_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA256_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA384_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-
- } else if (digest_size == SHA512_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else {
- error = -EINVAL;
- pr_err("Unknown digest size %d\n", digest_size);
- }
- return error;
-}
-
-static void chcr_change_order(char *buf, int ds)
-{
- int i;
-
- if (ds == SHA512_DIGEST_SIZE) {
- for (i = 0; i < (ds / sizeof(u64)); i++)
- *((__be64 *)buf + i) =
- cpu_to_be64(*((u64 *)buf + i));
- } else {
- for (i = 0; i < (ds / sizeof(u32)); i++)
- *((__be32 *)buf + i) =
- cpu_to_be32(*((u32 *)buf + i));
- }
+ memzero_explicit(&k, sizeof(k));
+ return 0;
}
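chcr_prepare_hmac_key() replaces the old allocate-shash/partial-hash/byte-swap sequence with one hmac_*_preparekey() call per digest size: the library returns the ipad and opad partial states, and the loops serialize them big-endian for the hardware. A reduced sketch of the SHA-256 case, assuming the lib/crypto interface used above:

	#include <crypto/sha2.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch: big-endian HMAC-SHA256 partial states for a HW engine. */
	static void prepare_hw_hmac_sha256(const u8 *raw_key, unsigned int raw_key_len,
					   __be32 istate[SHA256_DIGEST_SIZE / 4],
					   __be32 ostate[SHA256_DIGEST_SIZE / 4])
	{
		struct hmac_sha256_key k;
		int i;

		hmac_sha256_preparekey(&k, raw_key, raw_key_len);
		for (i = 0; i < SHA256_DIGEST_SIZE / 4; i++) {
			istate[i] = cpu_to_be32(k.key.istate.h[i]);
			ostate[i] = cpu_to_be32(k.key.ostate.h[i]);
		}
		memzero_explicit(&k, sizeof(k));
	}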
static inline int is_hmac(struct crypto_tfm *tfm)
@@ -1186,7 +1157,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
else
bytes = rounddown(bytes, 16);
} else {
- /*CTR mode counter overfloa*/
+ /*CTR mode counter overflow*/
bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
@@ -1547,11 +1518,6 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline void chcr_free_shash(struct crypto_shash *base_hash)
-{
- crypto_free_shash(base_hash);
-}
-
/**
* create_hash_wr - Create hash work request
* @req: Cipher req base
@@ -2202,53 +2168,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int i, err = 0, updated_digestsize;
-
- SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
/* Use the key to calculate the ipad and opad. The ipad will be sent with
	 * the first request's data; the opad will be sent with the final hash
	 * result. The ipad lives in hmacctx->ipad and the opad in hmacctx->opad.
*/
- shash->tfm = hmacctx->base_hash;
- if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- hmacctx->ipad);
- if (err)
- goto out;
- keylen = digestsize;
- } else {
- memcpy(hmacctx->ipad, key, keylen);
- }
- memset(hmacctx->ipad + keylen, 0, bs - keylen);
- unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
- "fortified memcpy causes -Wrestrict warning");
-
- for (i = 0; i < bs / sizeof(int); i++) {
- *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
- *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
- }
-
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(shash, hmacctx->ipad,
- hmacctx->ipad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->ipad, updated_digestsize);
-
- err = chcr_compute_partial_hash(shash, hmacctx->opad,
- hmacctx->opad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->opad, updated_digestsize);
-out:
- return err;
+ return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
+ hmacctx->ipad, hmacctx->opad);
}
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
@@ -2344,30 +2270,11 @@ static int chcr_hmac_init(struct ahash_request *areq)
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- unsigned int digestsize =
- crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->base_hash = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->base_hash))
- return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-
- if (hmacctx->base_hash) {
- chcr_free_shash(hmacctx->base_hash);
- hmacctx->base_hash = NULL;
- }
-}
-
inline void chcr_aead_common_exit(struct aead_request *req)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
@@ -3557,15 +3464,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs, subtype;
+ unsigned int subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
- int err = 0, i, key_ctx_len = 0;
+ int err = 0, key_ctx_len = 0;
unsigned char ck_size = 0;
- unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
- u8 *o_ptr = NULL;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
@@ -3613,68 +3517,26 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- base_hash = chcr_alloc_shash(max_authsize);
- if (IS_ERR(base_hash)) {
- pr_err("Base driver cannot be loaded\n");
+
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
+ actx->h_iopad,
+ actx->h_iopad + param.result_size + align);
+ if (err)
goto out;
- }
- {
- SHASH_DESC_ON_STACK(shash, base_hash);
-
- shash->tfm = base_hash;
- bs = crypto_shash_blocksize(base_hash);
- align = KEYCTX_ALIGN_PAD(max_authsize);
- o_ptr = actx->h_iopad + param.result_size + align;
-
- if (keys.authkeylen > bs) {
- err = crypto_shash_digest(shash, keys.authkey,
- keys.authkeylen,
- o_ptr);
- if (err) {
- pr_err("Base driver cannot be loaded\n");
- goto out;
- }
- keys.authkeylen = max_authsize;
- } else
- memcpy(o_ptr, keys.authkey, keys.authkeylen);
-
- /* Compute the ipad-digest*/
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= IPAD_DATA;
-
- if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
- max_authsize))
- goto out;
- /* Compute the opad-digest */
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= OPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
- goto out;
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
+ key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
- /* convert the ipad and opad digest to network order */
- chcr_change_order(actx->h_iopad, param.result_size);
- chcr_change_order(o_ptr, param.result_size);
- key_ctx_len = sizeof(struct _key_ctx) +
- roundup(keys.enckeylen, 16) +
- (param.result_size + align) * 2;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
- 0, 1, key_ctx_len >> 4);
- actx->auth_mode = param.auth_mode;
- chcr_free_shash(base_hash);
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- }
out:
aeadctx->enckey_len = 0;
memzero_explicit(&keys, sizeof(keys));
- if (!IS_ERR(base_hash))
- chcr_free_shash(base_hash);
return -EINVAL;
}
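
The open-coded ipad/opad derivation removed above is folded into the chcr_prepare_hmac_key() helper this patch introduces. For orientation, a minimal sketch of the RFC 2104 precomputation such a helper performs; the function name is illustrative, and the driver's real helper additionally runs the one-block partial hashes and converts the results to network byte order, as the removed chcr_compute_partial_hash()/chcr_change_order() calls show:

#include <crypto/hash.h>
#include <linux/string.h>

#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c

/*
 * Illustrative only: derive the HMAC inner and outer pads (RFC 2104).
 * A key longer than the block size is digested down first; a shorter
 * key is zero-padded to the block size.
 */
static int hmac_prepare_pads(struct crypto_shash *hash, const u8 *key,
			     unsigned int keylen, u8 *ipad, u8 *opad)
{
	unsigned int bs = crypto_shash_blocksize(hash);
	SHASH_DESC_ON_STACK(desc, hash);
	unsigned int i;
	int err;

	desc->tfm = hash;
	if (keylen > bs) {
		err = crypto_shash_digest(desc, key, keylen, ipad);
		if (err)
			return err;
		keylen = crypto_shash_digestsize(hash);
	} else {
		memcpy(ipad, key, keylen);
	}
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* key ^ ipad */
		opad[i] ^= HMAC_OPAD_VALUE;	/* key ^ opad */
	}
	return 0;	/* callers then partial-hash one block of each pad */
}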
@@ -4490,7 +4352,6 @@ static int chcr_register_alg(void)
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
- a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
a_hash->init = chcr_hmac_init;
a_hash->setkey = chcr_ahash_setkey;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 1d693b8436e6..e1e79e5f01e7 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -241,7 +241,6 @@ struct chcr_aead_ctx {
};
struct hmac_ctx {
- struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 0dd8baf16cb4..2aaa98f9b44e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -389,7 +389,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
- .remove_new = exynos_rng_remove,
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index 1d1a889599bb..f7e0e3fea15c 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -528,7 +528,7 @@ MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
- .remove_new = sl3516_ce_remove,
+ .remove = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index fa5a9f207bc9..d933f26aeb3a 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -433,4 +433,4 @@ module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..4835bdebdbb3 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
- select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 1b9b7bccdeff..17eb236e9ee4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->sqc) {
- memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
+ memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
@@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->cqc) {
- memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
+ memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
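
The two memcpy() fixes above correct a double-scaling bug: qm->sqc and qm->cqc are typed pointers, so adding qp_id already advances in element-sized steps, and multiplying the index by sizeof() again read far past the intended entry. A minimal illustration with a stand-in type:

#include <string.h>

struct qm_sqc { unsigned int w[8]; };	/* stand-in for the real SQC layout */

static void dump_one(struct qm_sqc *out, const struct qm_sqc *table, int qp_id)
{
	/* buggy: table + qp_id * sizeof(struct qm_sqc) scales the index twice */
	/* fixed: pointer arithmetic is already in units of the element type */
	memcpy(out, table + qp_id, sizeof(*out));
}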
@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
ret = PTR_ERR(qm->debug.acc_diff_regs);
qm->debug.acc_diff_regs = NULL;
+ qm->debug.qm_diff_regs = NULL;
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9f0b94c8e03d..0f3ddbadbcf9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -100,6 +100,29 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
+enum hpre_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ HPRE_RAS_NFE_TYPE,
+ HPRE_RAS_NFE_RESET,
+ HPRE_RAS_CE_TYPE,
+ HPRE_CORE_INFO,
+ HPRE_CORE_EN,
+ HPRE_DRV_ALG_BITMAP,
+ HPRE_ALG_BITMAP,
+ HPRE_CORE1_BITMAP_CAP,
+ HPRE_CORE2_BITMAP_CAP,
+ HPRE_CORE3_BITMAP_CAP,
+ HPRE_CORE4_BITMAP_CAP,
+ HPRE_CORE5_BITMAP_CAP,
+ HPRE_CORE6_BITMAP_CAP,
+ HPRE_CORE7_BITMAP_CAP,
+ HPRE_CORE8_BITMAP_CAP,
+ HPRE_CORE9_BITMAP_CAP,
+ HPRE_CORE10_BITMAP_CAP,
+};
+
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index c167dbd6c7d6..21ccf879f70c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
-#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@@ -39,6 +38,8 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+#define HPRE_ENABLE_HPCORE_SHIFT 7
+
/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ 66
@@ -104,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
-struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
-
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
-};
-
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@@ -127,10 +118,11 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
+ /* for high performance core */
+ u8 enable_hpcore;
};
struct hpre_asym_request {
@@ -142,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
- struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -1210,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
-static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@@ -1220,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
+ if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@@ -1428,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- hpre_ecc_clear_ctx(ctx, false, true);
+ hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@@ -1487,11 +1471,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
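+ /* Unmap the DMA buffers before the CPU touches the data */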
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
p = sg_virt(areq->dst);
memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
@@ -1619,6 +1605,8 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
}
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
+
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -1653,6 +1641,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
+ ctx->enable_hpcore = 1;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
@@ -1674,340 +1663,10 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
-}
-
-static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
-
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only in this way, the result can be
- * the same as the software curve-25519 that exists in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
-
- p = ctx->curve25519.p + sz - len;
-
- curve = ecc_get_curve25519();
-
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
-}
-
-static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
-
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
-
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
-
- hpre_curve25519_fill_curve(ctx, buf, len);
-
- return 0;
-}
-
-static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
-
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
-
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
-
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
-
- return 0;
-}
-
-static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
-{
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
-
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
-
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
-}
-
-static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
-{
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
-
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
-
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
-
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
-
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
-
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
-}
-
-static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
-{
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
-
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
-
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
-
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
-
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
-}
-
-static void hpre_curve25519_src_modulo_p(u8 *ptr)
-{
- int i;
-
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
-
- /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
- ptr[i] -= 0xed;
-}
-
-static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
-
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
-
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
-
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
-
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
-
- /*
- * Src_data(gx) is in little-endian order, MSB in the final byte should
- * be masked as described in RFC7748, then transform it to big-endian
- * form, then hisi_hpre can use the data.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
-
- curve = ecc_get_curve25519();
-
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
-
- /*
- * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
- * we get its modulus to p, and then use it.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
-
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
-
-err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
-}
-
-static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
-
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
-
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
-
- msg->out = cpu_to_le64(dma);
- return 0;
-}
-
-static int hpre_curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
-
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
-
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
-
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
-
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
-
-clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
-}
-
-static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- return ctx->key_sz;
-}
-
-static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
-}
-
-static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- hpre_ecc_clear_ctx(ctx, true, false);
+ hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
.encrypt = hpre_rsa_enc,
.decrypt = hpre_rsa_dec,
.set_pub_key = hpre_rsa_setpubkey,
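
The deleted hpre_curve25519_fill_curve() preprocessed the secret exactly as its comment describes: the RFC 7748 clamping "k[0] &= 248, k[31] &= 127, k[31] |= 64" applied to the little-endian scalar before byte-swapping it for the engine. For reference, a standalone sketch of that clamping step:

#include <linux/types.h>

#define X25519_KEY_SIZE	32

/* RFC 7748, section 5: clamp a little-endian X25519 scalar in place. */
static void x25519_clamp(u8 k[X25519_KEY_SIZE])
{
	k[0] &= 248;	/* clear the low 3 bits: multiple of the cofactor 8 */
	k[31] &= 127;	/* clear bit 255 */
	k[31] |= 64;	/* set bit 254: fixed-length scalar */
}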
@@ -2086,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
-static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
-};
-
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@@ -2183,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
-static int hpre_register_x25519(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
-
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
-
- return ret;
-}
-
-static void hpre_unregister_x25519(struct hisi_qm *qm)
-{
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
-
- crypto_unregister_kpp(&curve25519_alg);
-}
-
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@@ -2227,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
-
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
-unreg_ecdh:
- hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@@ -2253,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
- hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6b536ad2ada5..b94fecd765ee 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,6 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
+#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
@@ -38,6 +39,7 @@
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
+#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -77,6 +79,11 @@
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+#define HPRE_SVA_PREFTCH_DFX4 0x301144
+#define HPRE_WAIT_SVA_READY 500000
+#define HPRE_READ_SVA_STATUS_TIMES 3
+#define HPRE_WAIT_US_MIN 10
+#define HPRE_WAIT_US_MAX 20
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
@@ -203,7 +210,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
- {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
@@ -222,18 +229,27 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
-enum hpre_pre_store_cap_idx {
- HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
- HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
- HPRE_DRV_ALG_BITMAP_CAP_IDX,
- HPRE_DEV_ALG_BITMAP_CAP_IDX,
-};
-
-static const u32 hpre_pre_store_caps[] = {
- HPRE_CLUSTER_NUM_CAP,
- HPRE_CORE_ENABLE_BITMAP_CAP,
- HPRE_DRV_ALG_BITMAP_CAP,
- HPRE_DEV_ALG_BITMAP_CAP,
+static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
+ {HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
+ {HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
+ {HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
@@ -360,7 +376,7 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
if (alg & cap_val)
return true;
@@ -415,7 +431,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@@ -456,6 +472,33 @@ struct hisi_qp *hpre_create_qp(u8 type)
return NULL;
}
+static int hpre_wait_sva_ready(struct hisi_qm *qm)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
+ if (val)
+ count = 0;
+ else if (++count == HPRE_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
+ } while (++try_times < HPRE_WAIT_SVA_READY);
+
+ if (try_times == HPRE_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static void hpre_config_pasid(struct hisi_qm *qm)
{
u32 val1, val2;
@@ -503,14 +546,17 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
u32 cluster_core_mask;
+ unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
u32 val = 0;
int ret, i;
- cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;
@@ -550,7 +596,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
-static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -558,20 +604,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val &= HPRE_PREFETCH_ENABLE;
+ val |= HPRE_PREFETCH_DISABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
- val, !(val & HPRE_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hpre_wait_sva_ready(qm);
}
-static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -579,20 +626,31 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val |= HPRE_PREFETCH_DISABLE;
+ val &= HPRE_PREFETCH_ENABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
- val, !(val & HPRE_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hpre_wait_sva_ready(qm);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -606,17 +664,26 @@ static void hpre_enable_clock_gate(struct hisi_qm *qm)
val |= HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val |= HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val |= HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val |= HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val |= HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -630,13 +697,19 @@ static void hpre_disable_clock_gate(struct hisi_qm *qm)
val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val &= ~HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val &= ~HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val &= ~HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@@ -690,6 +763,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_open_sva_prefetch(qm);
hpre_enable_clock_gate(qm);
@@ -699,11 +773,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
int i;
/* clear clusterX/cluster_ctrl */
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -722,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -737,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- u32 ce, nfe;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable hpre hw error interrupts */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(err_mask, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe, err_en;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* clear HPRE hw error source if having */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
- writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+ writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
- writel(~err_en, qm->io_base + HPRE_INT_MASK);
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -995,10 +1066,13 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ u32 hpre_core_info;
u8 clusters_num;
int i, ret;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1041,6 +1115,26 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
return hpre_cluster_debugfs_init(qm);
}
+static int hpre_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
+
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
@@ -1059,6 +1153,9 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hpre_regs)
debugfs_create_file("diff_regs", 0444, parent,
qm, &hpre_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
@@ -1106,26 +1203,33 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
+ u32 hpre_core_info;
+ u8 clusters_num;
size_t i, size;
- size = ARRAY_SIZE(hpre_pre_store_caps);
- hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
+ size = ARRAY_SIZE(hpre_cap_query_info);
+ hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- hpre_cap[i].type = hpre_pre_store_caps[i];
- hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
- hpre_pre_store_caps[i], qm->cap_ver);
+ hpre_cap[i].type = hpre_cap_query_info[i].type;
+ hpre_cap[i].name = hpre_cap_query_info[i].name;
+ hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
+ i, qm->cap_ver);
}
- if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
+ hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
- hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
+ clusters_num, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}
qm->cap_tables.dev_cap_table = hpre_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
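
Several hunks in this file repeat one idiom: HPRE_CORE_INFO is read once from the capability table, then the cluster count is extracted with the shift (20) and mask (GENMASK(3, 0)) recorded for HPRE_CLUSTER_NUM_CAP in hpre_basic_info. Condensed into a sketch:

/*
 * Sketch of the field extraction used above: with shift 20 and mask
 * GENMASK(3, 0), the cluster count lives in bits [23:20] of the packed
 * HPRE_CORE_INFO capability word.
 */
static u8 hpre_clusters_num(u32 hpre_core_info, u32 shift, u32 mask)
{
	return (hpre_core_info >> shift) & mask;
}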
@@ -1142,7 +1246,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1172,7 +1275,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
- alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
@@ -1188,10 +1291,13 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1231,6 +1337,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
u32 val;
@@ -1246,7 +1353,9 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
@@ -1280,11 +1389,23 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+}
+
+static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+
+ writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
+}
+
+static void hpre_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1298,22 +1419,92 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
+static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hpre_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_err.reset_mask) {
+ /* Stop reporting this error until the device has recovered. */
+ hpre_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hpre_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hpre_enable_error_report(qm);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
+ return true;
+
+ return false;
+}
+
+static void hpre_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
+ qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
+static void hpre_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
+
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
- err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
}
@@ -1324,12 +1515,15 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
- .log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
+ .get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
+ .disable_axi_error = hpre_disable_axi_error,
+ .enable_axi_error = hpre_enable_axi_error,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -1341,8 +1535,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_open_sva_prefetch(qm);
-
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 07983af9e3e2..a5b96adf2d1e 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -47,6 +45,8 @@
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
+#define QM_SQC_DISABLE_QP (1U << 6)
+#define QM_XQC_RANDOM_DATA 0xaaaa
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -102,6 +102,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +121,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -144,9 +147,9 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_AXI_RRESP_ERR BIT(0)
-#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
+#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -162,7 +165,6 @@
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MSI_CAP_ENABLE BIT(16)
@@ -176,6 +178,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +191,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +243,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -271,18 +278,12 @@ enum vft_type {
SHAPER_VFT,
};
-enum acc_err_result {
- ACC_ERR_NONE,
- ACC_ERR_NEED_RESET,
- ACC_ERR_RECOVERED,
-};
-
enum qm_alg_type {
ALG_TYPE_0,
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -307,11 +308,29 @@ enum qm_basic_type {
QM_VF_IRQ_NUM_CAP,
};
-enum qm_pre_store_cap_idx {
- QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
- QM_AEQ_IRQ_TYPE_CAP_IDX,
- QM_ABN_IRQ_TYPE_CAP_IDX,
- QM_PF2VF_IRQ_TYPE_CAP_IDX,
+enum qm_cap_table_type {
+ QM_CAP_VF = 0x0,
+ QM_AEQE_NUM,
+ QM_SCQE_NUM,
+ QM_EQ_IRQ,
+ QM_AEQ_IRQ,
+ QM_ABNORMAL_IRQ,
+ QM_MB_IRQ,
+ MAX_IRQ_NUM,
+ EXT_BAR_INDEX,
+};
+
+static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
+ {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
+ {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
+ {QM_SCQE_NUM, "QM_SCQE_NUM ",
+ 0x3108, 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
+ {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
+ {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
+ {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
+ {EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
};
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
@@ -321,6 +340,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -344,13 +364,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};
-static const u32 qm_pre_store_caps[] = {
- QM_EQ_IRQ_TYPE_CAP,
- QM_AEQ_IRQ_TYPE_CAP,
- QM_ABN_IRQ_TYPE_CAP,
- QM_PF2VF_IRQ_TYPE_CAP,
-};
-
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -391,6 +404,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
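
As the new comment in hisi_qm_hw_ops notes, a command and its payload travel as one 64-bit message: the low 32 bits hold the qm_ifc_cmd and the high 32 bits the data, matching the QM_IFC_CMD_MASK and QM_IFC_DATA_SHIFT definitions added earlier. A hedged sketch of the pack/unpack, separate from any particular transport (the enum and masks are the patch's, the function names are not):

#include <linux/bits.h>

#define QM_IFC_CMD_MASK		GENMASK(31, 0)
#define QM_IFC_DATA_SHIFT	32

static u64 qm_ifc_pack(u32 cmd, u32 data)
{
	/* (u64)msg = (u32)data << 32 | cmd, as documented in the ops comment */
	return ((u64)data << QM_IFC_DATA_SHIFT) | cmd;
}

static void qm_ifc_unpack(u64 msg, u32 *cmd, u32 *data)
{
	*cmd = msg & QM_IFC_CMD_MASK;
	*data = msg >> QM_IFC_DATA_SHIFT;
}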
@@ -451,6 +469,37 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_q_num_set);
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
@@ -465,15 +514,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
+ return true;
- return val || dev_val;
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
+
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -618,7 +672,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -652,7 +705,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -763,6 +816,27 @@ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read)
+{
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return val;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);
+
static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
u16 *high_bits, enum qm_basic_type type)
{
@@ -789,7 +863,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
return -EINVAL;
}
- algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
if (!algs)
return -ENOMEM;
@@ -798,10 +872,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -839,7 +913,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -995,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1324,17 +1396,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
- qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
+ qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
/* clear QM hw residual error source */
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1363,7 +1435,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1425,22 +1497,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
-
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->error_mask & tmp;
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+ u32 error_status;
- if (error_status) {
+ error_status = qm_get_hw_error_status(qm);
+ if (error_status & qm->error_mask) {
if (error_status & QM_ECC_MBIT)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask)
+ if (error_status & qm_err->reset_mask) {
+ /* Stop reporting this error until the device has recovered. */
+ writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
+ }
+ /* Clear the error source when no reset is needed. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -1480,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1502,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1570,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1602,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+		dev_err(dev, "failed to send command(0x%x) to all VFs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1632,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1640,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1651,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1681,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1782,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
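
Both get_ifc implementations assume a single packed 64-bit message: the command sits in the low bits and the payload above QM_IFC_DATA_SHIFT. The exact mask width and shift are defined outside this hunk, so the values in this sketch are assumptions:

#include <stdint.h>
#include <stdio.h>

#define IFC_CMD_MASK   0xffffffffull	/* assumed low-word command field */
#define IFC_DATA_SHIFT 32		/* assumed payload position */

static uint64_t ifc_pack(uint32_t cmd, uint32_t data)
{
	return (uint64_t)cmd | ((uint64_t)data << IFC_DATA_SHIFT);
}

static void ifc_unpack(uint64_t msg, uint32_t *cmd, uint32_t *data)
{
	*cmd = (uint32_t)(msg & IFC_CMD_MASK);
	if (data)
		*data = (uint32_t)(msg >> IFC_DATA_SHIFT);
}

int main(void)
{
	uint32_t cmd, data;

	ifc_unpack(ifc_pack(0x5, 30), &cmd, &data);
	printf("cmd=0x%x data=%u\n", (unsigned)cmd, (unsigned)data);
	return 0;
}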
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1804,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
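
Adding qm_hw_ops_v4 keeps the per-revision dispatch pattern: one ops table is selected by hardware version at init time and the rest of the driver calls through it, so v3 keeps its mailbox-based IFC while v4 switches to registers. A compact model of that dispatch; the version numbers and handler bodies are illustrative only:

#include <stdio.h>

struct hw_ops {
	const char *(*describe)(void);
};

static const char *describe_v3(void) { return "mailbox-based IFC"; }
static const char *describe_v4(void) { return "register-based IFC"; }

static const struct hw_ops ops_v3 = { .describe = describe_v3 };
static const struct hw_ops ops_v4 = { .describe = describe_v4 };

static const struct hw_ops *select_ops(int ver)
{
	return ver >= 4 ? &ops_v4 : &ops_v3;
}

int main(void)
{
	printf("%s\n", select_ops(3)->describe());
	printf("%s\n", select_ops(4)->describe());
	return 0;
}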
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2096,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2415,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2577,6 +2743,27 @@ static void qm_remove_uacce(struct hisi_qm *qm)
}
}
+static void qm_uacce_api_ver_init(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ break;
+ case QM_HW_V2:
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ break;
+ case QM_HW_V3:
+ case QM_HW_V4:
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+ break;
+ default:
+ uacce->api_ver = HISI_QM_API_VER5_BASE;
+ break;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2611,13 +2798,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
if (qm->ver == QM_HW_V1)
- uacce->api_ver = HISI_QM_API_VER_BASE;
- else if (qm->ver == QM_HW_V2)
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- else
- uacce->api_ver = HISI_QM_API_VER3_BASE;
-
- if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
@@ -2636,6 +2816,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ qm_uacce_api_ver_init(qm);
INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
mutex_init(&qm->isolate_data.isolate_lock);
@@ -2783,11 +2964,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3011,6 +3195,9 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
qm_init_eq_aeq_status(qm);
+	/* Before starting the device, clear the memory, then hand it to the device. */
+ memset(qm->qdma.va, 0, qm->qdma.size);
+
ret = qm_eq_ctx_cfg(qm);
if (ret) {
dev_err(dev, "Set eqc failed!\n");
@@ -3022,9 +3209,13 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
+ struct device *dev = &qm->pdev->dev;
int ret;
- WARN_ON(!qm->qdma.va);
+ if (!qm->qdma.va) {
+ dev_err(dev, "qm qdma is NULL!\n");
+ return -EINVAL;
+ }
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
@@ -3098,7 +3289,7 @@ static int qm_restart(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
- qp->is_resetting == true) {
+ qp->is_resetting == true && qp->is_in_kernel == true) {
ret = qm_start_qp_nolock(qp, 0);
if (ret < 0) {
dev_err(dev, "Failed to start qp%d!\n", i);
@@ -3130,24 +3321,44 @@ static void qm_stop_started_qp(struct hisi_qm *qm)
}
/**
- * qm_clear_queues() - Clear all queues memory in a qm.
- * @qm: The qm in which the queues will be cleared.
+ * qm_invalid_queues() - invalidate all queues in use.
+ * @qm: The qm in which the queues will be invalidated.
*
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
+ * This function invalidates all queues in use. If a doorbell command is
+ * sent to the device from user space after the device has been reset,
+ * the device discards that doorbell command.
*/
-static void qm_clear_queues(struct hisi_qm *qm)
+static void qm_invalid_queues(struct hisi_qm *qm)
{
struct hisi_qp *qp;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
int i;
+ /*
+	 * After a normal stop, the queues are no longer used and do not
+	 * need to be invalidated.
+ */
+ if (qm->status.stop_reason == QM_NORMAL)
+ return;
+
+ if (qm->status.stop_reason == QM_DOWN)
+ hisi_qm_cache_wb(qm);
+
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_in_kernel && qp->is_resetting)
+ if (!qp->is_resetting)
+ continue;
+
+		/* Write random data and set the SQC disable bit to invalidate the queue. */
+ sqc = qm->sqc + i;
+ cqc = qm->cqc + i;
+ sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
+ cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ if (qp->is_in_kernel)
memset(qp->qdma.va, 0, qp->qdma.size);
}
-
- memset(qm->qdma.va, 0, qm->qdma.size);
}
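
qm_invalid_queues() poisons each in-use SQC/CQC with random data and sets the disable bit, so a stale doorbell rung from user space after reset is rejected by the device. A toy model of that marking step; the field names and constants are stand-ins for the driver's context layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XQC_RANDOM_DATA 0xaaaa	/* stand-in for QM_XQC_RANDOM_DATA */
#define SQC_DISABLE_QP  0x0001	/* stand-in for QM_SQC_DISABLE_QP */

struct sqc { uint16_t w8, w13; };

static void invalidate_queues(struct sqc *sqc, int nr, const int *resetting)
{
	for (int i = 0; i < nr; i++) {
		if (!resetting[i])
			continue;
		/* Poison the context and set the disable bit so a stale
		 * user-space doorbell is rejected by the device. */
		sqc[i].w8 = XQC_RANDOM_DATA;
		sqc[i].w13 = SQC_DISABLE_QP;
	}
}

int main(void)
{
	struct sqc sqc[2];
	int resetting[2] = { 0, 1 };

	memset(sqc, 0, sizeof(sqc));
	invalidate_queues(sqc, 2, resetting);
	printf("qp1 w13=0x%x\n", sqc[1].w13);	/* 0x1: disabled */
	return 0;
}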
/**
@@ -3204,7 +3415,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- qm_clear_queues(qm);
+ qm_invalid_queues(qm);
qm->status.stop_reason = QM_NORMAL;
err_unlock:
@@ -3449,19 +3660,19 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
return 0;
}
-static int qm_clear_vft_config(struct hisi_qm *qm)
+static void qm_clear_vft_config(struct hisi_qm *qm)
{
- int ret;
u32 i;
- for (i = 1; i <= qm->vfs_num; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- qm->vfs_num = 0;
+ /*
+ * When disabling SR-IOV, clear the configuration of each VF in the hardware
+ * sequentially. Failure to clear a single VF should not affect the clearing
+ * operation of other VFs.
+ */
+ for (i = 1; i <= qm->vfs_num; i++)
+ (void)hisi_qm_set_vft(qm, i, 0, 0);
- return 0;
+ qm->vfs_num = 0;
}
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
@@ -3547,7 +3758,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3557,10 +3767,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -3660,6 +3869,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
}
pdev = container_of(dev, struct pci_dev, dev);
+ if (pci_physfn(pdev) != qm->pdev) {
+ pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ return -EINVAL;
+ }
*fun_index = pdev->devfn;
@@ -3794,13 +4007,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
+ qm->vfs_num = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
- qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@@ -3835,11 +4048,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
-
- qm->vfs_num = 0;
+ qm_clear_vft_config(qm);
qm_pm_put_sync(qm);
- return qm_clear_vft_config(qm);
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -3861,30 +4073,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
- u32 err_sts;
-
- if (!qm->err_ini->get_dev_hw_err_status) {
- dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ if (!qm->err_ini->get_err_result) {
+ dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
return ACC_ERR_NONE;
}
- /* get device hardware error status */
- err_sts = qm->err_ini->get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_info.ecc_2bits_mask)
- qm->err_status.is_dev_ecc_mbit = true;
-
- if (qm->err_ini->log_dev_hw_err)
- qm->err_ini->log_dev_hw_err(qm, err_sts);
-
- if (err_sts & qm->err_info.dev_reset_mask)
- return ACC_ERR_NEED_RESET;
-
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
- }
-
- return ACC_ERR_RECOVERED;
+ return qm->err_ini->get_err_result(qm);
}
static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
@@ -4031,9 +4225,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
}
}
@@ -4067,7 +4261,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4080,7 +4274,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4095,6 +4289,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4256,7 +4456,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4293,9 +4493,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
- if (qm->err_ini->open_sva_prefetch)
- qm->err_ini->open_sva_prefetch(qm);
-
if (qm->ver >= QM_HW_V3)
return;
@@ -4309,12 +4506,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
/* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4341,6 +4538,34 @@ clear_flags:
qm->err_status.is_dev_ecc_mbit = false;
}
+static void qm_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+ u32 val;
+
+ val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
+ writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
+ qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->disable_axi_error)
+ qm->err_ini->disable_axi_error(qm);
+}
+
+static void qm_enable_axi_error(struct hisi_qm *qm)
+{
+ /* clear axi error source */
+ writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->enable_axi_error)
+ qm->err_ini->enable_axi_error(qm);
+}
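
The disable/enable pair carves QM_RAS_AXI_ERROR out of both the abnormal interrupt mask and the OOO shutdown selection during recovery, then restores the full masks once the VFs have restarted. A sketch of the bit manipulation with hypothetical mask values:

#include <stdint.h>
#include <stdio.h>

#define RAS_AXI_ERROR 0x8u	/* stand-in for QM_RAS_AXI_ERROR */

static uint32_t int_mask;	/* 1 = interrupt masked */
static uint32_t shutdown_sel;	/* 1 = error triggers OOO shutdown */

static void disable_axi_error(uint32_t error_mask, uint32_t shutdown_mask)
{
	/* Mask everything except the non-AXI errors; drop AXI from the
	 * shutdown selection so it cannot stop the master port. */
	int_mask = ~(error_mask & ~RAS_AXI_ERROR);
	shutdown_sel = shutdown_mask & ~RAS_AXI_ERROR;
}

static void enable_axi_error(uint32_t error_mask, uint32_t shutdown_mask)
{
	int_mask = ~error_mask;
	shutdown_sel = shutdown_mask;
}

int main(void)
{
	disable_axi_error(0xfu, 0xcu);
	printf("mask=0x%x sel=0x%x\n", int_mask, shutdown_sel);
	enable_axi_error(0xfu, 0xcu);
	printf("mask=0x%x sel=0x%x\n", int_mask, shutdown_sel);
	return 0;
}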
+
static int qm_controller_reset_done(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -4374,6 +4599,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
qm_restart_prepare(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_axi_error(qm);
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
@@ -4396,7 +4622,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to start by vfs in soft reset!\n");
-
+ qm_enable_axi_error(qm);
qm_cmd_init(qm);
qm_restart_done(qm);
@@ -4485,7 +4711,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error, If it occurs, need to
* wait for soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4577,6 +4803,15 @@ flr_done:
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+static irqreturn_t qm_rsvd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -4606,8 +4841,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
-
- hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -4633,7 +4866,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4667,7 +4900,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4690,7 +4923,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4707,16 +4939,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
	 * Whether or not the message is received successfully, the VF
	 * needs to ack the PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4753,22 +4984,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
	 * Get the message from the source via the mailbox. Whether or not the
	 * message is received successfully, the destination needs to ack the
	 * source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4780,10 +5010,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -4863,10 +5093,10 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- if (qm->fun_type == QM_HW_VF)
+ if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
return;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return;
@@ -4880,17 +5110,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- if (qm->fun_type == QM_HW_VF)
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
- if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+	/* For VFs, this interrupt vector is reserved on HW V3 and later. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver < QM_HW_V3)
+ return 0;
+
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
+ IRQF_NO_AUTOEN, qm->dev_name, qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
+ return ret;
+ }
return 0;
+ }
- irq_vector = val & QM_IRQ_VECTOR_MASK;
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
- dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
return ret;
}
@@ -4900,7 +5141,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4914,7 +5155,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4931,7 +5172,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4945,7 +5186,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4963,7 +5204,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4977,7 +5218,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -5065,24 +5306,26 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
-static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
+static int qm_pre_store_caps(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *qm_cap;
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(qm_pre_store_caps);
- qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+ size = ARRAY_SIZE(qm_cap_query_info);
+	qm_cap = devm_kcalloc(&pdev->dev, size, sizeof(*qm_cap), GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- qm_cap[i].type = qm_pre_store_caps[i];
- qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
- qm_pre_store_caps[i], qm->cap_ver);
+ qm_cap[i].type = qm_cap_query_info[i].type;
+ qm_cap[i].name = qm_cap_query_info[i].name;
+ qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.qm_cap_table = qm_cap;
+ qm->cap_tables.qm_cap_size = size;
return 0;
}
@@ -5119,8 +5362,22 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
set_bit(cap_info[i].type, &qm->caps);
}
- /* Fetch and save the value of irq type related capability registers */
- return qm_pre_store_irq_type_caps(qm);
+ /* Fetch and save the value of qm capability registers */
+ return qm_pre_store_caps(qm);
+}
+
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
}
static int qm_get_pci_res(struct hisi_qm *qm)
@@ -5142,6 +5399,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5161,6 +5420,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5203,6 +5463,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5229,6 +5497,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
pci_set_master(pdev);
num_vec = qm_get_irq_num(qm);
+ if (!num_vec) {
+ dev_err(dev, "Device irq num is zero!\n");
+ ret = -EINVAL;
+ goto err_get_pci_res;
+ }
+ num_vec = roundup_pow_of_two(num_vec);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -5417,8 +5691,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5554,6 +5826,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 9bafcc5aa404..129cb6faa0b7 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -922,7 +922,8 @@ static int sec_hw_init(struct sec_dev_info *info)
struct iommu_domain *domain;
u32 sec_ipv4_mask = 0;
u32 sec_ipv6_mask[10] = {};
- u32 i, ret;
+ int ret;
+ u32 i;
domain = iommu_get_domain_for_dev(info->dev);
@@ -1304,7 +1305,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
.probe = sec_probe,
- .remove_new = sec_remove,
+ .remove = sec_remove,
.driver = {
.name = "hisi_sec_platform_driver",
.of_match_table = sec_match,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 410c83712e28..81d0beda93b2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -7,6 +7,12 @@
#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"
+#define SEC_PBUF_SZ 512
+#define SEC_MAX_MAC_LEN 64
+#define SEC_IV_SIZE 24
+#define SEC_SGE_NR_NUM 4
+#define SEC_SGL_ALIGN_SIZE 64
+
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
u8 *pbuf;
@@ -20,6 +26,40 @@ struct sec_alg_res {
u16 depth;
};
+struct sec_hw_sge {
+ dma_addr_t buf;
+ void *page_ctrl;
+ __le32 len;
+ __le32 pad;
+ __le32 pad0;
+ __le32 pad1;
+};
+
+struct sec_hw_sgl {
+ dma_addr_t next_dma;
+ __le16 entry_sum_in_chain;
+ __le16 entry_sum_in_sgl;
+ __le16 entry_length_in_sgl;
+ __le16 pad0;
+ __le64 pad1[5];
+ struct sec_hw_sgl *next;
+ struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
+} __aligned(SEC_SGL_ALIGN_SIZE);
+
+struct sec_src_dst_buf {
+ struct sec_hw_sgl in;
+ struct sec_hw_sgl out;
+};
+
+struct sec_request_buf {
+ union {
+ struct sec_src_dst_buf data_buf;
+ __u8 pbuf[SEC_PBUF_SZ];
+ };
+ dma_addr_t in_dma;
+ dma_addr_t out_dma;
+};
+
/* Cipher request of SEC private */
struct sec_cipher_req {
struct hisi_acc_hw_sgl *c_out;
@@ -29,6 +69,7 @@ struct sec_cipher_req {
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
+ __u8 c_ivin_buf[SEC_IV_SIZE];
};
struct sec_aead_req {
@@ -37,6 +78,13 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ __u8 a_ivin_buf[SEC_IV_SIZE];
+ __u8 out_mac_buf[SEC_MAX_MAC_LEN];
+};
+
+struct sec_instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
};
/* SEC request of Crypto */
@@ -55,15 +103,17 @@ struct sec_req {
dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
- struct list_head backlog_head;
+ struct crypto_async_request *base;
int err_type;
int req_id;
u32 flag;
- /* Status of the SEC request */
- bool fake_busy;
bool use_pbuf;
+
+ struct list_head list;
+ struct sec_instance_backlog *backlog;
+ struct sec_request_buf buf;
};
/**
@@ -90,9 +140,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
@@ -121,9 +169,11 @@ struct sec_qp_ctx {
struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
- struct list_head backlog;
+ spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
+ struct sec_instance_backlog backlog;
+ u16 send_head;
};
enum sec_alg_type {
@@ -141,9 +191,6 @@ struct sec_ctx {
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
- /* Threshold for fake busy, trigger to return -EBUSY to user */
- u32 fake_req_limit;
-
/* Current cyclic index to select a queue for encipher */
atomic_t enc_qcyclic;
@@ -220,11 +267,27 @@ enum sec_cap_type {
SEC_CORE4_ALG_BITMAP_HIGH,
};
-enum sec_cap_reg_record_idx {
- SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
- SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DEV_ALG_BITMAP_LOW_IDX,
- SEC_DEV_ALG_BITMAP_HIGH_IDX,
+enum sec_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ SEC_RAS_NFE_TYPE,
+ SEC_RAS_NFE_RESET,
+ SEC_RAS_CE_TYPE,
+ SEC_CORE_INFO,
+ SEC_CORE_EN,
+ SEC_DRV_ALG_BITMAP_LOW_TB,
+ SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_ALG_BITMAP_LOW,
+ SEC_ALG_BITMAP_HIGH,
+ SEC_CORE1_BITMAP_LOW,
+ SEC_CORE1_BITMAP_HIGH,
+ SEC_CORE2_BITMAP_LOW,
+ SEC_CORE2_BITMAP_HIGH,
+ SEC_CORE3_BITMAP_LOW,
+ SEC_CORE3_BITMAP_HIGH,
+ SEC_CORE4_BITMAP_LOW,
+ SEC_CORE4_BITMAP_HIGH,
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0558f98e221f..31590d01139a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -57,7 +57,6 @@
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
-#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
@@ -68,7 +67,6 @@
#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
-#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
@@ -80,16 +78,16 @@
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
SEC_PBUF_LEFT_SZ(depth))
-#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
-#define MIN_MAC_LEN 4
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
+#define WORD_MASK 0x3
#define BYTE_BITS 0x8
+#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7
@@ -103,6 +101,8 @@
#define IV_LAST_BYTE_MASK 0xFF
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+#define SEC_GCM_MIN_AUTH_SZ 0x8
+#define SEC_RETRY_MAX_CNT 5U
static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;
@@ -117,40 +117,19 @@ struct sec_aead {
struct aead_alg alg;
};
-/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
- ctx->hlf_q_num;
-
- return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
- ctx->hlf_q_num;
-}
-
-static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- atomic_dec(&ctx->enc_qcyclic);
- else
- atomic_dec(&ctx->dec_qcyclic);
-}
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt);
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt);
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(req_id < 0)) {
- dev_err(req->ctx->dev, "alloc req id fail!\n");
- return req_id;
- }
-
- req->qp_ctx = qp_ctx;
- qp_ctx->req_list[req_id] = req;
-
+ spin_unlock_bh(&qp_ctx->id_lock);
return req_id;
}
@@ -164,12 +143,9 @@ static void sec_free_req_id(struct sec_req *req)
return;
}
- qp_ctx->req_list[req_id] = NULL;
- req->qp_ctx = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- spin_unlock_bh(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->id_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -230,6 +206,90 @@ static int sec_cb_status_check(struct sec_req *req,
return 0;
}
+static int qp_send_message(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
+ return -EBUSY;
+
+ spin_lock_bh(&qp_ctx->req_lock);
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
+
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
+ req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
+ qp_ctx->req_list[qp_ctx->send_head] = req;
+ }
+
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ if (ret) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+ }
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
+ qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;
+
+ spin_unlock_bh(&qp_ctx->req_lock);
+
+ atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ return -EINPROGRESS;
+}
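
qp_send_message() performs a cheap unlocked full-ring test and repeats it under req_lock before touching the ring, so most contending callers bounce with -EBUSY without serializing on the lock. A userspace model of that double-checked pattern; the depth, names, and pthread locking are illustrative, the kernel code uses spin_lock_bh():

#include <pthread.h>
#include <stdio.h>

#define SQ_DEPTH 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int used;

static int qp_send(void)
{
	if (used == SQ_DEPTH - 1)
		return -1;		/* -EBUSY, without taking the lock */

	pthread_mutex_lock(&lock);
	if (used == SQ_DEPTH - 1) {	/* re-check under the lock */
		pthread_mutex_unlock(&lock);
		return -1;
	}
	used++;				/* stands in for hisi_qp_send() */
	pthread_mutex_unlock(&lock);
	return 0;			/* request is in flight */
}

int main(void)
{
	int sent = 0;

	while (qp_send() == 0)
		sent++;
	printf("sent %d of %d slots\n", sent, SQ_DEPTH);
	return 0;
}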
+
+static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_del(&req->list);
+ ctx->req_op->buf_unmap(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
+
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
+
+ /* Wake up the busy thread first, then return the errno. */
+ crypto_request_complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, ret);
+ }
+}
+
+static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ spin_lock_bh(&qp_ctx->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ ret = qp_send_message(req);
+ switch (ret) {
+ case -EINPROGRESS:
+ list_del(&req->list);
+ crypto_request_complete(req->base, -EINPROGRESS);
+ break;
+ case -EBUSY:
+			/* Device is busy; stop sending requests. */
+ goto unlock;
+ default:
+			/* Release resources and complete the remaining requests through software. */
+ sec_alg_send_backlog_soft(ctx, qp_ctx);
+ goto unlock;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&qp_ctx->backlog.lock);
+}
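
sec_alg_send_backlog() walks the queued requests in FIFO order under the backlog lock, resubmitting each one until the ring reports busy again or an error pushes everything to the software fallback. A self-contained model using a plain singly linked list; the kernel list API, error codes, and locking flavor are not reproduced:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct req {
	int id;
	struct req *next;
};

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *backlog_head;
static int ring_free = 2;	/* pretend two ring slots opened up */

static int try_send(struct req *r)
{
	if (!ring_free)
		return -1;	/* -EBUSY */
	ring_free--;
	printf("req %d sent to hardware\n", r->id);
	return 0;
}

static void send_backlog(void)
{
	pthread_mutex_lock(&backlog_lock);
	while (backlog_head) {
		struct req *r = backlog_head;

		if (try_send(r))
			break;	/* ring full again: keep the rest queued */
		backlog_head = r->next;
		free(r);
	}
	pthread_mutex_unlock(&backlog_lock);
}

int main(void)
{
	for (int i = 2; i >= 0; i--) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		r->next = backlog_head;
		backlog_head = r;
	}
	send_backlog();	/* sends req 0 and 1; req 2 stays queued */
	return 0;
}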
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
@@ -274,40 +334,54 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
ctx->req_op->callback(ctx, req, err);
}
-static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+static int sec_alg_send_message_retry(struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ctr = 0;
int ret;
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ do {
+ ret = qp_send_message(req);
+ } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);
- spin_lock_bh(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
- list_add_tail(&req->backlog_head, &qp_ctx->backlog);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+}
+
+static int sec_alg_try_enqueue(struct sec_req *req)
+{
+ /* Check if any request is already backlogged */
+ if (!list_empty(&req->backlog->list))
return -EBUSY;
- }
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(ret == -EBUSY))
- return -ENOBUFS;
+ /* Try to enqueue to HW ring */
+ return qp_send_message(req);
+}
- if (likely(!ret)) {
- ret = -EINPROGRESS;
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- }
+
+static int sec_alg_send_message_maybacklog(struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_alg_try_enqueue(req);
+ if (ret != -EBUSY)
+ return ret;
+
+ spin_lock_bh(&req->backlog->lock);
+ ret = sec_alg_try_enqueue(req);
+ if (ret == -EBUSY)
+ list_add_tail(&req->list, &req->backlog->list);
+ spin_unlock_bh(&req->backlog->lock);
return ret;
}
-/* Get DMA memory resources */
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return sec_alg_send_message_maybacklog(req);
+
+ return sec_alg_send_message_retry(req);
+}
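
sec_bd_send() now splits on CRYPTO_TFM_REQ_MAY_BACKLOG: backloggable requests are queued when the ring is full, everything else is retried up to SEC_RETRY_MAX_CNT times. A sketch of that policy with a scripted busy schedule; the return codes below are stand-ins for -EBUSY and -EINPROGRESS:

#include <stdio.h>

#define MAY_BACKLOG 1
#define RETRY_MAX   5	/* stand-in for SEC_RETRY_MAX_CNT */

static int busy_left = 3;	/* ring reports busy three times */

static int try_enqueue(void)
{
	if (busy_left > 0) {
		busy_left--;
		return -1;	/* models -EBUSY */
	}
	return -2;		/* models -EINPROGRESS: accepted by hardware */
}

static int send(int flags)
{
	int ret, tries = 0;

	if (flags & MAY_BACKLOG) {
		ret = try_enqueue();
		if (ret == -1)
			return 1;	/* would be queued on the backlog */
		return ret;
	}

	do {
		ret = try_enqueue();
	} while (ret == -1 && tries++ < RETRY_MAX);

	return ret;
}

int main(void)
{
	printf("retry path: %d\n", send(0));		/* -2 after retries */
	busy_left = 3;
	printf("backlog path: %d\n", send(MAY_BACKLOG));/* 1: backlogged */
	return 0;
}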
+
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
u16 q_depth = res->depth;
@@ -559,7 +633,10 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ spin_lock_init(&qp_ctx->backlog.lock);
+ spin_lock_init(&qp_ctx->id_lock);
+ INIT_LIST_HEAD(&qp_ctx->backlog.list);
+ qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
@@ -603,9 +680,6 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
-
- /* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -691,14 +765,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
- /* Currently, only XTS mode need fallback tfm when using 192bit key */
- if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
- return 0;
-
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc xts mode fallback tfm!\n");
+ pr_err("failed to alloc fallback tfm for %s!\n", alg);
return PTR_ERR(c_ctx->fbtfm);
}
@@ -711,7 +781,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
int ret;
ctx->alg_type = SEC_SKCIPHER;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
pr_err("get error skcipher iv size!\n");
@@ -858,7 +928,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback && c_ctx->fbtfm) {
+ if (c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -888,24 +958,25 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct sec_aead_req *a_req = &req->aead_req;
- struct aead_request *aead_req = a_req->aead_req;
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
struct crypto_aead *tfm;
+ u8 *mac_offset, *pbuf;
size_t authsize;
- u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf, copy_size);
+
+ pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
@@ -913,8 +984,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
tfm = crypto_aead_reqtfm(aead_req);
authsize = crypto_aead_authsize(tfm);
- mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
- memcpy(a_req->out_mac, mac_offset, authsize);
+ mac_offset = pbuf + copy_size - authsize;
+ memcpy(req->aead_req.out_mac, mac_offset, authsize);
+ }
+
+ if (req->req_id < 0) {
+ buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, buf->in_dma)))
+ return -ENOMEM;
+
+ buf->out_dma = buf->in_dma;
+ return 0;
}
req->in_dma = qp_ctx->res[req_id].pbuf_dma;
@@ -929,6 +1009,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -937,10 +1018,16 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf, copy_size);
+ if (req->req_id < 0)
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
+ else
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size))
dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+
+ if (req->req_id < 0)
+ dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
}
static int sec_aead_mac_init(struct sec_aead_req *req)
@@ -948,29 +1035,109 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
return 0;
}
-static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
+{
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
+}
+
+static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
+ struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
+ int dma_dir)
+{
+ struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
+ u32 i, sg_n, sg_n_mapped;
+ struct scatterlist *sg;
+ u32 sge_var = 0;
+
+ sg_n = sg_nents(src);
+ sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
+ if (unlikely(!sg_n_mapped)) {
+ dev_err(dev, "dma mapping for SG error!\n");
+ return -EINVAL;
+ } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
+		dev_err(dev, "invalid number of entries in input scatterlist!\n");
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -EINVAL;
+ }
+
+ for_each_sg(src, sg, sg_n_mapped, i) {
+ fill_sg_to_hw_sge(sg, curr_hw_sge);
+ curr_hw_sge++;
+ sge_var++;
+ }
+
+ src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
+ src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
+ src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
+ *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
+ dma_addr_t src_in, int dma_dir)
+{
+ dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
+}
+
+static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
+ struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
+ int ret;
+
+ if (dst == src) {
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
+ DMA_BIDIRECTIONAL);
+ req->buf.out_dma = req->buf.in_dma;
+ return ret;
+ }
+
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
+ if (unlikely(ret))
+ return ret;
+
+ ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
+ DMA_FROM_DEVICE);
+ if (unlikely(ret)) {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ return 0;
+}
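
sec_cipher_to_hw_sgl() copies up to SEC_SGE_NR_NUM mapped fragments into a fixed-size hardware SGL and records the entry counts, rejecting longer scatterlists outright. A loose structural model of that fill step; there is no DMA mapping here and the fields are simplified relative to struct sec_hw_sgl:

#include <stdint.h>
#include <stdio.h>

#define SGE_NR_NUM 4	/* stand-in for SEC_SGE_NR_NUM */

struct hw_sge { uint64_t addr; uint32_t len; };
struct hw_sgl {
	uint16_t entry_sum_in_sgl;
	struct hw_sge sge[SGE_NR_NUM];
};

static int fill_hw_sgl(struct hw_sgl *sgl, const struct hw_sge *frags,
		       unsigned int nr)
{
	if (nr > SGE_NR_NUM)
		return -1;	/* too many fragments for one SGL */

	for (unsigned int i = 0; i < nr; i++)
		sgl->sge[i] = frags[i];
	sgl->entry_sum_in_sgl = (uint16_t)nr;
	return 0;
}

int main(void)
{
	struct hw_sge frags[2] = { { 0x1000, 512 }, { 0x2000, 256 } };
	struct hw_sgl sgl = { 0 };

	if (!fill_hw_sgl(&sgl, frags, 2))
		printf("sgl holds %u entries\n", sgl.entry_sum_in_sgl);
	return 0;
}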
+
+static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
struct device *dev = ctx->dev;
+ enum dma_data_direction src_direction;
int ret;
if (req->use_pbuf) {
@@ -983,10 +1150,9 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
- ret = sec_cipher_pbuf_map(ctx, req, src);
-
- return ret;
+ return sec_cipher_pbuf_map(ctx, req, src);
}
+
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
@@ -996,10 +1162,11 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->out_mac_dma;
}
+ src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
req->req_id,
- &req->in_dma);
+ &req->in_dma, src_direction);
if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(req->in);
@@ -1009,7 +1176,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
ret = sec_aead_mac_init(a_req);
if (unlikely(ret)) {
dev_err(dev, "fail to init mac data for ICV!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return ret;
}
}
@@ -1021,11 +1188,12 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
req->req_id,
- &c_req->c_out_dma);
+ &c_req->c_out_dma,
+ DMA_FROM_DEVICE);
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return PTR_ERR(c_req->c_out);
}
}
@@ -1033,19 +1201,108 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
return 0;
}
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ bool is_aead = (ctx->alg_type == SEC_AEAD);
+ struct device *dev = ctx->dev;
+ int ret = -ENOMEM;
+
+ if (req->req_id >= 0)
+ return sec_cipher_map_inner(ctx, req, src, dst);
+
+ c_req->c_ivin = c_req->c_ivin_buf;
+ c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
+ return -ENOMEM;
+
+ if (is_aead) {
+ a_req->a_ivin = a_req->a_ivin_buf;
+ a_req->out_mac = a_req->out_mac_buf;
+ a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
+ goto free_c_ivin_dma;
+
+ a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
+ SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
+ goto free_a_ivin_dma;
+ }
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ if (unlikely(ret))
+ goto free_out_mac_dma;
+
+ return 0;
+ }
+
+ if (!c_req->encrypt && is_aead) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ goto free_out_mac_dma;
+ }
+ }
+
+ ret = sec_cipher_map_sgl(dev, req, src, dst);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ goto free_out_mac_dma;
+ }
+
+ return 0;
+
+free_out_mac_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+free_a_ivin_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+free_c_ivin_dma:
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ return ret;
+}
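
The new sec_cipher_map() fallback path acquires its DMA mappings step by step and unwinds with a goto chain that releases only what was already mapped. The shape of that unwind, with malloc() standing in for dma_map_single(); resources are freed on success here only to keep the model leak-free:

#include <stdio.h>
#include <stdlib.h>

static int map_request(int is_aead)
{
	void *c_iv, *a_iv = NULL, *mac = NULL;

	c_iv = malloc(24);
	if (!c_iv)
		return -1;

	if (is_aead) {
		a_iv = malloc(24);
		if (!a_iv)
			goto free_c_iv;
		mac = malloc(64);
		if (!mac)
			goto free_a_iv;
	}

	/* ...map the data buffers here; a failure would unwind below... */
	free(mac);
	free(a_iv);
	free(c_iv);
	return 0;

free_a_iv:
	free(a_iv);
free_c_iv:
	free(c_iv);
	return -1;
}

int main(void)
{
	printf("map_request: %d\n", map_request(1));
	return 0;
}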
+
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->dev;
+ if (req->req_id >= 0) {
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src) {
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
+ } else {
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
+ }
+ }
+ return;
+ }
+
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ if (dst != src) {
+ sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ } else {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
+ }
+ }
- hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (ctx->alg_type == SEC_AEAD) {
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
}
}
@@ -1091,11 +1348,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, digestsize, ret;
- if (!keys->authkeylen) {
- pr_err("hisi_sec2: aead auth key error!\n");
- return -EINVAL;
- }
-
blocksize = crypto_shash_blocksize(hash_tfm);
digestsize = crypto_shash_digestsize(hash_tfm);
if (keys->authkeylen > blocksize) {
@@ -1107,7 +1359,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
}
ctx->a_key_len = digestsize;
} else {
- memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ if (keys->authkeylen)
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
ctx->a_key_len = keys->authkeylen;
}
@@ -1120,10 +1373,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1389,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1400,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,18 +1410,14 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
- if (ret)
+ if (ret) {
+		dev_err(dev, "failed to extract aead keys!\n");
goto bad_key;
+ }
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
@@ -1187,10 +1431,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
- ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+		dev_err(dev, "failed to set fallback aead key!\n");
goto bad_key;
}
@@ -1202,27 +1445,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
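+/* Generate one setkey callback per supported AEAD algorithm/mode pair. */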
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1285,8 +1520,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
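+ /* A negative req_id means no per-request slot was reserved; fall back to the preallocated request buffer (assumed purpose of req->buf). */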
+ if (req->req_id < 0) {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
+ de = 0x1 << SEC_DE_OFFSET;
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
SEC_CMODE_OFFSET);
@@ -1312,13 +1554,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (req->in_dma != c_req->c_out_dma)
- de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
- sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
@@ -1335,8 +1574,15 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
c_ctx->c_mode;
@@ -1362,8 +1608,6 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
}
bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
- if (req->in_dma != c_req->c_out_dma)
- bd_param |= 0x1 << SEC_DE_OFFSET_V3;
bd_param |= SEC_BD_TYPE3;
sec_sqe3->bd_param = cpu_to_le32(bd_param);
@@ -1395,15 +1639,12 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
size_t sz;
u8 *iv;
- if (req->c_req.encrypt)
- sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
- else
- sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
-
if (alg_type == SEC_SKCIPHER) {
+ sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
iv = sk_req->iv;
cryptlen = sk_req->cryptlen;
} else {
+ sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
iv = aead_req->iv;
cryptlen = aead_req->cryptlen;
}
@@ -1414,65 +1655,35 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
if (unlikely(sz != iv_size))
dev_err(req->ctx->dev, "copy output iv error!\n");
} else {
- sz = cryptlen / iv_size;
- if (cryptlen % iv_size)
- sz += 1;
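+ /* Number of counter blocks advanced: ceil(cryptlen / iv_size). */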
+ sz = (cryptlen + iv_size - 1) / iv_size;
ctr_iv_inc(iv, iv_size, sz);
}
}
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_req *backlog_req = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
- if (ctx->fake_req_limit >=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !list_empty(&qp_ctx->backlog)) {
- backlog_req = list_first_entry(&qp_ctx->backlog,
- typeof(*backlog_req), backlog_head);
- list_del(&backlog_req->backlog_head);
- }
- spin_unlock_bh(&qp_ctx->req_lock);
-
- return backlog_req;
-}
-
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
- struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct skcipher_request *backlog_sk_req;
- struct sec_req *backlog_req;
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
/* IV output at encryption of CBC/CTR mode */
if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- while (1) {
- backlog_req = sec_back_req_clear(ctx, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_sk_req = backlog_req->c_req.sk_req;
- skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
- atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
- }
-
- skcipher_request_complete(sk_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(ctx, qp_ctx);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1724,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1733,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
* the counter must be set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
@@ -1542,9 +1747,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode is CCM/GCM; don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1775,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode is CCM/GCM; don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,15 +1803,15 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
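+ /* MAC and auth key lengths are programmed in 32-bit words; BYTES_TO_WORDS presumably divides byte counts by four. */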
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |=
- cpu_to_le32((u32)((ctx->a_key_len) /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
@@ -1648,16 +1857,16 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
- SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+ cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->a_key_len /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
@@ -1703,73 +1912,53 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
- struct sec_aead_req *aead_req = &req->aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct aead_request *backlog_aead_req;
- struct sec_req *backlog_req;
size_t sz;
- if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
- sec_update_iv(req, SEC_AEAD);
+ if (!err && req->c_req.encrypt) {
+ if (c->c_ctx.c_mode == SEC_CMODE_CBC)
+ sec_update_iv(req, SEC_AEAD);
- /* Copy output mac */
- if (!err && c_req->encrypt) {
- struct scatterlist *sgl = a_req->dst;
-
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
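+ /* Copy the freshly computed MAC to the tail of the destination buffer. */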
+ sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
}
}
- sec_free_req_id(req);
-
- while (1) {
- backlog_req = sec_back_req_clear(c, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_aead_req = backlog_req->aead_req.aead_req;
- aead_request_complete(backlog_aead_req, -EINPROGRESS);
- atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
- }
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
- aead_request_complete(a_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(c, qp_ctx);
}
-static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_request_uninit(struct sec_req *req)
{
- sec_free_req_id(req);
- sec_free_queue_id(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int queue_id;
+ int i = 0;
- /* To load balance */
- queue_id = sec_alloc_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[queue_id];
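+ /* Try each queue context in turn until a request id is allocated. */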
+ do {
+ qp_ctx = &ctx->qp_ctx[i];
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
- req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (unlikely(req->req_id < 0)) {
- sec_free_queue_id(ctx, req);
- return req->req_id;
- }
+ req->qp_ctx = qp_ctx;
+ req->backlog = &qp_ctx->backlog;
return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1786,8 +1975,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
- (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1798,16 +1986,23 @@ err_send_req:
/* As failing, restore the IV from user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
+ memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
}
sec_request_untransfer(ctx, req);
+
err_uninit_req:
- sec_request_uninit(ctx, req);
+ sec_request_uninit(req);
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
return ret;
}
@@ -1881,7 +2076,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
- crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
@@ -1929,8 +2124,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +2136,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +2159,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +2186,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2007,8 +2213,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
- struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
struct device *dev = ctx->dev;
@@ -2030,10 +2235,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
}
break;
case SEC_CMODE_CTR:
- if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
- dev_err(dev, "skcipher HW version error!\n");
- ret = -EINVAL;
- }
break;
default:
ret = -EINVAL;
@@ -2042,17 +2243,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
return ret;
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct sec_req *sreq, bool *need_fallback)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst ||
- sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
+
+ if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+ *need_fallback = true;
+
sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2108,8 +2313,9 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
- struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_req *req = skcipher_request_ctx_dma(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ bool need_fallback = false;
int ret;
if (!sk_req->cryptlen) {
@@ -2122,12 +2328,13 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &sk_req->base;
- ret = sec_skcipher_param_check(ctx, req);
+ ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
return -EINVAL;
- if (unlikely(ctx->c_ctx.fallback))
+ if (unlikely(ctx->c_ctx.fallback || need_fallback))
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req);
@@ -2233,55 +2440,40 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
- struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
return -EINVAL;
- }
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN))
return -EINVAL;
- }
if (c_mode == SEC_CMODE_CCM) {
- if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
- dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
return -EINVAL;
- }
- ret = aead_iv_demension_check(req);
- if (ret) {
- dev_err(dev, "aead input iv param error!\n");
- return ret;
- }
- }
- if (sreq->c_req.encrypt)
- sreq->c_req.c_len = req->cryptlen;
- else
- sreq->c_req.c_len = req->cryptlen - authsize;
- if (c_mode == SEC_CMODE_CBC) {
- if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "aead crypto length error!\n");
+ ret = aead_iv_demension_check(req);
+ if (unlikely(ret))
+ return -EINVAL;
+ } else if (c_mode == SEC_CMODE_CBC) {
+ if (unlikely(sz & WORD_MASK))
+ return -EINVAL;
+ if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
+ return -EINVAL;
+ } else if (c_mode == SEC_CMODE_GCM) {
+ if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
return -EINVAL;
- }
}
return 0;
}
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
struct aead_request *req = sreq->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
@@ -2290,12 +2482,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (ctx->sec->qm.ver == QM_HW_V2) {
- if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
- return -EINVAL;
- }
+ if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+ sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "aead cbc mode input data length error!\n");
+ return -EINVAL;
}
/* Support AES or SM4 */
@@ -2304,8 +2494,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (unlikely(sec_aead_spec_check(ctx, sreq)))
+ if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+ *need_fallback = true;
return -EINVAL;
+ }
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)
@@ -2321,16 +2513,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2354,18 +2539,22 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
- struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_req *req = aead_request_ctx_dma(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
+ bool need_fallback = false;
int ret;
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &a_req->base;
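+ /* On decryption, the trailing authsize bytes of cryptlen are the MAC, not payload. */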
+ req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
- ret = sec_aead_param_check(ctx, req);
+ ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (need_fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
@@ -2520,8 +2709,8 @@ int sec_register_to_crypto(struct hisi_qm *qm)
u64 alg_mask;
int ret = 0;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (sec_available_devs) {
@@ -2553,8 +2742,8 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
{
u64 alg_mask;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (--sec_available_devs)
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c35533d8930b..5eb2d6820742 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -14,9 +14,9 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
-
#include "sec.h"
+#define CAP_FILE_PERMISSION 0444
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255
@@ -47,6 +47,8 @@
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
+#define SEC_AXI_ERROR_MASK (BIT(0) | BIT(1))
+
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -93,6 +95,16 @@
#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE BIT(1)
#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+#define SEC_SVA_PREFETCH_INFO 0x301ED4
+#define SEC_SVA_STALL_NUM GENMASK(23, 8)
+#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0)
+#define SEC_WAIT_SVA_READY 500000
+#define SEC_READ_SVA_STATUS_TIMES 3
+#define SEC_WAIT_US_MIN 10
+#define SEC_WAIT_US_MAX 20
+#define SEC_WAIT_QP_US_MIN 1000
+#define SEC_WAIT_QP_US_MAX 2000
+#define SEC_MAX_WAIT_TIMES 2000
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
@@ -167,11 +179,34 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};
-static const u32 sec_pre_store_caps[] = {
- SEC_DRV_ALG_BITMAP_LOW,
- SEC_DRV_ALG_BITMAP_HIGH,
- SEC_DEV_ALG_BITMAP_LOW,
- SEC_DEV_ALG_BITMAP_HIGH,
+static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
+ {SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
+ {SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
+ {SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
+ {SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
+ 0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
+ {SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
+ 0x3148, 0x395C, 0x395C, 0x395C},
+ {SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
+ 0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
+ 0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
+ 0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
+ 0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
+ 0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};
static const struct qm_dev_alg sec_dev_algs[] = { {
@@ -322,7 +357,7 @@ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
@@ -441,6 +476,81 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == SEC_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
+ } while (++try_times < SEC_WAIT_SVA_READY);
+
+ if (try_times == SEC_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ sec_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
+ if (ret)
+ sec_close_sva_prefetch(qm);
+}
+
static void sec_engine_sva_config(struct hisi_qm *qm)
{
u32 reg;
@@ -474,45 +584,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base +
SEC_INTERFACE_USER_CTRL1_REG);
}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- /* Enable prefetch */
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val &= SEC_PREFETCH_ENABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
- val, !(val & SEC_PREFETCH_DISABLE),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val |= SEC_PREFETCH_DISABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
- val, !(val & SEC_SVA_DISABLE_READY),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ sec_open_sva_prefetch(qm);
}
static void sec_enable_clock_gate(struct hisi_qm *qm)
@@ -643,8 +715,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -658,7 +729,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -666,22 +738,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
-
/* clear SEC hw error source if any */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(ce, qm->io_base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+ writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
+ writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
+ writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -838,6 +907,26 @@ static int sec_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(sec_regs);
+static int sec_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);
+
static int sec_core_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
@@ -872,6 +961,9 @@ static int sec_core_debug_init(struct hisi_qm *qm)
tmp_d, data, &sec_atomic64_ops);
}
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &sec_cap_regs_fops);
+
return 0;
}
@@ -1010,11 +1102,23 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+}
+
+static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+
+ writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
+}
+
+static void sec_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
+ writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1026,22 +1130,90 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}
+static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ sec_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_err.reset_mask) {
+ /* Disable reporting of this error type until the device has recovered. */
+ sec_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ sec_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ sec_enable_error_report(qm);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
+ return true;
+
+ return false;
+}
+
+static void sec_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
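+ /* Mask AXI bus errors only; other device error interrupts stay enabled. */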
+ writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
+ qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
+static void sec_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = SEC_RAS_FE_ENB_MSK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = SEC_RAS_FE_ENB_MSK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->fe = SEC_RAS_FE_ENB_MSK;
- err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
}
@@ -1052,12 +1224,15 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
.clear_dev_hw_err_status = sec_clear_hw_err_status,
- .log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
.open_sva_prefetch = sec_open_sva_prefetch,
.close_sva_prefetch = sec_close_sva_prefetch,
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
+ .get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
+ .disable_axi_error = sec_disable_axi_error,
+ .enable_axi_error = sec_enable_axi_error,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1069,7 +1244,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
- sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
ret = sec_show_last_regs_init(qm);
@@ -1085,18 +1259,20 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(sec_pre_store_caps);
- sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
+ size = ARRAY_SIZE(sec_cap_query_info);
+ sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- sec_cap[i].type = sec_pre_store_caps[i];
- sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
- sec_pre_store_caps[i], qm->cap_ver);
+ sec_cap[i].type = sec_cap_query_info[i].type;
+ sec_cap[i].name = sec_cap_query_info[i].name;
+ sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = sec_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1107,7 +1283,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1146,8 +1321,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
hisi_qm_uninit(qm);
return ret;
}
-
- alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
+ alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set sec algs!\n");
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index c974f95cd126..7a9ef2a9972a 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -210,15 +210,15 @@ static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
* @pool: Pool which hw sgl memory will be allocated in.
* @index: Index of hisi_acc_hw_sgl in pool.
* @hw_sgl_dma: The dma address of allocated hw sgl.
+ * @dir: DMA direction.
*
* This function builds a hw sgl according to the input sgl; the user can use
* hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently supported.
*/
struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma)
+hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_sgl_pool *pool, u32 index,
+ dma_addr_t *hw_sgl_dma, enum dma_data_direction dir)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
unsigned int i, sg_n_mapped;
@@ -232,7 +232,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
- sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, dir);
if (!sg_n_mapped) {
dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
@@ -276,16 +276,17 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
* @dev: The device which hw sgl belongs to.
* @sgl: Related scatterlist.
* @hw_sgl: Virtual address of hw sgl.
+ * @dir: DMA direction.
*
* This function unmaps the allocated hw sgl.
*/
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl)
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir)
{
if (!dev || !sgl || !hw_sgl)
return;
- dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sgl, sg_nents(sgl), dir);
clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 66c551ecdee8..ac74df4a9471 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -324,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
- .remove_new = hisi_trng_remove,
+ .remove = hisi_trng_remove,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..68aebd02fc84
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+#define DAE_V5_ALG_NAME "hashagg\nudma\nhashjoin\ngather"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ const char *alg_name;
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ if (qm->ver >= QM_HW_V5)
+ alg_name = DAE_V5_ALG_NAME;
+ else
+ alg_name = DAE_ALG_NAME;
+
+ len = strlen(qm->uacce->algs);
+ /* A line break may be required */
+ if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, alg_name);
+
+ return 0;
+}
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
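+ /* Apply or clear the AXI shutdown and error-shutdown masks as a pair. */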
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear dae hw error source if any */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_enable_error_report(struct hisi_qm *qm)
+{
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable reporting of this error type until the device has recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_dae_enable_error_report(qm);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f2e6da3240ae..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,8 +81,34 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
+enum zip_cap_table_type {
+ QM_RAS_NFE_TYPE,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ ZIP_RAS_NFE_TYPE,
+ ZIP_RAS_NFE_RESET,
+ ZIP_RAS_CE_TYPE,
+ ZIP_CORE_INFO,
+ ZIP_CORE_EN,
+ ZIP_DRV_ALG_BITMAP_TB,
+ ZIP_ALG_BITMAP,
+ ZIP_CORE1_BITMAP,
+ ZIP_CORE2_BITMAP,
+ ZIP_CORE3_BITMAP,
+ ZIP_CORE4_BITMAP,
+ ZIP_CORE5_BITMAP,
+};
+
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 7327f8f29b01..b97513981a3b 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -224,7 +224,8 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &req->dma_src);
+ req->req_id << 1, &req->dma_src,
+ DMA_TO_DEVICE);
if (IS_ERR(req->hw_src)) {
dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
PTR_ERR(req->hw_src));
@@ -233,7 +234,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
- &req->dma_dst);
+ &req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
@@ -258,9 +259,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
return ret;
}
@@ -303,8 +304,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
err = -EIO;
}
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);
acomp_req->dlen = ops->get_dstlen(sqe);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d07e47b48be0..4fcbe6bada06 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -14,6 +14,7 @@
#include <linux/uacce.h>
#include "zip.h"
+#define CAP_FILE_PERMISSION 0444
#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
#define HZIP_QUEUE_NUM_V1 4096
@@ -64,6 +65,7 @@
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
+#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3))
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -79,6 +81,7 @@
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+#define HZIP_ALG_LZ4_BIT GENMASK(9, 8)
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
@@ -94,10 +97,16 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
+#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
-#define HZIP_DELAY_1_US 1
-#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_WAIT_SVA_READY 500000
+#define HZIP_READ_SVA_STATUS_TIMES 3
+#define HZIP_WAIT_US_MIN 10
+#define HZIP_WAIT_US_MAX 20
/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
@@ -110,6 +119,9 @@
/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208
+#define HZIP_LIT_LEN_EN_OFFSET 0x301204
+#define HZIP_LIT_LEN_EN_EN BIT(4)
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -140,6 +152,12 @@ static const struct qm_dev_alg zip_dev_algs[] = { {
}, {
.alg_msk = HZIP_ALG_LZ77_BIT,
.alg = "lz77_zstd\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .alg = "lz77_only\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ4_BIT,
+ .alg = "lz4\n",
},
};
@@ -250,24 +268,22 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
-enum zip_pre_store_cap_idx {
- ZIP_CORE_NUM_CAP_IDX = 0x0,
- ZIP_CLUSTER_COMP_NUM_CAP_IDX,
- ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
- ZIP_DECOMP_ENABLE_BITMAP_IDX,
- ZIP_COMP_ENABLE_BITMAP_IDX,
- ZIP_DRV_ALG_BITMAP_IDX,
- ZIP_DEV_ALG_BITMAP_IDX,
-};
-
-static const u32 zip_pre_store_caps[] = {
- ZIP_CORE_NUM_CAP,
- ZIP_CLUSTER_COMP_NUM_CAP,
- ZIP_CLUSTER_DECOMP_NUM_CAP,
- ZIP_DECOMP_ENABLE_BITMAP,
- ZIP_COMP_ENABLE_BITMAP,
- ZIP_DRV_ALG_BITMAP,
- ZIP_DEV_ALG_BITMAP,
+static const struct hisi_qm_cap_query_info zip_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C57, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC57, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE ", 0x3130, 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET ", 0x3134, 0x0, 0x7FE, 0x7FE},
+ {ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {ZIP_CORE_INFO, "ZIP_CORE_INFO ", 0x313C, 0x12080206, 0x12080206, 0x12050203},
+ {ZIP_CORE_EN, "ZIP_CORE_EN ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003},
+ {ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x0, 0x30},
+ {ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP ", 0x3148, 0xF, 0xF, 0x3F},
+ {ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP ", 0x314C, 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP ", 0x3150, 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP ", 0x3154, 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP ", 0x3158, 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP ", 0x315C, 0xA, 0xA, 0x2A},
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
@@ -402,7 +418,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -442,17 +458,30 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val;
if ((alg & cap_val) == alg)
return true;
return false;
}
-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_literal_set(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+ val &= ~HZIP_LIT_LEN_EN_EN;
+
+ /* enable literal length in stream mode compression */
+ writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+}
+
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
{
u32 val;
- int ret;
val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
if (perf_mode == HZIP_HIGH_COMP_PERF)
@@ -462,16 +491,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
/* Set perf mode */
writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
- val, val == perf_mode, HZIP_DELAY_1_US,
- HZIP_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to set perf mode\n");
+}
- return ret;
+static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, u32 offset, u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 on three
+ * consecutive reads, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == HZIP_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
+ } while (++try_times < HZIP_WAIT_SVA_READY);
+
+ if (try_times == HZIP_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
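
The hisi_zip_wait_sva_ready() helper added above does not accept a single zero read as proof of idleness: the masked status must read back as zero on three consecutive samples, with a 10-20us sleep between samples, before the SVA module is treated as ready. A minimal standalone sketch of that debounced-poll pattern (names and the read_status() accessor are illustrative, not from the driver):

#include <stdbool.h>
#include <stdint.h>

#define READY_SAMPLES 3       /* consecutive zero reads required */
#define MAX_TRIES     500000  /* overall polling budget */

extern uint32_t read_status(void);  /* hypothetical register read */

static bool wait_stable_zero(uint32_t mask)
{
	unsigned int tries, zeros = 0;

	for (tries = 0; tries < MAX_TRIES; tries++) {
		if (read_status() & mask)
			zeros = 0;           /* busy again: restart the streak */
		else if (++zeros == READY_SAMPLES)
			return true;         /* three clean reads in a row */
		/* the driver sleeps 10-20us here via usleep_range() */
	}
	return false;
}
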
-static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -479,19 +528,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val &= HZIP_PREFETCH_ENABLE;
+ val |= HZIP_SVA_PREFETCH_DISABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
- val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
-static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -499,15 +549,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val |= HZIP_SVA_PREFETCH_DISABLE;
+ val &= HZIP_PREFETCH_ENABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
- val, !(val & HZIP_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
}
static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
@@ -530,6 +588,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
+ u32 zip_core_en;
+ int ret;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -565,10 +625,15 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
+ hisi_zip_open_sva_prefetch(qm);
/* let's open all compression/decompression cores */
- dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
- comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+
+ zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val;
+ dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask;
+ comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask;
writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
@@ -576,9 +641,19 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_set_high_perf(qm);
+ hisi_zip_literal_set(qm);
hisi_zip_enable_clock_gate(qm);
+ ret = hisi_dae_set_user_domain(qm);
+ if (ret)
+ goto close_sva_prefetch;
+
return 0;
+
+close_sva_prefetch:
+ hisi_zip_close_sva_prefetch(qm);
+ return ret;
}
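
A recurring pattern in this patch: values that used to live in separate pre-stored capability entries (core count, comp/decomp core enables, and so on) are now packed into single 32-bit registers such as ZIP_CORE_INFO and ZIP_CORE_EN, then unpacked on demand with the shift/mask pairs kept in zip_basic_cap_info. A hedged sketch of the decode step (the field layout below is illustrative, not the real hardware encoding):

#include <stdint.h>

struct cap_field {
	uint8_t  shift;  /* bit position of the field's LSB */
	uint32_t mask;   /* field width mask, applied after shifting */
};

/* hypothetical packing: core count in bits [7:0], comp cores in [15:8] */
static const struct cap_field core_num = { .shift = 0, .mask = 0xff };
static const struct cap_field comp_num = { .shift = 8, .mask = 0xff };

static uint32_t cap_extract(uint32_t reg, const struct cap_field *f)
{
	return (reg >> f->shift) & f->mask;
}
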
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -588,8 +663,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -603,7 +677,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
@@ -612,33 +687,33 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-
/* clear ZIP hw error source if having */
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
- writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable ZIP hw error interrupts */
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -788,7 +863,12 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
- u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ u8 zip_comp_core_num;
+ u32 zip_core_info;
+
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
if (core_num < zip_comp_core_num)
return qm->io_base + HZIP_CORE_DFX_BASE +
@@ -803,12 +883,16 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
+ u32 zip_core_info;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -834,6 +918,26 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
return 0;
}
+static int zip_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(zip_cap_regs);
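
DEFINE_SHOW_ATTRIBUTE() generates the file_operations for the single-shot seq_file above, so the new cap_regs debugfs entry dumps both capability tables in "name= value" form. Assuming the tables registered earlier in this patch, a read would look roughly like this (the exact path and values are illustrative):

	# cat /sys/kernel/debug/hisi_zip/<pci-bdf>/cap_regs
	QM_RAS_NFE_TYPE         = 0x00001c57
	...
	ZIP_CORE_INFO           = 0x12080206
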
+
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
@@ -854,6 +958,9 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hzip_regs)
debugfs_create_file("diff_regs", 0444, tmp_dir,
qm, &hzip_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &zip_cap_regs_fops);
}
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
@@ -912,9 +1019,14 @@ debugfs_remove:
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ u32 zip_core_info;
+ u8 zip_core_num;
int i, j;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < zip_core_num; i++)
@@ -946,10 +1058,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_info;
u32 zip_core_num;
int i, j, idx;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
sizeof(unsigned int), GFP_KERNEL);
@@ -991,6 +1106,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
+ u32 zip_core_info;
void __iomem *base;
int i, j, idx;
u32 val;
@@ -1005,8 +1121,11 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -1059,11 +1178,23 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+}
+
+static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+
+ writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+}
+
+static void hisi_zip_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1077,6 +1208,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1093,23 +1226,106 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
+{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
+ u32 err_status;
+
+ /* Get device hardware new error status */
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hisi_zip_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_err.reset_mask) {
+ /* Disable reporting of this error type until the device has recovered. */
+ hisi_zip_disable_error_report(qm, err_status);
+ zip_result = ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_zip_enable_error_report(qm);
+ }
+ }
+
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
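
The result-combining rule at the end of hisi_zip_get_err_result() is deliberately pessimistic: if either the ZIP engine or the attached DAE engine reports a reset-worthy error, the whole device is reset; otherwise the event is reported as recovered. Reduced to its essence (the enum values are illustrative stand-ins for acc_err_result):

enum err_result { ERR_NONE, ERR_RECOVERED, ERR_NEED_RESET };

static enum err_result combine_results(enum err_result zip,
					enum err_result dae)
{
	/* reset dominates: any sub-device needing reset forces one */
	if (zip == ERR_NEED_RESET || dae == ERR_NEED_RESET)
		return ERR_NEED_RESET;
	return ERR_RECOVERED;
}
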
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
+}
+
+static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
+ qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
+static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+
+ dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
- err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
- err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
}
@@ -1120,13 +1336,17 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
- .log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.open_sva_prefetch = hisi_zip_open_sva_prefetch,
.close_sva_prefetch = hisi_zip_close_sva_prefetch,
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
+ .get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
+ .disable_axi_error = hisi_zip_disable_axi_error,
+ .enable_axi_error = hisi_zip_enable_axi_error,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1146,11 +1366,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;
- ret = hisi_zip_set_high_perf(qm);
- if (ret)
- return ret;
-
- hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -1167,18 +1382,20 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(zip_pre_store_caps);
- zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
+ size = ARRAY_SIZE(zip_cap_query_info);
+ zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- zip_cap[i].type = zip_pre_store_caps[i];
- zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- zip_pre_store_caps[i], qm->cap_ver);
+ zip_cap[i].type = zip_cap_query_info[i].type;
+ zip_cap[i].name = zip_cap_query_info[i].name;
+ zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = zip_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1189,7 +1406,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1226,17 +1442,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
- alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7e93159c3b6b..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
return 0;
}
@@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
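
This and the remaining img-hash conversions follow the same shape: instead of poking fallback_req.base.flags (and fields such as src, nbytes and result) directly, the code now goes through the ahash_request_set_callback() and ahash_request_set_crypt() helpers, which also forward the caller's completion callback rather than dropping it. The delegation pattern in a hedged standalone form (kernel context assumed; error handling elided):

#include <crypto/hash.h>

/* sketch: forward an update to a fallback ahash transform */
static int fwd_update(struct ahash_request *req,
		      struct crypto_ahash *fallback,
		      struct ahash_request *fb_req)
{
	ahash_request_set_tfm(fb_req, fallback);
	/* keep only MAY_SLEEP from the caller, preserve its callback */
	ahash_request_set_callback(fb_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	/* src/result/nbytes in one call; result is unused for update */
	ahash_request_set_crypt(fb_req, req->src, NULL, req->nbytes);
	return crypto_ahash_update(fb_req);
}
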
@@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
+
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -695,22 +700,22 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha1-generic");
+ return img_hash_cra_init(tfm, "sha1-lib");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha224-generic");
+ return img_hash_cra_init(tfm, "sha224-lib");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha256-generic");
+ return img_hash_cra_init(tfm, "sha256-lib");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1084,7 +1089,7 @@ static const struct dev_pm_ops img_hash_pm_ops = {
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
- .remove_new = img_hash_remove,
+ .remove = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile
index 13f64f96c626..30d13fd5d58e 100644
--- a/drivers/crypto/inside-secure/Makefile
+++ b/drivers/crypto/inside-secure/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
+obj-y += eip93/
diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig
new file mode 100644
index 000000000000..8353d3d7ec9b
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_EIP93
+ tristate "Support for EIP93 crypto HW accelerators"
+ depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ help
+ The EIP93 provides various crypto HW accelerators. Select this if
+ you want to use the EIP93 modules for any of the crypto algorithms.
+
+ If the IP supports it, this provides offload for AES in ECB, CBC and
+ CTR modes, as well as DES and 3DES in ECB and CBC modes.
+
+ AEAD authenc(hmac(x),cipher(y)) is also provided for the supported
+ algorithms.
diff --git a/drivers/crypto/inside-secure/eip93/Makefile b/drivers/crypto/inside-secure/eip93/Makefile
new file mode 100644
index 000000000000..a3d3d3677cdc
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o
+
+crypto-hw-eip93-y += eip93-main.o eip93-common.o
+crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o
+crypto-hw-eip93-y += eip93-hash.o
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c
new file mode 100644
index 000000000000..18dd8a9a5165
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/aead.h>
+#include <crypto/md5.h>
+#include <crypto/null.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+#include <crypto/internal/des.h>
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+
+#include "eip93-aead.h"
+#include "eip93-cipher.h"
+#include "eip93-common.h"
+#include "eip93-regs.h"
+
+void eip93_aead_handle_result(struct crypto_async_request *async, int err)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct aead_request *req = aead_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ eip93_unmap_dma(eip93, rctx, req->src, req->dst);
+ eip93_handle_result(eip93, rctx, req->iv);
+
+ aead_request_complete(req, err);
+}
+
+static int eip93_aead_send_req(struct crypto_async_request *async)
+{
+ struct aead_request *req = aead_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+ int err;
+
+ err = check_valid_request(rctx);
+ if (err) {
+ aead_request_complete(req, err);
+ return err;
+ }
+
+ return eip93_send_req(async, req->iv, rctx);
+}
+
+/* Crypto aead API functions */
+static int eip93_aead_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.aead.base);
+
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct eip93_cipher_reqctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->flags = tmpl->flags;
+ ctx->type = tmpl->type;
+ ctx->set_assoc = true;
+
+ ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
+ if (!ctx->sa_record)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void eip93_aead_cra_exit(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ kfree(ctx->sa_record);
+}
+
+static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_authenc_keys keys;
+ struct crypto_aes_ctx aes;
+ struct sa_record *sa_record = ctx->sa_record;
+ u32 nonce = 0;
+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, len))
+ return -EINVAL;
+
+ if (IS_RFC3686(ctx->flags)) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ memcpy(&nonce, keys.enckey + keys.enckeylen,
+ CTR_RFC3686_NONCE_SIZE);
+ }
+
+ switch ((ctx->flags & EIP93_ALG_MASK)) {
+ case EIP93_ALG_DES:
+ ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ case EIP93_ALG_3DES:
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ case EIP93_ALG_AES:
+ ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ ctx->blksize = crypto_aead_blocksize(ctfm);
+ /* Encryption key */
+ eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
+ EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ ctx->authsize / sizeof(u32));
+
+ memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen);
+ ctx->sa_nonce = nonce;
+ sa_record->sa_nonce = nonce;
+
+ /* authentication key */
+ ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen,
+ ctx->authsize, sa_record->sa_i_digest,
+ sa_record->sa_o_digest, false);
+
+ ctx->set_assoc = true;
+
+ return ret;
+}
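
eip93_aead_setkey() leans on crypto_authenc_extractkeys() to split the single opaque key blob into its authentication and encryption halves. The blob format comes from include/crypto/authenc.h: an rtattr-framed crypto_authenc_key_param carrying the encryption key length, followed by the auth key and then the cipher key. A hedged sketch of producing such a blob (kernel context; the caller must size buf accordingly):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/types.h>

static unsigned int pack_authenc_key(u8 *buf,
				     const u8 *authkey, unsigned int alen,
				     const u8 *enckey, unsigned int elen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);  /* consumed by extractkeys */

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, alen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);

	return RTA_SPACE(sizeof(*param)) + alen + elen;
}
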
+
+static int eip93_aead_setauthsize(struct crypto_aead *ctfm,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->authsize = authsize;
+ ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ ctx->authsize / sizeof(u32));
+
+ return 0;
+}
+
+static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx,
+ struct aead_request *req)
+{
+ struct sa_record *sa_record = ctx->sa_record;
+
+ sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET;
+ sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET,
+ req->assoclen / sizeof(u32));
+
+ ctx->assoclen = req->assoclen;
+}
+
+static int eip93_aead_crypt(struct aead_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_async_request *async = &req->base;
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
+
+ ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base);
+ if (ret)
+ return ret;
+
+ rctx->textsize = req->cryptlen;
+ rctx->blksize = ctx->blksize;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = ctx->authsize;
+ rctx->sg_src = req->src;
+ rctx->sg_dst = req->dst;
+ rctx->ivsize = crypto_aead_ivsize(aead);
+ rctx->desc_flags = EIP93_DESC_AEAD;
+ rctx->sa_record_base = ctx->sa_record_base;
+
+ if (IS_DECRYPT(rctx->flags))
+ rctx->textsize -= rctx->authsize;
+
+ return eip93_aead_send_req(async);
+}
+
+static int eip93_aead_encrypt(struct aead_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ rctx->flags = ctx->flags;
+ rctx->flags |= EIP93_ENCRYPT;
+ if (ctx->set_assoc) {
+ eip93_aead_setassoc(ctx, req);
+ ctx->set_assoc = false;
+ }
+
+ if (req->assoclen != ctx->assoclen) {
+ dev_err(ctx->eip93->dev, "Request AAD length error\n");
+ return -EINVAL;
+ }
+
+ return eip93_aead_crypt(req);
+}
+
+static int eip93_aead_decrypt(struct aead_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
+ ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD |
+ EIP93_SA_CMD_COPY_DIGEST);
+
+ rctx->flags = ctx->flags;
+ rctx->flags |= EIP93_DECRYPT;
+ if (ctx->set_assoc) {
+ eip93_aead_setassoc(ctx, req);
+ ctx->set_assoc = false;
+ }
+
+ if (req->assoclen != ctx->assoclen) {
+ dev_err(ctx->eip93->dev, "Request AAD length error\n");
+ return -EINVAL;
+ }
+
+ return eip93_aead_crypt(req);
+}
+
+/* Available authenc algorithms in this module */
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93), cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h
new file mode 100644
index 000000000000..e2fa8fd39c50
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_AEAD_H_
+#define _EIP93_AEAD_H_
+
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null;
+
+void eip93_aead_handle_result(struct crypto_async_request *async, int err);
+
+#endif /* _EIP93_AEAD_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h
new file mode 100644
index 000000000000..1d83d39cab2a
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_AES_H_
+#define _EIP93_AES_H_
+
+extern struct eip93_alg_template eip93_alg_ecb_aes;
+extern struct eip93_alg_template eip93_alg_cbc_aes;
+extern struct eip93_alg_template eip93_alg_ctr_aes;
+extern struct eip93_alg_template eip93_alg_rfc3686_aes;
+
+#endif /* _EIP93_AES_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
new file mode 100644
index 000000000000..1f2d6846610f
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <linux/dma-mapping.h>
+
+#include "eip93-aes.h"
+#include "eip93-cipher.h"
+#include "eip93-common.h"
+#include "eip93-des.h"
+#include "eip93-regs.h"
+
+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+
+ eip93_unmap_dma(eip93, rctx, req->src, req->dst);
+ eip93_handle_result(eip93, rctx, req->iv);
+
+ skcipher_request_complete(req, err);
+}
+
+static int eip93_skcipher_send_req(struct crypto_async_request *async)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ int err;
+
+ err = check_valid_request(rctx);
+
+ if (err) {
+ skcipher_request_complete(req, err);
+ return err;
+ }
+
+ return eip93_send_req(async, req->iv, rctx);
+}
+
+/* Crypto skcipher API functions */
+static int eip93_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct eip93_cipher_reqctx));
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->type = tmpl->type;
+
+ ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
+ if (!ctx->sa_record)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ kfree(ctx->sa_record);
+}
+
+static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template,
+ alg.skcipher.base);
+ struct sa_record *sa_record = ctx->sa_record;
+ unsigned int keylen = len;
+ u32 flags = tmpl->flags;
+ u32 nonce = 0;
+ int ret;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (IS_RFC3686(flags)) {
+ if (len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ keylen = len - CTR_RFC3686_NONCE_SIZE;
+ memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);
+ }
+
+ if (flags & EIP93_ALG_DES) {
+ ctx->blksize = DES_BLOCK_SIZE;
+ ret = verify_skcipher_des_key(ctfm, key);
+ if (ret)
+ return ret;
+ }
+ if (flags & EIP93_ALG_3DES) {
+ ctx->blksize = DES3_EDE_BLOCK_SIZE;
+ ret = verify_skcipher_des3_key(ctfm, key);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & EIP93_ALG_AES) {
+ struct crypto_aes_ctx aes;
+
+ ctx->blksize = AES_BLOCK_SIZE;
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret)
+ return ret;
+ }
+
+ eip93_set_sa_record(sa_record, keylen, flags);
+
+ memcpy(sa_record->sa_key, key, keylen);
+ ctx->sa_nonce = nonce;
+ sa_record->sa_nonce = nonce;
+
+ return 0;
+}
+
+static int eip93_skcipher_crypt(struct skcipher_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_async_request *async = &req->base;
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen,
+ crypto_skcipher_blocksize(skcipher)))
+ return -EINVAL;
+
+ ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base);
+ if (ret)
+ return ret;
+
+ rctx->assoclen = 0;
+ rctx->textsize = req->cryptlen;
+ rctx->authsize = 0;
+ rctx->sg_src = req->src;
+ rctx->sg_dst = req->dst;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->blksize = ctx->blksize;
+ rctx->desc_flags = EIP93_DESC_SKCIPHER;
+ rctx->sa_record_base = ctx->sa_record_base;
+
+ return eip93_skcipher_send_req(async);
+}
+
+static int eip93_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
+ rctx->flags = tmpl->flags;
+ rctx->flags |= EIP93_ENCRYPT;
+
+ return eip93_skcipher_crypt(req);
+}
+
+static int eip93_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
+ ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
+
+ rctx->flags = tmpl->flags;
+ rctx->flags |= EIP93_DECRYPT;
+
+ return eip93_skcipher_crypt(req);
+}
+
+/* Available algorithms in this module */
+struct eip93_alg_template eip93_alg_ecb_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ctr_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CTR | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686(ctr(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ecb_des = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(des)",
+			.cra_driver_name = "ecb(des-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_des = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc(des-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ecb_des3_ede = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_3DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb(des3_ede-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc(des3_ede-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
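
The templates above only wire the eip93_skcipher_* handlers into the generic
skcipher API; consumers reach them through cra_name, and the crypto core picks
this driver when it wins on priority. A minimal sketch of such a caller,
assuming a kernel user with a DMA-able buffer (everything except the
"cbc(aes)" name is illustrative, not part of this patch):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes_encrypt(const u8 *key, u8 *buf, unsigned int len,
				u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out;

	/* For CBC, len must be a multiple of AES_BLOCK_SIZE */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The driver is async: wait for the completion callback */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}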
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
new file mode 100644
index 000000000000..6e2545ebd879
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_CIPHER_H_
+#define _EIP93_CIPHER_H_
+
+#include "eip93-main.h"
+
+struct eip93_crypto_ctx {
+ struct eip93_device *eip93;
+ u32 flags;
+ struct sa_record *sa_record;
+ u32 sa_nonce;
+ int blksize;
+ dma_addr_t sa_record_base;
+ /* AEAD specific */
+ unsigned int authsize;
+ unsigned int assoclen;
+ bool set_assoc;
+ enum eip93_alg_type type;
+};
+
+struct eip93_cipher_reqctx {
+ u16 desc_flags;
+ u16 flags;
+ unsigned int blksize;
+ unsigned int ivsize;
+ unsigned int textsize;
+ unsigned int assoclen;
+ unsigned int authsize;
+ dma_addr_t sa_record_base;
+ struct sa_state *sa_state;
+ dma_addr_t sa_state_base;
+ struct eip93_descriptor *cdesc;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int src_nents;
+ int dst_nents;
+ struct sa_state *sa_state_ctr;
+ dma_addr_t sa_state_ctr_base;
+};
+
+int check_valid_request(struct eip93_cipher_reqctx *rctx);
+
+void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ struct scatterlist *reqsrc, struct scatterlist *reqdst);
+
+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err);
+
+int eip93_send_req(struct crypto_async_request *async,
+ const u8 *reqiv, struct eip93_cipher_reqctx *rctx);
+
+void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ u8 *reqiv);
+
+#endif /* _EIP93_CIPHER_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c
new file mode 100644
index 000000000000..66153aa2493f
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include "eip93-cipher.h"
+#include "eip93-hash.h"
+#include "eip93-common.h"
+#include "eip93-main.h"
+#include "eip93-regs.h"
+
+int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
+{
+ u32 ext_err;
+
+ if (!err)
+ return 0;
+
+ switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
+ case EIP93_PE_CTRL_PE_AUTH_ERR:
+ case EIP93_PE_CTRL_PE_PAD_ERR:
+ return -EBADMSG;
+ /* let software handle anti-replay errors */
+ case EIP93_PE_CTRL_PE_SEQNUM_ERR:
+ return 0;
+ case EIP93_PE_CTRL_PE_EXT_ERR:
+ break;
+ default:
+ dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
+ return -EINVAL;
+ }
+
+ /* Parse additional ext errors */
+ ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
+ switch (ext_err) {
+ case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
+ case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
+ return -EIO;
+ case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
+ return -EACCES;
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
+ case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
+ return -EINVAL;
+ case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
+ case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
+ return -EBADMSG;
+ default:
+ dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
+ return -EINVAL;
+ }
+}
+
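+/*
+ * Ring helpers: the command (cdr) and result (rdr) rings are circular
+ * buffers. A ring is treated as full when the write pointer is one slot
+ * behind the read pointer (or at base_end while read is at base), which
+ * keeps one slot free so that "full" can be told apart from "empty"
+ * (write == read). eip93_put_descriptor() reserves a result slot for
+ * every command slot, so both rings advance in lock step.
+ */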
+static void *eip93_ring_next_wptr(struct eip93_device *eip93,
+ struct eip93_desc_ring *ring)
+{
+ void *ptr = ring->write;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end)
+ ring->write = ring->base;
+ else
+ ring->write += ring->offset;
+
+ return ptr;
+}
+
+static void *eip93_ring_next_rptr(struct eip93_device *eip93,
+ struct eip93_desc_ring *ring)
+{
+ void *ptr = ring->read;
+
+ if (ring->write == ring->read)
+ return ERR_PTR(-ENOENT);
+
+ if (ring->read == ring->base_end)
+ ring->read = ring->base;
+ else
+ ring->read += ring->offset;
+
+ return ptr;
+}
+
+int eip93_put_descriptor(struct eip93_device *eip93,
+ struct eip93_descriptor *desc)
+{
+ struct eip93_descriptor *cdesc;
+ struct eip93_descriptor *rdesc;
+
+ rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
+ if (IS_ERR(rdesc))
+ return -ENOENT;
+
+ cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
+ if (IS_ERR(cdesc))
+ return -ENOENT;
+
+ memset(rdesc, 0, sizeof(struct eip93_descriptor));
+
+ memcpy(cdesc, desc, sizeof(struct eip93_descriptor));
+
+ return 0;
+}
+
+void *eip93_get_descriptor(struct eip93_device *eip93)
+{
+ struct eip93_descriptor *cdesc;
+ void *ptr;
+
+ cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
+ if (IS_ERR(cdesc))
+ return ERR_PTR(-ENOENT);
+
+ memset(cdesc, 0, sizeof(struct eip93_descriptor));
+
+ ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
+ if (IS_ERR(ptr))
+ return ERR_PTR(-ENOENT);
+
+ return ptr;
+}
+
+static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
+{
+ if (!*sg || !len)
+ return;
+
+ free_pages((unsigned long)sg_virt(*sg), get_order(len));
+ kfree(*sg);
+ *sg = NULL;
+}
+
+static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
+ const u32 len, const bool copy)
+{
+ void *pages;
+
+ *dst = kmalloc(sizeof(**dst), GFP_KERNEL);
+ if (!*dst)
+ return -ENOMEM;
+
+ pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(len));
+ if (!pages) {
+ kfree(*dst);
+ *dst = NULL;
+ return -ENOMEM;
+ }
+
+ sg_init_table(*dst, 1);
+ sg_set_buf(*dst, pages, len);
+
+ /* copy only as requested */
+ if (copy)
+ sg_copy_to_buffer(src, sg_nents(src), pages, len);
+
+ return 0;
+}
+
+static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
+ const int blksize)
+{
+ int nents;
+
+ for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return false;
+
+ if (len <= sg->length) {
+ if (!IS_ALIGNED(len, blksize))
+ return false;
+
+ return true;
+ }
+
+ if (!IS_ALIGNED(sg->length, blksize))
+ return false;
+
+ len -= sg->length;
+ }
+ return false;
+}
+
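+/*
+ * Validate a request: text lengths must match the cipher block size and
+ * every scatterlist segment must be 32-bit aligned, otherwise the data
+ * is bounced through a freshly allocated linear buffer.
+ */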
+int check_valid_request(struct eip93_cipher_reqctx *rctx)
+{
+ struct scatterlist *src = rctx->sg_src;
+ struct scatterlist *dst = rctx->sg_dst;
+ u32 textsize = rctx->textsize;
+ u32 authsize = rctx->authsize;
+ u32 blksize = rctx->blksize;
+ u32 totlen_src = rctx->assoclen + rctx->textsize;
+ u32 totlen_dst = rctx->assoclen + rctx->textsize;
+ u32 copy_len;
+ bool src_align, dst_align;
+ int src_nents, dst_nents;
+ int err = -EINVAL;
+
+ if (!IS_CTR(rctx->flags)) {
+ if (!IS_ALIGNED(textsize, blksize))
+ return err;
+ }
+
+ if (authsize) {
+ if (IS_ENCRYPT(rctx->flags))
+ totlen_dst += authsize;
+ else
+ totlen_src += authsize;
+ }
+
+ src_nents = sg_nents_for_len(src, totlen_src);
+ if (src_nents < 0)
+ return src_nents;
+
+ dst_nents = sg_nents_for_len(dst, totlen_dst);
+ if (dst_nents < 0)
+ return dst_nents;
+
+ if (src == dst) {
+ src_nents = max(src_nents, dst_nents);
+ dst_nents = src_nents;
+ if (unlikely((totlen_src || totlen_dst) && !src_nents))
+ return err;
+
+ } else {
+ if (unlikely(totlen_src && !src_nents))
+ return err;
+
+ if (unlikely(totlen_dst && !dst_nents))
+ return err;
+ }
+
+ if (authsize) {
+ if (dst_nents == 1 && src_nents == 1) {
+ src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
+ if (src == dst)
+ dst_align = src_align;
+ else
+ dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
+ } else {
+ src_align = false;
+ dst_align = false;
+ }
+ } else {
+ src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
+ if (src == dst)
+ dst_align = src_align;
+ else
+ dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
+ }
+
+ copy_len = max(totlen_src, totlen_dst);
+ if (!src_align) {
+ err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
+ if (err)
+ return err;
+ }
+
+ if (!dst_align) {
+ err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
+ if (err)
+ return err;
+ }
+
+ src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
+ if (src_nents < 0)
+ return src_nents;
+
+ dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
+ if (dst_nents < 0)
+ return dst_nents;
+
+ rctx->src_nents = src_nents;
+ rctx->dst_nents = dst_nents;
+
+ return 0;
+}
+
+/*
+ * Set sa_record function:
+ * Even though sa_record is already zeroed, keep the explicit " = 0"
+ * assignments for readability.
+ */
+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
+ const u32 flags)
+{
+ /* Reset cmd word */
+ sa_record->sa_cmd0_word = 0;
+ sa_record->sa_cmd1_word = 0;
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
+ if (!IS_ECB(flags))
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;
+
+ switch ((flags & EIP93_ALG_MASK)) {
+ case EIP93_ALG_AES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
+ sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
+ keylen >> 3);
+ break;
+ case EIP93_ALG_3DES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
+ break;
+ case EIP93_ALG_DES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
+ break;
+ default:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
+ }
+
+ switch ((flags & EIP93_HASH_MASK)) {
+ case EIP93_HASH_SHA256:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
+ break;
+ case EIP93_HASH_SHA224:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
+ break;
+ case EIP93_HASH_SHA1:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
+ break;
+ case EIP93_HASH_MD5:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
+ break;
+ default:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
+ }
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;
+
+ switch ((flags & EIP93_MODE_MASK)) {
+ case EIP93_MODE_CBC:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
+ break;
+ case EIP93_MODE_CTR:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
+ break;
+ case EIP93_MODE_ECB:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
+ break;
+ }
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
+ if (IS_HASH(flags)) {
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
+ }
+
+ if (IS_HMAC(flags)) {
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
+ }
+
+ sa_record->sa_spi = 0x0;
+ sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
+ sa_record->sa_seqmum_mask[1] = 0x0;
+}
+
+/*
+ * Poor man's scatter/gather function:
+ * Create a descriptor for every segment to avoid copying buffers.
+ * For performance it is better to let the hardware perform multiple DMA
+ * operations than to linearize the data in software.
+ */
+static int eip93_scatter_combine(struct eip93_device *eip93,
+ struct eip93_cipher_reqctx *rctx,
+ u32 datalen, u32 split, int offsetin)
+{
+ struct eip93_descriptor *cdesc = rctx->cdesc;
+ struct scatterlist *sgsrc = rctx->sg_src;
+ struct scatterlist *sgdst = rctx->sg_dst;
+ unsigned int remainin = sg_dma_len(sgsrc);
+ unsigned int remainout = sg_dma_len(sgdst);
+ dma_addr_t saddr = sg_dma_address(sgsrc);
+ dma_addr_t daddr = sg_dma_address(sgdst);
+ dma_addr_t state_addr;
+ u32 src_addr, dst_addr, len, n;
+ bool nextin = false;
+ bool nextout = false;
+ int offsetout = 0;
+ int err;
+
+ if (IS_ECB(rctx->flags))
+ rctx->sa_state_base = 0;
+
+ if (split < datalen) {
+ state_addr = rctx->sa_state_ctr_base;
+ n = split;
+ } else {
+ state_addr = rctx->sa_state_base;
+ n = datalen;
+ }
+
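+	/*
+	 * Walk the source and destination segment lists in lock step: each
+	 * iteration emits one descriptor covering min(remainin, remainout, n)
+	 * bytes, then advances whichever side ran out of data.
+	 */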
+ do {
+ if (nextin) {
+ sgsrc = sg_next(sgsrc);
+ remainin = sg_dma_len(sgsrc);
+ if (remainin == 0)
+ continue;
+
+ saddr = sg_dma_address(sgsrc);
+ offsetin = 0;
+ nextin = false;
+ }
+
+ if (nextout) {
+ sgdst = sg_next(sgdst);
+ remainout = sg_dma_len(sgdst);
+ if (remainout == 0)
+ continue;
+
+ daddr = sg_dma_address(sgdst);
+ offsetout = 0;
+ nextout = false;
+ }
+ src_addr = saddr + offsetin;
+ dst_addr = daddr + offsetout;
+
+ if (remainin == remainout) {
+ len = remainin;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ nextin = true;
+ nextout = true;
+ }
+ } else if (remainin < remainout) {
+ len = remainin;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ offsetout += len;
+ remainout -= len;
+ nextin = true;
+ }
+ } else {
+ len = remainout;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ offsetin += len;
+ remainin -= len;
+ nextout = true;
+ }
+ }
+ n -= len;
+
+ cdesc->src_addr = src_addr;
+ cdesc->dst_addr = dst_addr;
+ cdesc->state_addr = state_addr;
+ cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
+ EIP93_PE_LENGTH_HOST_READY);
+ cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);
+
+ if (n == 0) {
+ n = datalen - split;
+ split = datalen;
+ state_addr = rctx->sa_state_base;
+ }
+
+ if (n == 0)
+ cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
+ EIP93_DESC_LAST);
+
+ /*
+		 * If the ring is full, delay and retry in a loop; nothing
+		 * needs to be rolled back. This could be refined by backing
+		 * off only while the engine reports EIP93_RING_BUSY.
+ */
+again:
+ scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
+ err = eip93_put_descriptor(eip93, cdesc);
+ if (err) {
+ usleep_range(EIP93_RING_BUSY_DELAY,
+ EIP93_RING_BUSY_DELAY * 2);
+ goto again;
+ }
+ /* Writing new descriptor count starts DMA action */
+ writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
+ } while (n);
+
+ return -EINPROGRESS;
+}
+
+int eip93_send_req(struct crypto_async_request *async,
+ const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct scatterlist *src = rctx->sg_src;
+ struct scatterlist *dst = rctx->sg_dst;
+ struct sa_state *sa_state;
+ struct eip93_descriptor cdesc;
+ u32 flags = rctx->flags;
+ int offsetin = 0, err;
+ u32 datalen = rctx->assoclen + rctx->textsize;
+ u32 split = datalen;
+ u32 start, end, ctr, blocks;
+ u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ int crypto_async_idr;
+
+ rctx->sa_state_ctr = NULL;
+ rctx->sa_state = NULL;
+
+ if (IS_ECB(flags))
+ goto skip_iv;
+
+ memcpy(iv, reqiv, rctx->ivsize);
+
+ rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
+ if (!rctx->sa_state)
+ return -ENOMEM;
+
+ sa_state = rctx->sa_state;
+
+ memcpy(sa_state->state_iv, iv, rctx->ivsize);
+ if (IS_RFC3686(flags)) {
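+		/*
+		 * RFC3686 counter block layout: 4-byte nonce, 8-byte IV,
+		 * 32-bit block counter starting at 1.
+		 */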
+ sa_state->state_iv[0] = ctx->sa_nonce;
+ sa_state->state_iv[1] = iv[0];
+ sa_state->state_iv[2] = iv[1];
+ sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
+ } else if (!IS_HMAC(flags) && IS_CTR(flags)) {
+ /* Compute data length. */
+ blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
+ ctr = be32_to_cpu((__be32 __force)iv[3]);
+ /* Check 32bit counter overflow. */
+ start = ctr;
+ end = start + blocks - 1;
+ if (end < start) {
+ split = AES_BLOCK_SIZE * -start;
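+			/*
+			 * In 32-bit arithmetic -start is the number of
+			 * blocks left before the counter wraps; those
+			 * bytes are processed with the original IV and
+			 * the remainder with a manually wrapped counter.
+			 */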
+ /*
+ * Increment the counter manually to cope with
+ * the hardware counter overflow.
+ */
+ iv[3] = 0xffffffff;
+ crypto_inc((u8 *)iv, AES_BLOCK_SIZE);
+
+ rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
+ GFP_KERNEL);
+ if (!rctx->sa_state_ctr) {
+ err = -ENOMEM;
+ goto free_sa_state;
+ }
+
+ memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
+ memcpy(sa_state->state_iv, iv, rctx->ivsize);
+
+ rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
+ if (err)
+ goto free_sa_state_ctr;
+ }
+ }
+
+ rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
+ sizeof(*rctx->sa_state), DMA_TO_DEVICE);
+ err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (err)
+ goto free_sa_state_ctr_dma;
+
+skip_iv:
+
+ cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
+ EIP93_PE_CTRL_HOST_READY);
+ cdesc.sa_addr = rctx->sa_record_base;
+ cdesc.arc4_addr = 0;
+
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
+ crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
+ EIP93_RING_NUM - 1, GFP_ATOMIC);
+
+ cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
+ FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);
+
+ rctx->cdesc = &cdesc;
+
+	/*
+	 * Map the destination DMA_BIDIRECTIONAL so the cache is also
+	 * invalidated for the result (implies __dma_cache_wback_inv).
+	 */
+ if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
+ err = -ENOMEM;
+ goto free_sa_state_ctr_dma;
+ }
+
+ if (src != dst &&
+ !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
+ err = -ENOMEM;
+ goto free_sg_dma;
+ }
+
+ return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);
+
+free_sg_dma:
+ dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
+free_sa_state_ctr_dma:
+ if (rctx->sa_state_ctr)
+ dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_TO_DEVICE);
+free_sa_state_ctr:
+ kfree(rctx->sa_state_ctr);
+ if (rctx->sa_state)
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*rctx->sa_state),
+ DMA_TO_DEVICE);
+free_sa_state:
+ kfree(rctx->sa_state);
+
+ return err;
+}
+
+void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ struct scatterlist *reqsrc, struct scatterlist *reqdst)
+{
+ u32 len = rctx->assoclen + rctx->textsize;
+ u32 authsize = rctx->authsize;
+ u32 flags = rctx->flags;
+ u32 *otag;
+ int i;
+
+ if (rctx->sg_src == rctx->sg_dst) {
+ dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
+ DMA_BIDIRECTIONAL);
+ goto process_tag;
+ }
+
+ dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->sg_src != reqsrc)
+ eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);
+
+ dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
+ DMA_BIDIRECTIONAL);
+
+	/* SHA auth tags need conversion from big-endian to host byte order */
+process_tag:
+ if (IS_DECRYPT(flags))
+ authsize = 0;
+
+ if (authsize) {
+ if (!IS_HASH_MD5(flags)) {
+ otag = sg_virt(rctx->sg_dst) + len;
+ for (i = 0; i < (authsize / 4); i++)
+ otag[i] = be32_to_cpu((__be32 __force)otag[i]);
+ }
+ }
+
+ if (rctx->sg_dst != reqdst) {
+ sg_copy_from_buffer(reqdst, sg_nents(reqdst),
+ sg_virt(rctx->sg_dst), len + authsize);
+ eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
+ }
+}
+
+void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ u8 *reqiv)
+{
+ if (rctx->sa_state_ctr)
+ dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_FROM_DEVICE);
+
+ if (rctx->sa_state)
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*rctx->sa_state),
+ DMA_FROM_DEVICE);
+
+ if (!IS_ECB(rctx->flags))
+ memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);
+
+ kfree(rctx->sa_state_ctr);
+ kfree(rctx->sa_state);
+}
+
+int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
+ unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
+ bool skip_ipad)
+{
+ u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
+ struct crypto_ahash *ahash_tfm;
+ struct eip93_hash_reqctx *rctx;
+ struct ahash_request *req;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist sg[1];
+ const char *alg_name;
+ int i, ret;
+
+ switch (ctx_flags & EIP93_HASH_MASK) {
+ case EIP93_HASH_SHA256:
+ alg_name = "sha256-eip93";
+ break;
+ case EIP93_HASH_SHA224:
+ alg_name = "sha224-eip93";
+ break;
+ case EIP93_HASH_SHA1:
+ alg_name = "sha1-eip93";
+ break;
+ case EIP93_HASH_MD5:
+ alg_name = "md5-eip93";
+ break;
+ default: /* Impossible */
+ return -EINVAL;
+ }
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_ahash;
+ }
+
+ rctx = ahash_request_ctx_dma(req);
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ /* Hash the key if > SHA256_BLOCK_SIZE */
+ if (keylen > SHA256_BLOCK_SIZE) {
+ sg_init_one(&sg[0], key, keylen);
+
+ ahash_request_set_crypt(req, sg, ipad, keylen);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret)
+ goto err_req;
+
+ keylen = hashlen;
+ } else {
+ memcpy(ipad, key, keylen);
+ }
+
+	/* Zero-pad the key and duplicate it into opad */
+ memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
+ memcpy(opad, ipad, SHA256_BLOCK_SIZE);
+
+ /* Pad with HMAC constants */
+ for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ if (skip_ipad) {
+ memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
+ } else {
+ /* Hash ipad */
+ sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
+ ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto err_req;
+
+ /* Disable HASH_FINALIZE for ipad hash */
+ rctx->partial_hash = true;
+
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret)
+ goto err_req;
+ }
+
+ /* Hash opad */
+ sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
+ ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto err_req;
+
+ /* Disable HASH_FINALIZE for opad hash */
+ rctx->partial_hash = true;
+
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret)
+ goto err_req;
+
+ if (!IS_HASH_MD5(ctx_flags)) {
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
+ u32 *ipad_hash = (u32 *)dest_ipad;
+ u32 *opad_hash = (u32 *)dest_opad;
+
+ if (!skip_ipad)
+ ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
+ opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
+ }
+ }
+
+err_req:
+ ahash_request_free(req);
+err_ahash:
+ crypto_free_ahash(ahash_tfm);
+
+ return ret;
+}
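
eip93_hmac_setkey() follows the standard HMAC key schedule from RFC 2104: a
key longer than the block size is hashed first, the zero-padded key is XORed
with the inner/outer pad constants, and both padded blocks are pre-hashed so
the engine can continue from the intermediate digests. A standalone sketch of
the pad preparation, assuming the key already fits in one 64-byte block
(illustrative, not driver code):

#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* MD5/SHA-1/SHA-224/SHA-256 block size */

static void hmac_prepare_pads(const uint8_t *key, size_t keylen,
			      uint8_t ipad[HMAC_BLOCK_SIZE],
			      uint8_t opad[HMAC_BLOCK_SIZE])
{
	size_t i;

	/* Zero-pad the key to a full block, as eip93_hmac_setkey() does */
	memset(ipad, 0, HMAC_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);

	/* XOR in the RFC 2104 constants (ipad 0x36, opad 0x5c) */
	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}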
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h
new file mode 100644
index 000000000000..80964cfa34df
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#ifndef _EIP93_COMMON_H_
+#define _EIP93_COMMON_H_
+
+void *eip93_get_descriptor(struct eip93_device *eip93);
+int eip93_put_descriptor(struct eip93_device *eip93, struct eip93_descriptor *desc);
+
+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
+ const u32 flags);
+
+int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err);
+
+int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
+ unsigned int hashlen, u8 *ipad, u8 *opad,
+ bool skip_ipad);
+
+#endif /* _EIP93_COMMON_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h
new file mode 100644
index 000000000000..74748df04acf
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-des.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_DES_H_
+#define _EIP93_DES_H_
+
+extern struct eip93_alg_template eip93_alg_ecb_des;
+extern struct eip93_alg_template eip93_alg_cbc_des;
+extern struct eip93_alg_template eip93_alg_ecb_des3_ede;
+extern struct eip93_alg_template eip93_alg_cbc_des3_ede;
+
+#endif /* _EIP93_DES_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
new file mode 100644
index 000000000000..ac13d90a2b7c
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024
+ *
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/md5.h>
+#include <crypto/hmac.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include "eip93-cipher.h"
+#include "eip93-hash.h"
+#include "eip93-main.h"
+#include "eip93-common.h"
+#include "eip93-regs.h"
+
+static void eip93_hash_free_data_blocks(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct mkt_hash_block *block, *tmp;
+
+ list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
+ dma_unmap_single(eip93->dev, block->data_dma,
+ SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
+ kfree(block);
+ }
+ if (!list_empty(&rctx->blocks))
+ INIT_LIST_HEAD(&rctx->blocks);
+
+ if (rctx->finalize)
+ dma_unmap_single(eip93->dev, rctx->data_dma,
+ rctx->data_used,
+ DMA_TO_DEVICE);
+}
+
+static void eip93_hash_free_sa_record(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+
+ if (IS_HMAC(ctx->flags))
+ dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
+ sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);
+
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(rctx->sa_record), DMA_TO_DEVICE);
+}
+
+void eip93_hash_handle_result(struct crypto_async_request *async, int err)
+{
+ struct ahash_request *req = ahash_request_cast(async);
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int i;
+
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_FROM_DEVICE);
+
+ /*
+	 * With partial_hash, assume a SHA256_DIGEST_SIZE buffer is passed.
+	 * This handles SHA224, which has a 32-byte intermediate digest.
+ */
+ if (rctx->partial_hash)
+ digestsize = SHA256_DIGEST_SIZE;
+
+ if (rctx->finalize || rctx->partial_hash) {
+ /* bytes needs to be swapped for req->result */
+ if (!IS_HASH_MD5(ctx->flags)) {
+ for (i = 0; i < digestsize / sizeof(u32); i++) {
+ u32 *digest = (u32 *)sa_state->state_i_digest;
+
+ digest[i] = be32_to_cpu((__be32 __force)digest[i]);
+ }
+ }
+
+ memcpy(req->result, sa_state->state_i_digest, digestsize);
+ }
+
+ eip93_hash_free_sa_record(req);
+ eip93_hash_free_data_blocks(req);
+
+ ahash_request_complete(req, err);
+}
+
+static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
+{
+ static const u32 sha256_init[] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+ };
+ static const u32 sha224_init[] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
+ };
+ static const u32 sha1_init[] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
+ };
+ static const u32 md5_init[] = {
+ MD5_H0, MD5_H1, MD5_H2, MD5_H3
+ };
+
+ /* Init HASH constant */
+ switch (hash) {
+ case EIP93_HASH_SHA256:
+ memcpy(digest, sha256_init, sizeof(sha256_init));
+ return;
+ case EIP93_HASH_SHA224:
+ memcpy(digest, sha224_init, sizeof(sha224_init));
+ return;
+ case EIP93_HASH_SHA1:
+ memcpy(digest, sha1_init, sizeof(sha1_init));
+ return;
+ case EIP93_HASH_MD5:
+ memcpy(digest, md5_init, sizeof(md5_init));
+ return;
+ default: /* Impossible */
+ return;
+ }
+}
+
+static void eip93_hash_export_sa_state(struct ahash_request *req,
+ struct eip93_hash_export_state *state)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ /*
+	 * EIP93 has special handling for state_byte_cnt in sa_state.
+	 * Even if a zero-length packet is passed (and a BADMSG is returned),
+	 * state_byte_cnt is still advanced by the digest block handled by the
+	 * hash primitive. This is problematic for export/import, as EIP93
+	 * expects a state_byte_cnt of 0 for the very first iteration.
+ */
+ if (!rctx->len)
+ memset(state->state_len, 0, sizeof(u32) * 2);
+ else
+ memcpy(state->state_len, sa_state->state_byte_cnt,
+ sizeof(u32) * 2);
+ memcpy(state->state_hash, sa_state->state_i_digest,
+ SHA256_DIGEST_SIZE);
+ state->len = rctx->len;
+ state->data_used = rctx->data_used;
+}
+
+static void __eip93_hash_init(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ int digestsize;
+
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ eip93_set_sa_record(sa_record, 0, ctx->flags);
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
+ EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ digestsize / sizeof(u32));
+
+ /*
+ * HMAC special handling
+	 * Enabling CMD_HMAC forces the inner hash to always be finalized.
+	 * This causes problems when handling messages > 64 bytes, as we
+	 * need to produce intermediate inner hashes while sending the
+	 * intermediate 64-byte blocks.
+	 *
+	 * To handle this, enable CMD_HMAC only on the last block.
+	 * We make a duplicate of sa_record and, on the last descriptor,
+	 * we pass a dedicated sa_record with CMD_HMAC enabled to make
+ * EIP93 apply the outer hash.
+ */
+ if (IS_HMAC(ctx->flags)) {
+ struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;
+
+ memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
+ /* Copy pre-hashed opad for HMAC */
+ memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);
+
+ /* Disable HMAC for hash normal sa_record */
+ sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
+ }
+
+ rctx->len = 0;
+ rctx->data_used = 0;
+ rctx->partial_hash = false;
+ rctx->finalize = false;
+ INIT_LIST_HEAD(&rctx->blocks);
+}
+
+static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data,
+ dma_addr_t *data_dma, u32 len, bool last)
+{
+ struct ahash_request *req = ahash_request_cast(async);
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct eip93_descriptor cdesc = { };
+ dma_addr_t src_addr;
+ int ret;
+
+ /* Map block data to DMA */
+ src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, src_addr);
+ if (ret)
+ return ret;
+
+ cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
+ EIP93_PE_CTRL_HOST_READY);
+ cdesc.sa_addr = rctx->sa_record_base;
+ cdesc.arc4_addr = 0;
+
+ cdesc.state_addr = rctx->sa_state_base;
+ cdesc.src_addr = src_addr;
+ cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
+ EIP93_PE_LENGTH_HOST_READY);
+ cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
+ len);
+
+ cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);
+
+ if (last) {
+ int crypto_async_idr;
+
+ if (rctx->finalize && !rctx->partial_hash) {
+ /* For last block, pass sa_record with CMD_HMAC enabled */
+ if (IS_HMAC(ctx->flags)) {
+ struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;
+
+ rctx->sa_record_hmac_base = dma_map_single(eip93->dev,
+ sa_record_hmac,
+ sizeof(*sa_record_hmac),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base);
+ if (ret)
+ return ret;
+
+ cdesc.sa_addr = rctx->sa_record_hmac_base;
+ }
+
+ cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
+ }
+
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
+ crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
+ EIP93_RING_NUM - 1, GFP_ATOMIC);
+
+ cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
+ FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
+ }
+
+again:
+ scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
+ ret = eip93_put_descriptor(eip93, &cdesc);
+ if (ret) {
+ usleep_range(EIP93_RING_BUSY_DELAY,
+ EIP93_RING_BUSY_DELAY * 2);
+ goto again;
+ }
+
+ /* Writing new descriptor count starts DMA action */
+ writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
+
+ *data_dma = src_addr;
+ return 0;
+}
+
+static int eip93_hash_init(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2);
+ eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK,
+ sa_state->state_i_digest);
+
+ __eip93_hash_init(req);
+
+ /* For HMAC setup the initial block for ipad */
+ if (IS_HMAC(ctx->flags)) {
+ memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE);
+
+ rctx->data_used = SHA256_BLOCK_SIZE;
+ rctx->len += SHA256_BLOCK_SIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * With complete_req true, we wait for the engine to consume all the blocks
+ * in the list; otherwise we just queue the blocks to the engine, as final()
+ * will wait. This is useful for finup().
+ */
+static int __eip93_hash_update(struct ahash_request *req, bool complete_req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_async_request *async = &req->base;
+ unsigned int read, to_consume = req->nbytes;
+ unsigned int max_read, consumed = 0;
+ struct mkt_hash_block *block;
+ bool wait_req = false;
+ int offset;
+ int ret;
+
+ /* Get the offset and available space to fill req data */
+ offset = rctx->data_used;
+ max_read = SHA256_BLOCK_SIZE - offset;
+
+	/*
+	 * Consume the request in blocks of SHA256_BLOCK_SIZE.
+	 * max_read is initially the space left in the req data buffer and
+	 * is then reset to SHA256_BLOCK_SIZE for the following iterations.
+	 */
+ while (to_consume > max_read) {
+ block = kzalloc(sizeof(*block), GFP_ATOMIC);
+ if (!block) {
+ ret = -ENOMEM;
+ goto free_blocks;
+ }
+
+ read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ block->data + offset,
+ max_read, consumed);
+
+ /*
+		 * On the first iteration only, prepend the data already
+		 * buffered in rctx->data to the block and reset offset and
+		 * max_read for the next iterations.
+ */
+ if (offset > 0) {
+ memcpy(block->data, rctx->data, offset);
+ offset = 0;
+ max_read = SHA256_BLOCK_SIZE;
+ }
+
+ list_add(&block->list, &rctx->blocks);
+ to_consume -= read;
+ consumed += read;
+ }
+
+ /* Write the remaining data to req data */
+ read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ rctx->data + offset, to_consume,
+ consumed);
+ rctx->data_used = offset + read;
+
+ /* Update counter with processed bytes */
+ rctx->len += read + consumed;
+
+	/*
+	 * Consume all the blocks added to the list. Blocks were prepended
+	 * with list_add(), so walk in reverse to submit them in order.
+	 */
+ list_for_each_entry_reverse(block, &rctx->blocks, list) {
+ wait_req = complete_req &&
+ list_is_first(&block->list, &rctx->blocks);
+
+ ret = eip93_send_hash_req(async, block->data,
+ &block->data_dma,
+ SHA256_BLOCK_SIZE, wait_req);
+ if (ret)
+ goto free_blocks;
+ }
+
+ return wait_req ? -EINPROGRESS : 0;
+
+free_blocks:
+ eip93_hash_free_data_blocks(req);
+
+ return ret;
+}
+
+static int eip93_hash_update(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+
+ ret = __eip93_hash_update(req, true);
+ if (ret && ret != -EINPROGRESS)
+ goto free_sa_record;
+
+ return ret;
+
+free_sa_record:
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+/*
+ * With map_dma true, we map the sa_record and sa_state. This is not needed
+ * for finup(), as they are already mapped before update() is called.
+ */
+static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_async_request *async = &req->base;
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+	/* EIP93 can't handle a zero-length hash */
+ if (!rctx->len && !IS_HMAC(ctx->flags)) {
+ switch ((ctx->flags & EIP93_HASH_MASK)) {
+ case EIP93_HASH_SHA256:
+ memcpy(req->result, sha256_zero_message_hash,
+ SHA256_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_SHA224:
+ memcpy(req->result, sha224_zero_message_hash,
+ SHA224_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_SHA1:
+ memcpy(req->result, sha1_zero_message_hash,
+ SHA1_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_MD5:
+ memcpy(req->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ break;
+ default: /* Impossible */
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+	/* Signal that the interrupt from the engine is for the last block */
+ rctx->finalize = true;
+
+ if (map_dma) {
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+ }
+
+ /* Send last block */
+ ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
+ rctx->data_used, true);
+ if (ret)
+ goto free_blocks;
+
+ return -EINPROGRESS;
+
+free_blocks:
+ eip93_hash_free_data_blocks(req);
+
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int eip93_hash_final(struct ahash_request *req)
+{
+ return __eip93_hash_final(req, true);
+}
+
+static int eip93_hash_finup(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+ if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+
+ ret = __eip93_hash_update(req, false);
+ if (ret)
+ goto free_sa_record;
+ }
+
+ return __eip93_hash_final(req, false);
+
+free_sa_record:
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ u32 keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
+ struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize,
+ ctx->ipad, ctx->opad, true);
+}
+
+static int eip93_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.ahash.halg.base);
+
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct eip93_hash_reqctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->flags = tmpl->flags;
+
+ return 0;
+}
+
+static int eip93_hash_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = eip93_hash_init(req);
+ if (ret)
+ return ret;
+
+ return eip93_hash_finup(req);
+}
+
+static int eip93_hash_import(struct ahash_request *req, const void *in)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ const struct eip93_hash_export_state *state = in;
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2);
+ memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE);
+
+ __eip93_hash_init(req);
+
+ rctx->len = state->len;
+ rctx->data_used = state->data_used;
+
+ /* Skip copying data if we have nothing to copy */
+ if (rctx->len)
+ memcpy(rctx->data, state->data, rctx->data_used);
+
+ return 0;
+}
+
+static int eip93_hash_export(struct ahash_request *req, void *out)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct eip93_hash_export_state *state = out;
+
+	/* Save the buffered partial block in the state data */
+ if (rctx->len)
+ memcpy(state->data, rctx->data, rctx->data_used);
+
+ eip93_hash_export_sa_state(req, state);
+
+ return 0;
+}
+
+struct eip93_alg_template eip93_alg_md5 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_MD5,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha1 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA1,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha224 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA224,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha256 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA256,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_md5 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "hmac(md5-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha1 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac(sha1-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha224 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac(sha224-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha256 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac(sha256-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h
new file mode 100644
index 000000000000..556f22fc1dd0
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_HASH_H_
+#define _EIP93_HASH_H_
+
+#include <crypto/sha2.h>
+
+#include "eip93-main.h"
+#include "eip93-regs.h"
+
+struct eip93_hash_ctx {
+ struct eip93_device *eip93;
+ u32 flags;
+
+ u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+};
+
+struct eip93_hash_reqctx {
+ /* Placement is important for DMA alignment */
+ struct {
+ struct sa_record sa_record;
+ struct sa_record sa_record_hmac;
+ struct sa_state sa_state;
+ } __aligned(CRYPTO_DMA_ALIGN);
+
+ dma_addr_t sa_record_base;
+ dma_addr_t sa_record_hmac_base;
+ dma_addr_t sa_state_base;
+
+ /* Don't enable HASH_FINALIZE when the last block is sent */
+ bool partial_hash;
+
+ /* Set to signal that the interrupt is for the final packet */
+ bool finalize;
+
+ /*
+ * The EIP93 requires data to be accumulated in blocks of 64 bytes
+ * for intermediate hash calculation.
+ */
+ u64 len;
+ u32 data_used;
+
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t data_dma;
+
+ struct list_head blocks;
+};
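+
+/*
+ * A minimal sketch of the accumulation rule described above, assuming
+ * a hypothetical update step that consumes "nbytes" new bytes: only
+ * full 64-byte blocks are handed to the engine, the remainder stays
+ * cached in data[]:
+ *
+ *   rctx->len += nbytes;
+ *   rctx->data_used = (rctx->data_used + nbytes) % SHA256_BLOCK_SIZE;
+ */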
+
+struct mkt_hash_block {
+ struct list_head list;
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t data_dma;
+};
+
+struct eip93_hash_export_state {
+ u64 len;
+ u32 data_used;
+
+ u32 state_len[2];
+ u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
+void eip93_hash_handle_result(struct crypto_async_request *async, int err);
+
+extern struct eip93_alg_template eip93_alg_md5;
+extern struct eip93_alg_template eip93_alg_sha1;
+extern struct eip93_alg_template eip93_alg_sha224;
+extern struct eip93_alg_template eip93_alg_sha256;
+extern struct eip93_alg_template eip93_alg_hmac_md5;
+extern struct eip93_alg_template eip93_alg_hmac_sha1;
+extern struct eip93_alg_template eip93_alg_hmac_sha224;
+extern struct eip93_alg_template eip93_alg_hmac_sha256;
+
+#endif /* _EIP93_HASH_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c
new file mode 100644
index 000000000000..0b38a567da0e
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+
+#include "eip93-main.h"
+#include "eip93-regs.h"
+#include "eip93-common.h"
+#include "eip93-cipher.h"
+#include "eip93-aes.h"
+#include "eip93-des.h"
+#include "eip93-aead.h"
+#include "eip93-hash.h"
+
+static struct eip93_alg_template *eip93_algs[] = {
+ &eip93_alg_ecb_des,
+ &eip93_alg_cbc_des,
+ &eip93_alg_ecb_des3_ede,
+ &eip93_alg_cbc_des3_ede,
+ &eip93_alg_ecb_aes,
+ &eip93_alg_cbc_aes,
+ &eip93_alg_ctr_aes,
+ &eip93_alg_rfc3686_aes,
+ &eip93_alg_authenc_hmac_md5_cbc_des,
+ &eip93_alg_authenc_hmac_sha1_cbc_des,
+ &eip93_alg_authenc_hmac_sha224_cbc_des,
+ &eip93_alg_authenc_hmac_sha256_cbc_des,
+ &eip93_alg_authenc_hmac_md5_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha1_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha224_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha256_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_md5_cbc_aes,
+ &eip93_alg_authenc_hmac_sha1_cbc_aes,
+ &eip93_alg_authenc_hmac_sha224_cbc_aes,
+ &eip93_alg_authenc_hmac_sha256_cbc_aes,
+ &eip93_alg_authenc_hmac_md5_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha1_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha224_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha256_rfc3686_aes,
+ &eip93_alg_md5,
+ &eip93_alg_sha1,
+ &eip93_alg_sha224,
+ &eip93_alg_sha256,
+ &eip93_alg_hmac_md5,
+ &eip93_alg_hmac_sha1,
+ &eip93_alg_hmac_sha224,
+ &eip93_alg_hmac_sha256,
+};
+
+inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE);
+}
+
+inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE);
+}
+
+inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR);
+}
+
+static void eip93_unregister_algs(unsigned int i)
+{
+ unsigned int j;
+
+ for (j = 0; j < i; j++) {
+ switch (eip93_algs[j]->type) {
+ case EIP93_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
+ break;
+ case EIP93_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&eip93_algs[j]->alg.aead);
+ break;
+ case EIP93_ALG_TYPE_HASH:
+ crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
+ break;
+ }
+ }
+}
+
+static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) {
+ u32 alg_flags = eip93_algs[i]->flags;
+
+ eip93_algs[i]->eip93 = eip93;
+
+ if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_TDES))
+ continue;
+
+ if (IS_AES(alg_flags)) {
+ if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
+ continue;
+
+ if (!IS_HMAC(alg_flags)) {
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_128;
+
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_192;
+
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_256;
+
+ if (IS_RFC3686(alg_flags))
+ eip93_algs[i]->alg.skcipher.max_keysize +=
+ CTR_RFC3686_NONCE_SIZE;
+ }
+ }
+
+ if (IS_HASH_MD5(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_MD5))
+ continue;
+
+ if (IS_HASH_SHA1(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
+ continue;
+
+ if (IS_HASH_SHA224(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
+ continue;
+
+ if (IS_HASH_SHA256(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
+ continue;
+
+ switch (eip93_algs[i]->type) {
+ case EIP93_ALG_TYPE_SKCIPHER:
+ ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
+ break;
+ case EIP93_ALG_TYPE_AEAD:
+ ret = crypto_register_aead(&eip93_algs[i]->alg.aead);
+ break;
+ case EIP93_ALG_TYPE_HASH:
+ ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash);
+ break;
+ }
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ eip93_unregister_algs(i);
+
+ return ret;
+}
+
+static void eip93_handle_result_descriptor(struct eip93_device *eip93)
+{
+ struct crypto_async_request *async;
+ struct eip93_descriptor *rdesc;
+ u16 desc_flags, crypto_idr;
+ bool last_entry;
+ int handled, left, err;
+ u32 pe_ctrl_stat;
+ u32 pe_length;
+
+get_more:
+ handled = 0;
+
+ left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;
+
+ if (!left) {
+ eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+ eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
+ return;
+ }
+
+ last_entry = false;
+
+ while (left) {
+ scoped_guard(spinlock_irqsave, &eip93->ring->read_lock)
+ rdesc = eip93_get_descriptor(eip93);
+ if (IS_ERR(rdesc)) {
+ dev_err(eip93->dev, "Ndesc: %d nreq: %d\n",
+ handled, left);
+ err = -EIO;
+ break;
+ }
+ /* make sure DMA is finished writing */
+ do {
+ pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
+ pe_length = READ_ONCE(rdesc->pe_length_word);
+ } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
+ EIP93_PE_CTRL_PE_READY ||
+ FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
+ EIP93_PE_LENGTH_PE_READY);
+
+ err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
+ EIP93_PE_CTRL_PE_EXT_ERR |
+ EIP93_PE_CTRL_PE_SEQNUM_ERR |
+ EIP93_PE_CTRL_PE_PAD_ERR |
+ EIP93_PE_CTRL_PE_AUTH_ERR);
+
+ desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
+ crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);
+
+ writel(1, eip93->base + EIP93_REG_PE_RD_COUNT);
+ eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+
+ handled++;
+ left--;
+
+ if (desc_flags & EIP93_DESC_LAST) {
+ last_entry = true;
+ break;
+ }
+ }
+
+ if (!last_entry)
+ goto get_more;
+
+ /* Get crypto async ref only for last descriptor */
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock) {
+ async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr);
+ idr_remove(&eip93->ring->crypto_async_idr, crypto_idr);
+ }
+
+ /* Parse error in ctrl stat word */
+ err = eip93_parse_ctrl_stat_err(eip93, err);
+
+ if (desc_flags & EIP93_DESC_SKCIPHER)
+ eip93_skcipher_handle_result(async, err);
+
+ if (desc_flags & EIP93_DESC_AEAD)
+ eip93_aead_handle_result(async, err);
+
+ if (desc_flags & EIP93_DESC_HASH)
+ eip93_hash_handle_result(async, err);
+
+ goto get_more;
+}
+
+static void eip93_done_task(unsigned long data)
+{
+ struct eip93_device *eip93 = (struct eip93_device *)data;
+
+ eip93_handle_result_descriptor(eip93);
+}
+
+static irqreturn_t eip93_irq_handler(int irq, void *data)
+{
+ struct eip93_device *eip93 = data;
+ u32 irq_status;
+
+ irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT);
+ if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
+ eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH);
+ tasklet_schedule(&eip93->ring->done_task);
+ return IRQ_HANDLED;
+ }
+
+ /* Ignore errors in AUTO mode, handled by the RDR */
+ eip93_irq_clear(eip93, irq_status);
+ if (irq_status)
+ eip93_irq_disable(eip93, irq_status);
+
+ return IRQ_NONE;
+}
+
+static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+ u32 val;
+
+ /* Reset PE and rings */
+ val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
+ val |= EIP93_PE_TARGET_AUTO_RING_MODE;
+ /* For Auto mode, update the CDR ring owner after processing */
+ val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
+ writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+ /* Wait for PE and ring to reset */
+ usleep_range(10, 20);
+
+ /* Release PE and ring reset */
+ val = readl(eip93->base + EIP93_REG_PE_CONFIG);
+ val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
+ writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+ /* Config Clocks */
+ val = EIP93_PE_CLOCK_EN_PE_CLK;
+ if (supported_algo_flags & EIP93_PE_OPTION_TDES)
+ val |= EIP93_PE_CLOCK_EN_DES_CLK;
+ if (supported_algo_flags & EIP93_PE_OPTION_AES)
+ val |= EIP93_PE_CLOCK_EN_AES_CLK;
+ if (supported_algo_flags &
+ (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
+ EIP93_PE_OPTION_SHA_256))
+ val |= EIP93_PE_CLOCK_EN_HASH_CLK;
+ writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL);
+
+ /* Config DMA thresholds */
+ val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
+ FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
+ writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH);
+
+ /* Clear/ack all interrupts before disabling them all */
+ eip93_irq_clear(eip93, EIP93_INT_ALL);
+ eip93_irq_disable(eip93, EIP93_INT_ALL);
+
+ /* Setup CRD threshold to trigger interrupt */
+ val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
+ /*
+ * Configure RDR interrupt to be triggered if the RD counter is not 0
+ * for more than 2^(N+10) system clocks: with N = 5 used here, that
+ * is 2^15 = 32768 clocks.
+ */
+ val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN;
+ writel(val, eip93->base + EIP93_REG_PE_RING_THRESH);
+}
+
+static void eip93_desc_free(struct eip93_device *eip93)
+{
+ writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG);
+ writel(0, eip93->base + EIP93_REG_PE_CDR_BASE);
+ writel(0, eip93->base + EIP93_REG_PE_RDR_BASE);
+}
+
+static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring)
+{
+ ring->offset = sizeof(struct eip93_descriptor);
+ ring->base = dmam_alloc_coherent(eip93->dev,
+ sizeof(struct eip93_descriptor) * EIP93_RING_NUM,
+ &ring->base_dma, GFP_KERNEL);
+ if (!ring->base)
+ return -ENOMEM;
+
+ ring->write = ring->base;
+ ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1);
+ ring->read = ring->base;
+
+ return 0;
+}
+
+static int eip93_desc_init(struct eip93_device *eip93)
+{
+ struct eip93_desc_ring *cdr = &eip93->ring->cdr;
+ struct eip93_desc_ring *rdr = &eip93->ring->rdr;
+ int ret;
+ u32 val;
+
+ ret = eip93_set_ring(eip93, cdr);
+ if (ret)
+ return ret;
+
+ ret = eip93_set_ring(eip93, rdr);
+ if (ret)
+ return ret;
+
+ writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE);
+ writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE);
+
+ val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1);
+ writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG);
+
+ return 0;
+}
+
+static void eip93_cleanup(struct eip93_device *eip93)
+{
+ tasklet_kill(&eip93->ring->done_task);
+
+ /* Clear/ack all interrupts before disabling them all */
+ eip93_irq_clear(eip93, EIP93_INT_ALL);
+ eip93_irq_disable(eip93, EIP93_INT_ALL);
+
+ writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL);
+
+ eip93_desc_free(eip93);
+
+ idr_destroy(&eip93->ring->crypto_async_idr);
+}
+
+static int eip93_crypto_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct eip93_device *eip93;
+ u32 ver, algo_flags;
+ int ret;
+
+ eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL);
+ if (!eip93)
+ return -ENOMEM;
+
+ eip93->dev = dev;
+ platform_set_drvdata(pdev, eip93);
+
+ eip93->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(eip93->base))
+ return PTR_ERR(eip93->base);
+
+ eip93->irq = platform_get_irq(pdev, 0);
+ if (eip93->irq < 0)
+ return eip93->irq;
+
+ ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler,
+ NULL, IRQF_ONESHOT,
+ dev_name(eip93->dev), eip93);
+ if (ret)
+ return ret;
+
+ eip93->ring = devm_kzalloc(eip93->dev, sizeof(*eip93->ring), GFP_KERNEL);
+ if (!eip93->ring)
+ return -ENOMEM;
+
+ ret = eip93_desc_init(eip93);
+ if (ret)
+ return ret;
+
+ tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93);
+
+ spin_lock_init(&eip93->ring->read_lock);
+ spin_lock_init(&eip93->ring->write_lock);
+
+ spin_lock_init(&eip93->ring->idr_lock);
+ idr_init(&eip93->ring->crypto_async_idr);
+
+ algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1);
+
+ eip93_initialize(eip93, algo_flags);
+
+ /* Init finished, enable RDR interrupt */
+ eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
+
+ ret = eip93_register_algs(eip93, algo_flags);
+ if (ret) {
+ eip93_cleanup(eip93);
+ return ret;
+ }
+
+ ver = readl(eip93->base + EIP93_REG_PE_REVISION);
+ /* EIP_NO:MAJ_HW_REV:MIN_HW_REV:HW_PATCH,PE(OPTION_1:OPTION_0) */
+ dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n",
+ FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver),
+ FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver),
+ FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver),
+ FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver),
+ algo_flags,
+ readl(eip93->base + EIP93_REG_PE_OPTION_0));
+
+ return 0;
+}
+
+static void eip93_crypto_remove(struct platform_device *pdev)
+{
+ struct eip93_device *eip93 = platform_get_drvdata(pdev);
+
+ eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
+ eip93_cleanup(eip93);
+}
+
+static const struct of_device_id eip93_crypto_of_match[] = {
+ { .compatible = "inside-secure,safexcel-eip93i", },
+ { .compatible = "inside-secure,safexcel-eip93ie", },
+ { .compatible = "inside-secure,safexcel-eip93is", },
+ { .compatible = "inside-secure,safexcel-eip93ies", },
+ /* IW not supported currently, missing AES-XCBC-MAC/AES-CCM */
+ /* { .compatible = "inside-secure,safexcel-eip93iw", }, */
+ {}
+};
+MODULE_DEVICE_TABLE(of, eip93_crypto_of_match);
+
+static struct platform_driver eip93_crypto_driver = {
+ .probe = eip93_crypto_probe,
+ .remove = eip93_crypto_remove,
+ .driver = {
+ .name = "inside-secure-eip93",
+ .of_match_table = eip93_crypto_of_match,
+ },
+};
+module_platform_driver(eip93_crypto_driver);
+
+MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h
new file mode 100644
index 000000000000..79b078f0e5da
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_MAIN_H_
+#define _EIP93_MAIN_H_
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+
+#define EIP93_RING_BUSY_DELAY 500
+
+#define EIP93_RING_NUM 512
+#define EIP93_RING_BUSY 32
+#define EIP93_CRA_PRIORITY 1500
+
+#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx))
+#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \
+ ((idx) * sizeof(struct sa_state)))
+
+/* cipher algorithms */
+#define EIP93_ALG_DES BIT(0)
+#define EIP93_ALG_3DES BIT(1)
+#define EIP93_ALG_AES BIT(2)
+#define EIP93_ALG_MASK GENMASK(2, 0)
+/* hash and hmac algorithms */
+#define EIP93_HASH_MD5 BIT(3)
+#define EIP93_HASH_SHA1 BIT(4)
+#define EIP93_HASH_SHA224 BIT(5)
+#define EIP93_HASH_SHA256 BIT(6)
+#define EIP93_HASH_HMAC BIT(7)
+#define EIP93_HASH_MASK GENMASK(6, 3)
+/* cipher modes */
+#define EIP93_MODE_CBC BIT(8)
+#define EIP93_MODE_ECB BIT(9)
+#define EIP93_MODE_CTR BIT(10)
+#define EIP93_MODE_RFC3686 BIT(11)
+#define EIP93_MODE_MASK GENMASK(10, 8)
+
+/* cipher encryption/decryption operations */
+#define EIP93_ENCRYPT BIT(12)
+#define EIP93_DECRYPT BIT(13)
+
+#define EIP93_BUSY BIT(14)
+
+/* descriptor flags */
+#define EIP93_DESC_DMA_IV BIT(0)
+#define EIP93_DESC_IPSEC BIT(1)
+#define EIP93_DESC_FINISH BIT(2)
+#define EIP93_DESC_LAST BIT(3)
+#define EIP93_DESC_FAKE_HMAC BIT(4)
+#define EIP93_DESC_PRNG BIT(5)
+#define EIP93_DESC_HASH BIT(6)
+#define EIP93_DESC_AEAD BIT(7)
+#define EIP93_DESC_SKCIPHER BIT(8)
+#define EIP93_DESC_ASYNC BIT(9)
+
+#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV)
+
+#define IS_DES(flags) ((flags) & EIP93_ALG_DES)
+#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES)
+#define IS_AES(flags) ((flags) & EIP93_ALG_AES)
+
+#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5)
+#define IS_HASH_SHA1(flags) ((flags) & EIP93_HASH_SHA1)
+#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224)
+#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256)
+#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC)
+
+#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC)
+#define IS_ECB(mode) ((mode) & EIP93_MODE_ECB)
+#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR)
+#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686)
+
+#define IS_BUSY(flags) ((flags) & EIP93_BUSY)
+
+#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT)
+#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT)
+
+#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \
+ EIP93_ALG_3DES | \
+ EIP93_ALG_AES))
+
+#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \
+ EIP93_HASH_SHA1 | \
+ EIP93_HASH_SHA224 | \
+ EIP93_HASH_SHA256))
+
+/**
+ * struct eip93_device - crypto engine device structure
+ * @base: engine register base address
+ * @dev: device registering the crypto engine
+ * @clk: engine clock
+ * @irq: engine IRQ line
+ * @ring: command/result descriptor ring state
+ */
+struct eip93_device {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+ struct eip93_ring *ring;
+};
+
+struct eip93_desc_ring {
+ void *base;
+ void *base_end;
+ dma_addr_t base_dma;
+ /* write and read pointers */
+ void *read;
+ void *write;
+ /* descriptor element offset */
+ u32 offset;
+};
+
+struct eip93_state_pool {
+ void *base;
+ dma_addr_t base_dma;
+};
+
+struct eip93_ring {
+ struct tasklet_struct done_task;
+ /* command/result rings */
+ struct eip93_desc_ring cdr;
+ struct eip93_desc_ring rdr;
+ spinlock_t write_lock;
+ spinlock_t read_lock;
+ /* async IDR */
+ spinlock_t idr_lock;
+ struct idr crypto_async_idr;
+};
+
+enum eip93_alg_type {
+ EIP93_ALG_TYPE_AEAD,
+ EIP93_ALG_TYPE_SKCIPHER,
+ EIP93_ALG_TYPE_HASH,
+};
+
+struct eip93_alg_template {
+ struct eip93_device *eip93;
+ enum eip93_alg_type type;
+ u32 flags;
+ union {
+ struct aead_alg aead;
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+#endif /* _EIP93_MAIN_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h
new file mode 100644
index 000000000000..0490b8d15131
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef REG_EIP93_H
+#define REG_EIP93_H
+
+#define EIP93_REG_PE_CTRL_STAT 0x0
+#define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24)
+#define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20)
+#define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8
+#define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6
+#define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5
+#define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2
+#define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1
+#define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0
+#define EIP93_PE_CTRL_PE_EXT_ERR BIT(19)
+#define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18)
+#define EIP93_PE_CTRL_PE_PAD_ERR BIT(17)
+#define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16)
+#define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8)
+#define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6)
+#define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4)
+#define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3)
+#define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0)
+#define EIP93_PE_CTRL_PE_READY 0x2
+#define EIP93_PE_CTRL_HOST_READY 0x1
+#define EIP93_REG_PE_SOURCE_ADDR 0x4
+#define EIP93_REG_PE_DEST_ADDR 0x8
+#define EIP93_REG_PE_SA_ADDR 0xc
+#define EIP93_REG_PE_ADDR 0x10 /* STATE_ADDR */
+/*
+ * Special implementation for the user ID
+ *
+ * user_id in eip93_descriptor identifies the descriptor; it is
+ * opaque to the hardware and may be used by the driver in any
+ * custom way.
+ *
+ * Ideally it would hold the address of the kernel crypto request
+ * struct, but a pointer no longer fits in 32 bits on 64-bit
+ * systems.
+ *
+ * Some flags are also needed, to identify the last descriptor.
+ *
+ * To handle this, split the u32 in 2 parts:
+ * - 31:16 descriptor flags
+ * - 15:0  IDR connecting the descriptor to the crypto request
+ */
+#define EIP93_REG_PE_USER_ID 0x18
+#define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16)
+#define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0)
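+
+/*
+ * For example, the final descriptor of a hash request could encode
+ * its flags and IDR slot (crypto_idr here being a previously
+ * allocated IDR id) as:
+ *
+ *   desc->user_id = FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
+ *                              EIP93_DESC_HASH | EIP93_DESC_LAST) |
+ *                   FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, crypto_idr);
+ */
+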
+#define EIP93_REG_PE_LENGTH 0x1c
+#define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24)
+#define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22)
+#define EIP93_PE_LENGTH_PE_READY 0x2
+#define EIP93_PE_LENGTH_HOST_READY 0x1
+#define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0)
+
+/* PACKET ENGINE RING configuration registers */
+#define EIP93_REG_PE_CDR_BASE 0x80
+#define EIP93_REG_PE_RDR_BASE 0x84
+#define EIP93_REG_PE_RING_CONFIG 0x88
+#define EIP93_PE_EN_EXT_TRIG BIT(31)
+/* Absent in later revisions of the EIP93 */
+/* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */
+#define EIP93_PE_RING_SIZE GENMASK(9, 0)
+#define EIP93_REG_PE_RING_THRESH 0x8c
+#define EIPR93_PE_TIMEROUT_EN BIT(31)
+#define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26)
+#define EIPR93_PE_RDR_THRESH GENMASK(25, 16)
+#define EIPR93_PE_CDR_THRESH GENMASK(9, 0)
+#define EIP93_REG_PE_CD_COUNT 0x90
+#define EIP93_PE_CD_COUNT GENMASK(10, 0)
+/*
+ * In the same register, writing a value in GENMASK(7, 0) will
+ * increment the descriptor count and start DMA action.
+ */
+#define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0)
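+
+/*
+ * For example, writel(1, base + EIP93_REG_PE_CD_COUNT) would hand one
+ * newly queued command descriptor to the engine, mirroring how the
+ * result path acknowledges each entry via EIP93_REG_PE_RD_COUNT.
+ */
+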
+#define EIP93_REG_PE_RD_COUNT 0x94
+#define EIP93_PE_RD_COUNT GENMASK(10, 0)
+/*
+ * In the same register, writing a value in GENMASK(7, 0) will
+ * increment the descriptor count and start DMA action.
+ */
+#define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0)
+#define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */
+
+/* PACKET ENGINE configuration registers */
+#define EIP93_REG_PE_CONFIG 0x100
+#define EIP93_PE_CONFIG_SWAP_TARGET BIT(20)
+#define EIP93_PE_CONFIG_SWAP_DATA BIT(18)
+#define EIP93_PE_CONFIG_SWAP_SA BIT(17)
+#define EIP93_PE_CONFIG_SWAP_CDRD BIT(16)
+#define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10)
+#define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8)
+#define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3)
+#define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2)
+#define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1)
+#define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0)
+#define EIP93_PE_CONFIG_RST_RING BIT(2)
+#define EIP93_PE_CONFIG_RST_PE BIT(0)
+#define EIP93_REG_PE_STATUS 0x104
+#define EIP93_REG_PE_BUF_THRESH 0x10c
+#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
+#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
+#define EIP93_REG_PE_INBUF_COUNT 0x110
+#define EIP93_REG_PE_OUTBUF_COUNT 0x114
+#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */
+
+/* PACKET ENGINE endian config */
+#define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc
+#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0
+#define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16)
+#define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0)
+/*
+ * Bytes are mapped two bits at a time and referenced by byte ID.
+ * Split GENMASK(7, 0) in 4 parts, one for each byte.
+ * Example LITTLE ENDIAN:      Example BIG ENDIAN
+ * GENMASK(7, 6) 0x3           GENMASK(7, 6) 0x0
+ * GENMASK(5, 4) 0x2           GENMASK(5, 4) 0x1
+ * GENMASK(3, 2) 0x1           GENMASK(3, 2) 0x2
+ * GENMASK(1, 0) 0x0           GENMASK(1, 0) 0x3
+ */
+#define EIP93_PE_ENDIAN_BYTE0 0x0
+#define EIP93_PE_ENDIAN_BYTE1 0x1
+#define EIP93_PE_ENDIAN_BYTE2 0x2
+#define EIP93_PE_ENDIAN_BYTE3 0x3
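+
+/*
+ * For example, encoding the little-endian identity mapping above
+ * gives 0xe4 (binary 11 10 01 00), while the full byte swap for big
+ * endian gives 0x1b (binary 00 01 10 11); a driver could then program
+ * something like:
+ *
+ *   val = FIELD_PREP(EIP93_PE_ENDIAN_MASTER_BYTE_SWAP, 0x1b);
+ */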
+
+/* EIP93 CLOCK control registers */
+#define EIP93_REG_PE_CLOCK_CTRL 0x1e8
+#define EIP93_PE_CLOCK_EN_HASH_CLK BIT(4)
+#define EIP93_PE_CLOCK_EN_ARC4_CLK BIT(3)
+#define EIP93_PE_CLOCK_EN_AES_CLK BIT(2)
+#define EIP93_PE_CLOCK_EN_DES_CLK BIT(1)
+#define EIP93_PE_CLOCK_EN_PE_CLK BIT(0)
+
+/* EIP93 Device Option and Revision Register */
+#define EIP93_REG_PE_OPTION_1 0x1f4
+#define EIP93_PE_OPTION_MAC_KEY256 BIT(31)
+#define EIP93_PE_OPTION_MAC_KEY192 BIT(30)
+#define EIP93_PE_OPTION_MAC_KEY128 BIT(29)
+#define EIP93_PE_OPTION_AES_CBC_MAC BIT(28)
+#define EIP93_PE_OPTION_AES_XCBX BIT(23)
+#define EIP93_PE_OPTION_SHA_256 BIT(19)
+#define EIP93_PE_OPTION_SHA_224 BIT(18)
+#define EIP93_PE_OPTION_SHA_1 BIT(17)
+#define EIP93_PE_OPTION_MD5 BIT(16)
+#define EIP93_PE_OPTION_AES_KEY256 BIT(15)
+#define EIP93_PE_OPTION_AES_KEY192 BIT(14)
+#define EIP93_PE_OPTION_AES_KEY128 BIT(13)
+#define EIP93_PE_OPTION_AES BIT(2)
+#define EIP93_PE_OPTION_ARC4 BIT(1)
+#define EIP93_PE_OPTION_TDES BIT(0) /* DES and TDES */
+#define EIP93_REG_PE_OPTION_0 0x1f8
+#define EIP93_REG_PE_REVISION 0x1fc
+#define EIP93_PE_REVISION_MAJ_HW_REV GENMASK(27, 24)
+#define EIP93_PE_REVISION_MIN_HW_REV GENMASK(23, 20)
+#define EIP93_PE_REVISION_HW_PATCH GENMASK(19, 16)
+#define EIP93_PE_REVISION_EIP_NO GENMASK(7, 0)
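+
+/*
+ * For example, probe code can decode the revision register with
+ * FIELD_GET(); on an EIP-93 the EIP number field should read back 93:
+ *
+ *   ver = readl(base + EIP93_REG_PE_REVISION);
+ *   eip_no = FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver);
+ */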
+
+/* EIP93 Interrupt Control Register */
+#define EIP93_REG_INT_UNMASK_STAT 0x200
+#define EIP93_REG_INT_MASK_STAT 0x204
+#define EIP93_REG_INT_CLR 0x204
+#define EIP93_REG_INT_MASK 0x208 /* INT_EN */
+/* Each interrupt register has the same bitmap */
+#define EIP93_INT_INTERFACE_ERR BIT(18)
+#define EIP93_INT_RPOC_ERR BIT(17)
+#define EIP93_INT_PE_RING_ERR BIT(16)
+#define EIP93_INT_HALT BIT(15)
+#define EIP93_INT_OUTBUF_THRESH BIT(11)
+#define EIP93_INT_INBUF_THRESH BIT(10)
+#define EIP93_INT_OPERATION_DONE BIT(9)
+#define EIP93_INT_RDR_THRESH BIT(1)
+#define EIP93_INT_CDR_THRESH BIT(0)
+#define EIP93_INT_ALL (EIP93_INT_INTERFACE_ERR | \
+ EIP93_INT_RPOC_ERR | \
+ EIP93_INT_PE_RING_ERR | \
+ EIP93_INT_HALT | \
+ EIP93_INT_OUTBUF_THRESH | \
+ EIP93_INT_INBUF_THRESH | \
+ EIP93_INT_OPERATION_DONE | \
+ EIP93_INT_RDR_THRESH | \
+ EIP93_INT_CDR_THRESH)
+
+#define EIP93_REG_INT_CFG 0x20c
+#define EIP93_INT_TYPE_PULSE BIT(0)
+#define EIP93_REG_MASK_ENABLE 0x210
+#define EIP93_REG_MASK_DISABLE 0x214
+
+/* EIP93 SA Record register */
+#define EIP93_REG_SA_CMD_0 0x400
+#define EIP93_SA_CMD_SAVE_HASH BIT(29)
+#define EIP93_SA_CMD_SAVE_IV BIT(28)
+#define EIP93_SA_CMD_HASH_SOURCE GENMASK(27, 26)
+#define EIP93_SA_CMD_HASH_NO_LOAD FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
+#define EIP93_SA_CMD_HASH_FROM_STATE FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
+#define EIP93_SA_CMD_HASH_FROM_SA FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
+#define EIP93_SA_CMD_IV_SOURCE GENMASK(25, 24)
+#define EIP93_SA_CMD_IV_FROM_PRNG FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
+#define EIP93_SA_CMD_IV_FROM_STATE FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
+#define EIP93_SA_CMD_IV_FROM_INPUT FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
+#define EIP93_SA_CMD_IV_NO_LOAD FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
+#define EIP93_SA_CMD_DIGEST_LENGTH GENMASK(23, 20)
+#define EIP93_SA_CMD_DIGEST_10WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0xa) /* SRTP and TLS */
+#define EIP93_SA_CMD_DIGEST_8WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
+#define EIP93_SA_CMD_DIGEST_7WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
+#define EIP93_SA_CMD_DIGEST_6WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
+#define EIP93_SA_CMD_DIGEST_5WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
+#define EIP93_SA_CMD_DIGEST_4WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
+#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
+#define EIP93_SA_CMD_DIGEST_2WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
+#define EIP93_SA_CMD_DIGEST_1WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
+#define EIP93_SA_CMD_DIGEST_3WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
+#define EIP93_SA_CMD_HDR_PROC BIT(19)
+#define EIP93_SA_CMD_EXT_PAD BIT(18)
+#define EIP93_SA_CMD_SCPAD BIT(17)
+#define EIP93_SA_CMD_HASH GENMASK(15, 12)
+#define EIP93_SA_CMD_HASH_NULL FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
+#define EIP93_SA_CMD_HASH_SHA256 FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
+#define EIP93_SA_CMD_HASH_SHA224 FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
+#define EIP93_SA_CMD_HASH_SHA1 FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
+#define EIP93_SA_CMD_HASH_MD5 FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
+#define EIP93_SA_CMD_CIPHER GENMASK(11, 8)
+#define EIP93_SA_CMD_CIPHER_NULL FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
+#define EIP93_SA_CMD_CIPHER_AES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
+#define EIP93_SA_CMD_CIPHER_ARC4 FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
+#define EIP93_SA_CMD_CIPHER_3DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
+#define EIP93_SA_CMD_CIPHER_DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
+#define EIP93_SA_CMD_PAD_TYPE GENMASK(7, 6)
+#define EIP93_SA_CMD_PAD_CONST_SSL FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
+#define EIP93_SA_CMD_PAD_TLS_DTLS FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
+#define EIP93_SA_CMD_PAD_ZERO FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
+#define EIP93_SA_CMD_PAD_CONST FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
+#define EIP93_SA_CMD_PAD_PKCS7 FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
+#define EIP93_SA_CMD_PAD_IPSEC FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
+#define EIP93_SA_CMD_OPGROUP GENMASK(5, 4)
+#define EIP93_SA_CMD_OP_EXT FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
+#define EIP93_SA_CMD_OP_PROTOCOL FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
+#define EIP93_SA_CMD_OP_BASIC FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
+#define EIP93_SA_CMD_DIRECTION_IN BIT(3) /* 0: outbound 1: inbound */
+#define EIP93_SA_CMD_OPCODE GENMASK(2, 0)
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG 0x7
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC 0x0
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS 0x5
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL 0x1
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10 0x5
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11 0x6
+#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL 0x1
+#define EIP93_SA_CMD_OPCODE_EXT_IN_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10 0x5
+#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11 0x6
+#define EIP93_REG_SA_CMD_1 0x404
+#define EIP93_SA_CMD_EN_SEQNUM_CHK BIT(29)
+/* This mask can be used for either ARC4 or AES */
+#define EIP93_SA_CMD_ARC4_KEY_LENGHT GENMASK(28, 24)
+#define EIP93_SA_CMD_AES_DEC_KEY BIT(28) /* 0: encrypt key 1: decrypt key */
+#define EIP93_SA_CMD_AES_KEY_LENGTH GENMASK(26, 24)
+#define EIP93_SA_CMD_AES_KEY_256BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4)
+#define EIP93_SA_CMD_AES_KEY_192BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3)
+#define EIP93_SA_CMD_AES_KEY_128BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2)
+#define EIP93_SA_CMD_HASH_CRYPT_OFFSET GENMASK(23, 16)
+#define EIP93_SA_CMD_BYTE_OFFSET BIT(13) /* 0: CRYPT_OFFSET in 32-bit words, 1: CRYPT_OFFSET in 8-bit bytes */
+#define EIP93_SA_CMD_HMAC BIT(12)
+#define EIP93_SA_CMD_SSL_MAC BIT(12)
+/* This mask can be used for either ARC4 or AES */
+#define EIP93_SA_CMD_CHIPER_MODE GENMASK(9, 8)
+/* AES or DES operations */
+#define EIP93_SA_CMD_CHIPER_MODE_ICM FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3)
+#define EIP93_SA_CMD_CHIPER_MODE_CTR FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2)
+#define EIP93_SA_CMD_CHIPER_MODE_CBC FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
+#define EIP93_SA_CMD_CHIPER_MODE_ECB FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
+/* ARC4 operations */
+#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
+#define EIP93_SA_CMD_CHIPER_MODE_STATELESS FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
+#define EIP93_SA_CMD_COPY_PAD BIT(3)
+#define EIP93_SA_CMD_COPY_PAYLOAD BIT(2)
+#define EIP93_SA_CMD_COPY_HEADER BIT(1)
+#define EIP93_SA_CMD_COPY_DIGEST BIT(0) /* With this enabled, COPY_PAD is required */
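+
+/*
+ * For example, an SA that needs the digest written back would
+ * therefore set both bits together in its command word:
+ *
+ *   sa_cmd0_word |= EIP93_SA_CMD_COPY_DIGEST | EIP93_SA_CMD_COPY_PAD;
+ */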
+
+/* State save register */
+#define EIP93_REG_STATE_IV_0 0x500
+#define EIP93_REG_STATE_IV_1 0x504
+
+#define EIP93_REG_PE_ARC4STATE 0x700
+
+struct sa_record {
+ u32 sa_cmd0_word;
+ u32 sa_cmd1_word;
+ u32 sa_key[8];
+ u8 sa_i_digest[32];
+ u8 sa_o_digest[32];
+ u32 sa_spi;
+ u32 sa_seqnum[2];
+ u32 sa_seqmum_mask[2];
+ u32 sa_nonce;
+} __packed;
+
+struct sa_state {
+ u32 state_iv[4];
+ u32 state_byte_cnt[2];
+ u8 state_i_digest[32];
+} __packed;
+
+struct eip93_descriptor {
+ u32 pe_ctrl_stat_word;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 sa_addr;
+ u32 state_addr;
+ u32 arc4_addr;
+ u32 user_id;
+ u32 pe_length_word;
+} __packed;
+
+#endif
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index f5c1912aa564..c3b2b22934b7 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1218,7 +1218,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
- &safexcel_alg_crc32,
&safexcel_alg_cbcmac,
&safexcel_alg_xcbcmac,
&safexcel_alg_cmac,
@@ -1868,7 +1867,7 @@ MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
- .remove_new = safexcel_remove,
+ .remove = safexcel_remove,
.driver = {
.name = "crypto-safexcel",
.of_match_table = safexcel_of_match_table,
@@ -2031,7 +2030,7 @@ MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_FIRMWARE("ifpp.bin");
MODULE_FIRMWARE("ipue.bin");
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0c79ad78d1c0..0f27367a85fa 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -959,7 +959,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
-extern struct safexcel_alg_template safexcel_alg_crc32;
extern struct safexcel_alg_template safexcel_alg_cbcmac;
extern struct safexcel_alg_template safexcel_alg_xcbcmac;
extern struct safexcel_alg_template safexcel_alg_cmac;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index e17577b785c3..ef0ba4832928 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
safexcel_complete(priv, ring);
if (sreq->nents) {
- dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
sreq->nents = 0;
}
@@ -289,14 +291,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
- ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
- /* Undo final XOR with 0xffffffff ...*/
- *(__le32 *)areq->result = ~sreq->state[0];
- } else {
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
- }
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
}
cache_len = safexcel_queued_len(sreq);
@@ -497,7 +493,9 @@ unmap_result:
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
@@ -1881,88 +1879,6 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
};
-static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret = safexcel_ahash_cra_init(tfm);
-
- /* Default 'key' is all zeroes */
- memset(&ctx->base.ipad, 0, sizeof(u32));
- return ret;
-}
-
-static int safexcel_crc32_init(struct ahash_request *areq)
-{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
-
- memset(req, 0, sizeof(*req));
-
- /* Start from loaded key */
- req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
- /* Set processed to non-zero to enable invalidation detection */
- req->len = sizeof(u32);
- req->processed = sizeof(u32);
-
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
- req->digest = CONTEXT_CONTROL_DIGEST_XCM;
- req->state_sz = sizeof(u32);
- req->digest_sz = sizeof(u32);
- req->block_sz = sizeof(u32);
-
- return 0;
-}
-
-static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- memcpy(&ctx->base.ipad, key, sizeof(u32));
- return 0;
-}
-
-static int safexcel_crc32_digest(struct ahash_request *areq)
-{
- return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
-}
-
-struct safexcel_alg_template safexcel_alg_crc32 = {
- .type = SAFEXCEL_ALG_TYPE_AHASH,
- .algo_mask = 0,
- .alg.ahash = {
- .init = safexcel_crc32_init,
- .update = safexcel_ahash_update,
- .final = safexcel_ahash_final,
- .finup = safexcel_ahash_finup,
- .digest = safexcel_crc32_digest,
- .setkey = safexcel_crc32_setkey,
- .export = safexcel_ahash_export,
- .import = safexcel_ahash_import,
- .halg = {
- .digestsize = sizeof(u32),
- .statesize = sizeof(struct safexcel_ahash_export_state),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "safexcel-crc32",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
- .cra_init = safexcel_crc32_cra_init,
- .cra_exit = safexcel_ahash_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- },
-};
-
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
@@ -2043,7 +1959,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
@@ -2093,7 +2009,7 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
safexcel_ahash_cra_init(tfm);
ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
- return PTR_ERR_OR_ZERO(ctx->aes);
+ return ctx->aes == NULL ? -ENOMEM : 0;
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
index b64b208d2344..55bda7770fac 100644
--- a/drivers/crypto/intel/iaa/Makefile
+++ b/drivers/crypto/intel/iaa/Makefile
@@ -3,7 +3,7 @@
# Makefile for IAA crypto device drivers
#
-ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 237f87000070..23f585219fb4 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -33,8 +33,6 @@ static unsigned int nr_cpus_per_node;
/* Number of physical cpus sharing each iaa instance */
static unsigned int cpus_per_iaa;
-static struct crypto_comp *deflate_generic_tfm;
-
/* Per-cpu lookup table for balanced wqs */
static struct wq_table_entry __percpu *wq_table;
@@ -173,7 +171,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
@@ -727,7 +725,7 @@ static int alloc_wq_table(int max_wqs)
for (cpu = 0; cpu < nr_cpus; cpu++) {
entry = per_cpu_ptr(wq_table, cpu);
- entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
+ entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL);
if (!entry->wqs) {
free_wq_table();
return -ENOMEM;
@@ -896,7 +894,7 @@ out:
static void rebalance_wq_table(void)
{
const struct cpumask *node_cpus;
- int node, cpu, iaa = -1;
+ int node_cpu, node, cpu, iaa = 0;
if (nr_iaa == 0)
return;
@@ -907,36 +905,29 @@ static void rebalance_wq_table(void)
clear_wq_table();
if (nr_iaa == 1) {
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (WARN_ON(wq_table_add_wqs(0, cpu))) {
- pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
- return;
- }
+ for_each_possible_cpu(cpu) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu)))
+ goto err;
}
return;
}
for_each_node_with_cpus(node) {
+ cpu = 0;
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
- int node_cpu = cpumask_nth(cpu, node_cpus);
-
- if (WARN_ON(node_cpu >= nr_cpu_ids)) {
- pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
- return;
- }
-
- if ((cpu % cpus_per_iaa) == 0)
- iaa++;
-
- if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
- pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
- return;
- }
+ for_each_cpu(node_cpu, node_cpus) {
+ iaa = cpu / cpus_per_iaa;
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu)))
+ goto err;
+ cpu++;
}
}
+
+ return;
+err:
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
}
static inline int check_completion(struct device *dev,
@@ -945,12 +936,22 @@ static inline int check_completion(struct device *dev,
bool only_once)
{
char *op_str = compress ? "compress" : "decompress";
+ int status_checks = 0;
int ret = 0;
while (!comp->status) {
if (only_once)
return -EAGAIN;
cpu_relax();
+ if (status_checks++ >= IAA_COMPLETION_TIMEOUT) {
+ /* Something is wrong with the hw, disable it. */
+ dev_err(dev, "%s completion timed out - "
+ "assuming broken hw, iaa_crypto now DISABLED\n",
+ op_str);
+ iaa_crypto_enabled = false;
+ ret = -ETIMEDOUT;
+ goto out;
+ }
}
if (comp->status != IAX_COMP_SUCCESS) {
@@ -991,17 +992,11 @@ out:
static int deflate_generic_decompress(struct acomp_req *req)
{
- void *src, *dst;
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int ret;
- src = kmap_local_page(sg_page(req->src)) + req->src->offset;
- dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
-
- ret = crypto_comp_decompress(deflate_generic_tfm,
- src, req->slen, dst, &req->dlen);
-
- kunmap_local(src);
- kunmap_local(dst);
+ ret = crypto_acomp_decompress(fbreq);
+ req->dlen = fbreq->dlen;
update_total_sw_decomp_calls();
@@ -1015,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc);
+ dma_addr_t dst_addr, unsigned int *dlen);
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
enum idxd_complete_type comp_type,
@@ -1082,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
if (ctx->compress && compression_ctx->verify_compress) {
+ u32 *compression_crc = acomp_request_ctx(ctx->req);
dma_addr_t src_addr, dst_addr;
- u32 compression_crc;
- compression_crc = idxd_desc->iax_completion->crc;
+ *compression_crc = idxd_desc->iax_completion->crc;
ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
if (ret) {
@@ -1095,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
- ctx->req->slen, dst_addr, &ctx->req->dlen,
- compression_crc);
+ ctx->req->slen, dst_addr, &ctx->req->dlen);
if (ret) {
dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
err = -EIO;
@@ -1125,12 +1118,11 @@ out:
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1170,7 +1162,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src2_size = sizeof(struct aecs_comp_table_record);
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1183,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1204,7 +1195,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_comp_calls();
update_wq_comp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1224,7 +1215,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
*compression_crc = idxd_desc->iax_completion->crc;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
out:
return ret;
@@ -1278,11 +1269,11 @@ out:
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1342,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
goto err;
}
- if (compression_crc != idxd_desc->iax_completion->crc) {
+ if (*compression_crc != idxd_desc->iax_completion->crc) {
ret = -EINVAL;
dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
- " comp=0x%x, decomp=0x%x\n", compression_crc,
+ " comp=0x%x, decomp=0x%x\n", *compression_crc,
idxd_desc->iax_completion->crc);
print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
8, 1, idxd_desc->iax_completion, 64, 0);
@@ -1365,8 +1356,7 @@ err:
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1408,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src1_size = slen;
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1421,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1442,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_decomp_calls();
update_wq_decomp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1470,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1490,13 +1479,10 @@ static int iaa_comp_acompress(struct acomp_req *req)
struct iaa_compression_ctx *compression_ctx;
struct crypto_tfm *tfm = req->base.tfm;
dma_addr_t src_addr, dst_addr;
- bool disable_async = false;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
- u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1526,21 +1512,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
iaa_wq = idxd_wq_get_private(wq);
- if (!req->dst) {
- gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-
- /* incompressible data will always be < 2 * slen */
- req->dlen = 2 * req->slen;
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
- disable_async = true;
- }
-
dev = &wq->idxd->pdev->dev;
nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -1570,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc, disable_async);
+ &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1582,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
}
ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, compression_crc);
+ dst_addr, &req->dlen);
if (ret)
dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
@@ -1601,100 +1572,6 @@ err_map_dst:
out:
iaa_wq_put(wq);
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
- return ret;
-}
-
-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
-{
- gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC;
- struct crypto_tfm *tfm = req->base.tfm;
- dma_addr_t src_addr, dst_addr;
- int nr_sgs, cpu, ret = 0;
- struct iaa_wq *iaa_wq;
- struct device *dev;
- struct idxd_wq *wq;
- int order = -1;
-
- cpu = get_cpu();
- wq = wq_table_next_wq(cpu);
- put_cpu();
- if (!wq) {
- pr_debug("no wq configured for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- ret = iaa_wq_get(wq);
- if (ret) {
- pr_debug("no wq available for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- iaa_wq = idxd_wq_get_private(wq);
-
- dev = &wq->idxd->pdev->dev;
-
- nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map src sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto out;
- }
- src_addr = sg_dma_address(req->src);
- dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
- " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
- req->src, req->slen, sg_dma_len(req->src));
-
- req->dlen = 4 * req->slen; /* start with ~avg comp rato */
-alloc_dest:
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
-
- nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto err_map_dst;
- }
-
- dst_addr = sg_dma_address(req->dst);
- dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
- " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
- req->dst, req->dlen, sg_dma_len(req->dst));
- ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, true);
- if (ret == -EOVERFLOW) {
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- req->dlen *= 2;
- if (req->dlen > CRYPTO_ACOMP_DST_MAX)
- goto err_map_dst;
- goto alloc_dest;
- }
-
- if (ret != 0)
- dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
-
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-err_map_dst:
- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-out:
- iaa_wq_put(wq);
-
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
return ret;
}
@@ -1717,9 +1594,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
return -EINVAL;
}
- if (!req->dst)
- return iaa_comp_adecompress_alloc_dest(req);
-
cpu = get_cpu();
wq = wq_table_next_wq(cpu);
put_cpu();
@@ -1765,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, false);
+ dst_addr, &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1800,24 +1674,16 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
return 0;
}
-static void dst_free(struct scatterlist *sgl)
-{
- /*
- * Called for req->dst = NULL cases but we free elsewhere
- * using sgl_free_order().
- */
-}
-
static struct acomp_alg iaa_acomp_fixed_deflate = {
.init = iaa_comp_init_fixed,
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
- .dst_free = dst_free,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
+ .cra_reqsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
}
@@ -2012,15 +1878,6 @@ static int __init iaa_crypto_init_module(void)
}
nr_cpus_per_node = nr_cpus / nr_nodes;
- if (crypto_has_comp("deflate-generic", 0, 0))
- deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
-
- if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
- pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
- "deflate-generic", PTR_ERR(deflate_generic_tfm));
- return -ENOMEM;
- }
-
ret = iaa_aecs_init_fixed();
if (ret < 0) {
pr_debug("IAA fixed compression mode init failed\n");
@@ -2062,7 +1919,6 @@ err_verify_attr_create:
err_driver_reg:
iaa_aecs_cleanup_fixed();
err_aecs_init:
- crypto_free_comp(deflate_generic_tfm);
goto out;
}
@@ -2079,12 +1935,11 @@ static void __exit iaa_crypto_cleanup_module(void)
&driver_attr_verify_compress);
idxd_driver_unregister(&iaa_crypto_driver);
iaa_aecs_cleanup_fixed();
- crypto_free_comp(deflate_generic_tfm);
pr_debug("cleaned up\n");
}
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IDXD_DEVICE(0);
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index f8a77bff8844..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
@@ -1588,7 +1591,7 @@ static const struct of_device_id ixp4xx_crypto_of_match[] = {
static struct platform_driver ixp_crypto_driver = {
.probe = ixp_crypto_probe,
- .remove_new = ixp_crypto_remove,
+ .remove = ixp_crypto_remove,
.driver = {
.name = "ixp4xx_crypto",
.of_match_table = ixp4xx_crypto_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9b2d098e5eb2..8a8f6c81e010 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1656,7 +1656,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
.probe = kmb_ocs_aes_probe,
- .remove_new = kmb_ocs_aes_remove,
+ .remove = kmb_ocs_aes_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_aes_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 5e24f2d8affc..59308926399d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -991,7 +991,7 @@ static const struct of_device_id kmb_ocs_ecc_of_match[] = {
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_ecc_driver = {
.probe = kmb_ocs_ecc_probe,
- .remove_new = kmb_ocs_ecc_remove,
+ .remove = kmb_ocs_ecc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_ecc_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index e54c79890d44..48281d882260 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
+ * @nents: Number of entries in the scatterlist.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
+ unsigned int nents;
};
/**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
- dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}
@@ -230,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
- size_t nents;
+ int nents;
size_t count;
int rc;
int i;
@@ -251,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+ if (nents < 0)
+ return nents;
+
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
@@ -260,6 +265,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
rc = -ENOMEM;
goto cleanup;
}
+
+ /* Save nents: dma_unmap_sg() must be given the value passed to dma_map_sg(). */
+ rctx->nents = nents;
+
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
@@ -1243,7 +1252,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
.probe = kmb_ocs_hcu_probe,
- .remove_new = kmb_ocs_hcu_remove,
+ .remove = kmb_ocs_hcu_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_hcu_of_match,
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..bb6f33f6b4d3 100644
--- a/drivers/crypto/intel/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -1473,8 +1474,7 @@ int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
ll = dll_desc->vaddr;
for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
ll[i].src_addr = sg_dma_address(sg) + data_offset;
- ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
- (sg_dma_len(sg) - data_offset) : data_size;
+ ll[i].src_len = min(sg_dma_len(sg) - data_offset, data_size);
data_offset = 0;
data_size -= ll[i].src_len;
/* Current element points to the DMA address of the next one. */
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 02fb8abe4e6e..4b4861460dd4 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -6,12 +6,11 @@ config CRYPTO_DEV_QAT
select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
- select CRYPTO_HMAC
select CRYPTO_RSA
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select FW_LOADER
select CRC8
@@ -70,6 +69,18 @@ config CRYPTO_DEV_QAT_420XX
To compile this as a module, choose M here: the module
will be called qat_420xx.
+config CRYPTO_DEV_QAT_6XXX
+ tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX"
+ depends on (X86 || COMPILE_TEST)
+ depends on PCI
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_6xxx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_6xxx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 235b69f4f3f7..abef14207afa 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+subdir-ccflags-y := -I$(src)/qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index 45728659fbc4..f6df54d2993e 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
-qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
+qat_420xx-y := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 78f0ea49254d..53fa91d577ed 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -93,12 +92,11 @@ static const struct adf_fw_config adf_fw_dcc_config[] = {
static struct adf_hw_device_class adf_420xx_class = {
.name = ADF_420XX_DEVICE_NAME,
.type = DEV_420XX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses;
+ u32 me_disable = self->fuses[ADF_FUSECTL4];
return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
}
@@ -106,8 +104,7 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return ARRAY_SIZE(adf_fw_cy_config);
case SVC_DC:
return ARRAY_SIZE(adf_fw_dc_config);
@@ -118,10 +115,8 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return ARRAY_SIZE(adf_fw_asym_config);
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return ARRAY_SIZE(adf_fw_asym_dc_config);
case SVC_SYM_DC:
- case SVC_DC_SYM:
return ARRAY_SIZE(adf_fw_sym_dc_config);
default:
return 0;
@@ -131,8 +126,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return adf_fw_cy_config;
case SVC_DC:
return adf_fw_dc_config;
@@ -143,10 +137,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
case SVC_ASYM:
return adf_fw_asym_config;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return adf_fw_asym_dc_config;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return adf_fw_sym_dc_config;
default:
return NULL;
@@ -199,7 +191,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_ZUC |
- ICP_ACCEL_CAPABILITIES_ZUC_256 |
ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
@@ -231,17 +222,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
}
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
- }
-
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_SM2 |
@@ -266,8 +251,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
@@ -284,10 +268,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return capabilities_asym;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return capabilities_asym | capabilities_dc;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return capabilities_sym | capabilities_dc;
default:
return 0;
@@ -314,11 +296,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
@@ -375,7 +359,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
@@ -420,6 +404,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
}
@@ -477,11 +462,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
+ hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index f49818a13013..cfa00daeb4fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -14,7 +14,7 @@
#include "adf_420xx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -79,7 +79,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_420xx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -129,16 +129,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
- ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ ret = pcim_request_all_regions(pdev, pci_name(pdev));
if (ret) {
- dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions.\n");
goto out_err;
}
i = 0;
for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
bar = &accel_pci_dev->pci_bars[i++];
- bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ bar->virt_addr = pcim_iomap(pdev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to ioremap PCI region.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
pci_set_master(pdev);
@@ -181,11 +186,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
@@ -199,4 +212,4 @@ MODULE_FIRMWARE(ADF_420XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index 9ba202079a22..188b611445e6 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
-qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
+qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 9fd7ec53b9f3..740f68a36ac5 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,21 +3,21 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -96,12 +96,11 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses;
+ u32 me_disable = self->fuses[ADF_FUSECTL4];
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}
@@ -178,8 +177,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
@@ -196,10 +194,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return capabilities_asym;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return capabilities_asym | capabilities_dc;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return capabilities_sym | capabilities_dc;
default:
return 0;
@@ -226,11 +222,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -241,8 +239,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return adf_fw_cy_config;
case SVC_DC:
return adf_fw_dc_config;
@@ -253,10 +250,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
case SVC_ASYM:
return adf_fw_asym_config;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return adf_fw_asym_dc_config;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return adf_fw_sym_dc_config;
default:
return NULL;
@@ -334,7 +329,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
@@ -428,13 +423,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
- case ADF_402XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
- case ADF_401XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
@@ -456,16 +451,18 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- hw_data->bank_state_save = adf_gen4_bank_state_save;
- hw_data->bank_state_restore = adf_gen4_bank_state_restore;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
+ hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 659905e45950..c9be5dcddb27 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -14,9 +14,9 @@
#include "adf_4xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -81,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -131,16 +131,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
- ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ ret = pcim_request_all_regions(pdev, pci_name(pdev));
if (ret) {
- dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions.\n");
goto out_err;
}
i = 0;
for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
bar = &accel_pci_dev->pci_bars[i++];
- bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ bar->virt_addr = pcim_iomap(pdev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to ioremap PCI region.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
pci_set_master(pdev);
@@ -183,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
@@ -203,4 +216,4 @@ MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile
new file mode 100644
index 000000000000..4b4de67cb0c2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o
+qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
new file mode 100644
index 000000000000..bed88d3ce8ca
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -0,0 +1,950 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_bank_state.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen6_pm.h>
+#include <adf_gen6_ras.h>
+#include <adf_gen6_shared.h>
+#include <adf_gen6_tl.h>
+#include <adf_timer.h>
+#include "adf_6xxx_hw_data.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_51_comp.h"
+
+#define RP_GROUP_0_MASK (BIT(0) | BIT(2))
+#define RP_GROUP_1_MASK (BIT(1) | BIT(3))
+#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK)
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 BIT(8)
+
+struct adf_ring_config {
+ u32 ring_mask;
+ enum adf_cfg_service_type ring_type;
+ const unsigned long *thrd_mask;
+};
+
+static u32 rmask_two_services[] = {
+ RP_GROUP_0_MASK,
+ RP_GROUP_1_MASK,
+};
+
+enum adf_gen6_rps {
+ RP0 = 0,
+ RP1 = 1,
+ RP2 = 2,
+ RP3 = 3,
+ RP_MAX = RP3
+};
+
+/*
+ * thrd_mask_[sym|asym|cpr|dcc|dcpr]: these static arrays define the thread
+ * configuration for handling requests of specific services across the
+ * accelerator engines. Each element in an array corresponds to an
+ * accelerator engine, with the value being a bitmask that specifies which
+ * threads within that engine are capable of processing the particular service.
+ *
+ * For example, a value of 0x0C means that threads 2 and 3 are enabled for the
+ * service in the respective accelerator engine.
+ */
+static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
+};
+
+static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00
+};
+
+static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00
+};
+
+static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
+};
+
+static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
+};
+
+static const char *const adf_6xxx_fw_objs[] = {
+ [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_default_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_DC_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_CY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
+static struct adf_hw_device_class adf_6xxx_class = {
+ .name = ADF_6XXX_DEVICE_NAME,
+ .type = DEV_6XXX,
+};
+
+static bool services_supported(unsigned long mask)
+{
+ int num_svc;
+
+ if (mask >= BIT(SVC_COUNT))
+ return false;
+
+ num_svc = hweight_long(mask);
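+
+ /*
+ * One enabled service is always accepted; two or three are accepted
+ * only if SVC_DCC is not among them (e.g. SYM + ASYM + DC is valid,
+ * DCC + DC is not).
+ */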
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ case ADF_THREE_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+
+static int get_service(unsigned long *mask)
+{
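+ /*
+ * Pop services from the mask in fixed priority order (ASYM first,
+ * then SYM, DC, DCC and DECOMP); each call clears and returns one
+ * service bit, or -EINVAL when the mask is empty.
+ */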
+ if (test_and_clear_bit(SVC_ASYM, mask))
+ return SVC_ASYM;
+
+ if (test_and_clear_bit(SVC_SYM, mask))
+ return SVC_SYM;
+
+ if (test_and_clear_bit(SVC_DC, mask))
+ return SVC_DC;
+
+ if (test_and_clear_bit(SVC_DCC, mask))
+ return SVC_DCC;
+
+ if (test_and_clear_bit(SVC_DECOMP, mask))
+ return SVC_DECOMP;
+
+ return -EINVAL;
+}
+
+static enum adf_cfg_service_type get_ring_type(unsigned int service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return SYM;
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_DC:
+ case SVC_DCC:
+ return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
+ default:
+ return UNUSED;
+ }
+}
+
+static const unsigned long *get_thrd_mask(unsigned int service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return thrd_mask_sym;
+ case SVC_ASYM:
+ return thrd_mask_asym;
+ case SVC_DC:
+ return thrd_mask_cpr;
+ case SVC_DCC:
+ return thrd_mask_dcc;
+ case SVC_DECOMP:
+ return thrd_mask_dcpr;
+ default:
+ return NULL;
+ }
+}
+
+static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config,
+ unsigned int *num_services)
+{
+ unsigned int i, nservices;
+ unsigned long mask;
+ int ret, service;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ nservices = hweight_long(mask);
+ if (nservices > MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+
+ for (i = 0; i < nservices; i++) {
+ service = get_service(&mask);
+ if (service < 0)
+ return service;
+
+ rp_config[i].ring_type = get_ring_type(service);
+ rp_config[i].thrd_mask = get_thrd_mask(service);
+
+ /*
+ * If there is only one service enabled, use all ring pairs for
+ * that service.
+ * If there are two services enabled, use ring pairs 0 and 2 for
+ * one service and ring pairs 1 and 3 for the other service.
+ * If there are three services enabled, use one ring pair per
+ * service, with ASYM additionally taking ring pair 3.
+ */
+ switch (nservices) {
+ case ADF_ONE_SERVICE:
+ rp_config[i].ring_mask = RP_GROUP_ALL_MASK;
+ break;
+ case ADF_TWO_SERVICES:
+ rp_config[i].ring_mask = rmask_two_services[i];
+ break;
+ case ADF_THREE_SERVICES:
+ rp_config[i].ring_mask = BIT(i);
+
+ /* If ASYM is enabled, use additional ring pair */
+ if (service == SVC_ASYM)
+ rp_config[i].ring_mask |= BIT(RP3);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ *num_services = nservices;
+
+ return 0;
+}
+
+static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae)
+{
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, i, thrd;
+ u32 ring_mask, thd2arb_mask = 0;
+ const unsigned long *p_mask;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
+ * It ensures that jobs submitted to ring pairs are scheduled on threads capable
+ * of handling the specified service type.
+ *
+ * Each group of 4 bits in the mask corresponds to a thread, with each bit
+ * indicating whether a job from a ring pair can be scheduled on that thread.
+ * The use of 4 bits is due to the organization of ring pairs into groups of
+ * four, where each group shares the same configuration.
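+ *
+ * Worked example with SYM + ASYM enabled: ASYM is assigned first and
+ * takes ring pairs 0 and 2 (ring_mask 0x5); engine 0 runs ASYM on
+ * threads 4-6 (thrd_mask_asym[0] = 0x70), so the ASYM pass of the loop
+ * below ORs in 0x5 << 16, 0x5 << 20 and 0x5 << 24 (0x5550000), and the
+ * SYM pass adds 0xA << 8 and 0xA << 12 for a final mask of 0x555AA00.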
+ */
+ for (i = 0; i < num_services; i++) {
+ p_mask = &rp_config[i].thrd_mask[ae];
+ ring_mask = rp_config[i].ring_mask;
+
+ for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_mask |= ring_mask << (thrd * 4);
+ }
+
+ return thd2arb_mask;
+}
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { };
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, rp_num, i;
+ unsigned long cfg_mask;
+ u16 ring_to_svc_map;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * Loop through the configured services and populate the `rps` array,
+ * which records the service each ring pair handles (i.e. symmetric
+ * crypto, asymmetric crypto, compression, decompression or compression
+ * chaining).
+ */
+ for (i = 0; i < num_services; i++) {
+ cfg_mask = rp_config[i].ring_mask;
+ for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
+ rps[rp_num] = rp_config[i].ring_type;
+ }
+
+ /*
+ * The ring_to_svc_map is structured into segments of 3 bits, with each
+ * segment representing the service configuration for a specific ring pair.
+ * Since ring pairs are organized into groups of 4, the map contains 4
+ * such 3-bit segments, each corresponding to one ring pair.
+ *
+ * The device has 64 ring pairs, which are organized in groups of 4, namely
+ * 16 groups. Each group has the same configuration, represented here by
+ * `ring_to_svc_map`.
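+ *
+ * Sketch (assuming the ADF_CFG_SERV_RING_PAIR_n_SHIFT values are 0, 3,
+ * 6 and 9, matching the 3-bit segments above): with ASYM on ring pairs
+ * 0/2 and SYM on ring pairs 1/3, the map below evaluates to
+ * ASYM | SYM << 3 | ASYM << 6 | SYM << 9.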
+ */
+ ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ACCELERATORS_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return self ? hweight32(self->ae_mask) : 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN6_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET;
+}
+
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_COUNTER_FREQ;
+}
+
+static void enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ /*
+ * Enable all error notification bits in errsou3 except VFLR
+ * notification on host.
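+ * A set errmsk bit masks its source, so writing only ADF_GEN6_VFLNOTIFY
+ * leaves every other source enabled.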
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY);
+}
+
+static void enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0);
+}
+
+static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 val = ADF_SSM_WDT_DEFAULT_VALUE;
+
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ unsigned int i;
+
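+ /*
+ * With ADF_GEN6_ETR_MAX_BANKS = 64, this programs entries 0..63 for
+ * the ring banks and entry 64 for VF2PF/error interrupts, routing
+ * each source to its own MSI-X entry.
+ */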
+ for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
+}
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Write 1 to BIT(0) of the rpresetctl register to trigger the reset.
+ * The rpresetctl registers have no RW fields to preserve, so there is
+ * no need to read-modify-write; just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (ret)
+ return ret;
+
+ /* When ring pair reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
+
+ return 0;
+}
+
+static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number);
+
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+
+static int build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
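+ /*
+ * Build the lower slice configuration word: LLLBD control disabled,
+ * search depth level 1; the upper configuration word stays zero.
+ */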
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1;
+ lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = build_comp_block;
+ dc_ops->build_decomp_block = build_decomp_block;
+}
+
+static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int i;
+
+ for (i = 0; i < hw_data->num_engines; i++) {
+ thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
+ dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
+ }
+
+ return 0;
+}
+
+static void init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ enum adf_fw_objs obj_type, obj_iter;
+ unsigned int svc, i, num_grp;
+ u32 ae_mask;
+
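+ /*
+ * Count, per base service, the engines that run its firmware object.
+ * With adf_default_fw_config above this yields 4 engines for SYM/ASYM
+ * (ADF_AE_GROUP_0) and 4 for DC/DECOMP (ADF_AE_GROUP_1).
+ */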
+ for (svc = 0; svc < SVC_BASE_COUNT; svc++) {
+ switch (svc) {
+ case SVC_SYM:
+ case SVC_ASYM:
+ obj_type = ADF_FW_CY_OBJ;
+ break;
+ case SVC_DC:
+ case SVC_DECOMP:
+ obj_type = ADF_FW_DC_OBJ;
+ break;
+ }
+
+ num_grp = ARRAY_SIZE(adf_default_fw_config);
+ for (i = 0; i < num_grp; i++) {
+ obj_iter = adf_default_fw_config[i].obj;
+ if (obj_iter == obj_type) {
+ ae_mask = adf_default_fw_config[i].ae_mask;
+ device_data->svc_ae_mask[svc] = hweight32(ae_mask);
+ break;
+ }
+ }
+ }
+}
+
+static u32 adf_gen6_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.cpr_cnt + device_data->slices.dcpr_cnt;
+ case SVC_DECOMP:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+
+static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
+{
+ u32 value;
+
+ /*
+ * After each PF FLR, for each of the 64 ring pairs in the PF, the
+ * driver must program the ringmodectl CSRs.
+ */
+ value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
+}
+
+static int set_vc_config(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u32 value;
+ int err;
+
+ /*
+ * After each PF FLR, the driver must program the Port Virtual Channel (VC)
+ * Control Registers.
+ * Read PVC0CTL then write the masked values.
+ */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
+ FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
+ if (err) {
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
+ return pcibios_err_to_errno(err);
+ }
+
+ /* Read PVC1CTL then write masked values */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
+ if (err)
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
+
+ return pcibios_err_to_errno(err);
+}
+
+static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ u32 i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i);
+ set_vc_csr_for_bank(csr, i);
+ }
+
+ return set_vc_config(accel_dev);
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_6XXX_ACCELENGINES_MASK;
+
+ /*
+ * If bit 0 is set in the fuses, the first 4 engines are disabled.
+ * If bit 4 is set, the second group of 4 engines is disabled.
+ * If bit 8 is set, the admin engine is disabled.
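+ *
+ * For example, with only bit 4 set: 0x1FF & ~GENMASK(7, 4) = 0x10F,
+ * leaving engines 0-3 and the admin engine enabled.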
+ */
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym;
+ u32 capabilities_dc;
+ unsigned long mask;
+ u32 caps = 0;
+ u32 fusectl1;
+
+ fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ /* Read accelerator capabilities mask */
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ }
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ if (adf_get_service_mask(accel_dev, &mask))
+ return 0;
+
+ if (test_bit(SVC_ASYM, &mask))
+ caps |= capabilities_asym;
+ if (test_bit(SVC_SYM, &mask))
+ caps |= capabilities_sym;
+ if (test_bit(SVC_DC, &mask) || test_bit(SVC_DECOMP, &mask))
+ caps |= capabilities_dc;
+ if (test_bit(SVC_DCC, &mask)) {
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported.
+ */
+ caps = capabilities_dc | capabilities_sym;
+ caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ }
+
+ return caps;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ return ARRAY_SIZE(adf_default_fw_config);
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ int id;
+
+ id = adf_default_fw_config[obj_num].obj;
+ if (id >= num_fw_objs)
+ return NULL;
+
+ return adf_6xxx_fw_objs[id];
+}
+
+static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return uof_get_name(accel_dev, obj_num);
+}
+
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ return adf_default_fw_config[obj_num].obj;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return adf_default_fw_config[obj_num].ae_mask;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen6_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to generate thread to arbiter mapping");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 status;
+ u32 csr;
+ int ret;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2);
+ csr |= ADF_GEN6_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN6_PM_INIT_STATE,
+ ADF_GEN6_PM_POLL_DELAY_US,
+ ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN6_PM_STATUS);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+ return ret;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
+ accel_dev->accel_id);
+
+ ret = adf_gen6_set_vc(accel_dev);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
+
+ return ret;
+}
+
+static int enable_pm(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Initialize PM internal data */
+ adf_gen6_init_dev_pm_data(accel_dev);
+
+ return 0;
+}
+
+static int dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ return ret;
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_gen6_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_gen6_no_dev_config(accel_dev);
+ break;
+ }
+ if (ret)
+ return ret;
+
+ __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+}
+
+static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN6_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN6_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN6_RL_C2S_OFFSET;
+ rl_data->pcie_scale_div = ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->max_tp[SVC_ASYM] = ADF_6XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_6XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_6XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_DECOMP] = ADF_6XXX_RL_MAX_TP_DECOMP;
+ rl_data->scan_interval = ADF_6XXX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_6XXX_RL_SLICE_REF;
+
+ init_num_svc_aes(rl_data);
+}
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &adf_6xxx_class;
+ hw_data->instance_id = adf_6xxx_class.instances++;
+ hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = 0;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_arb_info = get_arb_info;
+ hw_data->get_admin_info = get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK;
+ hw_data->fw_name = ADF_6XXX_FW;
+ hw_data->fw_mmp_name = ADF_6XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_6xxx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = ring_pair_reset;
+ hw_data->dev_config = dev_config;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
+ hw_data->init_device = adf_init_device;
+ hw_data->enable_pm = enable_pm;
+ hw_data->services_supported = services_supported;
+ hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
+ hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
+
+ adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_vf_mig_ops(&hw_data->vfmig_ops);
+ adf_gen6_init_ras_ops(&hw_data->ras_ops);
+ adf_gen6_init_tl_data(&hw_data->tl_data);
+ adf_gen6_init_rl_data(&hw_data->rl_data);
+}
+
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ if (hw_data->dev_class->instances)
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
new file mode 100644
index 000000000000..d822911fe68c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_6XXX_HW_DATA_H_
+#define ADF_6XXX_HW_DATA_H_
+
+#include <linux/bits.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_dc.h"
+
+/* PCIe configuration space */
+#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN6_SRAM_BAR 0
+#define ADF_GEN6_PMISC_BAR 1
+#define ADF_GEN6_ETR_BAR 2
+#define ADF_6XXX_MAX_ACCELENGINES 9
+
+/* Clocks frequency */
+#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8
+
+/* Accelerators */
+#define ADF_GEN6_ACCELERATORS_MASK 0x1
+#define ADF_GEN6_MAX_ACCELERATORS 1
+
+/* MSI-X interrupt */
+#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4))
+
+/* Bank and ring configuration */
+#define ADF_GEN6_NUM_RINGS_PER_BANK 2
+#define ADF_GEN6_NUM_BANKS_PER_VF 4
+#define ADF_GEN6_ETR_MAX_BANKS 64
+#define ADF_GEN6_RX_RINGS_OFFSET 1
+#define ADF_GEN6_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN6_ARB_OFFSET 0x000
+#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin interface configuration */
+#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+
+/*
+ * Watchdog timers
+ * The timeout is expressed in cycles. The clock speed may vary across
+ * products, but this value corresponds to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL
+#define ADF_SSMWDTATHL_OFFSET 0x5208
+#define ADF_SSMWDTATHH_OFFSET 0x520C
+#define ADF_SSMWDTCNVL_OFFSET 0x5408
+#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTUCSL_OFFSET 0x5808
+#define ADF_SSMWDTUCSH_OFFSET 0x580C
+#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
+#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTPKEL_OFFSET 0x5E08
+#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8)
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Controls the mode of operation of the corresponding ring */
+#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4)
+
+/* Specifies the traffic class to use for the transactions to/from the ring */
+#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16)
+#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7
+
+/* Specifies usage of the TC field for transactions to/from this ring */
+#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19)
+
+/*
+ * Use the value programmed in the tc field for request descriptor
+ * and metadata read transactions
+ */
+#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1
+
+/* VC0 Resource Control Register */
+#define ADF_GEN6_PVC0CTL_OFFSET 0x204
+#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x3F
+
+/* VC1 Resource Control Register */
+#define ADF_GEN6_PVC1CTL_OFFSET 0x210
+#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40
+#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31
+#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31)
+/* RW bit: 0x1 - enables the Virtual Channel, 0x0 - disables it */
+#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+#define ADF_GEN6_VFLNOTIFY BIT(7)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+/* Rate Limiting */
+#define ADF_GEN6_RL_R2L_OFFSET 0x508000
+#define ADF_GEN6_RL_L2C_OFFSET 0x509000
+#define ADF_GEN6_RL_C2S_OFFSET 0x508818
+#define ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
+#define ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+
+/* Physical function fuses */
+#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
+#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
+
+/* Firmware binaries */
+#define ADF_6XXX_FW "qat_6xxx.bin"
+#define ADF_6XXX_MMP "qat_6xxx_mmp.bin"
+#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
+#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
+#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+
+/* RL constants */
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_6XXX_RL_SCANS_PER_SEC 954
+#define ADF_6XXX_RL_MAX_TP_ASYM 173750UL
+#define ADF_6XXX_RL_MAX_TP_SYM 95000UL
+#define ADF_6XXX_RL_MAX_TP_DC 40000UL
+#define ADF_6XXX_RL_MAX_TP_DECOMP 40000UL
+#define ADF_6XXX_RL_SLICE_REF 1000UL
+
+/* Clock frequency */
+#define ADF_6XXX_AE_FREQ (1000 * HZ_PER_MHZ)
+
+enum icp_qat_gen6_slice_mask {
+ ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
+ ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
+ ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+
+#endif /* ADF_6XXX_HW_DATA_H_ */
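
The ring-pair reset registers above follow a set/poll protocol: software writes the RESET bit to RPRESETCTL and polls RPRESETSTS until the STATUS bit is set or the 5-second budget expires. A hedged sketch of that loop, assuming the read_poll_timeout() helper from <linux/iopoll.h> and the ADF_CSR_* accessors from adf_accel_devices.h (the real gen6 implementation lives elsewhere in this series):

static int example_ring_pair_reset(void __iomem *csr, u32 bank)
{
        u32 status;

        /* Request the reset, then poll for hardware completion */
        ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank),
                   ADF_WQM_CSR_RPRESETCTL_RESET);

        return read_poll_timeout(ADF_CSR_RD, status,
                                 status & ADF_WQM_CSR_RPRESETSTS_STATUS,
                                 ADF_RPRESET_POLL_DELAY_US,
                                 ADF_RPRESET_POLL_TIMEOUT_US, true,
                                 csr, ADF_WQM_CSR_RPRESETSTS(bank));
}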
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
new file mode 100644
index 000000000000..c1dc9c56fdf5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_gen6_shared.h"
+#include "adf_6xxx_hw_data.h"
+
+static int bar_map[] = {
+ 0, /* SRAM */
+ 2, /* PMISC */
+ 4, /* ETR */
+};
+
+static void adf_device_down(void *accel_dev)
+{
+ adf_dev_down(accel_dev);
+}
+
+static void adf_dbgfs_cleanup(void *accel_dev)
+{
+ adf_dbgfs_exit(accel_dev);
+}
+
+static void adf_cfg_device_remove(void *accel_dev)
+{
+ adf_cfg_dev_remove(accel_dev);
+}
+
+static void adf_cleanup_hw_data(void *accel_dev)
+{
+ struct adf_accel_dev *accel_device = accel_dev;
+
+ if (accel_device->hw_device) {
+ adf_clean_hw_data_6xxx(accel_device->hw_device);
+ accel_device->hw_device = NULL;
+ }
+}
+
+static void adf_devmgr_remove(void *accel_dev)
+{
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct adf_accel_dev *accel_dev;
+ struct adf_bar *bar;
+ unsigned int i;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory,
+ * there is no point in using the accelerator, since remote
+ * memory transactions will be very slow.
+ */
+ return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n");
+ }
+
+ accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ INIT_LIST_HEAD(&accel_dev->list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+ accel_dev->owner = THIS_MODULE;
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
+
+ if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
+ return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable PCI device.\n");
+
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n");
+
+ ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_6xxx(accel_dev->hw_device);
+
+ ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Get Accelerators and Accelerator Engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) {
+ ret = -EFAULT;
+ return dev_err_probe(dev, ret, "No acceleration units were found.\n");
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return dev_err_probe(dev, ret, "No usable DMA configuration.\n");
+
+ ret = adf_gen6_cfg_dev_init(accel_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize configuration.\n");
+
+ /* Get accelerator capability mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ ret = -EINVAL;
+ return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bar_map); i++) {
+ bar = &accel_pci_dev->pci_bars[i];
+
+ /* Map 64-bit PCIe BAR */
+ bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev));
+ if (IS_ERR(bar->virt_addr)) {
+ ret = PTR_ERR(bar->virt_addr);
+ return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n");
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * The PCI config space is saved at this point and will be restored
+ * after a Function Level Reset (FLR) as the FLR does not completely
+ * restore it.
+ */
+ ret = pci_save_state(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to save pci state.\n");
+
+ accel_dev->ras_errors.enabled = true;
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_sysfs_init(accel_dev);
+
+ return ret;
+}
+
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_6XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_6XXX_FW);
+MODULE_FIRMWARE(ADF_6XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices");
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS("CRYPTO_QAT");
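
The probe above leans entirely on devm_add_action_or_reset() for unwinding: each call either registers a release action or, if registration fails, runs the action immediately and returns an error; on detach, registered actions run in reverse order of registration, which is why this driver needs no remove() callback. A minimal sketch of the idiom (names are hypothetical):

static void example_release(void *data)
{
        pr_info("releasing %p\n", data);
}

static int example_probe_step(struct device *dev, void *res)
{
        /* Runs example_release(res) right away if registration fails */
        return devm_add_action_or_reset(dev, example_release, res);
}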
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 7a06ad519bfc..43604c025f0c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
-qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
+qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 201f9412c582..07f2c42a68f5 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,13 +21,12 @@ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
u32 accel;
accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
@@ -39,8 +37,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
unsigned long disabled;
u32 ae_disable;
int accel;
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 4d18057745d4..bceb5dd8b148 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C3XXX_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -126,7 +108,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_c3xxx(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
&hw_data->straps);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
@@ -252,4 +257,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW);
MODULE_FIRMWARE(ADF_C3XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index 7ef633058c4f..03f6745b4aa2 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
-qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
+qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a512ca4efd3f..db3c33fa1881 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index f0023cfb234c..c622793e94a8 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index cc9255b3b198..f3d722bef088 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
-qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
+qat_c62x-y := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 6b5b0cf9c7c7..0b410b41474d 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,13 +21,12 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
u32 accel;
accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
@@ -39,8 +37,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
unsigned long disabled;
u32 ae_disable;
int accel;
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index e6b5de55434e..23ccb72b6ea2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C62X_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -126,7 +108,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_c62x(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
&hw_data->straps);
@@ -169,7 +151,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
/* Find and map all the device's BARS */
- i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+ i = (hw_data->fuses[ADF_FUSECTL0] & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
@@ -252,4 +257,4 @@ MODULE_FIRMWARE(ADF_C62X_FW);
MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 256786662d60..ed7f3f722d99 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
-qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
+qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 4aaaaf921734..7f00035d3661 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 2bd5b0ff00e3..4840d44bbd5b 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index eac73cbfdd38..89845754841b 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -1,62 +1,67 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT
-intel_qat-objs := adf_cfg.o \
- adf_isr.o \
- adf_ctl_drv.o \
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
+intel_qat-y := adf_accel_engine.o \
+ adf_admin.o \
+ adf_aer.o \
+ adf_bank_state.o \
+ adf_cfg.o \
adf_cfg_services.o \
+ adf_clock.o \
+ adf_ctl_drv.o \
+ adf_dc.o \
adf_dev_mgr.o \
- adf_init.o \
- adf_accel_engine.o \
- adf_aer.o \
- adf_transport.o \
- adf_admin.o \
- adf_hw_arbiter.o \
- adf_sysfs.o \
- adf_sysfs_ras_counters.o \
+ adf_gen2_config.o \
adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
- adf_gen2_config.o \
adf_gen4_config.o \
adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
- adf_gen4_vf_mig.o \
adf_gen4_pm.o \
- adf_gen2_dc.o \
- adf_gen4_dc.o \
adf_gen4_ras.o \
- adf_gen4_timer.o \
- adf_clock.o \
+ adf_gen4_vf_mig.o \
+ adf_gen6_ras.o \
+ adf_gen6_shared.o \
+ adf_hw_arbiter.o \
+ adf_init.o \
+ adf_isr.o \
adf_mstate_mgr.o \
- qat_crypto.o \
- qat_compression.o \
- qat_comp_algs.o \
- qat_algs.o \
- qat_asym_algs.o \
- qat_algs_send.o \
- adf_rl.o \
adf_rl_admin.o \
+ adf_rl.o \
+ adf_sysfs.o \
+ adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
- qat_uclo.o \
- qat_hal.o \
+ adf_timer.o \
+ adf_transport.o \
+ qat_algs.o \
+ qat_algs_send.o \
+ qat_asym_algs.o \
qat_bl.o \
- qat_mig_dev.o
+ qat_comp_algs.o \
+ qat_compression.o \
+ qat_crypto.o \
+ qat_hal.o \
+ qat_mig_dev.o \
+ qat_uclo.o
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \
+ adf_dbgfs.o \
adf_fw_counters.o \
- adf_cnv_dbgfs.o \
adf_gen4_pm_debugfs.o \
adf_gen4_tl.o \
- adf_heartbeat.o \
+ adf_gen6_pm_dbgfs.o \
+ adf_gen6_tl.o \
adf_heartbeat_dbgfs.o \
+ adf_heartbeat.o \
adf_pm_dbgfs.o \
+ adf_pm_dbgfs_utils.o \
adf_telemetry.o \
adf_tl_debugfs.o \
- adf_dbgfs.o
+ adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+intel_qat-$(CONFIG_PCI_IOV) += adf_gen2_pfvf.o adf_gen4_pfvf.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
- adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_pfvf_utils.o adf_pfvf_vf_msg.o \
+ adf_pfvf_vf_proto.o adf_sriov.o adf_vf_isr.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 7830ecb1a1f1..9fe3239f0114 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -10,7 +10,9 @@
#include <linux/ratelimit.h>
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
+#include <linux/wordpart.h>
#include "adf_cfg_common.h"
+#include "adf_dc.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
@@ -24,14 +26,18 @@
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_420XX_PCI_DEVICE_ID 0x4946
-#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
+#define ADF_6XXX_DEVICE_NAME "6xxx"
+#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
+#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
+#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
+#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
+#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
+#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
+#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
+#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949
+
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -52,6 +58,16 @@ enum adf_accel_capabilities {
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
+enum adf_fuses {
+ ADF_FUSECTL0,
+ ADF_FUSECTL1,
+ ADF_FUSECTL2,
+ ADF_FUSECTL3,
+ ADF_FUSECTL4,
+ ADF_FUSECTL5,
+ ADF_MAX_FUSES
+};
+
struct adf_bar {
resource_size_t base_addr;
void __iomem *virt_addr;
@@ -141,39 +157,7 @@ struct admin_info {
u32 mailbox_offset;
};
-struct ring_config {
- u64 base;
- u32 config;
- u32 head;
- u32 tail;
- u32 reserved0;
-};
-
-struct bank_state {
- u32 ringstat0;
- u32 ringstat1;
- u32 ringuostat;
- u32 ringestat;
- u32 ringnestat;
- u32 ringnfstat;
- u32 ringfstat;
- u32 ringcstat0;
- u32 ringcstat1;
- u32 ringcstat2;
- u32 ringcstat3;
- u32 iaintflagen;
- u32 iaintflagreg;
- u32 iaintflagsrcsel0;
- u32 iaintflagsrcsel1;
- u32 iaintcolen;
- u32 iaintcolctl;
- u32 iaintflagandcolen;
- u32 ringexpstat;
- u32 ringexpintenable;
- u32 ringsrvarben;
- u32 reserved0;
- struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
-};
+struct adf_bank_state;
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
@@ -256,7 +240,8 @@ struct adf_pfvf_ops {
};
struct adf_dc_ops {
- void (*build_deflate_ctx)(void *ctx);
+ int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
+ int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
struct qat_migdev_ops {
@@ -321,9 +306,9 @@ struct adf_hw_device_data {
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
+ struct adf_bank_state *state);
int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
+ u32 bank_number, struct adf_bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -333,6 +318,9 @@ struct adf_hw_device_data {
int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
+ bool (*services_supported)(unsigned long mask);
+ u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
@@ -343,7 +331,7 @@ struct adf_hw_device_data {
struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
- u32 fuses;
+ u32 fuses[ADF_MAX_FUSES];
u32 straps;
u32 accel_capabilities_mask;
u32 extended_dc_capabilities;
@@ -370,6 +358,15 @@ struct adf_hw_device_data {
/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
__raw_writel(val, csr_base + csr_offset)
+/*
+ * CSR write macro to handle cases where the low and high halves of
+ * a 64-bit value live at non-adjacent register offsets.
+ */
+#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \
+do { \
+ ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \
+ ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \
+} while (0)
/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index acad526eb741..573388c37100 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
size_t buff_size)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index ec7913ab00a2..35679b21ff63 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -229,7 +229,7 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+static int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
{
if (accel_dev->autoreset_on_error)
return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
@@ -281,8 +281,11 @@ int adf_init_aer(void)
return -EFAULT;
device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
- if (!device_sriov_wq)
+ if (!device_sriov_wq) {
+ destroy_workqueue(device_reset_wq);
+ device_reset_wq = NULL;
return -EFAULT;
+ }
return 0;
}
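
The hunk above fixes a resource leak in adf_init_aer(): when allocation of the second workqueue fails, the first one must be destroyed rather than leaked. A hedged sketch of the general two-resource unwind idiom (names hypothetical):

static struct workqueue_struct *wq_a, *wq_b;

static int example_init_wqs(void)
{
        wq_a = alloc_workqueue("example_a", 0, 0);
        if (!wq_a)
                return -ENOMEM;

        wq_b = alloc_workqueue("example_b", 0, 0);
        if (!wq_b) {
                /* Undo the first allocation before bailing out */
                destroy_workqueue(wq_a);
                wq_a = NULL;
                return -ENOMEM;
        }

        return 0;
}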
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.c b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
new file mode 100644
index 000000000000..225d55d56a4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/printk.h>
+#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
+#include "adf_common_drv.h"
+
+/* Ring interrupt masks */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+
+static inline int check_stat(u32 (*op)(void __iomem *, u32), u32 expect_val,
+ const char *name, void __iomem *base, u32 bank)
+{
+ u32 actual_val = op(base, bank);
+
+ if (expect_val == actual_val)
+ return 0;
+
+ pr_err("Fail to restore %s register. Expected %#x, actual %#x\n",
+ name, expect_val, actual_val);
+
+ return -EINVAL;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+ /*
+ * The TX ring head needs to be written again so that the HW
+ * does not consider the ring full when it is empty and so that
+ * the state flags match the recovered state.
+ */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ /*
+ * The RX ring tail needs to be written again so that the HW
+ * does not consider the ring empty when it is full and so that
+ * the state flags match the recovered state.
+ */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+
+ /*
+ * Verify whether any exceptions were raised during the bank save process.
+ * If exceptions occurred, the status and exception registers cannot
+ * be directly restored. Consequently, further restoration is not
+ * feasible, and the current state of the ring should be maintained.
+ */
+ val = state->ringexpstat;
+ if (val) {
+ pr_info("Bank %u state not fully restored due to exception in saved state (%#x)\n",
+ bank, val);
+ return 0;
+ }
+
+ /* Ensure that the restoration process completed without exceptions */
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ if (tmp_val) {
+ pr_err("Bank %u restored with exception: %#x\n", bank, tmp_val);
+ return -EFAULT;
+ }
+
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = check_stat(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * adf_bank_state_save() - save state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function saves the state of a bank by reading the bank CSRs and
+ * writing them in the @state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_save);
+
+/**
+ * adf_bank_state_restore() - restore state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function attempts to restore the state of a bank by writing the
+ * bank CSRs to the values in the state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_restore);
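
A hedged usage sketch of the save/restore pair exported above, as a VF live-migration flow might call it (error handling trimmed; example_migrate_bank is hypothetical):

static int example_migrate_bank(struct adf_accel_dev *src,
                                struct adf_accel_dev *dst, u32 bank)
{
        struct adf_bank_state state;
        int ret;

        /* Snapshot the bank CSRs on the source device */
        ret = adf_bank_state_save(src, bank, &state);
        if (ret)
                return ret;

        /* Replay them on the destination device */
        return adf_bank_state_restore(dst, bank, &state);
}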
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.h b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
new file mode 100644
index 000000000000..48b573d692dd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_BANK_STATE_H_
+#define ADF_BANK_STATE_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct adf_bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 89df3888d7ea..81e9e9d7eccd 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -29,6 +29,7 @@ enum adf_cfg_service_type {
COMP,
SYM,
ASYM,
+ DECOMP,
USED
};
@@ -48,6 +49,7 @@ enum adf_device_type {
DEV_C3XXXVF,
DEV_4XXX,
DEV_420XX,
+ DEV_6XXX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 268052294468..7d00bcb41ce7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -1,47 +1,212 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/string.h>
#include "adf_cfg.h"
+#include "adf_cfg_common.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
-const char *const adf_cfg_services[] = {
- [SVC_CY] = ADF_CFG_CY,
- [SVC_CY2] = ADF_CFG_ASYM_SYM,
+static const char *const adf_cfg_services[] = {
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_SYM] = ADF_CFG_SYM,
[SVC_DC] = ADF_CFG_DC,
[SVC_DCC] = ADF_CFG_DCC,
- [SVC_SYM] = ADF_CFG_SYM,
- [SVC_ASYM] = ADF_CFG_ASYM,
- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
+ [SVC_DECOMP] = ADF_CFG_DECOMP,
};
-EXPORT_SYMBOL_GPL(adf_cfg_services);
-int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+/*
+ * Ensure that the size of the array matches the number of services,
+ * SVC_COUNT, that is used to size the bitmap.
+ */
+static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_COUNT);
+
+/*
+ * Ensure that the maximum number of concurrent services that can be
+ * enabled on a device is less than or equal to the total number of
+ * supported services.
+ */
+static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC);
+
+/*
+ * Ensure that the number of services fits in a single unsigned long, as each
+ * service is represented by a bit in the mask.
+ */
+static_assert(BITS_PER_LONG >= SVC_COUNT);
+
+/*
+ * Ensure that size of the concatenation of all service strings is smaller
+ * than the size of the buffer that will contain them.
+ */
+static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER
+ ADF_CFG_ASYM ADF_SERVICES_DELIMITER
+ ADF_CFG_DC ADF_SERVICES_DELIMITER
+ ADF_CFG_DECOMP ADF_SERVICES_DELIMITER
+ ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+
+static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf,
+ size_t len, unsigned long *out_mask)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ unsigned long mask = 0;
+ char *substr, *token;
+ int id, num_svc = 0;
+
+ if (len > ADF_CFG_MAX_VAL_LEN_IN_BYTES - 1)
+ return -EINVAL;
+
+ strscpy(services, buf, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ substr = services;
+
+ while ((token = strsep(&substr, ADF_SERVICES_DELIMITER))) {
+ id = sysfs_match_string(adf_cfg_services, token);
+ if (id < 0)
+ return id;
+
+ if (test_and_set_bit(id, &mask))
+ return -EINVAL;
+
+ if (num_svc++ == MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+ }
+
+ if (hw_data->services_supported && !hw_data->services_supported(mask))
+ return -EINVAL;
+
+ *out_mask = mask;
+
+ return 0;
+}
+
+static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len)
+{
+ int offset = 0;
+ int bit;
+
+ if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES)
+ return -ENOSPC;
+
+ for_each_set_bit(bit, &mask, SVC_COUNT) {
+ if (offset)
+ offset += scnprintf(buf + offset, len - offset,
+ ADF_SERVICES_DELIMITER);
+
+ offset += scnprintf(buf + offset, len - offset, "%s",
+ adf_cfg_services[bit]);
+ }
+
+ return 0;
+}
+
+int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
+ size_t in_len, char *out, size_t out_len)
{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ unsigned long mask;
+ int ret;
+
+ ret = adf_service_string_to_mask(accel_dev, in, in_len, &mask);
+ if (ret)
+ return ret;
+
+ if (!mask)
+ return -EINVAL;
+
+ return adf_service_mask_to_string(mask, out, out_len);
+}
+
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ size_t len;
int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
if (ret) {
- dev_err(&GET_DEV(accel_dev),
- ADF_SERVICES_ENABLED " param not found\n");
+ dev_err(&GET_DEV(accel_dev), "%s param not found\n",
+ ADF_SERVICES_ENABLED);
return ret;
}
- ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
- services);
- if (ret < 0)
- dev_err(&GET_DEV(accel_dev),
- "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
- services);
+ len = strnlen(services, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ ret = adf_service_string_to_mask(accel_dev, services, len, mask);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Invalid value of %s param: %s\n",
+ ADF_SERVICES_ENABLED, services);
return ret;
}
+EXPORT_SYMBOL_GPL(adf_get_service_mask);
+
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ unsigned long mask;
+ int ret;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ if (test_bit(SVC_SYM, &mask) && test_bit(SVC_ASYM, &mask))
+ return SVC_SYM_ASYM;
+
+ if (test_bit(SVC_SYM, &mask) && test_bit(SVC_DC, &mask))
+ return SVC_SYM_DC;
+
+ if (test_bit(SVC_ASYM, &mask) && test_bit(SVC_DC, &mask))
+ return SVC_ASYM_DC;
+
+ if (test_bit(SVC_SYM, &mask))
+ return SVC_SYM;
+
+ if (test_bit(SVC_ASYM, &mask))
+ return SVC_ASYM;
+
+ if (test_bit(SVC_DC, &mask))
+ return SVC_DC;
+
+ if (test_bit(SVC_DECOMP, &mask))
+ return SVC_DECOMP;
+
+ if (test_bit(SVC_DCC, &mask))
+ return SVC_DCC;
+
+ return -EINVAL;
+}
EXPORT_SYMBOL_GPL(adf_get_service_enabled);
+
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc)
+{
+ switch (svc) {
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_SYM:
+ return SYM;
+ case SVC_DC:
+ return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
+ default:
+ return UNUSED;
+ }
+}
+
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc)
+{
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(svc);
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u8 rps_per_bundle = hw_data->num_banks_per_vf;
+ int i;
+
+ for (i = 0; i < rps_per_bundle; i++) {
+ if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
+ return true;
+ }
+
+ return false;
+}
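
An illustrative walk-through of the parsing helpers above (hypothetical caller): "sym;asym" is tokenized with strsep(), each token resolves to a bit through sysfs_match_string(), and converting the mask back to a string emits the bits lowest-first, so the stored value is canonicalized to "asym;sym" regardless of input order:

static int example_canonicalize(struct adf_accel_dev *accel_dev)
{
        char out[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        /* "sym;asym" -> mask with SVC_ASYM|SVC_SYM set -> "asym;sym" */
        return adf_parse_service_string(accel_dev, "sym;asym",
                                        strlen("sym;asym"), out, sizeof(out));
}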
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index c6b0328b0f5b..913d717280af 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -7,22 +7,38 @@
struct adf_accel_dev;
-enum adf_services {
- SVC_CY = 0,
- SVC_CY2,
- SVC_DC,
- SVC_DCC,
+enum adf_base_services {
+ SVC_ASYM = 0,
SVC_SYM,
- SVC_ASYM,
- SVC_DC_ASYM,
- SVC_ASYM_DC,
- SVC_DC_SYM,
- SVC_SYM_DC,
+ SVC_DC,
+ SVC_DECOMP,
+ SVC_BASE_COUNT
+};
+
+enum adf_extended_services {
+ SVC_DCC = SVC_BASE_COUNT,
SVC_COUNT
};
-extern const char *const adf_cfg_services[SVC_COUNT];
+enum adf_composed_services {
+ SVC_SYM_ASYM = SVC_COUNT,
+ SVC_SYM_DC,
+ SVC_ASYM_DC,
+};
+
+enum {
+ ADF_ONE_SERVICE = 1,
+ ADF_TWO_SERVICES,
+ ADF_THREE_SERVICES,
+};
+
+#define MAX_NUM_CONCURR_SVC ADF_THREE_SERVICES
+int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
+ size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc);
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index e015ad6cace2..30107a02ee7f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -24,16 +24,13 @@
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
+#define ADF_CFG_DECOMP "decomp"
#define ADF_CFG_CY "sym;asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_ASYM "asym"
-#define ADF_CFG_ASYM_SYM "asym;sym"
-#define ADF_CFG_ASYM_DC "asym;dc"
-#define ADF_CFG_DC_ASYM "dc;asym"
-#define ADF_CFG_SYM_DC "sym;dc"
-#define ADF_CFG_DC_SYM "dc;sym"
#define ADF_CFG_DCC "dcc"
#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_SERVICES_DELIMITER ";"
#define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f7ecabdf7805..6cf3a95489e8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -69,7 +69,6 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
-struct adf_accel_dev *adf_devmgr_get_first(void);
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
int adf_devmgr_verify_id(u32 id);
void adf_devmgr_get_num_dev(u32 *num);
@@ -87,7 +86,6 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
@@ -190,6 +188,7 @@ void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
unsigned long delay);
+void adf_misc_wq_flush(void);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 70fa0f6497a9..c2e6f0cb7480 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -89,26 +89,14 @@ err_chrdev_unreg:
return -EFAULT;
}
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
- unsigned long arg)
+static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
- cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
- if (!cfg_data)
- return -ENOMEM;
-
- /* Initialize device id to NO DEVICE as 0 is a valid device id */
- cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
- if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
+ if (IS_ERR(cfg_data))
pr_err("QAT: failed to copy from user cfg_data.\n");
- kfree(cfg_data);
- return -EIO;
- }
-
- *ctl_data = cfg_data;
- return 0;
+ return cfg_data;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
@@ -188,13 +176,13 @@ out_err:
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
+ int ret = 0;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
@@ -267,9 +255,9 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
@@ -301,9 +289,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
@@ -475,4 +463,4 @@ MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index c42f5c25aabd..4c11ad1ebcf0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -22,18 +22,13 @@
void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
{
char name[ADF_DEVICE_NAME_LENGTH];
- void *ret;
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
accel_dev->hw_device->dev_class->name,
pci_name(accel_dev->accel_pci_dev.pci_dev));
- ret = debugfs_create_dir(name, NULL);
- if (IS_ERR_OR_NULL(ret))
- return;
-
- accel_dev->debugfs_dir = ret;
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
adf_cfg_dev_dbgfs_add(accel_dev);
}
@@ -59,9 +54,6 @@ EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
*/
void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_fw_counters_dbgfs_add(accel_dev);
adf_heartbeat_dbgfs_add(accel_dev);
@@ -77,9 +69,6 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
*/
void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c
new file mode 100644
index 000000000000..3e8fb4e3ed97
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_dc.h"
+#include "icp_qat_fw_comp.h"
+
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ int ret;
+
+ memset(req_tmpl, 0, sizeof(*req_tmpl));
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+ QAT_COMN_PTR_TYPE_SGL);
+ header->serv_specif_flags =
+ ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ /* Build HW config block for compression */
+ ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n");
+ return ret;
+ }
+
+ req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+ req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+ req_pars->req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+ ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL,
+ ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY,
+ ICP_QAT_FW_COMP_NO_CNV_DFX,
+ ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ ICP_QAT_FW_COMP_NO_DROP_DATA,
+ ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
+
+ /* Fill second half of the template for decompression */
+ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+ req_tmpl++;
+
+ /* Build HW config block for decompression */
+ ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n");
+
+ return ret;
+}
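
qat_comp_build_ctx() writes two request templates back to back: it builds the compression template in place, copies it to req_tmpl + 1, then lets the generation-specific hook patch the copy for decompression. The ctx buffer must therefore hold two struct icp_qat_fw_comp_req entries; an illustrative caller (not the driver's code):

/* Illustrative caller; error paths and device setup are elided. */
static int example_init_dc_templates(struct adf_accel_dev *accel_dev)
{
	/* Two templates back to back: [0] compression, [1] decompression. */
	static struct icp_qat_fw_comp_req tmpl[2];

	return qat_comp_build_ctx(accel_dev, tmpl, QAT_DEFLATE);
}
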
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h
new file mode 100644
index 000000000000..6cb5e09054a6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_DC_H
+#define ADF_DC_H
+
+struct adf_accel_dev;
+
+enum adf_dc_algo {
+ QAT_DEFLATE,
+ QAT_LZ4,
+ QAT_LZ4S,
+ QAT_ZSTD,
+};
+
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo);
+
+#endif /* ADF_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 96ddd1c419c4..34b9f7731c78 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -276,16 +276,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
-struct adf_accel_dev *adf_devmgr_get_first(void)
-{
- struct adf_accel_dev *dev = NULL;
-
- if (!list_empty(&accel_table))
- dev = list_first_entry(&accel_table, struct adf_accel_dev,
- list);
- return dev;
-}
-
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
* @pci_dev: Pointer to PCI device.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 4f86696800c9..78957fa900b7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -8,6 +8,7 @@ enum adf_fw_objs {
ADF_FW_ASYM_OBJ,
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
+ ADF_FW_CY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
deleted file mode 100644
index 47261b1c1da6..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
-#include "icp_qat_fw_comp.h"
-
-static void qat_comp_build_deflate_ctx(void *ctx)
-{
- struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
- struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
-
- memset(req_tmpl, 0, sizeof(*req_tmpl));
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
- QAT_COMN_PTR_TYPE_SGL);
- header->serv_specif_flags =
- ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
- req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
- req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
- req_pars->req_par_flags =
- ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
- ICP_QAT_FW_COMP_EOP,
- ICP_QAT_FW_COMP_BFINAL,
- ICP_QAT_FW_COMP_CNV,
- ICP_QAT_FW_COMP_CNV_RECOVERY,
- ICP_QAT_FW_COMP_NO_CNV_DFX,
- ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
- ICP_QAT_FW_COMP_NO_XXHASH_ACC,
- ICP_QAT_FW_COMP_CNV_ERROR_NONE,
- ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
- ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
- ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
-
- /* Fill second half of the template for decompression */
- memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
- req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644
index 6eae023354d7..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index 1f64bf49b221..6a505e9a5cf9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "adf_gen2_hw_data.h"
+#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
@@ -115,8 +117,8 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 fuses = hw_data->fuses[ADF_FUSECTL0];
u32 straps = hw_data->straps;
- u32 fuses = hw_data->fuses;
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
@@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
+
+static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen2_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen2_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
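
The deleted adf_gen2_dc.c is folded into adf_gen2_hw_data.c, with the single build_deflate_ctx hook split into per-direction build_comp_block/build_decomp_block callbacks that can reject unsupported algorithms. Judging by the call sites in adf_dc.c, the ops table has roughly this shape (inferred, not copied; the authoritative definition presumably lives in adf_accel_devices.h):

/* Inferred from the call sites above; a sketch, not the header's text. */
struct adf_dc_ops {
	int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
	int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};

Common code then dispatches through GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo), as qat_comp_build_ctx() does above, keeping the template layout generation-neutral.
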
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 708e9186127b..59bad368a921 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..34a63cf40db2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
index fe1f3d727dc5..afcdfdd0a37a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -11,7 +11,7 @@
#include "qat_compression.h"
#include "qat_crypto.h"
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -117,7 +117,7 @@ err:
return ret;
}
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
unsigned long val;
int ret;
@@ -213,7 +213,6 @@ static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
*/
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
@@ -224,18 +223,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret)
- goto err;
-
- ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
- case SVC_CY:
- case SVC_CY2:
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_SYM_ASYM:
ret = adf_crypto_dev_config(accel_dev);
break;
case SVC_DC:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
index bb87655f69a8..38a674c27e40 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -7,5 +7,8 @@
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_no_dev_config(struct adf_accel_dev *accel_dev);
#endif
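
adf_crypto_dev_config(), adf_comp_dev_config() and adf_no_dev_config() lose their static qualifiers and gain declarations here so later generations can reuse them, while the GEN4 switch above now keys off adf_get_service_enabled() instead of re-parsing the services string. A hedged sketch of such reuse; the default arm is an assumption, since this hunk does not show it:

/* Sketch of reuse by a later generation's dev_config; default arm assumed. */
static int example_dev_config(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_SYM_ASYM:
		return adf_crypto_dev_config(accel_dev);
	case SVC_DC:
		return adf_comp_dev_config(accel_dev);
	default:
		return adf_no_dev_config(accel_dev);	/* assumption */
	}
}
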
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644
index 5859238e37de..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
- struct icp_qat_fw_comp_req *req_tmpl =
- (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
- struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
- struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
- struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
- u32 upper_val;
- u32 lower_val;
-
- memset(req_tmpl, 0, sizeof(*req_tmpl));
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
- QAT_COMN_PTR_TYPE_SGL);
- header->serv_specif_flags =
- ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
- hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
- hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
- hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
- hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
- hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
- upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
- lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
- req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
- req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
- req_pars->req_par_flags =
- ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
- ICP_QAT_FW_COMP_EOP,
- ICP_QAT_FW_COMP_BFINAL,
- ICP_QAT_FW_COMP_CNV,
- ICP_QAT_FW_COMP_CNV_RECOVERY,
- ICP_QAT_FW_COMP_NO_CNV_DFX,
- ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
- ICP_QAT_FW_COMP_NO_XXHASH_ACC,
- ICP_QAT_FW_COMP_CNV_ERROR_NONE,
- ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
-
- /* Fill second half of the template for decompression */
- memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
- req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
-
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
- lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644
index 0b1a6774412e..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 41a0979e68c1..349fdb323763 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
#include "adf_accel_devices.h"
@@ -8,6 +12,8 @@
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
@@ -134,36 +140,18 @@ int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);
-static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
- u32 *lower)
-{
- *lower = lower_32_bits(value);
- *upper = upper_32_bits(value);
-}
-
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
- u32 ssm_wdt_pke_high = 0;
- u32 ssm_wdt_pke_low = 0;
- u32 ssm_wdt_high = 0;
- u32 ssm_wdt_low = 0;
- /* Convert 64bit WDT timer value into 32bit values for
- * mmio write to 32bit CSRs.
- */
- adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
- adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
- &ssm_wdt_pke_low);
-
- /* Enable WDT for sym and dc */
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
- /* Enable WDT for pke */
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET,
+ timer_val_pke);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
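
The hand-rolled 64-to-32-bit unpacking is replaced by an ADF_CSR_WR64_LO_HI() helper. A hedged sketch of what such a helper can look like; the driver's actual macro is defined elsewhere and may differ:

/* Sketch only; the real ADF_CSR_WR64_LO_HI lives in the driver headers. */
#define EXAMPLE_CSR_WR64_LO_HI(base, lo_off, hi_off, val)		\
do {									\
	u64 __v = (val);						\
	ADF_CSR_WR(base, lo_off, lower_32_bits(__v));			\
	ADF_CSR_WR(base, hi_off, upper_32_bits(__v));			\
} while (0)
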
@@ -265,18 +253,32 @@ static bool is_single_service(int service_id)
case SVC_SYM:
case SVC_ASYM:
return true;
- case SVC_CY:
- case SVC_CY2:
- case SVC_DCC:
- case SVC_ASYM_DC:
- case SVC_DC_ASYM:
- case SVC_SYM_DC:
- case SVC_DC_SYM:
default:
return false;
}
}
+bool adf_gen4_services_supported(unsigned long mask)
+{
+ unsigned long num_svc = hweight_long(mask);
+
+ if (mask >= BIT(SVC_COUNT))
+ return false;
+
+ if (test_bit(SVC_DECOMP, &mask))
+ return false;
+
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen4_services_supported);
+
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
@@ -489,183 +491,110 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
return ret;
}
-static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings)
+static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
- u32 i;
-
- state->ringstat0 = ops->read_csr_stat(base, bank);
- state->ringuostat = ops->read_csr_uo_stat(base, bank);
- state->ringestat = ops->read_csr_e_stat(base, bank);
- state->ringnestat = ops->read_csr_ne_stat(base, bank);
- state->ringnfstat = ops->read_csr_nf_stat(base, bank);
- state->ringfstat = ops->read_csr_f_stat(base, bank);
- state->ringcstat0 = ops->read_csr_c_stat(base, bank);
- state->iaintflagen = ops->read_csr_int_en(base, bank);
- state->iaintflagreg = ops->read_csr_int_flag(base, bank);
- state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
- state->iaintcolen = ops->read_csr_int_col_en(base, bank);
- state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
- state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
- state->ringexpstat = ops->read_csr_exp_stat(base, bank);
- state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
- state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
-
- for (i = 0; i < num_rings; i++) {
- state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
- state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
- state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
- state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
- }
-}
-
-#define CHECK_STAT(op, expect_val, name, args...) \
-({ \
- u32 __expect_val = (expect_val); \
- u32 actual_val = op(args); \
- (__expect_val == actual_val) ? 0 : \
- (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
- name, __expect_val, actual_val), -EINVAL); \
-})
-
-static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings,
- int tx_rx_gap)
-{
- u32 val, tmp_val, i;
- int ret;
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
-
- for (i = 0; i < num_rings / 2; i++) {
- int tx = i * (tx_rx_gap + 1);
- int rx = tx + tx_rx_gap;
-
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
-
- /*
- * The TX ring head needs to be updated again to make sure that
- * the HW will not consider the ring as full when it is empty
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringestat & BIT(tx)) {
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- }
-
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- /*
- * The RX ring tail needs to be updated again to make sure that
- * the HW will not consider the ring as empty when it is full
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringfstat & BIT(rx))
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { };
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 upper_val;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
}
- ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
- ops->write_csr_int_en(base, bank, state->iaintflagen);
- ops->write_csr_int_col_en(base, bank, state->iaintcolen);
- ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
- ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
- ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
- ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
-
- /* Check that all ring statuses match the saved state. */
- ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
- base, bank);
- if (ret)
- return ret;
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
- ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
- base, bank);
- if (ret)
- return ret;
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
- ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
- base, bank);
- if (ret)
- return ret;
-
- tmp_val = ops->read_csr_exp_stat(base, bank);
- val = state->ringexpstat;
- if (tmp_val && !val) {
- pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
- return -EINVAL;
- }
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
return 0;
}
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
+static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
-
- if (bank_number >= hw_data->num_banks || !state)
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { };
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
return -EINVAL;
+ }
- dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
- bank_state_save(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank);
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
return 0;
}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
- int ret;
+ dc_ops->build_comp_block = adf_gen4_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ struct adf_hw_device_data *hw_data;
+ unsigned int i;
+ u32 ae_cnt;
- dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+ hw_data = container_of(device_data, struct adf_hw_device_data, rl_data);
+ ae_cnt = hweight32(hw_data->get_ae_mask(hw_data));
+ if (!ae_cnt)
+ return;
- ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
- if (ret)
- dev_err(&GET_DEV(accel_dev),
- "Unable to restore state of bank %d\n", bank_number);
+ for (i = 0; i < SVC_BASE_COUNT; i++)
+ device_data->svc_ae_mask[i] = ae_cnt - 1;
- return ret;
+ /*
+ * The decompression service is not supported on QAT GEN4 devices.
+ * Therefore, set svc_ae_mask to 0.
+ */
+ device_data->svc_ae_mask[SVC_DECOMP] = 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_num_svc_aes);
+
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
+EXPORT_SYMBOL_GPL(adf_gen4_get_svc_slice_cnt);
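
adf_gen4_services_supported() takes a service bitmap and allows any single service, or any pair that does not include DCC; decompression is rejected outright on GEN4, matching adf_gen4_init_num_svc_aes() zeroing the SVC_DECOMP AE mask. Illustrative use, assuming the SVC_* enumerators double as bit positions as the test_bit() calls imply:

/* Illustrative check; SVC_* enumerators are used as bit positions. */
static int example_validate_services(void)
{
	unsigned long mask = BIT(SVC_SYM) | BIT(SVC_DC);

	if (!adf_gen4_services_supported(mask))
		return -EINVAL;	/* e.g. decomp requested, or a DCC pairing */

	return 0;
}
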
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index e8c53bd76f1b..cd26b6724c43 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+#include "adf_dc.h"
/* PCIe configuration space */
#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
@@ -83,9 +84,6 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
-#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
-#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
-#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
#define ADF_COALESCED_POLL_DELAY_US 1000
#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
@@ -175,9 +173,10 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
u32 bank_number, int timeout_us);
void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
u32 bank_number);
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
+bool adf_gen4_services_supported(unsigned long service_mask);
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data);
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..2c8708117f70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_PFVF_H
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#ifdef CONFIG_PCI_IOV
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index ee0b5079de3e..b7e38842a46d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -1,54 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include <linux/stringify.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
+#include "adf_pm_dbgfs_utils.h"
#include "icp_qat_fw_init_admin.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
-#define PM_INFO_MEMBER_OFF(member) \
- (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
-
-#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
-{ \
- .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
- .key = __stringify(_field_), \
- .field_mask = _mask_, \
-}
-
-#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
- PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
-
#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK)
-#define PM_INFO_MAX_KEY_LEN 21
-
-struct pm_status_row {
- int reg_offset;
- u32 field_mask;
- const char *key;
-};
-
-static struct pm_status_row pm_fuse_rows[] = {
+static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
};
-static struct pm_status_row pm_info_rows[] = {
+static const struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP),
PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP),
@@ -59,7 +30,7 @@ static struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE),
};
-static struct pm_status_row pm_ssm_rows[] = {
+static const struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT),
PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED),
@@ -83,7 +54,7 @@ static struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT),
};
-static struct pm_status_row pm_log_rows[] = {
+static const struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT),
@@ -91,7 +62,7 @@ static struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT),
};
-static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
+static const struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0),
PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1),
PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2),
@@ -102,51 +73,13 @@ static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7),
};
-static struct pm_status_row pm_csrs_rows[] = {
+static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW),
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size, int table_len,
- bool lowercase)
-{
- char key[PM_INFO_MAX_KEY_LEN];
- int wr = 0;
- int i;
-
- for (i = 0; i < table_len; i++) {
- if (lowercase)
- string_lower(key, table[i].key);
- else
- string_upper(key, table[i].key);
-
- wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
- field_get(table[i].field_mask,
- pm_info_regs[table[i].reg_offset]));
- }
-
- return wr;
-}
-
-static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, false);
-}
-
-static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, true);
-}
-
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
@@ -191,9 +124,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* Fusectl related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- PM Fuse info ---------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_fuse_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
pm_info->max_pwrreq);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
@@ -204,28 +137,28 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
"------------ PM Info ------------\n");
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
pm_info->pwr_state == PM_SET_MIN ? "min" : "max");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_info_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
/* SSM related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- SSM_PM Info ----------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_ssm_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
/* Log related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"------------- PM Log -------------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_log_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_log_rows));
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_event_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_event_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
pm->idle_irq_counters);
@@ -241,9 +174,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* CSRs content */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- HW PM CSRs -----------\n");
- len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_csrs_rows));
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
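
The pm_scnprint_table*() helpers and the pm_status_row plumbing move behind adf_pm_dbgfs_utils.h so the GEN6 code below can share them. The deleted field_get() macro is worth noting: FIELD_GET() asserts a compile-time-constant mask, and these tables index masks at run time, so the shared helper presumably keeps an equivalent runtime variant:

/* Runtime field extraction, as in the macro deleted above. */
#define example_field_get(_mask, _reg)	(((_reg) & (_mask)) >> (ffs(_mask) - 1))
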
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 2dd3772bf58a..0f7f00a19e7d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
if (err_mask->parerr_wat_wcp_mask)
adf_poll_slicehang_csr(accel_dev, csr,
ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
- "ath_cph");
+ "wat_wcp");
return false;
}
@@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
return reset_required;
}
-static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
+static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
- u32 reg;
-
if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
- return false;
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
- reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
- reg &= err_mask->parerr_ath_cph_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
- reg &= err_mask->parerr_cpr_xlt_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
- reg &= err_mask->parerr_dcpr_ucs_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
- reg &= err_mask->parerr_pke_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
- }
-
- if (err_mask->parerr_wat_wcp_mask) {
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
- reg &= err_mask->parerr_wat_wcp_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
- reg);
- }
- }
+ return;
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
- return false;
}
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
@@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
- reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
+ adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
deleted file mode 100644
index 35ccb91d6ec1..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2023 Intel Corporation */
-
-#include <linux/container_of.h>
-#include <linux/dev_printk.h>
-#include <linux/export.h>
-#include <linux/jiffies.h>
-#include <linux/ktime.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-
-#include "adf_admin.h"
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_timer.h"
-
-#define ADF_GEN4_TIMER_PERIOD_MS 200
-
-/* This periodic update is used to trigger HB, RL & TL fw events */
-static void work_handler(struct work_struct *work)
-{
- struct adf_accel_dev *accel_dev;
- struct adf_timer *timer_ctx;
- u32 time_periods;
-
- timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
- accel_dev = timer_ctx->accel_dev;
-
- adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
-
- time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
- ADF_GEN4_TIMER_PERIOD_MS);
-
- if (adf_send_admin_tim_sync(accel_dev, time_periods))
- dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
-}
-
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
-{
- struct adf_timer *timer_ctx;
-
- timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
- if (!timer_ctx)
- return -ENOMEM;
-
- timer_ctx->accel_dev = accel_dev;
- accel_dev->timer = timer_ctx;
- timer_ctx->initial_ktime = ktime_get_real();
-
- INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
- adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
-
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
-{
- struct adf_timer *timer_ctx = accel_dev->timer;
-
- if (!timer_ctx)
- return;
-
- cancel_delayed_work_sync(&timer_ctx->work_ctx);
-
- kfree(timer_ctx);
- accel_dev->timer = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
deleted file mode 100644
index 66a709e7b358..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2023 Intel Corporation */
-
-#ifndef ADF_GEN4_TIMER_H_
-#define ADF_GEN4_TIMER_H_
-
-#include <linux/ktime.h>
-#include <linux/workqueue.h>
-
-struct adf_accel_dev;
-
-struct adf_timer {
- struct adf_accel_dev *accel_dev;
- struct delayed_work work_ctx;
- ktime_t initial_ktime;
-};
-
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
-
-#endif /* ADF_GEN4_TIMER_H_ */
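
The deleted GEN4 timer is the standard self-rearming delayed-work pattern: the handler requeues itself first, then computes the elapsed periods and notifies firmware. The code is presumably relocated to a generation-neutral file rather than dropped; a generic sketch of the pattern with illustrative names:

/* Generic self-rearming delayed work; names illustrative. */
static void example_tick(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* Requeue first so the period stays stable even if the body is slow. */
	schedule_delayed_work(dwork, msecs_to_jiffies(200));

	/* ...periodic duty (heartbeat, telemetry sync, ...) goes here... */
}
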
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
index a62eb5e8dbe6..adb21656a3ba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -9,6 +9,7 @@
#include <asm/errno.h>
#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pfvf.h"
@@ -358,7 +359,7 @@ static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to load regs for vf%d bank%d\n",
@@ -585,7 +586,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to save regs for vf%d bank%d\n",
@@ -593,7 +594,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
return ret;
}
- return sizeof(struct bank_state);
+ return sizeof(struct adf_bank_state);
}
static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
new file mode 100644
index 000000000000..4c0d576e8c21
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_PM_H
+#define ADF_GEN6_PM_H
+
+#include <linux/bits.h>
+#include <linux/time.h>
+
+struct adf_accel_dev;
+
+/* Power management */
+#define ADF_GEN6_PM_POLL_DELAY_US 20
+#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN6_PM_STATUS 0x50A00C
+#define ADF_GEN6_PM_INTERRUPT 0x50A028
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN6_PM_SOU BIT(18)
+
+/* cpm_pm_interrupt bitfields */
+#define ADF_GEN6_PM_DRV_ACTIVE BIT(20)
+
+#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6
+
+/* cpm_pm_status bitfields */
+#define ADF_GEN6_PM_INIT_STATE BIT(21)
+#define ADF_GEN6_PM_CPM_PM_STATE_MASK GENMASK(22, 20)
+
+/* fusectl0 bitfields */
+#define ADF_GEN6_PM_ENABLE_PM_MASK BIT(21)
+#define ADF_GEN6_PM_ENABLE_PM_IDLE_MASK BIT(22)
+#define ADF_GEN6_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23)
+
+/* cpm_pm_fw_init bitfields */
+#define ADF_GEN6_PM_IDLE_FILTER_MASK GENMASK(5, 3)
+#define ADF_GEN6_PM_IDLE_ENABLE_MASK BIT(2)
+
+/* ssm_pm_enable bitfield */
+#define ADF_GEN6_PM_SSM_PM_ENABLE_MASK BIT(0)
+
+/* ssm_pm_domain_status bitfield */
+#define ADF_GEN6_PM_DOMAIN_POWERED_UP_MASK BIT(0)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* ADF_GEN6_PM_H */
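
The GEN6 masks follow the usual bitfield conventions, so constant-mask accessors work on them directly. An illustrative decode of the PM status register, assuming pmisc comes from adf_get_pmisc_base() as elsewhere in the driver:

/* Illustrative decode; not taken from the driver. */
#include <linux/bitfield.h>

static void example_decode_pm_status(void __iomem *pmisc)
{
	u32 status = ADF_CSR_RD(pmisc, ADF_GEN6_PM_STATUS);
	u32 pm_state = FIELD_GET(ADF_GEN6_PM_CPM_PM_STATE_MASK, status);
	bool init_done = status & ADF_GEN6_PM_INIT_STATE;

	pr_info("pm_state %u, init %d\n", pm_state, init_done);
}
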
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
new file mode 100644
index 000000000000..603aefba0fdb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/string_helpers.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_gen6_pm.h"
+#include "adf_pm_dbgfs_utils.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN6_PM_##_field_##_MASK)
+
+static struct pm_status_row pm_fuse_rows[] = {
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
+};
+
+static struct pm_status_row pm_info_rows[] = {
+ PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER),
+};
+
+static struct pm_status_row pm_ssm_rows[] = {
+ PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
+ PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWERED_UP),
+};
+
+static struct pm_status_row pm_csrs_rows[] = {
+ PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
+ PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
+};
+
+static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
+
+static ssize_t adf_gen6_print_pm_status(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct icp_qat_fw_init_admin_pm_info *pm_info;
+ dma_addr_t p_state_addr;
+ u32 *pm_info_regs;
+ size_t len = 0;
+ char *pm_kv;
+ u32 val;
+ int ret;
+
+ pm_info = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ pm_kv = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_kv) {
+ kfree(pm_info);
+ return -ENOMEM;
+ }
+
+ p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr);
+ if (ret)
+ goto out_free;
+
+ /* Query power management information from QAT FW */
+ ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
+ dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ pm_info_regs = (u32 *)pm_info;
+
+ /* Fuse control register */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Fuse info ---------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
+
+ /* Power management */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Info --------------\n");
+
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: ACTIVE\n");
+
+ /* Shared Slice Module */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- SSM_PM Info ----------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
+
+ /* Control status register content */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- HW PM CSRs -----------\n");
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN6_PM_INTERRUPT);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "CPM_PM_INTERRUPT: %#x\n", val);
+ ret = simple_read_from_buffer(buf, count, pos, pm_kv, len);
+
+out_free:
+ kfree(pm_info);
+ kfree(pm_kv);
+
+ return ret;
+}
+
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->power_management.print_pm_status = adf_gen6_print_pm_status;
+ accel_dev->power_management.present = true;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_dev_pm_data);
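
adf_gen6_print_pm_status() follows the GEN4 flow: allocate a page for the firmware reply, DMA-map it FROM_DEVICE, have the admin interface fill it, then render key/value text and hand it to simple_read_from_buffer(). A hedged sketch of how a debugfs read op can forward to the installed hook; the actual wiring lives elsewhere and the names here are illustrative:

/* Sketch only; assumes the dentry's i_private holds the accel_dev. */
static ssize_t pm_status_read(struct file *f, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct adf_accel_dev *accel_dev = file_inode(f)->i_private;

	return accel_dev->power_management.print_pm_status(accel_dev, buf,
							   count, pos);
}
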
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
new file mode 100644
index 000000000000..967253082a98
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "adf_common_drv.h"
+#include "adf_gen6_ras.h"
+#include "adf_sysfs_ras_counters.h"
+
+static void enable_errsou_reporting(void __iomem *csr)
+{
+ /* Enable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0);
+
+ /* Enable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU2
+ * but disable PM interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT);
+
+ /* Enable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0);
+}
+
+static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+
+ /* Enable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask);
+
+ /* Enable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
+}
+
+static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ /* Enable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE,
+ ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK);
+}
+
+static void enable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg, mask;
+
+ /* Enable RI memory error reporting */
+ mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask);
+
+ /* Enable IOSF primary command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT);
+
+ /* Enable TI internal memory parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg);
+
+ /* Enable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK);
+
+ /*
+ * Enable error detection and reporting in TIMISCSTS
+ * while preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ reg |= ADF_GEN6_TIMISCCTL_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ /* Enable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0);
+}
+
+static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ enable_errsou_reporting(csr);
+ enable_ae_error_reporting(accel_dev, csr);
+ enable_cpp_error_reporting(accel_dev, csr);
+ enable_ti_ri_error_reporting(csr);
+ enable_ssm_error_reporting(accel_dev, csr);
+}
+
+static void disable_errsou_reporting(void __iomem *csr)
+{
+ u32 val;
+
+ /* Disable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU2 */
+ val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2);
+ val |= ADF_GEN6_ERRSOU2_DIS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val);
+
+ /* Disable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK);
+}
+
+static void disable_ae_error_reporting(void __iomem *csr)
+{
+ /* Disable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0);
+
+ /* Disable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0);
+}
+
+static void disable_cpp_error_reporting(void __iomem *csr)
+{
+ /* Disable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK);
+}
+
+static void disable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Disable RI memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0);
+
+ /* Disable IOSF primary command parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL);
+ reg &= ~ADF_GEN6_RIMISCSTS_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg);
+
+ /* Disable TI internal memory parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK);
+
+ /* Disable error handling in RI, TI CPP interface control registers */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL);
+ reg &= ~ADF_GEN6_RICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL);
+ reg &= ~ADF_GEN6_TICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg);
+
+ /*
+ * Disable error detection and reporting in TIMISCSTS
+ * while preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void disable_ssm_error_reporting(void __iomem *csr)
+{
+ /* Disable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK);
+}
+
+static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ disable_errsou_reporting(csr);
+ disable_ae_error_reporting(csr);
+ disable_cpp_error_reporting(csr);
+ disable_ti_ri_error_reporting(csr);
+ disable_ssm_error_reporting(csr);
+}
+
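+/*
+ * Most of the error handlers below follow a common pattern: read the
+ * status register, mask it down to the bits of interest, log and count
+ * the error, then write the value back to clear only the handled bits.
+ */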
+static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae, errsou;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ /* Clear interrupt from ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ae;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT))
+ return;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+ if (ae) {
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae);
+ }
+}
+
+static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 cmd_par_err;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT))
+ return;
+
+ cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG);
+ cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK;
+ if (cmd_par_err) {
+ dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n",
+ cmd_par_err);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err);
+ }
+}
+
+static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rimem_parerr_sts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT))
+ return;
+
+ rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS);
+ rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK |
+ ADF_GEN6_RIMEM_PARERR_FATAL_MASK;
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts);
+}
+
+static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_ci_par_sts;
+
+ ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS);
+ ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK;
+ if (ti_ci_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pullfub_par_sts;
+
+ ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS);
+ ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ if (ti_pullfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pushfub_par_sts;
+
+ ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS);
+ ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ if (ti_pushfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts);
+ }
+}
+
+static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_cd_par_sts;
+
+ ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS);
+ ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK;
+ if (ti_cd_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts);
+ }
+}
+
+static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_trnsb_par_sts;
+
+ ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS);
+ ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ if (ti_trnsb_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+ }
+}
+
+static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 rimiscsts;
+
+ rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS);
+ rimiscsts &= ADF_GEN6_RIMISCSTS_BIT;
+ if (rimiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n",
+ rimiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts);
+ }
+}
+
+static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return;
+
+ adf_handle_ti_ci_par_sts(accel_dev, csr);
+ adf_handle_ti_pullfub_par_sts(accel_dev, csr);
+ adf_handle_ti_pushfub_par_sts(accel_dev, csr);
+ adf_handle_ti_cd_par_sts(accel_dev, csr);
+ adf_handle_ti_trnsb_par_sts(accel_dev, csr);
+ adf_handle_iosfp_cmd_parerr(accel_dev, csr);
+}
+
+static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Command parity error detected on streaming fabric interface\n");
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_cpp_ae_unc(accel_dev, csr, errsou);
+ adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou);
+ adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+ adf_handle_ti_err(accel_dev, csr, errsou);
+ adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH);
+ reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT;
+ if (reg) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH);
+ reg &= ADF_GEN6_UERRSSMSH_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR);
+ reg &= ADF_GEN6_PPERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal push or pull data error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg);
+ }
+}
+
+static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_SCM_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_CPP_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_RF_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on RF Parity: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error for AXI unexpected tag/length: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM);
+
+ iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK;
+ if (!iastatssm)
+ return;
+
+ adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
+ adf_handle_pperr_err(accel_dev, csr, iastatssm);
+ adf_handle_scmpar_err(accel_dev, csr, iastatssm);
+ adf_handle_cpppar_err(accel_dev, csr, iastatssm);
+ adf_handle_rfpar_err(accel_dev, csr, iastatssm);
+ adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm);
+
+ ADF_CSR_WR(csr, ADF_GEN6_IAINTSTATSSM, iastatssm);
+}
+
+static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT))
+ return;
+
+ adf_handle_cerrssmsh(accel_dev, csr);
+ adf_handle_iaintstatssm(accel_dev, csr);
+}
+
+static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 reg;
+
+ if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: errors: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
+ ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);
+}
+
+static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_ssm(accel_dev, csr, errsou);
+ adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou);
+}
+
+static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 timiscsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT))
+ return;
+
+ timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS);
+ if (timiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n",
+ timiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+}
+
+static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ricppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK))
+ return;
+
+ ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS);
+ ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK;
+ if (ricppintsts) {
+ dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts);
+ }
+}
+
+static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ticppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK))
+ return;
+
+ ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS);
+ ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK;
+ if (ticppintsts) {
+ dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts);
+ }
+}
+
+static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
+ u32 atufaultstatus;
+ u32 i;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT))
+ return;
+
+ for (i = 0; i < max_rp_num; i++) {
+ atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i));
+
+ atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT;
+ if (atufaultstatus) {
+ dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i,
+ atufaultstatus);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+}
+
+static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rlterror;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
+ return;
+
+ rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
+ rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
+ if (rlterror) {
+ dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
+ }
+}
+
+static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "DEVHALT due to an error in an incoming transaction\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Error due to response failure in response to a page request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Error status for a address translation request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_timiscsts(accel_dev, csr, errsou);
+ adf_handle_ricppintsts(accel_dev, csr, errsou);
+ adf_handle_ticppintsts(accel_dev, csr, errsou);
+ adf_handle_atufaultstatus(accel_dev, csr, errsou);
+ adf_handle_rlterror(accel_dev, csr, errsou);
+ adf_handle_vflr(accel_dev, csr, errsou);
+ adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
+ adf_handle_pcie_devhalt(accel_dev, csr, errsou);
+ adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
+ adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
+ adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
+}
+
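+/*
+ * Determine whether the logged errors require a device reset. A reset is
+ * requested only when GENSTS reports the device halted with a PFLR-type
+ * reset; a cold reset condition is only logged as fatal.
+ */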
+static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ bool *reset_required)
+{
+ u8 reset, dev_state;
+ u32 gensts;
+
+ gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);
+ dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts);
+ reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts);
+ if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) {
+ *reset_required = true;
+ return;
+ }
+
+ if (reset == ADF_GEN6_GENSTS_COLD_RESET)
+ dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n");
+
+ *reset_required = false;
+}
+
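+/*
+ * Top-level RAS interrupt handler: poll each ERRSOU register in turn and
+ * run its process function when any unmasked source is pending. Returns
+ * true if at least one error source was handled; *reset_required is set
+ * according to the GENSTS state checked at the end.
+ */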
+static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ bool handled = false;
+ u32 errsou;
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK) {
+ adf_gen6_process_errsou0(accel_dev, csr);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK) {
+ adf_gen6_process_errsou1(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK) {
+ adf_gen6_process_errsou2(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK) {
+ adf_gen6_process_errsou3(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ adf_gen6_is_reset_required(accel_dev, csr, reset_required);
+
+ return handled;
+}
+
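+/*
+ * A minimal usage sketch (the exact call site lives in the per-device
+ * hw_data setup code): a GEN6 device is expected to wire these handlers
+ * via adf_gen6_init_ras_ops(&hw_data->ras_ops), after which the common
+ * layer invokes enable_ras_errors() on device start, handle_interrupt()
+ * from the error interrupt path and disable_ras_errors() on shutdown.
+ */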
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
+{
+ ras_ops->enable_ras_errors = adf_gen6_enable_ras;
+ ras_ops->disable_ras_errors = adf_gen6_disable_ras;
+ ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
new file mode 100644
index 000000000000..66ced271d173
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_RAS_H_
+#define ADF_GEN6_RAS_H_
+
+#include <linux/bits.h>
+
+struct adf_ras_ops;
+
+/* Error source registers */
+#define ADF_GEN6_ERRSOU0 0x41A200
+#define ADF_GEN6_ERRSOU1 0x41A204
+#define ADF_GEN6_ERRSOU2 0x41A208
+#define ADF_GEN6_ERRSOU3 0x41A20C
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+/* ERRSOU0 Correctable error mask */
+#define ADF_GEN6_ERRSOU0_MASK BIT(0)
+
+#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRSOU1_MASK ( \
+ (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+
+#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRMSK1_MASK ( \
+ (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT))
+
+/* HI AE Uncorrectable error log */
+#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI AE Correctable error log */
+#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent command parity error logging enable */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B
+
+/* RI Memory parity error status register */
+#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - ri_tlq_cplhdr parity error
+ * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \
+ BIT(28) | BIT(30))
+
+#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \
+ (BIT(10) | BIT(11) | BIT(12) | BIT(13))
+
+/* TI CI parity status */
+#define ADF_GEN6_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ */
+#define ADF_GEN6_TI_CI_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(3))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHUB parity status */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN6_TI_CD_PAR_STS 0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_CD_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648
+
+/* TI TRNSB parity error reporting mask */
+#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+ BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN6_RIMISCSTS 0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN6_RIMISCCTL 0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC error on CPP; the OR of the CFC push and pull errors
+ * BIT(2) - BIT(4) - CPP attention interrupts
+ * BIT(18) - PM interrupt
+ */
+#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \
+ (BIT(2) | BIT(3) | BIT(4))
+
+#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18)
+
+#define ADF_GEN6_ERRSOU2_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)
+
+#define ADF_GEN6_ERRSOU2_DIS_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK)
+
+#define ADF_GEN6_IAINTSTATSSM 0x28
+
+/* IAINTSTATSSM error bit mask definitions */
+#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0)
+#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2)
+#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4)
+#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5)
+#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6)
+#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7)
+
+#define ADF_GEN6_IAINTSTATSSM_MASK \
+ (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)
+
+#define ADF_GEN6_UERRSSMSH 0x18
+
+/*
+ * UERRSSMSH error bit mask definitions
+ *
+ * BIT(0) - Indicates one uncorrectable error in device shared memory
+ * BIT(15) - Indicates multiple uncorrectable errors in device shared memory
+ */
+#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15))
+
+/*
+ * CERRSSMSH correctable error bits
+ * BIT(0) - Indicates one correctable error; the remaining bits in the
+ * mask below cover further correctable error status in the same register
+ */
+#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24))
+#define ADF_GEN6_CERRSSMSH 0x10
+
+#define ADF_GEN6_INTMASKSSM 0x0
+
+/*
+ * Error reporting mask in INTMASKSSM
+ * BIT(0) - Shared memory uncorrectable interrupt mask
+ * BIT(2) - PPERR interrupt mask
+ * BIT(4) - SCM parity error interrupt mask
+ * BIT(5) - CPP parity error interrupt mask
+ * BIT(6) - SHRAM RF parity error interrupt mask
+ * BIT(7) - AXI unexpected completion error mask
+ */
+#define ADF_GEN6_INTMASKSSM_MASK \
+ (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7))
+
+/* CPP push or pull error */
+#define ADF_GEN6_PPERR 0x8
+
+#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1))
+
+/*
+ * SSM_FERR_STATUS error bit mask definitions
+ */
+#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5)
+#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11))
+#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16)
+
+#define ADF_GEN6_SSM_FERR_STATUS 0x9C
+
+#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04
+
+/*
+ * BIT(0) - Indicates one or more CPP CFC errors
+ * BIT(1) - Indicates multiple CPP CFC errors
+ * BIT(7) - Indicates CPP CFC command parity error type
+ * BIT(8) - Indicates CPP CFC data parity error type
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8)
+#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \
+ (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \
+ ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT)
+
+/*
+ * BIT(0) - Enables CFC to detect and log a push/pull data error
+ * BIT(1) - Enables CFC to generate interrupt to PCIEP for a CPP error
+ * BIT(4) - When 1, parity detection is disabled
+ * BIT(5) - When 1, parity detection is disabled on the CPP command bus
+ * BIT(6) - When 1, parity detection is disabled on the CPP push/pull bus
+ * BIT(9) - When 1, RF parity error detection is disabled
+ */
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \
+ (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00
+
+/*
+ * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when an error is reported on CPP
+ * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when multiple errors are reported on CPP
+ * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when attention interrupt is reported
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08
+
+/*
+ * ERRSOU3 bit masks
+ * BIT(0) - indicates error response order overflow and/or BME error
+ * BIT(1) - indicates RI push/pull error
+ * BIT(2) - indicates TI push/pull error
+ * BIT(5) - indicates TI pull parity error
+ * BIT(6) - indicates RI push parity error
+ * BIT(7) - indicates VFLR interrupt
+ * BIT(8) - indicates ring pair interrupts for ATU detected fault
+ * BIT(9) - indicates rate limiting error
+ */
+#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0)
+#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6))
+#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5))
+#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7)
+#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8)
+#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9)
+#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16)
+#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17)
+#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18)
+#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19)
+#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20)
+
+#define ADF_GEN6_ERRSOU3_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+
+#define ADF_GEN6_ERRSOU3_DIS_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+
+/* Rate limiting error log register */
+#define ADF_GEN6_RLT_ERRLOG 0x508814
+
+#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* TI misc status register */
+#define ADF_GEN6_TIMISCSTS 0x50054C
+
+/* TI misc error reporting mask */
+#define ADF_GEN6_TIMISCCTL 0x500548
+
+/*
+ * TI Misc error reporting control mask
+ * BIT(0) - Enables error detection and logging in TIMISCSTS register
+ * BIT(1) - Has effect only when SR-IOV is enabled; this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ * to start based on the command triggered from
+ * BIT(30) - Disables VFLR functionality
+ * The values of bits 1, 2 and 30 should be preserved and are not meant to
+ * be changed by the RAS code.
+ */
+#define ADF_GEN6_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN6_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface register control */
+#define ADF_GEN6_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ * on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ * on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables error detection and reporting
+ * on the RI Parity
+ * BIT(3) - value of 1 enables parity checking on CPP
+ */
+#define ADF_GEN6_RICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* TI CPP interface status register */
+#define ADF_GEN6_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ * the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ * the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ * a pull data parity error
+ */
+#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN6_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ * the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ * the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ * the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ */
+#define ADF_GEN6_TICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* ATU fault status register; one instance per ring pair at a 4-byte stride */
+#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0)
+
+/* Command parity error detected on IOSFP command to QAT */
+#define ADF_GEN6_RIMISCSTS_BIT BIT(0)
+
+#define ADF_GEN6_GENSTS 0x41A220
+#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0)
+#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2)
+#define ADF_GEN6_GENSTS_PFLR 0x1
+#define ADF_GEN6_GENSTS_COLD_RESET 0x3
+#define ADF_GEN6_GENSTS_DEVHALT 0x1
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops);
+
+#endif /* ADF_GEN6_RAS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
new file mode 100644
index 000000000000..c9b151006dca
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/export.h>
+
+#include "adf_gen4_config.h"
+#include "adf_gen4_hw_csr_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_gen4_vf_mig.h"
+#include "adf_gen6_shared.h"
+
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+struct adf_hw_csr_ops;
+
+/*
+ * QAT GEN4 and GEN6 devices often differ in terms of supported features,
+ * options and internal logic. However, some of the mechanisms and register
+ * layout are shared between those two GENs. This file serves as an abstraction
+ * layer that allows the existing GEN4 implementation to be reused for GEN6,
+ * where applicable, without additional overhead or complexity.
+ */
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ adf_gen4_init_pf_pfvf_ops(pfvf_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops);
+
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ adf_gen4_init_hw_csr_ops(csr_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
+
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ return adf_gen4_cfg_dev_init(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
+
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_comp_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config);
+
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_no_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
+
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ adf_gen4_init_vf_mig_ops(vfmig_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
new file mode 100644
index 000000000000..fc6fad029a70
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_SHARED_H_
+#define ADF_GEN6_SHARED_H_
+
+struct adf_hw_csr_ops;
+struct qat_migdev_ops;
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
+#endif /* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
new file mode 100644
index 000000000000..faa60b04c406
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Intel Corporation. */
+#include <linux/export.h>
+
+#include "adf_gen6_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_GEN6_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen6))
+
+#define ADF_GEN6_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen6))
+
+#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
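+
+/*
+ * SLICE_IDX() maps a slice name to an array index via the byte offset of
+ * its counter in struct icp_qat_fw_init_admin_slice_cnt. This assumes the
+ * *_cnt members are single-byte fields laid out consecutively, so that the
+ * offsets form a dense 0-based index into the per-slice tables below.
+ */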
+
+#define ADF_GEN6_TL_CMDQ_WAIT_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_wait_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_wait_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_exec_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_drain_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_drain_cnt, \
+ gen6))
+
+#define CPR_QUEUE_COUNT 5
+#define DCPR_QUEUE_COUNT 3
+#define PKE_QUEUE_COUNT 1
+#define WAT_QUEUE_COUNT 7
+#define WCP_QUEUE_COUNT 7
+#define UCS_QUEUE_COUNT 3
+#define ATH_QUEUE_COUNT 2
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max "get to put" latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+ /* Ring Empty average[ns] across all rings */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_cnt)),
+};
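+
+/*
+ * Note that each ADF_TL_COUNTER_LATENCY entry above pairs an accumulator
+ * register with a sample-count register; the telemetry layer is expected
+ * to derive the reported average by dividing the former by the latter.
+ */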
+
+/* Accelerator utilization counters */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator utilization. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(cnv),
+ /* Decompression accelerator utilization. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(dcprz),
+ /* PKE accelerator utilization. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication accelerator utilization. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher accelerator utilization. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS accelerator utilization. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ucs),
+ /* Authentication accelerator utilization. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Accelerator execution counters */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator execution count. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(cnv),
+ /* Decompression accelerator execution count. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(dcprz),
+ /* PKE execution count. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication accelerator execution count. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher accelerator execution count. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS accelerator execution count. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ucs),
+ /* Authentication accelerator execution count. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
+};
+
+static const struct adf_tl_dbg_counter cnv_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(cnv)
+};
+
+#define NUM_CMDQ_COUNTERS ARRAY_SIZE(cnv_cmdq_counters)
+
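+/*
+ * The remaining per-slice CMDQ tables must match the length of
+ * cnv_cmdq_counters so that all rows of the cmdq_counters[] lookup table
+ * below can be walked with the single NUM_CMDQ_COUNTERS bound; the
+ * static_asserts enforce this at build time.
+ */
+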
+static const struct adf_tl_dbg_counter dcprz_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(dcprz)
+};
+
+static_assert(ARRAY_SIZE(dcprz_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter pke_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(pke)
+};
+
+static_assert(ARRAY_SIZE(pke_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wat_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wat)
+};
+
+static_assert(ARRAY_SIZE(wat_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wcp_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wcp)
+};
+
+static_assert(ARRAY_SIZE(wcp_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ucs_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ucs)
+};
+
+static_assert(ARRAY_SIZE(ucs_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ath_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ath)
+};
+
+static_assert(ARRAY_SIZE(ath_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+/* Per-accelerator-type CMDQ counter tables. */
+static const struct adf_tl_dbg_counter *cmdq_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator CMDQ counters. */
+ [SLICE_IDX(cpr)] = cnv_cmdq_counters,
+ /* Decompression accelerator CMDQ counters. */
+ [SLICE_IDX(dcpr)] = dcprz_cmdq_counters,
+ /* PKE CMDQ counters. */
+ [SLICE_IDX(pke)] = pke_cmdq_counters,
+ /* Wireless Authentication accelerator CMDQ counters. */
+ [SLICE_IDX(wat)] = wat_cmdq_counters,
+ /* Wireless Cipher accelerator CMDQ counters. */
+ [SLICE_IDX(wcp)] = wcp_cmdq_counters,
+ /* UCS accelerator CMDQ counters. */
+ [SLICE_IDX(ucs)] = ucs_cmdq_counters,
+ /* Authentication accelerator CMDQ counters. */
+ [SLICE_IDX(ath)] = ath_cmdq_counters,
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+ /* Ring Empty average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_cnt)),
+};
+
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->cmdq_reg_sz = ADF_GEN6_TL_CMDQ_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN6_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN6_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->cmdq_counters = cmdq_counters;
+ tl_data->num_cmdq_counters = NUM_CMDQ_COUNTERS;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+
+ tl_data->multiplier.cpr_cnt = CPR_QUEUE_COUNT;
+ tl_data->multiplier.dcpr_cnt = DCPR_QUEUE_COUNT;
+ tl_data->multiplier.pke_cnt = PKE_QUEUE_COUNT;
+ tl_data->multiplier.wat_cnt = WAT_QUEUE_COUNT;
+ tl_data->multiplier.wcp_cnt = WCP_QUEUE_COUNT;
+ tl_data->multiplier.ucs_cnt = UCS_QUEUE_COUNT;
+ tl_data->multiplier.ath_cnt = ATH_QUEUE_COUNT;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
new file mode 100644
index 000000000000..49db660b8eb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2025 Intel Corporation. */
+#ifndef ADF_GEN6_TL_H
+#define ADF_GEN6_TL_H
+
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN6_CPP_NS_PER_CYCLE 2
+#define ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value is in milliseconds. */
+#define ADF_GEN6_TL_MAX_AGGR_TIME_MS 4000
+/* Number of buffers to store historic values. */
+#define ADF_GEN6_TL_NUM_HIST_BUFFS \
+ (ADF_GEN6_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
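+/*
+ * For example, assuming the common write interval
+ * ADF_TL_DATA_WR_INTERVAL_MS is 1000 ms, this works out to
+ * 4000 / 1000 = 4 history buffers.
+ */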
+
+/* Max number of HW resources of one type */
+#define ADF_GEN6_TL_MAX_SLICES_PER_TYPE 32
+#define MAX_ATH_SL_COUNT 7
+#define MAX_CNV_SL_COUNT 2
+#define MAX_DCPRZ_SL_COUNT 2
+#define MAX_PKE_SL_COUNT 32
+#define MAX_UCS_SL_COUNT 4
+#define MAX_WAT_SL_COUNT 5
+#define MAX_WCP_SL_COUNT 5
+
+#define MAX_ATH_CMDQ_COUNT 14
+#define MAX_CNV_CMDQ_COUNT 6
+#define MAX_DCPRZ_CMDQ_COUNT 6
+#define MAX_PKE_CMDQ_COUNT 32
+#define MAX_UCS_CMDQ_COUNT 12
+#define MAX_WAT_CMDQ_COUNT 35
+#define MAX_WCP_CMDQ_COUNT 35
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN6_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen6_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen6_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN6_TL_SLICE_REG_SZ sizeof(struct adf_gen6_tl_slice_data_regs)
+
+/**
+ * struct adf_gen6_tl_cmdq_data_regs - HW CMDQ data as populated by FW.
+ * @reg_tm_cmdq_wait_cnt: CMDQ wait count.
+ * @reg_tm_cmdq_exec_cnt: CMDQ execution count.
+ * @reg_tm_cmdq_drain_cnt: CMDQ drain count.
+ */
+struct adf_gen6_tl_cmdq_data_regs {
+ __u32 reg_tm_cmdq_wait_cnt;
+ __u32 reg_tm_cmdq_exec_cnt;
+ __u32 reg_tm_cmdq_drain_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_CMDQ_REG_SZ sizeof(struct adf_gen6_tl_cmdq_data_regs)
+
+/**
+ * struct adf_gen6_tl_device_data_regs - This structure stores device telemetry
+ * counter values as they are populated periodically by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: "get to put" latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_utlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cnv_slices: array of Compression slices utilization registers
+ * @dcprz_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ * @ath_cmdq: array of Authentication cmdq telemetry registers
+ * @cnv_cmdq: array of Compression cmdq telemetry registers
+ * @dcprz_cmdq: array of Decompression cmdq telemetry registers
+ * @pke_cmdq: array of PKE cmdq telemetry registers
+ * @ucs_cmdq: array of UCS cmdq telemetry registers
+ * @wat_cmdq: array of Wireless Authentication cmdq telemetry registers
+ * @wcp_cmdq: array of Wireless Cipher cmdq telemetry registers
+ */
+struct adf_gen6_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_utlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen6_tl_slice_data_regs ath_slices[MAX_ATH_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs cnv_slices[MAX_CNV_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs dcprz_slices[MAX_DCPRZ_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs pke_slices[MAX_PKE_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs ucs_slices[MAX_UCS_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wat_slices[MAX_WAT_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wcp_slices[MAX_WCP_SL_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ath_cmdq[MAX_ATH_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs cnv_cmdq[MAX_CNV_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs dcprz_cmdq[MAX_DCPRZ_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs pke_cmdq[MAX_PKE_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ucs_cmdq[MAX_UCS_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wat_cmdq[MAX_WAT_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wcp_cmdq[MAX_WCP_CMDQ_COUNT];
+};
+
+/**
+ * struct adf_gen6_tl_ring_pair_data_regs - This structure stores ring pair
+ * telemetry counter values as they are populated periodically by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen6_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN6_TL_RP_REG_SZ sizeof(struct adf_gen6_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen6_tl_layout - This structure represents the entire telemetry
+ * data snapshot: device counters plus 4 ring pairs, as populated periodically
+ * by the device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry message counter
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_layout {
+ struct adf_gen6_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen6_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN6_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_LAYOUT_SZ sizeof(struct adf_gen6_tl_layout)
+#define ADF_GEN6_TL_MSG_CNT_OFF \
+ offsetof(struct adf_gen6_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN6_TL_H */
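The number of history buffers above is just the aggregation window divided by the telemetry write interval. A minimal userspace sketch of that arithmetic, assuming a hypothetical 1000 ms value for ADF_TL_DATA_WR_INTERVAL_MS (the real constant lives in adf_telemetry.h and is not shown in this patch):

#include <stdio.h>

/* Assumed write interval; the real ADF_TL_DATA_WR_INTERVAL_MS is defined
 * in adf_telemetry.h and may differ. */
#define ADF_TL_DATA_WR_INTERVAL_MS	1000
#define ADF_GEN6_TL_MAX_AGGR_TIME_MS	4000
#define ADF_GEN6_TL_NUM_HIST_BUFFS \
	(ADF_GEN6_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)

int main(void)
{
	/* With a 4000 ms window and 1000 ms snapshots, four buffers
	 * hold the full history. */
	printf("history buffers: %d\n", ADF_GEN6_TL_NUM_HIST_BUFFS);
	return 0;
}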
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index 65bd26b25abc..f93d9cca70ce 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -90,10 +90,6 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
hw_data->get_arb_info(&info);
- /* Reset arbiter configuration */
- for (i = 0; i < ADF_ARB_NUM; i++)
- WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
-
/* Unmap worker threads to service arbiters */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f189cce7d153..46491048e0bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -404,6 +404,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev);
+ adf_misc_wq_flush();
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index cae1aee5479a..12e565613661 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -407,3 +407,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
{
return queue_delayed_work(adf_misc_wq, work, delay);
}
+
+void adf_misc_wq_flush(void)
+{
+ flush_workqueue(adf_misc_wq);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
new file mode 100644
index 000000000000..69295a9ddf0a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/sprintf.h>
+#include <linux/string_helpers.h>
+
+#include "adf_pm_dbgfs_utils.h"
+
+/*
+ * A local field_get() is needed because pm_scnprint_table() indexes the mask
+ * with a variable, so the mask is not a compile-time constant and the
+ * compile-time asserts in FIELD_GET() or u32_get_bits() cannot be satisfied.
+ */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+#define PM_INFO_MAX_KEY_LEN 21
+
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len,
+ bool lowercase)
+{
+ char key[PM_INFO_MAX_KEY_LEN];
+ int wr = 0;
+ int i;
+
+ for (i = 0; i < table_len; i++) {
+ if (lowercase)
+ string_lower(key, table[i].key);
+ else
+ string_upper(key, table[i].key);
+
+ wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
+ field_get(table[i].field_mask,
+ pm_info_regs[table[i].reg_offset]));
+ }
+
+ return wr;
+}
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, false);
+}
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, true);
+}
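A self-contained sketch of the table-driven printing above, with a plain field_get() and a two-row table. The row names and register values here are made up for illustration; the kernel uses scnprintf(), which returns the bytes actually written rather than the would-be length:

#include <stdio.h>

#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (__builtin_ffs(_mask) - 1))

struct pm_status_row {
	int reg_offset;
	unsigned int field_mask;
	const char *key;
};

int main(void)
{
	/* Two fabricated registers: bits 3:0 and 7:4 carry the fields. */
	unsigned int regs[2] = { 0x00000025, 0x000000f0 };
	const struct pm_status_row table[] = {
		{ .reg_offset = 0, .field_mask = 0x0f, .key = "pm_state" },
		{ .reg_offset = 1, .field_mask = 0xf0, .key = "pm_fuse" },
	};
	char buff[128];
	int wr = 0;
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		wr += snprintf(&buff[wr], sizeof(buff) - wr, "%s: %#x\n",
			       table[i].key,
			       field_get(table[i].field_mask,
					 regs[table[i].reg_offset]));
	fputs(buff, stdout);
	return 0;
}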
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
new file mode 100644
index 000000000000..854f058b35ed
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_PM_DBGFS_UTILS_H_
+#define ADF_PM_DBGFS_UTILS_H_
+
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_MEMBER_OFF(member) \
+ (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
+
+#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
+{ \
+ .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
+ .key = __stringify(_field_), \
+ .field_mask = _mask_, \
+}
+
+#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
+
+struct pm_status_row {
+ int reg_offset;
+ u32 field_mask;
+ const char *key;
+};
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+#endif /* ADF_PM_DBGFS_UTILS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index e782c23fc1bf..c6a54e465931 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -13,6 +13,7 @@
#include <linux/units.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
@@ -55,7 +56,7 @@ static int validate_user_input(struct adf_accel_dev *accel_dev,
}
}
- if (sla_in->srv >= ADF_SVC_NONE) {
+ if (sla_in->srv >= SVC_BASE_COUNT) {
dev_notice(&GET_DEV(accel_dev),
"Wrong service type\n");
return -EINVAL;
@@ -168,20 +169,6 @@ static struct rl_sla *find_parent(struct adf_rl *rl_data,
return NULL;
}
-static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
-{
- switch (rl_srv) {
- case ADF_SVC_ASYM:
- return ASYM;
- case ADF_SVC_SYM:
- return SYM;
- case ADF_SVC_DC:
- return COMP;
- default:
- return UNUSED;
- }
-}
-
/**
* adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
@@ -209,22 +196,6 @@ u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
}
}
-static bool is_service_enabled(struct adf_accel_dev *accel_dev,
- enum adf_base_services rl_srv)
-{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- u8 rps_per_bundle = hw_data->num_banks_per_vf;
- int i;
-
- for (i = 0; i < rps_per_bundle; i++) {
- if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
- return true;
- }
-
- return false;
-}
-
/**
* prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
* @accel_dev: pointer to acceleration device structure
@@ -243,7 +214,7 @@ static bool is_service_enabled(struct adf_accel_dev *accel_dev,
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
const unsigned long rp_mask)
{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(sla->srv);
u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
@@ -558,21 +529,9 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
if (!sla_val)
return 0;
+ /* Handle generation-specific slice count adjustment */
avail_slice_cycles = hw_data->clock_frequency;
-
- switch (svc_type) {
- case ADF_SVC_ASYM:
- avail_slice_cycles *= device_data->slices.pke_cnt;
- break;
- case ADF_SVC_SYM:
- avail_slice_cycles *= device_data->slices.cph_cnt;
- break;
- case ADF_SVC_DC:
- avail_slice_cycles *= device_data->slices.dcpr_cnt;
- break;
- default:
- break;
- }
+ avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type);
do_div(avail_slice_cycles, device_data->scan_interval);
allocated_tokens = avail_slice_cycles * sla_val;
@@ -581,6 +540,17 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
return allocated_tokens;
}
+static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ if (svc >= SVC_BASE_COUNT)
+ return 0;
+
+ return device_data->svc_ae_mask[svc];
+}
+
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type)
{
@@ -592,7 +562,7 @@ u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
return 0;
avail_ae_cycles = hw_data->clock_frequency;
- avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
+ avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type);
do_div(avail_ae_cycles, device_data->scan_interval);
sla_val *= device_data->max_tp[svc_type];
@@ -617,9 +587,8 @@ u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
sla_to_bytes *= device_data->max_tp[svc_type];
do_div(sla_to_bytes, device_data->scale_ref);
- sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
- BYTES_PER_MBIT;
- if (svc_type == ADF_SVC_DC && is_bw_out)
+ sla_to_bytes *= (svc_type == SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : BYTES_PER_MBIT;
+ if (svc_type == SVC_DC && is_bw_out)
sla_to_bytes *= device_data->slices.dcpr_cnt -
device_data->dcpr_correction;
@@ -660,7 +629,7 @@ static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
}
*sla_out = sla;
- if (!is_service_enabled(accel_dev, sla_in->srv)) {
+ if (!adf_is_service_enabled(accel_dev, sla_in->srv)) {
dev_notice(&GET_DEV(accel_dev),
"Provided service is not enabled\n");
ret = -EINVAL;
@@ -730,8 +699,8 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
sla_in.type = RL_ROOT;
sla_in.parent_id = RL_PARENT_DEFAULT_ID;
- for (i = 0; i < ADF_SVC_NONE; i++) {
- if (!is_service_enabled(accel_dev, i))
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
+ if (!adf_is_service_enabled(accel_dev, i))
continue;
sla_in.cir = device_data->scale_ref;
@@ -745,10 +714,9 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
/* Init default cluster for each root */
sla_in.type = RL_CLUSTER;
- for (i = 0; i < ADF_SVC_NONE; i++) {
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
if (!rl_data->root[i])
continue;
-
sla_in.cir = rl_data->root[i]->cir;
sla_in.pir = sla_in.cir;
sla_in.srv = rl_data->root[i]->srv;
@@ -987,7 +955,7 @@ int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
struct rl_sla *sla = NULL;
int i;
- if (srv >= ADF_SVC_NONE)
+ if (srv >= SVC_BASE_COUNT)
return -EINVAL;
if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
@@ -1086,9 +1054,9 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
int ret = 0;
/* Validate device parameters */
- if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
+ if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_ASYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_SYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_DC]) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
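The slice-token math in adf_rl_calculate_slice_tokens() reduces to: cycles available to a service = clock frequency times the per-service slice count, divided by the scan interval, then scaled by the SLA value. A hedged userspace sketch with invented numbers (the real clock_frequency, slice count, and scan_interval come from device data not shown here, and the final scaling by scale_ref is elided in this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All values are illustrative, not real device data. */
	uint64_t clock_hz = 500000000ULL;   /* 500 MHz */
	uint64_t svc_slices = 4;            /* hw_data->get_svc_slice_cnt() */
	uint64_t scan_interval = 10;        /* device_data->scan_interval */
	uint64_t sla_val = 2;

	uint64_t avail_slice_cycles = clock_hz * svc_slices / scan_interval;
	uint64_t allocated_tokens = avail_slice_cycles * sla_val;

	printf("tokens per scan interval: %llu\n",
	       (unsigned long long)allocated_tokens);
	return 0;
}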
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index bfe750ea0e83..c1f3f9a51195 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -7,6 +7,8 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "adf_cfg_services.h"
+
struct adf_accel_dev;
#define RL_ROOT_MAX 4
@@ -24,13 +26,6 @@ enum rl_node_type {
RL_LEAF,
};
-enum adf_base_services {
- ADF_SVC_ASYM = 0,
- ADF_SVC_SYM,
- ADF_SVC_DC,
- ADF_SVC_NONE,
-};
-
/**
* struct adf_rl_sla_input_data - ratelimiting user input data structure
* @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA.
@@ -73,6 +68,7 @@ struct rl_slice_cnt {
u8 dcpr_cnt;
u8 pke_cnt;
u8 cph_cnt;
+ u8 cpr_cnt;
};
struct adf_rl_interface_data {
@@ -94,6 +90,7 @@ struct adf_rl_hw_data {
u32 pcie_scale_div;
u32 dcpr_correction;
u32 max_tp[RL_ROOT_MAX];
+ u32 svc_ae_mask[SVC_BASE_COUNT];
struct rl_slice_cnt slices;
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
index 698a14f4ce66..4a3e0591fdba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
@@ -63,6 +63,7 @@ int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
slices_int->pke_cnt = slices_resp.pke_cnt;
/* For symmetric crypto, slice tokens are relative to the UCS slice */
slices_int->cph_cnt = slices_resp.ucs_cnt;
+ slices_int->cpr_cnt = slices_resp.cpr_cnt;
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index c75d0b6cb0ad..31d1ef0cb1f5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
dev_warn(&GET_DEV(accel_dev),
"IOMMU should be enabled for SR-IOV to work correctly\n");
- return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 4fcd61ff70d1..79c63dfa8ff3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
+#include <linux/string_choices.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_services.h"
@@ -19,14 +20,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adf_accel_dev *accel_dev;
- char *state;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
- state = adf_dev_started(accel_dev) ? "up" : "down";
- return sysfs_emit(buf, "%s\n", state);
+ return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev)));
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
@@ -117,25 +116,27 @@ static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
struct adf_hw_device_data *hw_data;
struct adf_accel_dev *accel_dev;
int ret;
- ret = sysfs_match_string(adf_cfg_services, buf);
- if (ret < 0)
- return ret;
-
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
+ ret = adf_parse_service_string(accel_dev, buf, count, services,
+ ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ if (ret)
+ return ret;
+
if (adf_dev_started(accel_dev)) {
dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
accel_dev->accel_id);
return -EINVAL;
}
- ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
+ ret = adf_sysfs_update_dev_config(accel_dev, services);
if (ret < 0)
return ret;
@@ -207,16 +208,13 @@ static DEVICE_ATTR_RW(pm_idle_enabled);
static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- char *auto_reset;
struct adf_accel_dev *accel_dev;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
- auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
-
- return sysfs_emit(buf, "%s\n", auto_reset);
+ return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error));
}
static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
@@ -271,6 +269,8 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
case ASYM:
return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
+ case DECOMP:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_DECOMP);
default:
break;
}
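str_up_down() and str_on_off() from <linux/string_choices.h> replace the open-coded ternaries by mapping a boolean to a fixed string. A userspace stand-in, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for the <linux/string_choices.h> helpers. */
static inline const char *str_up_down(bool v) { return v ? "up" : "down"; }
static inline const char *str_on_off(bool v)  { return v ? "on" : "off"; }

int main(void)
{
	bool started = true, autoreset = false;

	printf("state: %s\n", str_up_down(started));
	printf("auto_reset: %s\n", str_on_off(autoreset));
	return 0;
}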
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index bedb514d4e30..f31556beed8b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -32,9 +32,10 @@ enum rl_params {
};
static const char *const rl_services[] = {
- [ADF_SVC_ASYM] = "asym",
- [ADF_SVC_SYM] = "sym",
- [ADF_SVC_DC] = "dc",
+ [SVC_ASYM] = "asym",
+ [SVC_SYM] = "sym",
+ [SVC_DC] = "dc",
+ [SVC_DECOMP] = "decomp",
};
static const char *const rl_operations[] = {
@@ -282,7 +283,7 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (get == ADF_SVC_NONE)
+ if (get == SVC_BASE_COUNT)
return -EINVAL;
return sysfs_emit(buf, "%s\n", rl_services[get]);
@@ -291,14 +292,22 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
static ssize_t srv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct adf_accel_dev *accel_dev;
unsigned int val;
int ret;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
val = ret;
+ if (!adf_is_service_enabled(accel_dev, val))
+ return -EINVAL;
+
ret = set_param_u(dev, SRV, val);
if (ret)
return ret;
@@ -439,8 +448,8 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to create qat_rl attribute group\n");
- data->cap_rem_srv = ADF_SVC_NONE;
- data->input.srv = ADF_SVC_NONE;
+ data->cap_rem_srv = SVC_BASE_COUNT;
+ data->input.srv = SVC_BASE_COUNT;
data->sysfs_added = true;
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 74fb0c2ed241..b64142db1f0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -212,6 +212,23 @@ int adf_tl_halt(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
+ struct adf_tl_hw_data *tl_data)
+{
+ struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
+
+ slice_cnt = &accel_dev->telemetry->slice_cnt;
+ cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
+
+ cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
+ cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
+ cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
+ cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
+ cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
+ cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
+ cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
+}
+
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -235,6 +252,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ adf_set_cmdq_cnt(accel_dev, tl_data);
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
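adf_set_cmdq_cnt() derives the number of monitored command queues per slice type by multiplying the firmware-reported slice count by a per-generation queues-per-slice multiplier. A toy sketch of the same fan-out, with invented counts:

#include <stdio.h>

struct slice_cnt {
	unsigned int cpr_cnt, dcpr_cnt, pke_cnt;
};

int main(void)
{
	/* Illustrative values: firmware-reported slice counts and the
	 * per-generation queues-per-slice multipliers. */
	struct slice_cnt slices = { .cpr_cnt = 2, .dcpr_cnt = 3, .pke_cnt = 8 };
	struct slice_cnt mult   = { .cpr_cnt = 3, .dcpr_cnt = 2, .pke_cnt = 1 };
	struct slice_cnt cmdq;

	cmdq.cpr_cnt  = slices.cpr_cnt  * mult.cpr_cnt;
	cmdq.dcpr_cnt = slices.dcpr_cnt * mult.dcpr_cnt;
	cmdq.pke_cnt  = slices.pke_cnt  * mult.pke_cnt;

	printf("cpr cmdqs=%u dcpr cmdqs=%u pke cmdqs=%u\n",
	       cmdq.cpr_cnt, cmdq.dcpr_cnt, cmdq.pke_cnt);
	return 0;
}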
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index e54a406cc1b4..02d75c3c214a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -28,19 +28,23 @@ struct dentry;
struct adf_tl_hw_data {
size_t layout_sz;
size_t slice_reg_sz;
+ size_t cmdq_reg_sz;
size_t rp_reg_sz;
size_t msg_cnt_off;
const struct adf_tl_dbg_counter *dev_counters;
const struct adf_tl_dbg_counter *sl_util_counters;
const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter **cmdq_counters;
const struct adf_tl_dbg_counter *rp_counters;
u8 num_hbuff;
u8 cpp_ns_per_cycle;
u8 bw_units_to_bytes;
u8 num_dev_counters;
u8 num_rp_counters;
+ u8 num_cmdq_counters;
u8 max_rp;
u8 max_sl_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt multiplier;
};
struct adf_telemetry {
@@ -69,6 +73,7 @@ struct adf_telemetry {
struct mutex wr_lock;
struct delayed_work work_ctx;
struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt cmdq_cnt;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/crypto/intel/qat/qat_common/adf_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c
new file mode 100644
index 000000000000..8962a49f145a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "adf_admin.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_timer.h"
+
+#define ADF_DEFAULT_TIMER_PERIOD_MS 200
+
+/* This periodic update is used to trigger HB, RL & TL fw events */
+static void work_handler(struct work_struct *work)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_timer *timer_ctx;
+ u32 time_periods;
+
+ timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
+ accel_dev = timer_ctx->accel_dev;
+
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
+
+ time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
+ ADF_DEFAULT_TIMER_PERIOD_MS);
+
+ if (adf_send_admin_tim_sync(accel_dev, time_periods))
+ dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+int adf_timer_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx;
+
+ timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+ if (!timer_ctx)
+ return -ENOMEM;
+
+ timer_ctx->accel_dev = accel_dev;
+ accel_dev->timer = timer_ctx;
+ timer_ctx->initial_ktime = ktime_get_real();
+
+ INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_timer_start);
+
+void adf_timer_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx = accel_dev->timer;
+
+ if (!timer_ctx)
+ return;
+
+ cancel_delayed_work_sync(&timer_ctx->work_ctx);
+
+ kfree(timer_ctx);
+ accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h
new file mode 100644
index 000000000000..68e5136d6ba1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_TIMER_H_
+#define ADF_TIMER_H_
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+struct adf_accel_dev;
+
+struct adf_timer {
+ struct adf_accel_dev *accel_dev;
+ struct delayed_work work_ctx;
+ ktime_t initial_ktime;
+};
+
+int adf_timer_start(struct adf_accel_dev *accel_dev);
+void adf_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_TIMER_H_ */
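The work handler re-queues itself every 200 ms and reports to firmware how many whole periods have elapsed since the timer was armed, so a delayed or coalesced run still yields the right count. A userspace sketch of that bookkeeping, with millisecond timestamps standing in for ktime:

#include <stdint.h>
#include <stdio.h>

#define TIMER_PERIOD_MS 200

int main(void)
{
	uint64_t initial_ms = 10000;  /* timestamp when the timer started */
	uint64_t now_ms = 10930;      /* current timestamp */

	/* 930 ms elapsed -> 4 whole 200 ms periods; partial periods
	 * are deliberately truncated, as div_u64() does in the driver. */
	uint64_t time_periods = (now_ms - initial_ms) / TIMER_PERIOD_MS;

	printf("periods elapsed: %llu\n", (unsigned long long)time_periods);
	return 0;
}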
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index c8241f5a0a26..b81f70576683 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -339,6 +339,48 @@ static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
return 0;
}
+static int tl_print_cmdq_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id, u8 counter)
+{
+ size_t cmdq_regs_sz = GET_TL_DATA(telemetry->accel_dev).cmdq_reg_sz;
+ size_t offset_inc = cnt_id * cmdq_regs_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ slice_ctr = *(ctr + counter);
+ slice_ctr.offset1 += offset_inc;
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", slice_ctr.name, cnt_id);
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_cmdq_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type,
+ u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter **cmdq_tl_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ u8 counter;
+ int ret;
+
+ cmdq_tl_counters = tl_data->cmdq_counters;
+ ctr = cmdq_tl_counters[cnt_type];
+
+ for (counter = 0; counter < tl_data->num_cmdq_counters; counter++) {
+ ret = tl_print_cmdq_counter(telemetry, ctr, s, cnt_id, counter);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
@@ -352,6 +394,7 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
struct adf_telemetry *telemetry = accel_dev->telemetry;
const struct adf_tl_dbg_counter *dev_tl_counters;
u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *cmdq_cnt = (u8 *)&telemetry->cmdq_cnt;
u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
const struct adf_tl_dbg_counter *ctr;
unsigned int i;
@@ -387,6 +430,15 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
}
}
+ /* Print per command queue telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < cmdq_cnt[i]; j++) {
+ ret = tl_calc_and_print_cmdq_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -473,22 +525,6 @@ unlock_and_exit:
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
-static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
-{
- char alpha;
- u8 index;
- int ret;
-
- ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
- if (ret != 1)
- return -EINVAL;
-
- index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
- *rp_id = index;
-
- return 0;
-}
-
static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
unsigned int new_rp_num,
unsigned int rp_regs_index)
@@ -554,6 +590,9 @@ static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
case ASYM:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
break;
+ case DECOMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DECOMP);
+ break;
default:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
break;
@@ -611,18 +650,11 @@ static int tl_rp_data_show(struct seq_file *s, void *unused)
{
struct adf_accel_dev *accel_dev = s->private;
u8 rp_regs_index;
- u8 max_rp;
- int ret;
if (!accel_dev)
return -EINVAL;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
- ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- return ret;
- }
+ rp_regs_index = debugfs_get_aux_num(s->file);
return tl_print_rp_data(accel_dev, s, rp_regs_index);
}
@@ -635,7 +667,6 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
struct adf_telemetry *telemetry;
unsigned int new_rp_num;
u8 rp_regs_index;
- u8 max_rp;
int ret;
accel_dev = seq_f->private;
@@ -643,15 +674,10 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
return -EINVAL;
telemetry = accel_dev->telemetry;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
mutex_lock(&telemetry->wr_lock);
- ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- goto unlock_and_exit;
- }
+ rp_regs_index = debugfs_get_aux_num(file);
ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
if (ret)
@@ -689,7 +715,8 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
for (i = 0; i < max_rp; i++) {
snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
ADF_TL_DBG_RP_ALPHA_INDEX(i));
- debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
+ &tl_rp_data_fops);
}
}
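The debugfs change above drops the sscanf() of the file name and instead stores the ring-pair index with the file at creation time (debugfs_create_file_aux_num()) and reads it back in the handlers (debugfs_get_aux_num()). A userspace sketch of the same idea, assuming nothing about the debugfs API beyond what the patch shows:

#include <stdio.h>

/* Stand-in for a debugfs file that carries an auxiliary number. */
struct dbg_file {
	const char *name;
	int aux_num;
};

/* Creation time: the index is stored, not encoded only in the name. */
static struct dbg_file create_rp_file(int i)
{
	static const char *names[] = { "rp_A_data", "rp_B_data",
				       "rp_C_data", "rp_D_data" };
	struct dbg_file f = { .name = names[i], .aux_num = i };
	return f;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct dbg_file f = create_rp_file(i);
		/* Handler side: no name parsing, just read the number. */
		printf("%s -> rp index %d\n", f.name, f.aux_num);
	}
	return 0;
}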
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
index 11cc9eae19b3..97c5eeaa1b17 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -17,6 +17,7 @@ struct adf_accel_dev;
#define LAT_ACC_NAME "gp_lat_acc_avg"
#define BW_IN_NAME "bw_in"
#define BW_OUT_NAME "bw_out"
+#define RE_ACC_NAME "re_acc_avg"
#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
@@ -43,6 +44,10 @@ struct adf_accel_dev;
(ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+#define ADF_TL_CMDQ_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_cmdq[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_cmdq_data_regs, reg))
+
#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
(ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b5..6c22bc9b28e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -10,16 +10,21 @@
static DEFINE_MUTEX(ring_read_lock);
static DEFINE_MUTEX(bank_read_lock);
+#define ADF_RING_NUM_MSGS(ring) \
+ (ADF_SIZE_TO_RING_SIZE_IN_BYTES((ring)->ring_size) / \
+ ADF_MSG_SIZE_TO_BYTES((ring)->msg_size))
+
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
mutex_lock(&ring_read_lock);
- if (*pos == 0)
+ if (val == 0)
return SEQ_START_TOKEN;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
return ring->base_addr +
@@ -29,13 +34,15 @@ static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
+
+ (*pos)++;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
- return ring->base_addr +
- (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+ return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * val);
}
static int adf_ring_show(struct seq_file *sfile, void *v)
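The adf_ring_next() rewrite snapshots *pos before incrementing it, so the bounds check and the returned element use the same index; the old code bounds-checked one value and indexed with another. A minimal userspace model of the corrected iterator over a fixed-size message array:

#include <stdio.h>

#define NUM_MSGS 4

static int msgs[NUM_MSGS] = { 10, 20, 30, 40 };

static int *ring_next(long *pos)
{
	long val = *pos;     /* snapshot first */

	(*pos)++;            /* then advance the cursor */
	if (val >= NUM_MSGS) /* check against the snapshot, not *pos */
		return NULL;
	return &msgs[val];
}

int main(void)
{
	long pos = 0;
	int *m;

	while ((m = ring_next(&pos)))
		printf("msg[%ld] = %d\n", pos - 1, *m);
	return 0;
}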
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index a03d43fef2b3..81969c515a17 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -16,8 +16,8 @@ enum icp_qat_fw_comp_20_cmd_id {
ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
- ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
- ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_7 = 7,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_8 = 8,
ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
@@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id {
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF
#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
ret_uncomp, secure_ram) \
@@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
cnvdfx, crc, xxhash_acc, \
cnv_error_type, append_crc, \
- drop_data) \
+ drop_data, partial_decomp) \
((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
ICP_QAT_FW_COMP_SOP_BITPOS) | \
(((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
@@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params {
(((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
<< ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
(((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
- << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \
+ (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \
+ << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS))
#define ICP_QAT_FW_COMP_NOT_SOP 0
#define ICP_QAT_FW_COMP_SOP 1
@@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
#define ICP_QAT_FW_COMP_DROP_DATA 1
#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1
+#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0
#define ICP_QAT_FW_COMP_SOP_BITPOS 0
#define ICP_QAT_FW_COMP_SOP_MASK 0x1
#define ICP_QAT_FW_COMP_EOP_BITPOS 1
@@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1
#define ICP_QAT_FW_COMP_SOP_GET(flags) \
QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
@@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req {
union {
struct icp_qat_fw_xlt_req_params xlt_pars;
__u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 partial_decompress_length;
+ __u32 partial_decompress_offset;
+ } partial_decompress;
} u1;
- __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ union {
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 asb_value;
+ __u32 reserved;
+ } asb_threshold;
+ } u3;
struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
union {
struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
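The new partial_decomp flag occupies bit 27 of the request parameter flags. A plain-C sketch of packing and unpacking that bit with the same bitpos/mask convention the header uses (the mask is applied before the shift); the macro names here are local to the sketch:

#include <stdio.h>

#define COMP_PARTIAL_DECOMP_BITPOS 27
#define COMP_PARTIAL_DECOMP_MASK   0x1

#define FLAG_BUILD(partial_decomp) \
	(((partial_decomp) & COMP_PARTIAL_DECOMP_MASK) << \
	 COMP_PARTIAL_DECOMP_BITPOS)

#define FLAG_GET(flags) \
	(((flags) >> COMP_PARTIAL_DECOMP_BITPOS) & COMP_PARTIAL_DECOMP_MASK)

int main(void)
{
	unsigned int flags = FLAG_BUILD(1);

	printf("flags = %#x, partial_decomp = %u\n", flags, FLAG_GET(flags));
	return 0;
}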
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..6887930c7995 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info {
u32 wakeup_event_val;
bool fw_auth;
bool css_3k;
+ bool dual_sign;
bool tgroup_share_ustore;
u32 fcu_ctl_csr;
u32 fcu_sts_csr;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
new file mode 100644
index 000000000000..dce639152345
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_H_
+#define ICP_QAT_HW_51_COMP_H_
+
+#include <linux/types.h>
+
+#include "icp_qat_fw.h"
+#include "icp_qat_hw_51_comp_defs.h"
+
+struct icp_qat_hw_comp_51_config_csr_lower {
+ enum icp_qat_hw_comp_51_abd abd;
+ enum icp_qat_hw_comp_51_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_51_search_depth sd;
+ enum icp_qat_hw_comp_51_min_match_control mmctrl;
+ enum icp_qat_hw_comp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.abd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_comp_51_config_csr_upper {
+ enum icp_qat_hw_comp_51_dmm_algorithm edmm;
+ enum icp_qat_hw_comp_51_bms bms;
+ enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK);
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_lower {
+ enum icp_qat_hw_decomp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_upper {
+ enum icp_qat_hw_decomp_51_bms bms;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK);
+
+ return val32;
+}
+
+#endif /* ICP_QAT_HW_51_COMP_H_ */
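The builder helpers above assemble a 32-bit CSR from enum-typed fields via QAT_FIELD_SET(). A self-contained sketch with a plain field_set() of the assumed clear-then-set semantics (the real macro lives in icp_qat_fw.h) and two of the lower-CSR fields:

#include <stdio.h>

/* Assumed semantics of QAT_FIELD_SET(): clear the field, then OR in
 * the new value. */
#define field_set(val, field, bitpos, mask) \
	((val) = ((val) & ~((mask) << (bitpos))) | \
		 (((field) & (mask)) << (bitpos)))

#define ABD_BITPOS		13
#define ABD_MASK		0x1
#define SEARCH_DEPTH_BITPOS	8
#define SEARCH_DEPTH_MASK	0xf

int main(void)
{
	unsigned int lower = 0;

	field_set(lower, 1, ABD_BITPOS, ABD_MASK);	/* ABD disabled */
	field_set(lower, 0x3, SEARCH_DEPTH_BITPOS,
		  SEARCH_DEPTH_MASK);			/* search depth level 6 */

	printf("config csr lower = %#x\n", lower);	/* prints 0x2300 */
	return 0;
}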
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
new file mode 100644
index 000000000000..e745688c5da4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_DEFS_H_
+#define ICP_QAT_HW_51_COMP_DEFS_H_
+
+#include <linux/bits.h>
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_som_control {
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_bypass_compression {
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_dmm_algorithm {
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_token_fusion_internal_only {
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_bms {
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0,
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_cnv_disable {
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_asb_disable {
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_rep_off_enc_internal_only {
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_prog_block_drop_internal_only {
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_override_internal_only {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_hbs {
+ ICP_QAT_HW_COMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_COMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_HBS_32KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_abd {
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lllbd_ctrl {
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0)
+enum icp_qat_hw_comp_51_search_depth {
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_format {
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3,
+ ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_min_match_control {
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_collision {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_update {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_byte_skip {
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lz4_block_checksum {
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_discard_data {
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0,
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_decomp_51_bms {
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_hbs {
+ ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_HBS_32KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_format {
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3,
+ ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_lz4_block_checksum {
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+
+#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */
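[Annotation] For orientation, each CSR field above is described by a BITPOS/MASK pair: the GENMASK() value is the field width anchored at bit 0, and BITPOS is the shift into the config word. A minimal sketch of packing a value into such a field follows; the qat_csr_set_field() helper is illustrative, not part of the driver:

#include <linux/bits.h>

/* Illustrative helper: clear a CSR field, then insert a new value. */
static inline u32 qat_csr_set_field(u32 csr, u32 val, u32 mask, u32 bitpos)
{
	csr &= ~(mask << bitpos);
	csr |= (val & mask) << bitpos;
	return csr;
}

/* Example: select the 64KB block-match size in a decompression CSR. */
u32 csr = qat_csr_set_field(0, ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB,
			    ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK,
			    ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS);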
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index e28241bdd0f4..6313c35eff0c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,6 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000
#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
@@ -43,7 +44,6 @@
#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
-#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
#define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */
#define DSS_FWSK_EXPONENT_LEN 4
@@ -75,13 +75,6 @@
DSS_SIGNATURE_LEN : \
CSS_SIGNATURE_LEN)
-#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \
- ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \
- ICP_QAT_SIMG_AE_INSTS_LEN)
-#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
- ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
- ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
- ICP_QAT_CSS_AE_IMG_LEN)
#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
@@ -89,6 +82,21 @@
#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
+/* All lengths below are in bytes */
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16
+#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540
+#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64
+#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692
+#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696
+#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16
+#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7
+#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14
+#define ICP_QAT_DUALSIGN_HDR_LEN 0x375
+#define ICP_QAT_DUALSIGN_HDR_VER 0x40001
+#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4
+#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8
+
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
@@ -404,8 +412,6 @@ struct icp_qat_suof_img_hdr {
char *simg_buf;
unsigned long simg_len;
char *css_header;
- char *css_key;
- char *css_signature;
char *css_simg;
unsigned long simg_size;
unsigned int ae_num;
@@ -450,6 +456,13 @@ struct icp_qat_fw_auth_desc {
unsigned int img_ae_init_data_low;
unsigned int img_ae_insts_high;
unsigned int img_ae_insts_low;
+ unsigned int cpp_mask;
+ unsigned int reserved;
+ unsigned int xmss_pubkey_high;
+ unsigned int xmss_pubkey_low;
+ unsigned int xmss_sig_high;
+ unsigned int xmss_sig_low;
+ unsigned int reserved2[2];
};
struct icp_qat_auth_chunk {
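[Annotation] The growth of icp_qat_fw_auth_desc (cpp_mask plus XMSS public-key and signature addresses) pairs with the new ICP_QAT_DUALSIGN_* constants above. Assuming the opaque header stores its length and version as little-endian 32-bit words at the documented offsets (an inference from HDR_LEN_OFFSET/HDR_VER_OFFSET, not a stated format), a validation sketch might look like:

#include <linux/unaligned.h>	/* get_unaligned_le32() */

/* Hypothetical check of the dual-sign opaque header; offsets and
 * endianness are assumptions drawn from the constants above. */
static bool dualsign_opaque_hdr_ok(const u8 *img)
{
	u32 len = get_unaligned_le32(img + ICP_QAT_DUALSIGN_HDR_LEN_OFFSET);
	u32 ver = get_unaligned_le32(img + ICP_QAT_DUALSIGN_HDR_VER_OFFSET);

	return len == ICP_QAT_DUALSIGN_HDR_LEN &&
	       ver == ICP_QAT_DUALSIGN_HDR_VER;
}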
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..7f638a62e3ad 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -9,8 +9,6 @@
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -68,16 +66,10 @@ struct qat_alg_aead_ctx {
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
- struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ unsigned int hash_digestsize;
+ unsigned int hash_blocksize;
struct qat_crypto_instance *inst;
- union {
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
- };
- char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
- char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
@@ -94,125 +86,57 @@ struct qat_alg_skcipher_ctx {
int mode;
};
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- return -EFAULT;
- }
-}
-
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
- SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- __be32 *hash_state_out;
- __be64 *hash512_state_out;
- int i, offset;
-
- memset(ctx->ipad, 0, block_size);
- memset(ctx->opad, 0, block_size);
- shash->tfm = ctx->hash_tfm;
-
- if (auth_keylen > block_size) {
- int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ctx->ipad);
- if (ret)
- return ret;
-
- memcpy(ctx->opad, ctx->ipad, digest_size);
- } else {
- memcpy(ctx->ipad, auth_key, auth_keylen);
- memcpy(ctx->opad, auth_key, auth_keylen);
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1: {
+ struct hmac_sha1_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ round_up(sizeof(key.istate.h), 8));
+
+ hmac_sha1_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ctx->ipad + i;
- char *opad_ptr = ctx->opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256: {
+ struct hmac_sha256_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha256_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->ipad, block_size))
- return -EFAULT;
-
- hash_state_out = (__be32 *)hash->sha.state1;
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
- default:
- return -EFAULT;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512: {
+ struct hmac_sha512_key key;
+ __be64 *istate = (__be64 *)hash->sha.state1;
+ __be64 *ostate = (__be64 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha512_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be64(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->opad, block_size))
- return -EFAULT;
-
- offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
- if (offset < 0)
- return -EFAULT;
-
- hash_state_out = (__be32 *)(hash->sha.state1 + offset);
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
default:
return -EFAULT;
}
- memzero_explicit(ctx->ipad, block_size);
- memzero_explicit(ctx->opad, block_size);
- return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
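[Annotation] The rewrite above drops the allocated shash transform and the two-pass export dance: hmac_{sha1,sha256,sha512}_preparekey() computes the inner and outer hash states directly, and the driver only byte-swaps them into the hardware setup block. Conceptually, what preparekey produces is the classic HMAC precomputation. A sketch for the SHA-256 case, with sha256_compress_one_block() standing in as a hypothetical name for the library's internal single-block compression step:

#include <crypto/hmac.h>	/* HMAC_IPAD_VALUE / HMAC_OPAD_VALUE */
#include <crypto/sha2.h>	/* sha256(), SHA256_BLOCK_SIZE */
#include <linux/string.h>

/* Conceptual equivalent of hmac_sha256_preparekey(), not its source. */
static void hmac_precompute(const u8 *key, size_t keylen,
			    u32 istate[8], u32 ostate[8])
{
	u8 kbuf[SHA256_BLOCK_SIZE] = {};
	u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
	int i;

	if (keylen > SHA256_BLOCK_SIZE)
		sha256(key, keylen, kbuf);	/* K' = H(K), zero-padded */
	else
		memcpy(kbuf, key, keylen);

	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] = kbuf[i] ^ HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] = kbuf[i] ^ HMAC_OPAD_VALUE;	/* 0x5c */
	}
	/* One compression round per pad yields the reusable states
	 * that the driver copies into hash->sha.state1. */
	sha256_compress_one_block(ipad, istate);	/* hypothetical */
	sha256_compress_one_block(opad, ostate);	/* hypothetical */
}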
@@ -259,7 +183,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -326,7 +250,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ roundup(ctx->hash_digestsize, 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -346,7 +270,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -368,7 +292,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ roundup(ctx->hash_digestsize, 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
@@ -1150,32 +1074,35 @@ static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
- enum icp_qat_hw_auth_algo hash,
- const char *hash_name)
+ enum icp_qat_hw_auth_algo hash_alg,
+ unsigned int hash_digestsize,
+ unsigned int hash_blocksize)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(ctx->hash_tfm))
- return PTR_ERR(ctx->hash_tfm);
- ctx->qat_hash_alg = hash;
+ ctx->qat_hash_alg = hash_alg;
+ ctx->hash_digestsize = hash_digestsize;
+ ctx->hash_blocksize = hash_blocksize;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
+ SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
@@ -1184,8 +1111,6 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- crypto_free_shash(ctx->hash_tfm);
-
if (!inst)
return;
@@ -1277,7 +1202,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1294,7 +1219,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1311,7 +1236,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1329,7 +1254,7 @@ static struct aead_alg qat_aeads[] = { {
static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1347,7 +1272,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1365,7 +1290,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
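[Annotation] The cra_priority drop from 4001 to 100 demotes these algorithms below the usual software implementations, so a plain lookup by algorithm name no longer selects the QAT offload by default. Callers who still want it can name the driver explicitly; a usage sketch:

#include <crypto/aead.h>

/* Picks the highest-priority provider; after this change that is
 * normally a software implementation rather than QAT. */
struct crypto_aead *tfm =
	crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);

/* Opting in to the QAT implementation by driver name still works. */
struct crypto_aead *qat_tfm =
	crypto_alloc_aead("qat_aes_cbc_hmac_sha1", 0, 0);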
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 338acf29c487..9b2338f58d97 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
@@ -204,7 +204,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
if (!buf->sgl_dst_valid)
@@ -251,162 +251,3 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
extra_dst_buff, sz_extra_dst_buff,
sskip, dskip, flags);
}
-
-static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
- struct qat_alg_buf_list *bl)
-{
- struct device *dev = &GET_DEV(accel_dev);
- int n = bl->num_bufs;
- int i;
-
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bl->buffers[i].addr))
- dma_unmap_single(dev, bl->buffers[i].addr,
- bl->buffers[i].len, DMA_FROM_DEVICE);
-}
-
-static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
- struct scatterlist *sgl,
- struct qat_alg_buf_list **bl)
-{
- struct device *dev = &GET_DEV(accel_dev);
- struct qat_alg_buf_list *bufl;
- int node = dev_to_node(dev);
- struct scatterlist *sg;
- int n, i, sg_nctr;
- size_t sz;
-
- n = sg_nents(sgl);
- sz = struct_size(bufl, buffers, n);
- bufl = kzalloc_node(sz, GFP_KERNEL, node);
- if (unlikely(!bufl))
- return -ENOMEM;
-
- for (i = 0; i < n; i++)
- bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
- sg_nctr = 0;
- for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr;
-
- if (!sg->length)
- continue;
-
- bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- bufl->buffers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
- goto err_map;
- sg_nctr++;
- }
- bufl->num_bufs = sg_nctr;
- bufl->num_mapped_bufs = sg_nctr;
-
- *bl = bufl;
-
- return 0;
-
-err_map:
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->buffers[i].addr))
- dma_unmap_single(dev, bufl->buffers[i].addr,
- bufl->buffers[i].len,
- DMA_FROM_DEVICE);
- kfree(bufl);
- *bl = NULL;
-
- return -ENOMEM;
-}
-
-static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
- struct scatterlist *sgl,
- struct qat_alg_buf_list *bl,
- bool free_bl)
-{
- if (bl) {
- qat_bl_sgl_unmap(accel_dev, bl);
-
- if (free_bl)
- kfree(bl);
- }
- if (sgl)
- sgl_free(sgl);
-}
-
-static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
- struct scatterlist **sgl,
- struct qat_alg_buf_list **bl,
- unsigned int dlen,
- gfp_t gfp)
-{
- struct scatterlist *dst;
- int ret;
-
- dst = sgl_alloc(dlen, gfp, NULL);
- if (!dst) {
- dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
- return -ENOMEM;
- }
-
- ret = qat_bl_sgl_map(accel_dev, dst, bl);
- if (ret)
- goto err;
-
- *sgl = dst;
-
- return 0;
-
-err:
- sgl_free(dst);
- *sgl = NULL;
- return ret;
-}
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
- struct scatterlist **sg,
- unsigned int dlen,
- struct qat_request_buffs *qat_bufs,
- gfp_t gfp)
-{
- struct device *dev = &GET_DEV(accel_dev);
- dma_addr_t new_blp = DMA_MAPPING_ERROR;
- struct qat_alg_buf_list *new_bl;
- struct scatterlist *new_sg;
- size_t new_bl_size;
- int ret;
-
- ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
- if (ret)
- return ret;
-
- new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
-
- /* Map new firmware SGL descriptor */
- new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, new_blp)))
- goto err;
-
- /* Unmap old firmware SGL descriptor */
- dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
-
- /* Free and unmap old scatterlist */
- qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
- !qat_bufs->sgl_dst_valid);
-
- qat_bufs->sgl_dst_valid = false;
- qat_bufs->blout = new_bl;
- qat_bufs->bloutp = new_blp;
- qat_bufs->sz_out = new_bl_size;
-
- *sg = new_sg;
-
- return 0;
-err:
- qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
-
- if (!dma_mapping_error(dev, new_blp))
- dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
-
- return -ENOMEM;
-}
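[Annotation] The direction change on the destination buffers, DMA_FROM_DEVICE to DMA_BIDIRECTIONAL, tells the DMA API the device may read as well as write that memory (presumably the engine can read back parts of the output region during processing), which changes cache maintenance on non-coherent platforms. Map and unmap must agree on the direction; in brief:

#include <linux/dma-mapping.h>

/* Map a buffer the device may both read and write. */
dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))
	return -ENOMEM;
/* ... device accesses the buffer ... */
dma_unmap_single(dev, daddr, len, DMA_BIDIRECTIONAL); /* same direction */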
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index 3f5b79015400..2827d5055d3c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -65,10 +65,4 @@ static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
- struct scatterlist **newd,
- unsigned int dlen,
- struct qat_request_buffs *qat_bufs,
- gfp_t gfp);
-
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index 2ba4aa22e092..8b123472b71c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -8,6 +8,7 @@
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
@@ -29,11 +30,6 @@ struct qat_compression_ctx {
int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
-struct qat_dst {
- bool is_null;
- int resubmitted;
-};
-
struct qat_compression_req {
u8 req[QAT_COMP_REQ_SIZE];
struct qat_compression_ctx *qat_compression_ctx;
@@ -42,8 +38,6 @@ struct qat_compression_req {
enum direction dir;
int actual_dlen;
struct qat_alg_req alg_req;
- struct work_struct resubmit;
- struct qat_dst dst;
};
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
@@ -60,46 +54,6 @@ static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
return qat_alg_send_message(alg_req);
}
-static void qat_comp_resubmit(struct work_struct *work)
-{
- struct qat_compression_req *qat_req =
- container_of(work, struct qat_compression_req, resubmit);
- struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
- struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
- struct qat_request_buffs *qat_bufs = &qat_req->buf;
- struct qat_compression_instance *inst = ctx->inst;
- struct acomp_req *areq = qat_req->acompress_req;
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
- unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
- u8 *req = qat_req->req;
- dma_addr_t dfbuf;
- int ret;
-
- areq->dlen = dlen;
-
- dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
- crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
- qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
-
- ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
- qat_algs_alloc_flags(&areq->base));
- if (ret)
- goto err;
-
- qat_req->dst.resubmitted = true;
-
- dfbuf = qat_req->buf.bloutp;
- qat_comp_override_dst(req, dfbuf, dlen);
-
- ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
- if (ret != -ENOSPC)
- return;
-
-err:
- qat_bl_free_bufl(accel_dev, qat_bufs);
- acomp_request_complete(areq, ret);
-}
-
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
void *resp)
{
@@ -131,21 +85,6 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
areq->dlen = 0;
- if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
- if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
- if (qat_req->dst.resubmitted) {
- dev_dbg(&GET_DEV(accel_dev),
- "Output does not fit destination buffer\n");
- res = -EOVERFLOW;
- goto end;
- }
-
- INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
- adf_misc_wq_queue_work(&qat_req->resubmit);
- return;
- }
- }
-
if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
goto end;
@@ -207,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
return -EINVAL;
ctx->inst = inst;
- ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
- return 0;
+ return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
@@ -245,29 +182,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi
if (!areq->src || !slen)
return -EINVAL;
- if (areq->dst && !dlen)
+ if (!areq->dst || !dlen)
return -EINVAL;
- qat_req->dst.is_null = false;
-
- /* Handle acomp requests that require the allocation of a destination
- * buffer. The size of the destination buffer is double the source
- * buffer (rounded up to the size of a page) to fit the decompressed
- * output or an expansion on the data for compression.
- */
- if (!areq->dst) {
- qat_req->dst.is_null = true;
-
- dlen = round_up(2 * slen, PAGE_SIZE);
- areq->dst = sgl_alloc(dlen, f, NULL);
- if (!areq->dst)
- return -ENOMEM;
-
- dlen -= dhdr + dftr;
- areq->dlen = dlen;
- qat_req->dst.resubmitted = false;
- }
-
if (dir == COMPRESSION) {
params.extra_dst_buff = inst->dc_data->ovf_buff_p;
ovf_buff_sz = inst->dc_data->ovf_buff_sz;
@@ -323,14 +240,13 @@ static struct acomp_alg qat_acomp[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
- .dst_free = sgl_free,
- .reqsize = sizeof(struct qat_compression_req),
}};
int qat_comp_algs_register(void)
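[Annotation] With the resubmit worker and the qat_dst tracking removed, the driver no longer allocates a destination on the caller's behalf: an acomp request must now arrive with both source and destination scatterlists, or it fails with -EINVAL up front. A caller-side sketch, assuming src_sgl/dst_sgl and their lengths are already set up:

#include <crypto/acomp.h>

struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
struct acomp_request *req = acomp_request_alloc(tfm);
int err;

/* Both scatterlists must be supplied; a NULL dst is now -EINVAL. */
acomp_request_set_params(req, src_sgl, dst_sgl, slen, dlen);
err = crypto_acomp_compress(req);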
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 404e32c5e778..18a1f33a6db9 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
@@ -25,16 +25,6 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
req_pars->out_buffer_sz = dlen;
}
-static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
-{
- struct icp_qat_fw_comp_req *fw_req = req;
- struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
- fw_req->comn_mid.dest_data_addr = dst;
- fw_req->comn_mid.dst_length = dlen;
- req_pars->out_buffer_sz = dlen;
-}
-
static inline void qat_comp_create_compression_req(void *ctx, void *req,
u64 src, u32 slen,
u64 dst, u32 dlen,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178..53a4db5507ec 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
- inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
@@ -197,7 +196,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;
- dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
goto err;
@@ -205,7 +204,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
if (!obuff)
goto err;
- obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;
@@ -233,9 +232,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
return;
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree_sensitive(dc_data->ovf_buff);
- devm_kfree(dev, dc_data);
+ kfree(dc_data);
accel_dev->dc_data = NULL;
}
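[Annotation] Swapping devm_kzalloc() for kzalloc_node() trades managed cleanup for NUMA locality: the structure lands on the device's own node, and the matching kfree() in qat_free_dc_data() now owns the lifetime. The pattern in isolation:

#include <linux/slab.h>

/* Allocate on the device's NUMA node; the caller frees explicitly. */
dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
	return -ENOMEM;
/* ... */
kfree(dc_data);	/* replaces the automatic devm teardown */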
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..5ced3ed0e5ea 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
@@ -20,7 +20,6 @@ struct qat_compression_instance {
atomic_t refctr;
struct qat_instance_backlog backlog;
struct adf_dc_data *dc_data;
- void (*build_deflate_ctx)(void *ctx);
};
static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 317cafa9d11f..da4eca6e1633 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -163,7 +163,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
- /* Sets the accelaration engine context mode to either four or eight */
+ /* Sets the acceleration engine context mode to either four or eight */
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
@@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
handle->chip_info->icp_rst_mask = 0x100155;
else
handle->chip_info->icp_rst_mask = 0x100015;
@@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ handle->chip_info->dual_sign = true;
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7ea40b4f6e5b..18c3e4416dc5 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,10 +1,16 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
+#include <linux/wordpart.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -58,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
- pr_err("QAT: bad argument, ae_data is NULL\n");
+ pr_err("bad argument, ae_data is NULL\n");
return -EINVAL;
}
@@ -85,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
int min = hdr->min_ver & 0xff;
if (hdr->file_id != ICP_QAT_UOF_FID) {
- pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ pr_err("Invalid header 0x%x\n", hdr->file_id);
return -EINVAL;
}
if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
- pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
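[Annotation] The new pr_fmt() definition at the top of the file is why every pr_err() here drops its hand-written "QAT: " prefix: the macro prepends it once, at compile time, to all pr_*() calls in the translation unit. The idiom:

/* Must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) "QAT: " fmt

#include <linux/printk.h>

pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
/* emits: "QAT: bad UOF version, major 0x..., minor 0x..." */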
@@ -102,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
- pr_err("QAT: unsupported firmware type\n");
+ pr_err("unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: SUOF chunk amount is incorrect\n");
+ pr_err("SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
- pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -222,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
char *str;
if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
- pr_err("QAT: initmem is out of range");
+ pr_err("initmem is out of range");
return -EINVAL;
}
if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
- pr_err("QAT: Memory scope for init_mem error\n");
+ pr_err("Memory scope for init_mem error\n");
return -EINVAL;
}
str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
if (!str) {
- pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ pr_err("AE name assigned in UOF init table is NULL\n");
return -EINVAL;
}
if (qat_uclo_parse_num(str, ae)) {
- pr_err("QAT: Parse num for AE number failed\n");
+ pr_err("Parse num for AE number failed\n");
return -EINVAL;
}
if (*ae >= ICP_QAT_UCLO_MAX_AE) {
- pr_err("QAT: ae %d out of range\n", *ae);
+ pr_err("ae %d out of range\n", *ae);
return -EINVAL;
}
return 0;
@@ -355,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
break;
default:
- pr_err("QAT: initmem region error. region type=0x%x\n",
- init_mem->region);
+ pr_err("initmem region error. region type=0x%x\n", init_mem->region);
return -EINVAL;
}
return 0;
@@ -430,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_batch_wr_lm(handle, ae,
obj_handle->lm_init_tab[ae])) {
- pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ pr_err("fail to batch init lmem for AE %d\n", ae);
return -EINVAL;
}
qat_uclo_cleanup_batch_init_list(handle,
@@ -538,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+ pr_err("UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain neighbor register table\n");
+ pr_err("UOF can't contain neighbor register table\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages\n");
+ pr_err("UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature\n");
+ pr_err("UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature\n");
+ pr_err("UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -676,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set\n");
+ pr_err("uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -730,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
+ return ICP_QAT_AC_6XXX_DEV_TYPE;
default:
- pr_err("QAT: unsupported device 0x%x\n",
- handle->pci_dev->device);
+ pr_err("unsupported device 0x%x\n", handle->pci_dev->device);
return 0;
}
}
@@ -747,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
- pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+ pr_err("UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
@@ -755,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
maj_ver = obj_handle->prod_rev & 0xff;
if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
- pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ pr_err("UOF majVer 0x%x out of range\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -798,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_NEIGH_REL:
return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
default:
- pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ pr_err("UOF uses not supported reg type 0x%x\n", reg_type);
return -EFAULT;
}
return 0;
@@ -834,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
case ICP_QAT_UOF_INIT_REG_CTX:
/* check if ctx is appropriate for the ctxMode */
if (!((1 << init_regsym->ctx) & ctx_mask)) {
- pr_err("QAT: invalid ctx num = 0x%x\n",
- init_regsym->ctx);
+ pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx);
return -EINVAL;
}
qat_uclo_init_reg(handle, ae,
@@ -847,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
exp_res);
break;
case ICP_QAT_UOF_INIT_EXPR:
- pr_err("QAT: INIT_EXPR feature not supported\n");
+ pr_err("INIT_EXPR feature not supported\n");
return -EINVAL;
case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
- pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n");
return -EINVAL;
default:
break;
@@ -870,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
return 0;
if (obj_handle->init_mem_tab.entry_num) {
if (qat_uclo_init_memory(handle)) {
- pr_err("QAT: initialize memory failed\n");
+ pr_err("initialize memory failed\n");
return -EINVAL;
}
}
@@ -899,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ pr_err("qat_hal_set_ae_ctx_mode error\n");
return ret;
}
if (handle->chip_info->nn) {
mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ pr_err("qat_hal_set_ae_nn_mode error\n");
return ret;
}
}
mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n");
return ret;
}
if (handle->chip_info->lm2lm3) {
mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n");
return ret;
}
mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
@@ -996,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
- pr_err("QAT: UOF incompatible\n");
+ pr_err("UOF incompatible\n");
return -EINVAL;
}
obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
@@ -1007,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&obj_handle->str_table)) {
- pr_err("QAT: UOF doesn't have effective images\n");
+ pr_err("UOF doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
@@ -1016,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
- pr_err("QAT: Bad object\n");
+ pr_err("Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
@@ -1033,6 +1036,36 @@ out_err:
return -EFAULT;
}
+static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr)
+{
+ struct icp_qat_css_hdr *hdr = img_ptr;
+ char *fw_hdr = img_ptr;
+ unsigned int offset;
+
+ if (handle->chip_info->dual_sign) {
+ offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN;
+ return *(fw_hdr + offset);
+ }
+
+ return hdr->fw_type;
+}
+
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
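[Annotation] Read together with the ICP_QAT_DUALSIGN_* lengths, the helpers above give the signed-image geometry on dual-sign parts: the signature block spans ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN (3540) bytes, followed by ICP_QAT_DUALSIGN_MISC_INFO_LEN (16) bytes of metadata before the payload. One reading of the constants, as a comment sketch rather than an authoritative format description:

/*
 * Dual-sign SIMG layout as implied by the constants (inferred):
 *
 *   opaque header            12 bytes, padded to 16
 *   RSA modulus + exponent   ICP_QAT_CSS_FWSK_*_LEN(handle)
 *   RSA signature            ICP_QAT_CSS_SIGNATURE_LEN(handle)
 *   XMSS public key          64 bytes
 *   XMSS signature           2692 bytes, padded to 2696
 *   -- end of signature block at offset 3540 --
 *   misc info                16 bytes
 *   signed payload           (AE mode data, init sequence, ustore)
 *
 * qat_uclo_simg_fw_type() reads the firmware-type byte 7 bytes
 * (ICP_QAT_DUALSIGN_FW_TYPE_LEN) past the signature block.
 */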
@@ -1049,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
- pr_err("QAT: incorrect SUOF checksum\n");
+ pr_err("incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
@@ -1064,8 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
- struct icp_qat_simg_ae_mode *ae_mode;
+ unsigned int offset = qat_uclo_simg_hdr2cont_len(handle);
struct icp_qat_suof_objhdr *suof_objhdr;
+ struct icp_qat_simg_ae_mode *ae_mode;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
@@ -1075,13 +1109,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
suof_chunk_hdr->offset))->img_length;
suof_img_hdr->css_header = suof_img_hdr->simg_buf;
- suof_img_hdr->css_key = (suof_img_hdr->css_header +
- sizeof(struct icp_qat_css_hdr));
- suof_img_hdr->css_signature = suof_img_hdr->css_key +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
- suof_img_hdr->css_simg = suof_img_hdr->css_signature +
- ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ suof_img_hdr->css_simg = suof_img_hdr->css_header + offset;
ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
suof_img_hdr->ae_mask = ae_mode->ae_mask;
@@ -1116,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
- pr_err("QAT: incompatible product type %x\n",
- img_ae_mode->dev_type);
+ pr_err("incompatible product type %x\n", img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if (maj_ver > img_ae_mode->devmax_ver ||
maj_ver < img_ae_mode->devmin_ver) {
- pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+ pr_err("incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -1166,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || suof_size == 0) {
- pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+ pr_err("input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
@@ -1209,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
@@ -1227,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
- SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
@@ -1241,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
- pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+ pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
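[Annotation] Throughout this file the open-coded (bus_addr >> BITS_IN_DWORD) shifts and casts give way to upper_32_bits()/lower_32_bits(), which is what the new <linux/wordpart.h> include at the top of the file provides; the driver's ADD_ADDR() macro performs the reassembly. In isolation:

#include <linux/wordpart.h>

u64 bus_addr = 0x0000001234abcd00ULL;
u32 hi = upper_32_bits(bus_addr);	/* 0x00000012 */
u32 lo = lower_32_bits(bus_addr);	/* 0x34abcd00 */

u64 back = (((u64)hi) << 32) + lo;	/* what ADD_ADDR(hi, lo) does */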
@@ -1277,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
} else {
- pr_err("Chip 0x%x doesn't support broadcast load\n",
- handle->pci_dev->device);
+ pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device);
return -EINVAL;
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
- pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+ pr_err("Broadcast load failed. AE is not enabled or active.\n");
return -EINVAL;
}
@@ -1316,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: broadcast load failed timeout %d\n", retry);
+ pr_err("broadcast load failed timeout %d\n", retry);
return -EINVAL;
}
}
@@ -1370,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
+ void *image, unsigned int size,
unsigned int fw_type)
{
char *fw_type_name = fw_type ? "MMP" : "AE";
unsigned int css_dword_size = sizeof(u32);
+ unsigned int header_len, simg_type;
+ struct icp_qat_css_hdr *css_hdr;
if (handle->chip_info->fw_auth) {
- struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+ header_len = qat_uclo_simg_hdr2sign_len(handle);
+ simg_type = qat_uclo_simg_fw_type(handle, image);
+ css_hdr = image;
+
+ if (handle->chip_info->dual_sign) {
+ if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE)
+ goto err;
+ if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN)
+ goto err;
+ if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER)
+ goto err;
+ } else {
+ if (css_hdr->header_len * css_dword_size != header_len)
+ goto err;
+ if (css_hdr->size * css_dword_size != size)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ }
- if ((css_hdr->header_len * css_dword_size) != header_len)
- goto err;
- if ((css_hdr->size * css_dword_size) != size)
- goto err;
- if (fw_type != css_hdr->fw_type)
- goto err;
- if (size <= header_len)
+ if (fw_type != simg_type)
goto err;
+
size -= header_len;
}
@@ -1401,115 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
goto err;
} else {
- pr_err("QAT: Unsupported firmware type\n");
+ pr_err("Unsupported firmware type\n");
return -EINVAL;
}
return 0;
err:
- pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ pr_err("Invalid %s firmware image\n", fw_type_name);
return -EINVAL;
}
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
- struct icp_qat_fw_auth_desc **desc)
+static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type, struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- struct icp_qat_fw_auth_desc *auth_desc;
- struct icp_qat_auth_chunk *auth_chunk;
- u64 virt_addr, bus_addr, virt_base;
- unsigned int length, simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
- struct icp_firml_dram_desc img_desc;
-
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
- pr_err("QAT: error, input image size overflow %d\n", size);
- return -EINVAL;
- }
- length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
- ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
- size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
- if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
- pr_err("QAT: error, allocate continuous dram fail\n");
- return -ENOMEM;
- }
+ struct icp_qat_fw_auth_desc *auth_desc;
+ char *virt_addr, *virt_base;
+ u64 bus_addr;
- auth_chunk = img_desc.dram_base_addr_v;
- auth_chunk->chunk_size = img_desc.dram_size;
- auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
- virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
- bus_addr = img_desc.dram_bus_addr + simg_offset;
- auth_desc = img_desc.dram_base_addr_v;
- auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->css_hdr_low = (unsigned int)bus_addr;
+ virt_base = dram_desc->dram_base_addr_v;
+ virt_base += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
virt_addr = virt_base;
- memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+ memcpy(virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
- auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+ auth_desc->fwsk_pub_high = upper_32_bits(bus_addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr)),
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
/* exponent */
- memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
- sizeof(unsigned int));
+ memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
- auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->signature_low = (unsigned int)bus_addr;
+ auth_desc->signature_high = upper_32_bits(bus_addr);
+ auth_desc->signature_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
- ICP_QAT_CSS_SIGNATURE_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle));
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
- auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->img_low = (unsigned int)bus_addr;
- auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
- auth_desc->img_len);
+ auth_desc->img_high = upper_32_bits(bus_addr);
+ auth_desc->img_low = lower_32_bits(bus_addr);
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ if (bus_addr + auth_desc->img_len >
+ dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
+ pr_err("insufficient memory size for authentication data\n");
+ qat_uclo_simg_free(handle, dram_desc);
+ return -ENOMEM;
+ }
+
+ memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
- if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
- CSS_AE_FIRMWARE) {
+ if (fw_type == CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
- auth_desc->img_ae_init_data_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr);
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
- auth_desc->img_ae_insts_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_insts_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(bus_addr);
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
@@ -1523,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
}
+static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_simg_ae_mode *simg_ae_mode;
+ struct icp_qat_fw_auth_desc *auth_desc;
+ unsigned int chunk_offset, img_offset;
+ u64 bus_addr, addr;
+ char *virt_addr;
+
+ virt_addr = dram_desc->dram_base_addr_v;
+ virt_addr += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
+ memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN);
+
+ img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN;
+ chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN;
+
+ /* RSA pub key */
+ addr = bus_addr + chunk_offset;
+ auth_desc->fwsk_pub_high = upper_32_bits(addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ /* RSA padding */
+ memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+ chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle);
+ /* RSA exponent */
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ /* RSA signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->signature_high = upper_32_bits(addr);
+ auth_desc->signature_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ /* XMSS pubkey */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_pubkey_high = upper_32_bits(addr);
+ auth_desc->xmss_pubkey_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ /* XMSS signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_sig_high = upper_32_bits(addr);
+ auth_desc->xmss_sig_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN;
+
+ if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) {
+ pr_err("insufficient auth chunk memory to store data\n");
+ return -ENOMEM;
+ }
+
+ /* Signed data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_high = upper_32_bits(addr);
+ auth_desc->img_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len);
+
+ chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+ /* AE firmware */
+ if (fw_type == CSS_AE_FIRMWARE) {
+ /* AE mode data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_mode_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_mode_data_low = lower_32_bits(addr);
+ simg_ae_mode =
+ (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset);
+ auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+
+ chunk_offset += sizeof(struct icp_qat_simg_ae_mode);
+ /* AE init seq */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_init_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(addr);
+
+ chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+ /* AE instructions */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ } else {
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ }
+ *desc = auth_desc;
+ return 0;
+}
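
qat_uclo_build_auth_desc_dualsign() above advances two cursors at different rates: img_offset steps through the packed source image by exact field lengths, while chunk_offset steps through the DMA chunk by aligned lengths (the *_ALIGN_LEN constants), so each field lands on its hardware-required boundary. A stripped-down model of that bookkeeping, with hypothetical field descriptors:

#include <string.h>

/* Hypothetical field descriptor: 'len' bytes are consumed from the
 * packed image, 'aligned_len' bytes are reserved in the DMA chunk. */
struct simg_field {
	unsigned int len;
	unsigned int aligned_len;
};

static void copy_simg_fields(char *chunk, const char *image,
			     const struct simg_field *f, int nfields)
{
	unsigned int img_offset = 0, chunk_offset = 0;
	int i;

	for (i = 0; i < nfields; i++) {
		memcpy(chunk + chunk_offset, image + img_offset, f[i].len);
		img_offset += f[i].len;			/* packed source */
		chunk_offset += f[i].aligned_len;	/* aligned destination */
	}
}
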
+
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_auth_chunk *auth_chunk;
+ struct icp_firml_dram_desc img_desc;
+ unsigned int simg_fw_type;
+ int ret;
+
+ ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
+ if (ret)
+ return ret;
+
+ simg_fw_type = qat_uclo_simg_fw_type(handle, image);
+ auth_chunk = img_desc.dram_base_addr_v;
+ auth_chunk->chunk_size = img_desc.dram_size;
+ auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+
+ if (handle->chip_info->dual_sign)
+ return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+
+ return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+}
+
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
@@ -1542,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
if (!((desc->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
- pr_err("QAT: AE %d is active\n", i);
+ pr_err("AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, fcu_ctl_csr,
@@ -1562,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: firmware load failed timeout %x\n", retry);
+ pr_err("firmware load failed timeout %x\n", retry);
return -EINVAL;
}
}
@@ -1580,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
- pr_err("QAT: map SUOF failed\n");
+ pr_err("map SUOF failed\n");
return -EINVAL;
}
return 0;
@@ -1604,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->chip_info->mmp_sram_size < mem_size) {
- pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ pr_err("MMP size is too large: 0x%x\n", mem_size);
return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
@@ -1630,7 +1784,7 @@ static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
if (!objhdl->obj_hdr) {
- pr_err("QAT: object file chunk is null\n");
+ pr_err("object file chunk is null\n");
goto out_objhdr_err;
}
handle->obj_handle = objhdl;
@@ -1665,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
min_ver_offset);
if (checksum != mof_ptr->checksum) {
- pr_err("QAT: incorrect MOF checksum\n");
+ pr_err("incorrect MOF checksum\n");
return -EINVAL;
}
@@ -1701,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
}
}
- pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+ pr_err("object %s is not found inside MOF\n", obj_name);
return -EINVAL;
}
@@ -1718,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
} else {
- pr_err("QAT: unsupported chunk id\n");
+ pr_err("unsupported chunk id\n");
return -EINVAL;
}
mobj_hdr->obj_buf = obj;
@@ -1746,7 +1900,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
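
The kzalloc((a + b) * size) to kcalloc(size_add(a, b), size, ...) conversion above moves the arithmetic into overflow-checked helpers: size_add() saturates rather than wraps, and kcalloc() verifies the count-times-size multiplication. A hedged sketch of the pattern; kcalloc and size_add are the real helpers from <linux/slab.h> and <linux/overflow.h>, while the wrapper function itself is illustrative:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Illustrative: one table entry per UOF and per SUOF chunk. A huge
 * attacker-controlled chunk count saturates size_add() to SIZE_MAX,
 * which kcalloc() then rejects instead of allocating a short buffer. */
static void *alloc_obj_table(size_t uobj_chunks, size_t sobj_chunks,
			     size_t entry_size)
{
	return kcalloc(size_add(uobj_chunks, sobj_chunks), entry_size,
		       GFP_KERNEL);
}
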
@@ -1779,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
}
if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
- pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+ pr_err("inconsistent UOF/SUOF chunk amount\n");
return -EINVAL;
}
return 0;
@@ -1820,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
int min = mof_hdr->min_ver & 0xff;
if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", mof_hdr->file_id);
return -EINVAL;
}
if (mof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: MOF chunk amount is incorrect\n");
+ pr_err("MOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
- pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index cfd3bd757715..1427fe76f171 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
-qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
+qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index c0661ff5e929..5b4bd0ba1ccb 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -4,7 +4,6 @@
#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -24,12 +23,11 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
static struct adf_hw_device_class dh895xcc_class = {
.name = ADF_DH895XCC_DEVICE_NAME,
.type = DEV_DH895XCC,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
- u32 fuses = self->fuses;
+ u32 fuses = self->fuses[ADF_FUSECTL0];
return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
ADF_DH895XCC_ACCELERATORS_MASK;
@@ -37,7 +35,7 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 fuses = self->fuses;
+ u32 fuses = self->fuses[ADF_FUSECTL0];
return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}
@@ -99,7 +97,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+ int sku = (self->fuses[ADF_FUSECTL0] & ADF_DH895XCC_FUSECTL_SKU_MASK)
>> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
switch (sku) {
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 2a50cce41515..b59e0cc49e52 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_DH895XCC_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -126,7 +108,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_dh895xcc(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
@@ -252,4 +257,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW);
MODULE_FIRMWARE(ADF_DH895XCC_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
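
The MODULE_IMPORT_NS(CRYPTO_QAT) to MODULE_IMPORT_NS("CRYPTO_QAT") hunks here, like the EXPORT_SYMBOL_NS_GPL changes later in this diff, follow the tree-wide switch of symbol namespaces from preprocessor tokens to string literals. Both sides must spell the namespace identically; a minimal sketch, with qat_example_helper() as a stand-in symbol and the two halves normally living in different modules:

#include <linux/module.h>

/* Provider side: export a symbol into the "CRYPTO_QAT" namespace. */
int qat_example_helper(void) { return 0; }
EXPORT_SYMBOL_NS_GPL(qat_example_helper, "CRYPTO_QAT");

/* Consumer side: a module that calls qat_example_helper() must
 * declare the namespace, or modpost flags the unresolved import. */
MODULE_IMPORT_NS("CRYPTO_QAT");
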
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 64b54e92b2b4..c2fdb6e0f68f 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
-qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
+qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f4ee4c2e00da..828456c43b76 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index 7cb015b55122..7cd528ee31e7 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/loongson/Kconfig b/drivers/crypto/loongson/Kconfig
new file mode 100644
index 000000000000..15475da8fc11
--- /dev/null
+++ b/drivers/crypto/loongson/Kconfig
@@ -0,0 +1,5 @@
+config CRYPTO_DEV_LOONGSON_RNG
+ tristate "Support for Loongson RNG Driver"
+ depends on MFD_LOONGSON_SE
+ help
+ Support for the Random Number Generator found in the Loongson Security Engine.
diff --git a/drivers/crypto/loongson/Makefile b/drivers/crypto/loongson/Makefile
new file mode 100644
index 000000000000..1ce5ec32b553
--- /dev/null
+++ b/drivers/crypto/loongson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_LOONGSON_RNG) += loongson-rng.o
diff --git a/drivers/crypto/loongson/loongson-rng.c b/drivers/crypto/loongson/loongson-rng.c
new file mode 100644
index 000000000000..3a4940260f9e
--- /dev/null
+++ b/drivers/crypto/loongson/loongson-rng.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <crypto/internal/rng.h>
+
+#define SE_SEED_SIZE 32
+
+struct loongson_rng_list {
+ struct mutex lock;
+ struct list_head list;
+ int registered;
+};
+
+struct loongson_rng {
+ u32 used;
+ struct loongson_se_engine *engine;
+ struct list_head list;
+ struct mutex lock;
+};
+
+struct loongson_rng_ctx {
+ struct loongson_rng *rng;
+};
+
+struct loongson_rng_cmd {
+ u32 cmd_id;
+ union {
+ u32 len;
+ u32 ret;
+ } u;
+ u32 seed_off;
+ u32 out_off;
+ u32 pad[4];
+};
+
+static struct loongson_rng_list rng_devices = {
+ .lock = __MUTEX_INITIALIZER(rng_devices.lock),
+ .list = LIST_HEAD_INIT(rng_devices.list),
+};
+
+static int loongson_rng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dstn, unsigned int dlen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd = rng->engine->command;
+ int err, len;
+
+ mutex_lock(&rng->lock);
+ cmd->seed_off = 0;
+ do {
+ len = min(dlen, rng->engine->buffer_size);
+ cmd = rng->engine->command;
+ cmd->u.len = len;
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ break;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret) {
+ err = -EIO;
+ break;
+ }
+
+ memcpy(dstn, rng->engine->data_buffer, len);
+ dlen -= len;
+ dstn += len;
+ } while (dlen > 0);
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static int loongson_rng_init(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct loongson_rng *rng;
+ u32 min_used = U32_MAX;
+
+ mutex_lock(&rng_devices.lock);
+ list_for_each_entry(rng, &rng_devices.list, list) {
+ if (rng->used < min_used) {
+ ctx->rng = rng;
+ min_used = rng->used;
+ }
+ }
+ ctx->rng->used++;
+ mutex_unlock(&rng_devices.lock);
+
+ return 0;
+}
+
+static void loongson_rng_exit(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mutex_lock(&rng_devices.lock);
+ ctx->rng->used--;
+ mutex_unlock(&rng_devices.lock);
+}
+
+static int loongson_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd;
+ int err;
+
+ if (slen < SE_SEED_SIZE)
+ return -EINVAL;
+
+ slen = min(slen, rng->engine->buffer_size);
+
+ mutex_lock(&rng->lock);
+ cmd = rng->engine->command;
+ cmd->u.len = slen;
+ cmd->seed_off = rng->engine->buffer_off;
+ memcpy(rng->engine->data_buffer, seed, slen);
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ goto out;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret)
+ err = -EIO;
+out:
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static struct rng_alg loongson_rng_alg = {
+ .generate = loongson_rng_generate,
+ .seed = loongson_rng_seed,
+ .seedsize = SE_SEED_SIZE,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "loongson_stdrng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct loongson_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = loongson_rng_init,
+ .cra_exit = loongson_rng_exit,
+ },
+};
+
+static int loongson_rng_probe(struct platform_device *pdev)
+{
+ struct loongson_rng_cmd *cmd;
+ struct loongson_rng *rng;
+ int ret = 0;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->engine = loongson_se_init_engine(pdev->dev.parent, SE_ENGINE_RNG);
+ if (!rng->engine)
+ return -ENODEV;
+ cmd = rng->engine->command;
+ cmd->cmd_id = SE_CMD_RNG;
+ cmd->out_off = rng->engine->buffer_off;
+ mutex_init(&rng->lock);
+
+ mutex_lock(&rng_devices.lock);
+
+ if (!rng_devices.registered) {
+ ret = crypto_register_rng(&loongson_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register crypto (%d)\n", ret);
+ goto out;
+ }
+ rng_devices.registered = 1;
+ }
+
+ list_add_tail(&rng->list, &rng_devices.list);
+out:
+ mutex_unlock(&rng_devices.lock);
+
+ return ret;
+}
+
+static struct platform_driver loongson_rng_driver = {
+ .probe = loongson_rng_probe,
+ .driver = {
+ .name = "loongson-rng",
+ },
+};
+module_platform_driver(loongson_rng_driver);
+
+MODULE_ALIAS("platform:loongson-rng");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yinggang Gu <guyinggang@loongson.cn>");
+MODULE_AUTHOR("Qunqin Zhao <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Random Number Generator driver");
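
Once loongson_rng_probe() registers the algorithm above, kernel users reach it through the generic "stdrng" name (which implementation wins is decided by cra_priority, 300 here). A hedged consumer sketch using the standard <crypto/rng.h> calls; the function name is illustrative, error handling is abbreviated, and the seed buffer is assumed to hold at least SE_SEED_SIZE bytes of caller-provided entropy:

#include <crypto/rng.h>
#include <linux/err.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
	u8 seed[32] = { };	/* caller-provided entropy, >= seedsize */
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Lands in loongson_rng_seed() when this driver is selected. */
	ret = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!ret)
		/* Lands in loongson_rng_generate(). */
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}
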
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 78217577aa54..aa269abb0499 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -7,7 +7,7 @@ config CRYPTO_DEV_MARVELL
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
@@ -24,7 +24,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
tristate "Support for Marvell OcteonTX CPT driver"
depends on ARCH_THUNDER || COMPILE_TEST
depends on PCI_MSI && 64BIT
- depends on CRYPTO_LIB_AES
+ select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
@@ -41,10 +41,10 @@ config CRYPTO_DEV_OCTEONTX2_CPT
tristate "Marvell OcteonTX2 CPT driver"
depends on ARCH_THUNDER2 || COMPILE_TEST
depends on PCI_MSI && 64BIT
- depends on CRYPTO_LIB_AES
depends on NET_VENDOR_MARVELL
select OCTEONTX2_MBOX
select CRYPTO_DEV_MARVELL
+ select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..9c21f5d835d2 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
- if (engine->chain.first && engine->chain.last)
+ if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
@@ -375,7 +375,6 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- const char *res_name = "sram";
struct resource *res;
engine->pool = of_gen_pool_get(cesa->dev->of_node,
@@ -391,19 +390,7 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
return -ENOMEM;
}
- if (cesa->caps->nengines > 1) {
- if (!idx)
- res_name = "sram0";
- else
- res_name = "sram1";
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- if (!res || resource_size(res) < cesa->sram_size)
- return -EINVAL;
-
- engine->sram = devm_ioremap_resource(cesa->dev, res);
+ engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
@@ -510,25 +497,21 @@ static int mv_cesa_probe(struct platform_device *pdev)
* if the clock does not exist.
*/
snprintf(res_name, sizeof(res_name), "cesa%u", i);
- engine->clk = devm_clk_get(dev, res_name);
+ engine->clk = devm_clk_get_optional_enabled(dev, res_name);
if (IS_ERR(engine->clk)) {
- engine->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(engine->clk))
- engine->clk = NULL;
+ engine->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ ret = PTR_ERR(engine->clk);
+ goto err_cleanup;
+ }
}
snprintf(res_name, sizeof(res_name), "cesaz%u", i);
- engine->zclk = devm_clk_get(dev, res_name);
- if (IS_ERR(engine->zclk))
- engine->zclk = NULL;
-
- ret = clk_prepare_enable(engine->clk);
- if (ret)
- goto err_cleanup;
-
- ret = clk_prepare_enable(engine->zclk);
- if (ret)
+ engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
+ if (IS_ERR(engine->zclk)) {
+ ret = PTR_ERR(engine->zclk);
goto err_cleanup;
+ }
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
@@ -570,13 +553,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- for (i = 0; i < caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- if (cesa->engines[i].irq > 0)
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
return ret;
}
@@ -588,12 +566,8 @@ static void mv_cesa_remove(struct platform_device *pdev)
mv_cesa_remove_algs(cesa);
- for (i = 0; i < cesa->caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < cesa->caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
@@ -604,7 +578,7 @@ MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
- .remove_new = mv_cesa_remove,
+ .remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
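
The probe simplification above leans on devm_clk_get_optional_enabled(), which combines lookup, prepare+enable, and devres-managed cleanup, and returns NULL rather than an error when the named clock simply is not described. A short sketch of the same pattern, with illustrative names:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* NULL if "cesa0" is absent (treated as "no clock needed"),
	 * ERR_PTR() on genuine failures; on success the clock is
	 * already enabled and is disabled/unprepared automatically
	 * when the driver unbinds, so no err_cleanup path is needed. */
	clk = devm_clk_get_optional_enabled(&pdev->dev, "cesa0");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}

This is why the error path above no longer calls clk_disable_unprepare() per engine.
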
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
- * @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * @chain_hw: list of the current tdma descriptors being processed
+ * by the hardware.
+ * @chain_sw: list of the current tdma descriptors that will be
+ * submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
- struct mv_cesa_tdma_chain chain;
+ struct mv_cesa_tdma_chain chain_hw;
+ struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..3fe0fd9226cf 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
+
+ atomic_sub(req->cryptlen, &engine->load);
}
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
@@ -459,6 +461,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
+ if (!req->cryptlen)
+ return 0;
+
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
@@ -489,7 +494,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -500,7 +505,7 @@ static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -543,7 +548,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -552,7 +557,7 @@ static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
@@ -596,7 +601,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -608,7 +613,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -649,7 +654,7 @@ static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -661,7 +666,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -725,7 +730,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -736,7 +741,7 @@ static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -778,7 +783,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -787,7 +792,7 @@ static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..5103d36cdfdb 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
+
+ atomic_sub(req->nbytes, &engine->load);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -362,16 +365,13 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
- __le32 *data = NULL;
+ const void *data;
/*
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = le32_to_cpu(data[i]);
-
memcpy(ahashreq->result, data, digsize);
} else {
for (i = 0; i < digsize / 4; i++)
@@ -395,8 +395,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
}
}
}
-
- atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -663,7 +661,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (ret)
goto err_free_tdma;
- if (iter.src.sg) {
+ if (iter.base.len > iter.src.op_offset) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
+ spin_lock_bh(&engine->lock);
+ if (engine->chain_sw.first == dreq->chain.first) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
+ }
+ engine->chain_hw.first = dreq->chain.first;
+ engine->chain_hw.last = dreq->chain.last;
+ spin_unlock_bh(&engine->lock);
+
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
- if (engine->chain.first == NULL && engine->chain.last == NULL) {
- engine->chain.first = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
- } else {
- struct mv_cesa_tdma_desc *last;
+ struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
- last = engine->chain.last;
+ /*
+ * Break the DMA chain if the request being queued needs the IV
+ * regs to be set before lauching the request.
+ * regs to be set before launching the request.
+ if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+ engine->chain_sw.first = dreq->chain.first;
+ else {
last->next = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
-
- /*
- * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
- * the last element of the current chain, or if the request
- * being queued needs the IV regs to be set before lauching
- * the request.
- */
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
- !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ }
+ last = dreq->chain.last;
+ engine->chain_sw.last = last;
+ /*
+ * Break the DMA chain if CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain.
+ */
+ if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
}
}
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
- for (tdma = engine->chain.first; tdma; tdma = next) {
+ for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);
/* Re-chaining to the next request */
- engine->chain.first = tdma->next;
+ engine->chain_hw.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
- if (engine->chain.first == NULL)
- engine->chain.last = NULL;
+ if (engine->chain_hw.first == NULL)
+ engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
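
The rework above splits the single engine->chain into chain_sw (descriptors queued but not yet handed to the hardware) and chain_hw (what the DMA engine is walking). The splice-or-break rule of mv_cesa_tdma_chain() can be summarized in a standalone model; struct and flag names mirror the patch, the flag values are illustrative, and the real driver also mirrors the link in the DMA-visible next_dma field:

#include <stddef.h>

struct tdma_desc {
	struct tdma_desc *next;
	unsigned int flags;
};

struct tdma_chain {
	struct tdma_desc *first;
	struct tdma_desc *last;
};

#define CESA_TDMA_SET_STATE	0x1	/* needs IV regs set before launch */
#define CESA_TDMA_BREAK_CHAIN	0x2	/* hardware must stop after this */

static void queue_request(struct tdma_chain *sw,
			  struct tdma_desc *first, struct tdma_desc *last)
{
	/* Start a new software chain when there is nothing to append
	 * to or when the request must program the IV registers first;
	 * otherwise splice it after the current tail. */
	if (!sw->last || (first->flags & CESA_TDMA_SET_STATE))
		sw->first = first;
	else
		sw->last->next = first;
	sw->last = last;

	/* A BREAK_CHAIN tail closes the chain; the next request will
	 * open a fresh one. */
	if (last->flags & CESA_TDMA_BREAK_CHAIN) {
		sw->first = NULL;
		sw->last = NULL;
	}
}

mv_cesa_dma_step() then moves a request's descriptors from chain_sw to chain_hw under the engine lock before starting the DMA, as shown in the hunk above.
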
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index c4250e5fcf8f..9f5601c0280b 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string_choices.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"
@@ -505,17 +506,6 @@ int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
- int eng_type)
-{
- struct otx_cpt_engs_rsvd *engs;
-
- engs = find_engines_by_type(eng_grp, eng_type);
-
- return (engs != NULL ? 1 : 0);
-}
-EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
-
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size)
{
@@ -614,8 +604,8 @@ static void print_dbg_info(struct device *dev,
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
- "enabled" : "disabled");
+ pr_debug("engine_group%d, state %s\n", i,
+ str_enabled_disabled(grp->is_enabled));
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s\n",
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
index 8620ac87a447..df79ee416c0d 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -174,7 +174,5 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
bool is_rdonly);
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
- int eng_type);
#endif /* __OTX_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 6bfc59e67747..d4aab9e20f2a 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -6,6 +6,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+#include "otx2_cpt_common.h"
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf);
@@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = {
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf)
{
- void __iomem *lmtline = lf->lmtline;
+ void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE);
u64 val = (lf->slot & 0x7FF);
u64 tar_addr = 0;
@@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
dma_wmb();
/* Copy CPT command to LMTLINE */
- memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
cn10k_lmt_flush(val, tar_addr);
}
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+
+ if (!lmt_info->base)
+ return;
+
+ dma_free_attrs(&pdev->dev, lmt_info->size,
+ lmt_info->base - lmt_info->align,
+ lmt_info->iova - lmt_info->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev,
+ struct otx2_cptlfs_info *lfs, u32 size)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+ dma_addr_t align_iova;
+ dma_addr_t iova;
+
+ lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!lmt_info->base)
+ return -ENOMEM;
+
+ align_iova = ALIGN((u64)iova, LMTLINE_ALIGN);
+ lmt_info->iova = align_iova;
+ lmt_info->align = align_iova - iova;
+ lmt_info->size = size;
+ lmt_info->base += lmt_info->align;
+ return 0;
+}
+
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
- resource_size_t size;
- u64 lmt_base;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
cptpf->lfs.ops = &otx2_hw_ops;
@@ -57,44 +92,52 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
}
cptpf->lfs.ops = &cn10k_hw_ops;
- lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
- if (!lmt_base) {
- dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
- return -ENOMEM;
+ size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n",
+ cptpf->pf_id);
+ return ret;
}
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
- cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
- if (!cptpf->lfs.lmt_base) {
- dev_err(&pdev->dev,
- "Mapping of PF LMTLINE address failed\n");
- return -ENOMEM;
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n",
+ cptpf->pf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
- resource_size_t offset, size;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map VF LMILINE region */
- cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
- if (!cptvf->lfs.lmt_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- return -ENOMEM;
+ size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n",
+ cptvf->vf_id);
+ return ret;
+ }
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n",
+ cptvf->vf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -110,7 +153,7 @@ void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
DMA_BIDIRECTIONAL);
kfree(er_ctx->hw_ctx);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
{
@@ -119,7 +162,7 @@ void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
hctx->w0.ctx_sz = ctx_sz;
hctx->w0.ctx_push_sz = 1;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -149,7 +192,7 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
@@ -168,7 +211,7 @@ void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
OTX2_CPT_LF_CTX_ERR);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, "CRYPTO_DEV_OCTEONTX2_CPT");
void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
{
@@ -177,4 +220,4 @@ void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
else
cptvf->lfs.ops = &otx2_hw_ops;
}
-EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, "CRYPTO_DEV_OCTEONTX2_CPT");
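
cn10k_cpt_lmtst_alloc() above over-allocates (callers add LMTLINE_ALIGN of slack to the size), rounds the returned IOVA up, and remembers the alignment delta so the free path can reconstruct the original CPU/DMA addresses. The same allocate-then-align idiom in isolation; names mirror the patch, the constant value and the wrapper itself are illustrative:

#include <linux/dma-mapping.h>

#define LMTLINE_ALIGN 128	/* illustrative; mirrors the driver's constant */

static void *lmtst_alloc_aligned(struct device *dev, u32 size,
				 dma_addr_t *iova, dma_addr_t *align)
{
	dma_addr_t raw_iova;
	void *base;

	base = dma_alloc_attrs(dev, size, &raw_iova, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	if (!base)
		return NULL;

	/* Round the IOVA up and shift the CPU pointer by the same
	 * delta; the delta is kept so the free path can hand the
	 * original (base - align, iova - align) pair back to
	 * dma_free_attrs(). */
	*align = ALIGN(raw_iova, LMTLINE_ALIGN) - raw_iova;
	*iova = raw_iova + *align;
	return base + *align;
}
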
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index 92be3ecf570f..ea5990048c21 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs);
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5b7c57574ef..062def303dce 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -18,9 +18,8 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
-#define OTX2_CPT_RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
@@ -145,11 +144,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
- if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
- pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
- return true;
-
- return false;
+ return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID;
}
static inline bool is_dev_cn10ka(struct pci_dev *pdev)
@@ -159,12 +155,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
- (pdev->revision & 0xff) == 0x51))
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 ||
+ (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xFF) == 0x51);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
@@ -174,11 +168,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- (pdev->revision & 0xFF) == 0x54)
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54;
}
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
@@ -192,18 +183,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
- if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev);
}
static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
- if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev);
}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -223,5 +208,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 5be0103c1fb8..95f3de3a34eb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -19,7 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
@@ -37,13 +37,13 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, "CRYPTO_DEV_OCTEONTX2_CPT");
static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg,
@@ -95,7 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
@@ -108,7 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
@@ -121,7 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
@@ -180,7 +180,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
@@ -213,7 +213,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
@@ -228,7 +228,7 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
{
@@ -254,4 +254,29 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct lmtst_tbl_setup_req *req;
+
+ req = (struct lmtst_tbl_setup_req *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX message allocation failed\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+
+ req->use_local_lmt_region = true;
+ req->lmt_iova = lfs->lmt_info.iova;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT");

diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index e27e849b01df..e64ca30335de 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -34,6 +34,9 @@
#define SG_COMP_2 2
#define SG_COMP_1 1
+#define OTX2_CPT_DPTR_RPTR_ALIGN 8
+#define OTX2_CPT_RES_ADDR_ALIGN 32
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -347,22 +350,48 @@ static inline struct otx2_cpt_inst_info *
cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- u32 dlen = 0, g_len, sg_len, info_len;
- int align = OTX2_CPT_DMA_MINALIGN;
+ u32 dlen = 0, g_len, s_len, sg_len, info_len;
struct otx2_cpt_inst_info *info;
- u16 g_sz_bytes, s_sz_bytes;
u32 total_mem_len;
int i;
- g_sz_bytes = ((req->in_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
+ /* Allocate memory to meet the alignment requirements below:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info |
+ * | (No alignment required) |
+ * | --------------------------------|
+ * | | padding for ARCH_DMA_MINALIGN |
+ * | | alignment |
+ * |------------------------------------|
+ * | SG List Gather/Input memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * |---------------------------------- |
+ * | SG List Scatter/Output memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * | -------------------------------|
+ * | | padding for 32B alignment |
+ * |------------------------------------|
+ * | Result response memory |
+ * | Alignment = 32Byte |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_len = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ sg_len = g_len + s_len;
- g_len = ALIGN(g_sz_bytes, align);
- sg_len = ALIGN(g_len + s_sz_bytes, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
@@ -372,7 +401,8 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
dlen += req->in[i].size;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + g_len;
info->gthr_sz = req->in_cnt;
info->sctr_sz = req->out_cnt;
@@ -384,7 +414,7 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
- &info->in_buffer[g_len])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -401,8 +431,10 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + sg_len;
- info->comp_baddr = info->dptr_baddr + sg_len;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -417,10 +449,9 @@ static inline struct otx2_cpt_inst_info *
otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- int align = OTX2_CPT_DMA_MINALIGN;
struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
+ u32 dlen, info_len;
+ u16 g_len, s_len;
u32 total_mem_len;
if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
@@ -429,22 +460,54 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
return NULL;
}
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
+ /* Allocate memory to meet the alignment requirements below:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info |
+ * | (No alignment required) |
+ * | --------------------------------|
+ * | | padding for ARCH_DMA_MINALIGN |
+ * | | alignment |
+ * |------------------------------------|
+ * | SG List Header of 8 Byte |
+ * |------------------------------------|
+ * | SG List Gather/Input memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * |---------------------------------- |
+ * | SG List Scatter/Output memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * | -------------------------------|
+ * | | padding for 32B alignment |
+ * |------------------------------------|
+ * | Result response memory |
+ * | Alignment = 32Byte |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_len = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_len + s_len + SG_LIST_HDR_SIZE;
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(dlen, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
return NULL;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + SG_LIST_HDR_SIZE + g_len;
((u16 *)info->in_buffer)[0] = req->out_cnt;
((u16 *)info->in_buffer)[1] = req->in_cnt;
@@ -460,7 +523,7 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -476,8 +539,10 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -490,6 +555,7 @@ struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type);
#endif /* __OTX2_CPT_REQMGR_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b52728e3c0d1..dc7c7a2650a5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -288,8 +288,7 @@ void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_misc_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -308,8 +307,7 @@ void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_done_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
int lf_num, int irq_offset,
@@ -351,8 +349,7 @@ free_irq:
otx2_cptlf_unregister_misc_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -375,8 +372,7 @@ free_irq:
otx2_cptlf_unregister_done_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -390,7 +386,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -423,7 +419,7 @@ free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
@@ -437,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- if (lfs->lmt_base)
- lfs->lf[slot].lmtline = lfs->lmt_base +
- (slot * LMTLINE_SIZE);
- else
+ if (!lfs->lmt_info.base)
lfs->lf[slot].lmtline = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
@@ -486,7 +479,7 @@ clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
@@ -498,7 +491,7 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
otx2_cpt_detach_rsrcs_msg(lfs);
lfs->lfs_num = 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, "CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index bd8604be2952..1b9f75214d18 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -105,18 +105,27 @@ struct cpt_hw_ops {
gfp_t gfp);
};
+#define LMTLINE_SIZE 128
+#define LMTLINE_ALIGN 128
+struct otx2_lmt_info {
+ void *base;
+ dma_addr_t iova;
+ u32 size;
+ u8 align;
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
-#define LMTLINE_SIZE 128
- void __iomem *lmt_base;
+ struct otx2_lmt_info lmt_info;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
- u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kcrypto_se_eng_grp_num; /* Crypto symmetric engine group number */
+ u8 kcrypto_ae_eng_grp_num; /* Crypto asymmetric engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
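[Editor's note: struct otx2_lmt_info replaces the bare lmt_base pointer so the base, IOVA, size and alignment of the LMTST region travel together; per-slot addressing stays a fixed 128-byte stride, as the branch removed from otx2_cptlf_init() shows. A standalone sketch of that stride arithmetic; field values are illustrative.]

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define LMTLINE_SIZE  128
    #define LMTLINE_ALIGN 128

    struct ex_lmt_info {
            void *base;       /* CPU view of the LMT region    */
            uint64_t iova;    /* device view of the same region */
            uint32_t size;
            uint8_t align;
    };

    /* Each LF slot gets one 128-byte LMT line at a fixed stride. */
    static void *ex_lmtline(const struct ex_lmt_info *li, unsigned int slot)
    {
            return (uint8_t *)li->base + slot * LMTLINE_SIZE;
    }

    int main(void)
    {
            struct ex_lmt_info li = { .size = 4 * LMTLINE_SIZE,
                                      .align = LMTLINE_ALIGN };

            li.base = aligned_alloc(LMTLINE_ALIGN, li.size);
            if (!li.base)
                    return 1;
            printf("slot2 offset=%td\n",
                   (uint8_t *)ex_lmtline(&li, 2) - (uint8_t *)li.base);
            free(li.base);
            return 0;
    }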
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 400e36d9908f..1c5c262af48d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ if (cptpf->has_cpt1)
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
return ret;
}
@@ -739,18 +745,22 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map PF's configuration registers */
- err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPT_DRV_NAME);
+ err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
if (err) {
- dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
goto clear_drvdata;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, cptpf);
cptpf->pdev = pdev;
- cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map PF's configuration registers */
+ cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptpf->reg_base) {
+ err = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
+ goto clear_drvdata;
+ }
/* Check if AF driver is up, otherwise defer probe */
err = cpt_is_pf_usable(cptpf);
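[Editor's note: the probe path here migrates from the deprecated pcim_iomap_regions_request_all()/pcim_iomap_table() combination to pcim_request_all_regions() plus an explicit pcim_iomap() of the one BAR the driver touches. A minimal sketch of the new-style managed probe; the driver name and BAR number are placeholders.]

    #include <linux/module.h>
    #include <linux/pci.h>

    #define EX_DRV_NAME "example-pci"
    #define EX_REG_BAR  2   /* placeholder BAR number */

    static int ex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            void __iomem *regs;
            int err;

            err = pcim_enable_device(pdev);
            if (err)
                    return err;

            /* Request every BAR in one managed call ... */
            err = pcim_request_all_regions(pdev, EX_DRV_NAME);
            if (err)
                    return err;

            /* ... then map only the BAR the driver actually uses. */
            regs = pcim_iomap(pdev, EX_REG_BAR, 0);
            if (!regs)
                    return -ENOMEM;

            pci_set_master(pdev);
            return 0;
    }

Because all three calls are device-managed, the error paths need no explicit release or unmap.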
@@ -782,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
cptpf->kvf_limits = 1;
- err = cn10k_cptpf_lmtst_init(cptpf);
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
- /* Initialize CPT PF device */
- err = cptpf_device_init(cptpf);
+ err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
- goto unregister_intr;
+ goto free_lmtst;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
@@ -810,6 +820,8 @@ sysfs_grp_del:
sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
@@ -844,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */
cptpf_afpf_mbox_destroy(cptpf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
pci_set_drvdata(pdev, NULL);
}
@@ -864,7 +878,7 @@ static struct pci_driver otx2_cpt_pci_driver = {
module_pci_driver(otx2_cpt_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index ec1ac7e836a3..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -142,7 +142,7 @@ static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
@@ -184,7 +184,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
- nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_pf_func =
+ OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
@@ -264,8 +265,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return -ENOENT;
}
- otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
cptpf->lfs.global_slot = 0;
cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -278,9 +277,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
if (cptpf->has_cpt1) {
cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
- otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
- cptpf->reg_base, &cptpf->afpf_mbox,
- BLKADDR_CPT1);
cptpf->cpt1_lfs.global_slot = num_lfs;
cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -397,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
- msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
- ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+ msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+ (vf->vf_id + 1));
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
@@ -474,8 +469,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
switch (msg->id) {
case MBOX_MSG_READY:
- cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
- RVU_PFVF_PF_MASK;
+ cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
@@ -507,6 +501,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
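[Editor's note: the mailbox changes in this file stop open-coding the RVU pcifunc layout and route it through rvu_make_pcifunc()/rvu_get_pf(), which take the pci_dev so the PF/function field widths can differ per silicon generation. A userspace sketch of the classic fixed layout the old shift/mask code assumed; the RVU_PFVF_* values mirror OcteonTX2 and are assumptions here.]

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed classic OcteonTX2 layout: pcifunc = PF[15:10] | FUNC[9:0]. */
    #define RVU_PFVF_PF_SHIFT   10
    #define RVU_PFVF_PF_MASK    0x3F
    #define RVU_PFVF_FUNC_MASK  0x3FF

    static uint16_t make_pcifunc(unsigned int pf, unsigned int func)
    {
            return ((pf & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) |
                   (func & RVU_PFVF_FUNC_MASK);
    }

    static unsigned int get_pf(uint16_t pcifunc)
    {
            return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
    }

    int main(void)
    {
            uint16_t pcifunc = make_pcifunc(3, 1);  /* PF3, VF0 -> func 1 */

            printf("pcifunc=0x%04x pf=%u\n", pcifunc, get_pf(pcifunc));
            return 0;
    }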
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 5c9484646172..ebdf4efa09d4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
@@ -175,7 +176,9 @@ static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ rvu_make_pcifunc(cptpf->pdev,
+ cptpf->pf_id, 0),
+ blkaddr);
if (ret)
return ret;
@@ -1490,11 +1493,13 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
union otx2_cpt_opcode opcode;
union otx2_cpt_res_s *result;
union otx2_cpt_inst_s inst;
+ dma_addr_t result_baddr;
dma_addr_t rptr_baddr;
struct pci_dev *pdev;
- u32 len, compl_rlen;
+ int timeout = 10000;
+ void *base, *rptr;
int ret, etype;
- void *rptr;
+ u32 len;
/*
* We don't get capabilities if it was already done
@@ -1512,29 +1517,33 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
goto delete_grps;
- compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
- len = compl_rlen + LOADFVC_RLEN;
+ /* Allocate extra memory for "rptr" and "result" pointer alignment */
+ len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
+ sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
- result = kzalloc(len, GFP_KERNEL);
- if (!result) {
+ base = kzalloc(len, GFP_KERNEL);
+ if (!base) {
ret = -ENOMEM;
goto lf_cleanup;
}
- rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
- DMA_BIDIRECTIONAL);
+
+ rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
+ rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
dev_err(&pdev->dev, "DMA mapping failed\n");
ret = -EFAULT;
- goto free_result;
+ goto free_rptr;
}
- rptr = (u8 *)result + compl_rlen;
+
+ result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
+ result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
/* Fill in the command */
opcode.s.major = LOADFVC_MAJOR_OP;
@@ -1546,27 +1555,38 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = 0;
- iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.rptr = rptr_baddr;
iq_cmd.cptr.u = 0;
for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
- otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ timeout = 10000;
while (lfs->ops->cpt_get_compcode(result) ==
- OTX2_CPT_COMPLETION_CODE_INIT)
+ OTX2_CPT_COMPLETION_CODE_INIT) {
cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ ret = -ENODEV;
+ cptpf->is_eng_caps_discovered = false;
+ dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
+ goto error_no_response;
+ }
+ }
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
}
- dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
cptpf->is_eng_caps_discovered = true;
-free_result:
- kfree(result);
+error_no_response:
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+free_rptr:
+ kfree(base);
lf_cleanup:
otx2_cptlf_shutdown(lfs);
delete_grps:
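[Editor's note: the capability-discovery loop above now pairs cpu_relax() with udelay(1) and a 10000-iteration budget, so a hung microcode LOAD_FVC command fails the probe (the driver returns -ENODEV) instead of spinning forever. A generic sketch of the bounded-poll idiom, with a placeholder predicate.]

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/processor.h>
    #include <linux/types.h>

    /* Poll hw_done(ctx) for at most usec_budget microseconds. */
    static int poll_until_done(bool (*hw_done)(void *), void *ctx,
                               unsigned int usec_budget)
    {
            while (!hw_done(ctx)) {
                    if (!usec_budget--)
                            return -ETIMEDOUT; /* this driver maps it to -ENODEV */
                    cpu_relax();
                    udelay(1);
            }
            return 0;
    }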
@@ -1595,7 +1615,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
return -EINVAL;
}
err_msg = "Invalid engine group format";
- strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ strscpy(tmp_buf, ctx->val.vstr);
start = tmp_buf;
has_se = has_ie = has_ae = false;
@@ -1774,102 +1794,3 @@ err_print:
dev_err(dev, "%s\n", err_msg);
return -EINVAL;
}
-
-static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
- int size, int idx)
-{
- struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
- struct otx2_cpt_engs_rsvd *engs;
- int len, i;
-
- buf[0] = '\0';
- for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
- engs = &eng_grp->engs[i];
- if (!engs->type)
- continue;
- if (idx != -1 && idx != i)
- continue;
-
- if (eng_grp->mirror.is_ena)
- mirrored_engs = find_engines_by_type(
- &eng_grp->g->grp[eng_grp->mirror.idx],
- engs->type);
- if (i > 0 && idx == -1) {
- len = strlen(buf);
- scnprintf(buf + len, size - len, ", ");
- }
-
- len = strlen(buf);
- scnprintf(buf + len, size - len, "%d %s ",
- mirrored_engs ? engs->count + mirrored_engs->count :
- engs->count,
- get_eng_type_str(engs->type));
- if (mirrored_engs) {
- len = strlen(buf);
- scnprintf(buf + len, size - len,
- "(%d shared with engine_group%d) ",
- engs->count <= 0 ?
- engs->count + mirrored_engs->count :
- mirrored_engs->count,
- eng_grp->mirror.idx);
- }
- }
-}
-
-void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
-{
- struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
- struct otx2_cpt_eng_grp_info *mirrored_grp;
- char engs_info[2 * OTX2_CPT_NAME_LENGTH];
- struct otx2_cpt_eng_grp_info *grp;
- struct otx2_cpt_engs_rsvd *engs;
- int i, j;
-
- pr_debug("Engine groups global info");
- pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
- eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
- pr_debug("free SE %d", eng_grps->avail.se_cnt);
- pr_debug("free IE %d", eng_grps->avail.ie_cnt);
- pr_debug("free AE %d", eng_grps->avail.ae_cnt);
-
- for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
- grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s", i,
- grp->is_enabled ? "enabled" : "disabled");
- if (grp->is_enabled) {
- mirrored_grp = &eng_grps->grp[grp->mirror.idx];
- pr_debug("Ucode0 filename %s, version %s",
- grp->mirror.is_ena ?
- mirrored_grp->ucode[0].filename :
- grp->ucode[0].filename,
- grp->mirror.is_ena ?
- mirrored_grp->ucode[0].ver_str :
- grp->ucode[0].ver_str);
- if (is_2nd_ucode_used(grp))
- pr_debug("Ucode1 filename %s, version %s",
- grp->ucode[1].filename,
- grp->ucode[1].ver_str);
- }
-
- for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
- engs = &grp->engs[j];
- if (engs->type) {
- u32 mask[5] = { };
-
- get_engs_info(grp, engs_info,
- 2 * OTX2_CPT_NAME_LENGTH, j);
- pr_debug("Slot%d: %s", j, engs_info);
- bitmap_to_arr32(mask, engs->bmap,
- eng_grps->engs_num);
- if (is_dev_otx2(cptpf->pdev))
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
- mask[3], mask[2], mask[1],
- mask[0]);
- else
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
- mask[4], mask[3], mask[2], mask[1],
- mask[0]);
- }
- }
- }
-}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index 365fe8943bd9..7e6a6a4ec37c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -166,7 +166,6 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx);
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx);
-void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
struct otx2_cpt_engs_rsvd *find_engines_by_type(
struct otx2_cpt_eng_grp_info *eng_grp,
int eng_type);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 7eb0bc13994d..8d9f394d6b50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -384,7 +384,8 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
req_info->req.cptr = ctx->er_ctx.hw_ctx;
req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
@@ -1288,7 +1289,8 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
if (status)
return status;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
/*
* We perform an asynchronous send and once
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 527d34cc258b..c1c44a7b89fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -265,17 +265,33 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
- cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ cptvf->lfs.kcrypto_se_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
- if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
- dev_err(dev, "Engine group for kernel crypto not available\n");
- ret = -ENOENT;
+ if (cptvf->lfs.kcrypto_se_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Engine group for kernel symmetric crypto not available\n");
+ return -ENOENT;
+ }
+
+ /* Get engine group number for asymmetric crypto */
+ cptvf->lfs.kcrypto_ae_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_AE_TYPES);
+ if (ret)
return ret;
+
+ if (cptvf->lfs.kcrypto_ae_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Engine group for kernel asymmetric crypto not available\n");
+ return -ENOENT;
}
- eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ eng_grp_msk = BIT(cptvf->lfs.kcrypto_se_eng_grp_num) |
+ BIT(cptvf->lfs.kcrypto_ae_eng_grp_num);
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
@@ -283,8 +299,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
lfs_num = cptvf->lfs.kvf_limits;
- otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
- &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -358,9 +372,8 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map VF's configuration registers */
- ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPTVF_DRV_NAME);
+
+ ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
if (ret) {
dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
goto clear_drvdata;
@@ -369,14 +382,16 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cptvf);
cptvf->pdev = pdev;
- cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map VF's configuration registers */
+ cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptvf->reg_base) {
+ ret = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
+ goto clear_drvdata;
+ }
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
- ret = cn10k_cptvf_lmtst_init(cptvf);
- if (ret)
- goto clear_drvdata;
-
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -391,6 +406,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf_hw_ops_get(cptvf);
+ otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
+
ret = otx2_cptvf_send_caps_msg(cptvf);
if (ret) {
dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
@@ -399,13 +417,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
- goto unregister_interrupts;
+ goto free_lmtst;
return 0;
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
@@ -429,6 +453,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
cptvf_pfvf_mbox_destroy(cptvf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
pci_set_drvdata(pdev, NULL);
}
@@ -448,7 +474,7 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index d9fa5f6e204d..5277bcfa275e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -75,6 +75,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
+ u8 grp_num;
int i;
if (msg->id >= MBOX_MSG_MAX) {
@@ -122,7 +123,11 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
- cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ grp_num = rsp_grp->eng_grp_num;
+ if (rsp_grp->eng_type == OTX2_CPT_SE_TYPES)
+ cptvf->lfs.kcrypto_se_eng_grp_num = grp_num;
+ else if (rsp_grp->eng_type == OTX2_CPT_AE_TYPES)
+ cptvf->lfs.kcrypto_ae_eng_grp_num = grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
@@ -134,6 +139,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
sizeof(cptvf->eng_caps));
break;
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
@@ -188,7 +194,7 @@ int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
@@ -209,7 +215,7 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
@@ -229,7 +235,7 @@ int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_CAPS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 5387c68f3c9d..e71494486c64 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
- dev_err(&pdev->dev,
- "Request failed with software error code 0x%x\n",
- cpt_status->s.uc_compcode);
+ pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
+ cpt_status->s.uc_compcode,
+ info->req->areq->tfm->__crt_alg->cra_name,
+ info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}
@@ -390,9 +391,19 @@ void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type eng_type)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
- return cptvf->lfs.kcrypto_eng_grp_num;
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ return cptvf->lfs.kcrypto_se_eng_grp_num;
+ case OTX2_CPT_AE_TYPES:
+ return cptvf->lfs.kcrypto_ae_eng_grp_num;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported engine type");
+ break;
+ }
+ return -ENXIO;
}
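[Editor's note: with per-type group numbers, callers request OTX2_CPT_SE_TYPES or OTX2_CPT_AE_TYPES explicitly, and cptvf_lf_init() ORs both groups into the LF attach mask. A tiny sketch of that dispatch-plus-mask shape; the enum values and group numbers are placeholders.]

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum ex_eng_type { EX_SE_TYPES = 1, EX_AE_TYPES = 2 }; /* illustrative */

    /* Per-type group numbers, as cached from the mailbox responses. */
    static int ex_get_eng_grp_num(enum ex_eng_type t)
    {
            switch (t) {
            case EX_SE_TYPES: return 1; /* placeholder group numbers */
            case EX_AE_TYPES: return 2;
            }
            return -1;
    }

    int main(void)
    {
            /* The VF attach mask now covers both kernel-crypto groups. */
            uint8_t msk = BIT(ex_get_eng_grp_num(EX_SE_TYPES)) |
                          BIT(ex_get_eng_grp_num(EX_AE_TYPES));

            printf("eng_grp_msk=0x%02x\n", msk);
            return 0;
    }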
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index c82775dbb557..133ebc998236 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -225,21 +225,22 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
- dma_addr_t key_phys = 0;
- dma_addr_t src_phys, dst_phys;
+ dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
bool key_referenced = actx->key_referenced;
int ret;
- if (!key_referenced) {
+ if (key_referenced)
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ else
key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
- }
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- if (key_referenced)
- /* Set OTP key bit to select the key via KEY_SELECT. */
- desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
- else
+ if (!key_referenced)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ else if (actx->key[0] == DCP_PAES_KEY_OTP)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
@@ -300,7 +301,10 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- if (!key_referenced)
+ if (key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ else
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
return ret;
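[Editor's note: after this change both key paths produce a mapped key_phys, and every dma_unmap_single() mirrors the base and length of its matching dma_map_single(), which is the invariant the streaming DMA API requires. A small sketch of that pairing; the helper names are invented for illustration.]

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map exactly the bytes the engine will read ... */
    static int ex_map_key(struct device *dev, void *key_buf, size_t key_len,
                          dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, key_buf, key_len, DMA_TO_DEVICE);
            return dma_mapping_error(dev, *handle) ? -ENOMEM : 0;
    }

    /* ... and unmap with the same handle and length afterwards. */
    static void ex_unmap_key(struct device *dev, dma_addr_t handle,
                             size_t key_len)
    {
            dma_unmap_single(dev, handle, key_len, DMA_TO_DEVICE);
    }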
@@ -1243,7 +1247,7 @@ MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
static struct platform_driver mxs_dcp_driver = {
.probe = mxs_dcp_probe,
- .remove_new = mxs_dcp_remove,
+ .remove = mxs_dcp_remove,
.driver = {
.name = "mxs-dcp",
.of_match_table = mxs_dcp_dt_ids,
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index b11545cc5cb7..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at least
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_info("queue_alloc() of %d fails\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
-			/* next chunk chains off the last ciphertext block */
-			iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
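The reverse walk in the decrypt path is forced by CBC itself: each chunk's IV is the final ciphertext block of the chunk before it, and with in-place operation that ciphertext is destroyed as soon as the earlier chunk is processed. Encrypt can therefore chain forward off dest_final, while decrypt must issue chunks back-to-front and stash the overall final ciphertext block in temp_iv before it is overwritten. The IV selection reduces to the following sketch, where i, prev, req_iv and blocksize are illustrative names rather than driver identifiers:

    /* CBC decrypt, chunk i: IV = last ciphertext block of chunk i-1,
     * or the request IV for the very first chunk. */
    iv_paddr = (i == 0) ? __pa(req_iv)
                        : prev->arr[prev->arr_len - 1].src_paddr +
                          prev->arr[prev->arr_len - 1].src_len - blocksize;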
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
-	/* DES: ECB and CBC are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
-	/* 3DES: ECB and CBC are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
-	/* AES: ECB, CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
-		  /* CTR: decryption is the same operation as encryption */
-		  .decrypt		= n2_encrypt_chaining,
- },
- },
-
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
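All three AES templates carry ENC_TYPE_ALG_AES128 even though keys up to 256 bits are accepted; n2_aes_setkey() (earlier in this file, outside the excerpt shown) presumably rewrites the algorithm bits to the AES192/AES256 encodings according to the key length, since the header below defines all three.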
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
-	  .hash_init	= (const u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
-	  .hash_init	= (const u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
-	  .hash_init	= (const u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
-	  .hash_init	= (const u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
-	if (err)
-		algs_registered--;
-	mutex_unlock(&spu_lock);
-	return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
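A worked example with hypothetical values: suppose the MDESC 'ino' table holds {intr=1, ino=0x24} and {intr=2, ino=0x25}, and the OF 'interrupts' property is {1, 2}. A queue whose qhandle resolves to devino 0x25 matches the second table entry (intr 2), which in turn matches dev_intrs[1], so the function returns 1 and the caller picks dev->archdata.irqs[1].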
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
-	snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
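Each cache object here is one complete queue: 64 entries of 64 bytes, that is a single 4 KiB block, aligned to the entry size. That alignment also satisfies the 64-byte base-address alignment that the ncs_qconf() hypervisor call requires for the queue's real address (see the n2_core.h documentation further down).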
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
-	/* The queue must be configured from a CPU that shares this SPU. */
-	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
-		id = mdesc_get_property(mdesc, tgt, "id", NULL);
-		if (!id) {
-			dev_err(&dev->dev, "%pOF: cpu node lacks an 'id' property.\n",
-				dev->dev.of_node);
-			return -EINVAL;
-		}
-		if (table[*id] != NULL) {
-			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
-				dev->dev.of_node);
-			return -EINVAL;
-		}
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
-		pr_err("%s: no 'ino' property\n", __func__);
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove_new = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove_new = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
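control_word_base(), used by the deleted n2_core.c above, lives outside this excerpt; roughly, it packs the fields defined here into the 64-bit control word. A sketch under two assumptions: the LEN field encodes length minus one (as the extension-entry code in n2_core.c suggests), and fields not mentioned stay zero. n2_pack_control is an illustrative name:

    static inline u64 n2_pack_control(u64 len, u64 enc_type,
                                      u64 auth_type, u64 opcode)
    {
            return ((len - 1) << CONTROL_LEN_SHIFT) |
                   (enc_type << CONTROL_ENC_TYPE_SHIFT) |
                   (auth_type << CONTROL_AUTH_TYPE_SHIFT) |
                   (opcode << CONTROL_OPCODE_SHIFT);
    }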
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
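Mirroring spu_queue_register_workfn() and spu_queue_destroy() from the deleted driver above, the configure/unconfigure pairing looks like this (q_base is an illustrative name for the 64-byte-aligned queue memory):

    unsigned long qhandle, hv_ret;

    /* configure: queue type, real address and entry count in, handle out */
    hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q_base),
                             CWQ_NUM_ENTRIES, &qhandle);
    if (hv_ret != HV_EOK)
            return -EINVAL;

    /* unconfigure: the handle goes in ARG1, the entry count is zero */
    sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);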
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 82214cde2bcd..b950fcce8a9b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -101,9 +101,13 @@ static int update_param(struct nx842_crypto_param *p,
return 0;
}
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&ctx->lock);
ctx->driver = driver;
@@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
- return -ENOMEM;
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
}
- return 0;
+ return ctx;
}
-EXPORT_SYMBOL_GPL(nx842_crypto_init);
+EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx);
-void nx842_crypto_exit(struct crypto_tfm *tfm)
+void nx842_crypto_free_ctx(void *p)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = p;
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
}
-EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx);
static void check_constraints(struct nx842_constraints *c)
{
@@ -246,11 +251,11 @@ nospc:
return update_param(p, slen, dskip + dlen);
}
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = pctx;
struct nx842_crypto_header *hdr =
container_of(&ctx->header,
struct nx842_crypto_header, hdr);
@@ -431,11 +436,11 @@ usesw:
return update_param(p, slen + padding, dlen);
}
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = pctx;
struct nx842_crypto_header *hdr;
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 887d4ce3cb49..f5e2c82ba876 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,7 +3,6 @@
#ifndef __NX_842_H__
#define __NX_842_H__
-#include <crypto/algapi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -101,6 +100,8 @@
#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1)))
#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE)
+struct crypto_scomp;
+
static inline unsigned long nx842_get_pa(void *addr)
{
if (!is_vmalloc_addr(addr))
@@ -182,13 +183,13 @@ struct nx842_crypto_ctx {
struct nx842_driver *driver;
};
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
-void nx842_crypto_exit(struct crypto_tfm *tfm);
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver);
+void nx842_crypto_free_ctx(void *ctx);
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ u8 *dst, unsigned int *dlen, void *ctx);
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ u8 *dst, unsigned int *dlen, void *ctx);
#endif /* __NX_842_H__ */
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 0e440f704a8f..35fa5bad1d9f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index c843f4c6f684..56a0b3a67c33 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -217,13 +217,11 @@ static int generate_pat(u8 *iv,
memset(b1, 0, 16);
if (assoclen <= 65280) {
*(u16 *)b1 = assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len);
} else {
*(u16 *)b1 = (u16)(0xfffe);
*(u32 *)&b1[2] = assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len);
}
}
@@ -341,9 +339,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
nbytes -= authsize;
/* copy out the auth tag to compare with later */
- scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes + req->assoclen, authsize,
- SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen,
+ authsize);
rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -465,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req,
} while (processed < nbytes);
/* copy out the auth tag */
- scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes + req->assoclen, authsize,
- SCATTERWALK_TO_SG);
+ memcpy_to_sglist(req->dst, nbytes + req->assoclen,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dfa3ad1a12f2..709b3ee74657 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -9,10 +9,12 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 502a565074e9..4039cf3b22d4 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 4a796318b430..b7fe2de96d96 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -103,16 +103,13 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
{
int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
- struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
if (nbytes <= AES_BLOCK_SIZE) {
- scatterwalk_start(&walk, req->src);
- scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+ memcpy_from_sglist(out, req->src, 0, nbytes);
return 0;
}
@@ -391,19 +388,17 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
mac:
if (enc) {
/* copy out the auth tag */
- scatterwalk_map_and_copy(
- csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ memcpy_to_sglist(
req->dst, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_TO_SG);
+ csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
} else {
u8 *itag = nx_ctx->priv.gcm.iauth_tag;
u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
- scatterwalk_map_and_copy(
+ memcpy_from_sglist(
itag, req->src, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_FROM_SG);
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
rc = crypto_memneq(itag, otag,
crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
-EBADMSG : 0;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index eb5c8f689360..bf465d824e2c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -7,13 +7,14 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -21,8 +22,6 @@
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
@@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
*/
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u8 keys[2][AES_BLOCK_SIZE];
@@ -135,9 +134,9 @@ out:
return rc;
}
-static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int err;
@@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
- u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
+ u32 to_process, total;
int rc = 0;
int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- total = sctx->count + len;
-
- /* 2 cases for total data len:
- * 1: <= AES_BLOCK_SIZE: copy into state, return 0
- * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
- */
- if (total <= AES_BLOCK_SIZE) {
- memcpy(sctx->buffer + sctx->count, data, len);
- sctx->count += len;
- goto out;
- }
+ total = len;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
@@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ &data_len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
@@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do {
- to_process = total - to_process;
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-
- leftover = total - to_process;
-
- /* the hardware will not accept a 0 byte operation for this
- * algorithm and the operation MUST be finalized to be correct.
- * So if we happen to get an update that falls on a block sized
- * boundary, we must save off the last block to finalize with
- * later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- data_len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buffer,
- &data_len,
- max_sg_len);
- if (data_len != sctx->count) {
- rc = -EINVAL;
- goto out;
- }
- }
+ to_process = total & ~(AES_BLOCK_SIZE - 1);
- data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- &data_len,
+ &to_process,
max_sg_len);
- if (data_len != to_process - sctx->count) {
- rc = -EINVAL;
- goto out;
- }
-
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac,
- AES_BLOCK_SIZE);
- }
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
atomic_inc(&(nx_ctx->stats->aes_ops));
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
+ data += to_process;
in_sg = nx_ctx->in_sg;
- } while (leftover > AES_BLOCK_SIZE);
+ } while (total >= AES_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data, leftover);
- sctx->count = leftover;
+ rc = total;
+ memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- } else if (sctx->count == 0) {
+ if (nbytes) {
+ /* non-zero final, so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
+ } else {
/*
* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just ECB to
@@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- &len, nx_ctx->ap->sglen);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
+ nx_ctx->ap->sglen);
- if (len != sctx->count) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
- .final = nx_xcbc_final,
+ .finup = nx_xcbc_finup,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
- .statesize = sizeof(struct xcbc_state),
+ .init_tfm = nx_crypto_ctx_aes_xcbc_init2,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init2,
- .cra_exit = nx_crypto_ctx_exit,
}
};
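The xcbc rework above drops the driver's private partial-block buffer (sctx->buffer and sctx->count). With CRYPTO_AHASH_ALG_BLOCK_ONLY the crypto core buffers partial blocks itself; if my reading of the new contract is right, update() now consumes only whole blocks and returns the count of leftover tail bytes (the rc = total at the end), while CRYPTO_AHASH_ALG_FINAL_NONZERO has the core hold data back so that finup() sees at least one byte whenever any data was fed in at all. The zero-byte ECB special case in nx_xcbc_finup() then remains only for an entirely empty message, which the NX hardware cannot finalize directly.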
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 8c859872c183..0493041ea088 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -9,6 +9,7 @@
#include "nx-842.h"
+#include <crypto/internal/scompress.h>
#include <linux/timer.h>
#include <asm/prom.h>
@@ -1031,23 +1032,23 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_powernv_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_powernv_driver);
+ return nx842_crypto_alloc_ctx(&nx842_powernv_driver);
}
-static struct crypto_alg nx842_powernv_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_powernv_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_powernv_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .streams = {
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static __init int nx_compress_powernv_init(void)
@@ -1107,7 +1108,7 @@ static __init int nx_compress_powernv_init(void)
nx842_powernv_exec = nx842_exec_vas;
}
- ret = crypto_register_alg(&nx842_powernv_alg);
+ ret = crypto_register_scomp(&nx842_powernv_alg);
if (ret) {
nx_delete_coprocs();
return ret;
@@ -1128,7 +1129,7 @@ static void __exit nx_compress_powernv_exit(void)
if (!nx842_ct)
vas_unregister_api_powernv();
- crypto_unregister_alg(&nx842_powernv_alg);
+ crypto_unregister_scomp(&nx842_powernv_alg);
nx_delete_coprocs();
}
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 35f2d0d8507e..fc0222ebe807 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -11,6 +11,7 @@
#include <asm/vio.h>
#include <asm/hvcall.h>
#include <asm/vas.h>
+#include <crypto/internal/scompress.h>
#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -133,7 +134,7 @@ struct nx842_devdata {
};
static struct nx842_devdata __rcu *devdata;
-static DEFINE_SPINLOCK(devdata_mutex);
+static DEFINE_SPINLOCK(devdata_spinlock);
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
@@ -750,15 +751,15 @@ static int nx842_OF_upd(struct property *new_prop)
if (!new_devdata)
return -ENOMEM;
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata)
of_node = old_devdata->dev->of_node;
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
kfree(new_devdata);
return -ENODEV;
}
@@ -810,7 +811,7 @@ out:
old_devdata->max_sg_len);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
@@ -821,13 +822,13 @@ error_out:
dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
} else {
dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
}
if (!ret)
@@ -1008,23 +1009,23 @@ static struct nx842_driver nx842_pseries_driver = {
.decompress = nx842_pseries_decompress,
};
-static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_pseries_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_pseries_driver);
+ return nx842_crypto_alloc_ctx(&nx842_pseries_driver);
}
-static struct crypto_alg nx842_pseries_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_pseries_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_pseries_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .streams = {
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static int nx842_probe(struct vio_dev *viodev,
@@ -1045,9 +1046,9 @@ static int nx842_probe(struct vio_dev *viodev,
return -ENOMEM;
}
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata && old_devdata->vdev != NULL) {
dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
@@ -1062,7 +1063,7 @@ static int nx842_probe(struct vio_dev *viodev,
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
kfree(old_devdata);
@@ -1072,7 +1073,7 @@ static int nx842_probe(struct vio_dev *viodev,
if (ret)
goto error;
- ret = crypto_register_alg(&nx842_pseries_alg);
+ ret = crypto_register_scomp(&nx842_pseries_alg);
if (ret) {
dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
goto error;
@@ -1101,7 +1102,7 @@ static int nx842_probe(struct vio_dev *viodev,
return 0;
error_unlock:
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
if (new_devdata)
kfree(new_devdata->counters);
kfree(new_devdata);
@@ -1120,14 +1121,15 @@ static void nx842_remove(struct vio_dev *viodev)
if (caps_feat)
sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
- old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
of_reconfig_notifier_unregister(&nx842_of_nb);
+
+ spin_lock_irqsave(&devdata_spinlock, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(&viodev->dev, NULL);
if (old_devdata)
@@ -1144,6 +1146,7 @@ static void __init nxcop_get_capabilities(void)
{
struct hv_vas_all_caps *hv_caps;
struct hv_nx_cop_caps *hv_nxc;
+ u64 feat;
int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
@@ -1154,27 +1157,26 @@ static void __init nxcop_get_capabilities(void)
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
(u64)virt_to_phys(hv_caps));
+ if (!rc)
+ feat = be64_to_cpu(hv_caps->feat_type);
+ kfree(hv_caps);
if (rc)
- goto out;
+ return;
+ if (!(feat & VAS_NX_GZIP_FEAT_BIT))
+ return;
- caps_feat = be64_to_cpu(hv_caps->feat_type);
/*
* NX-GZIP feature available
*/
- if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
- hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
- if (!hv_nxc)
- goto out;
- /*
- * Get capabilities for NX-GZIP feature
- */
- rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
- VAS_NX_GZIP_FEAT,
- (u64)virt_to_phys(hv_nxc));
- } else {
- pr_err("NX-GZIP feature is not available\n");
- rc = -EINVAL;
- }
+ hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+ if (!hv_nxc)
+ return;
+ /*
+ * Get capabilities for NX-GZIP feature
+ */
+ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+ VAS_NX_GZIP_FEAT,
+ (u64)virt_to_phys(hv_nxc));
if (!rc) {
nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
@@ -1184,13 +1186,10 @@ static void __init nxcop_get_capabilities(void)
be64_to_cpu(hv_nxc->min_compress_len);
nx_cop_caps.min_decompress_len =
be64_to_cpu(hv_nxc->min_decompress_len);
- } else {
- caps_feat = 0;
+ caps_feat = feat;
}
kfree(hv_nxc);
-out:
- kfree(hv_caps);
}
static const struct vio_device_id nx842_vio_driver_ids[] = {
@@ -1255,13 +1254,13 @@ static void __exit nx842_pseries_exit(void)
vas_unregister_api_pseries();
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
if (old_devdata && old_devdata->dev)
dev_set_drvdata(old_devdata->dev, NULL);
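The rename above reflects that devdata was always protected by a spinlock plus RCU, never a mutex. The writer-side pattern repeated through probe, remove and exit is: take the spinlock, dereference the old pointer under lockdep_is_held(), publish the replacement with rcu_assign_pointer(), drop the lock, then synchronize_rcu() before freeing. A minimal sketch of that pattern, with the struct contents and error handling elided:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct nx842_devdata;

static DEFINE_SPINLOCK(devdata_spinlock);
static struct nx842_devdata __rcu *devdata;

static void publish_devdata(struct nx842_devdata *new_devdata)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	spin_lock_irqsave(&devdata_spinlock, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_spinlock));
	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_spinlock, flags);

	synchronize_rcu();	/* wait out readers of the old pointer */
	kfree(old_devdata);
}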
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index c3bebf0feabe..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -9,9 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -19,12 +22,11 @@
struct sha256_state_be {
__be32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int nx_sha256_init(struct shash_desc *desc) {
+static int nx_sha256_init(struct shash_desc *desc)
+{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx->state[2] = __cpu_to_be32(SHA256_H2);
@@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count % SHA256_BLOCK_SIZE) + len;
- if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len,
- max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA256_BLOCK_SIZE - 1);
- /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count += to_process;
} while (leftover >= SHA256_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
-
- sctx->count += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count >= SHA256_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
+ * this is not an intermediate operation;
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ sctx->count += nbytes;
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
- &len, max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -251,18 +206,34 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+ put_unaligned(sctx->count, p.u64++);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+ sctx->count = get_unaligned(p.u64++);
return 0;
}
@@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
- .final = nx_sha256_final,
+ .finup = nx_sha256_finup,
.export = nx_sha256_export,
.import = nx_sha256_import,
+ .init_tfm = nx_crypto_ctx_sha256_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha256_state_be),
.statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha256_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
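With CRYPTO_AHASH_ALG_BLOCK_ONLY set above, the crypto core buffers partial blocks itself: update() must consume only whole blocks and return the number of tail bytes it left unprocessed (hence rc = leftover), and that tail is later handed to finup(). A schematic sketch of the contract as implemented above, with demo_process_blocks() standing in for the NX hardware pass:

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>

static void demo_process_blocks(struct shash_desc *desc, const u8 *data,
				unsigned int len)
{
	/* stand-in for hashing whole SHA256_BLOCK_SIZE blocks in hardware */
}

static int demo_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	unsigned int partial = len & (SHA256_BLOCK_SIZE - 1);

	demo_process_blocks(desc, data, len - partial);
	return partial;	/* tail bytes the core buffers until finup() */
}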
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 1ffb40d2c324..f74776b7d7d7 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -9,8 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -18,12 +22,11 @@
struct sha512_state_be {
__be64 state[SHA512_DIGEST_SIZE / 8];
u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx->state[2] = __cpu_to_be64(SHA512_H2);
@@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
+ sctx->count[1] = 0;
return 0;
}
@@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
- if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count[0] += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len, max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA512_BLOCK_SIZE - 1);
- /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- if (data_len != (to_process - buf_len)) {
- rc = -EINVAL;
- goto out;
- }
-
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count[0] += to_process;
+ if (sctx->count[0] < to_process)
+ sctx->count[1]++;
} while (leftover >= SHA512_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
- sctx->count[0] += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
- u64 count0;
unsigned long irq_flags;
+ u64 count0, count1;
int rc = 0;
int len;
@@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
-
+ * this is not an intermediate operation;
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- count0 = sctx->count[0] * 8;
+ count0 = sctx->count[0] + nbytes;
+ count1 = sctx->count[1];
- csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
+ (count0 >> 61);
- len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
- max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -257,18 +211,34 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);
+ put_unaligned(sctx->count[0], p.u64++);
+ put_unaligned(sctx->count[1], p.u64++);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));
+ sctx->count[0] = get_unaligned(p.u64++);
+ sctx->count[1] = get_unaligned(p.u64++);
return 0;
}
@@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
- .final = nx_sha512_final,
+ .finup = nx_sha512_finup,
.export = nx_sha512_export,
.import = nx_sha512_import,
+ .init_tfm = nx_crypto_ctx_sha512_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha512_state_be),
.statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha512_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
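The SHA-512 message length above is a 128-bit bit count kept as a byte count in count[1]:count[0], with overflow tracked by the "if (sctx->count[0] < to_process) sctx->count[1]++;" carry in the update loop. Converting bytes to bits is a 3-bit shift across the two 64-bit words; a standalone worked example (plain C, no kernel dependencies):

#include <stdint.h>
#include <stdio.h>

static void bytes_to_bits128(uint64_t count0, uint64_t count1,
			     uint64_t *lo, uint64_t *hi)
{
	*lo = count0 << 3;			/* low 64 bits of 8*count */
	*hi = (count1 << 3) | (count0 >> 61);	/* top 3 bits carry up */
}

int main(void)
{
	uint64_t lo, hi;

	bytes_to_bits128(1ULL << 62, 0, &lo, &hi);	/* 2^62 bytes = 2^65 bits */
	printf("%llx:%llx\n", (unsigned long long)hi,
	       (unsigned long long)lo);			/* prints 2:0 */
	return 0;
}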
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 010e87d9da36..78135fb13f5c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -7,11 +7,11 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
+#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
}
if ((sg - sg_head) == sgmax) {
- pr_err("nx: scatter/gather list overflow, pid: %d\n",
- current->pid);
sg++;
break;
}
@@ -153,40 +151,18 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = *src_len;
- char *dst;
+ unsigned int n, len = *src_len;
/* we need to fast forward through @start bytes first */
- for (;;) {
- scatterwalk_start(&walk, sg_src);
-
- if (start < offset + sg_src->length)
- break;
-
- offset += sg_src->length;
- sg_src = sg_next(sg_src);
- }
-
- /* start - offset is the number of bytes to advance in the scatterlist
- * element we're currently looking at */
- scatterwalk_advance(&walk, start - offset);
+ scatterwalk_start_at_pos(&walk, sg_src, start);
while (len && (nx_sg - nx_dst) < sglen) {
- n = scatterwalk_clamp(&walk, len);
- if (!n) {
- /* In cases where we have scatterlist chain sg_next
- * handles with it properly */
- scatterwalk_start(&walk, sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
- dst = scatterwalk_map(&walk);
+ n = scatterwalk_next(&walk, len);
- nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
- len -= n;
+ nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
- scatterwalk_unmap(dst);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+ scatterwalk_done_src(&walk, n);
+ len -= n;
}
/* update to_process */
*src_len -= len;
@@ -724,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
NX_MODE_AES_ECB);
}
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
NX_MODE_AES_XCBC_MAC);
}
@@ -766,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
kfree_sensitive(nx_ctx->kmem);
}
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
+{
+ nx_crypto_ctx_exit(crypto_shash_ctx(tfm));
+}
+
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
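The nx_walk_and_build() rewrite above leans on the consolidated scatterwalk helpers: scatterwalk_start_at_pos() replaces the manual fast-forward loop, scatterwalk_next() maps and clamps the next chunk in one call, and scatterwalk_done_src() unmaps and advances. A sketch of the idiom, assuming those helpers from <crypto/scatterwalk.h> and a caller-supplied consume() callback:

#include <crypto/scatterwalk.h>

static void demo_walk_src(struct scatterlist *sg, unsigned int start,
			  unsigned int len,
			  void (*consume)(const void *addr, unsigned int n))
{
	struct scatter_walk walk;
	unsigned int n;

	scatterwalk_start_at_pos(&walk, sg, start);
	while (len) {
		n = scatterwalk_next(&walk, len);   /* map + clamp chunk */
		consume(walk.addr, n);
		scatterwalk_done_src(&walk, n);     /* unmap + advance */
		len -= n;
	}
}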
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 2697baebb6a3..36974f08490a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -3,7 +3,11 @@
#ifndef __NX_H__
#define __NX_H__
+#include <asm/vio.h>
#include <crypto/ctr.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
@@ -139,19 +143,20 @@ struct nx_crypto_ctx {
} priv;
};
-struct crypto_aead;
+struct scatterlist;
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
@@ -189,7 +194,4 @@ extern struct shash_alg nx_shash_sha256_alg;
extern struct nx_crypto_driver nx_driver;
-#define SCATTERWALK_TO_SG 1
-#define SCATTERWALK_FROM_SG 0
-
#endif
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index c498950402e8..1f4586509ca4 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -38,7 +38,6 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
crypto_finalize_aead_request(dd->engine, req, ret);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index bad1adacbc84..3cc802622dd5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -33,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -222,7 +222,7 @@ static void omap_aes_dma_out_callback(void *data)
struct omap_aes_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -272,9 +272,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -401,7 +401,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
@@ -496,9 +495,9 @@ static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}
-static void omap_aes_done_task(unsigned long data)
+static void omap_aes_done_task(struct work_struct *t)
{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+ struct omap_aes_dev *dd = from_work(dd, t, done_task);
pr_debug("enter done_task\n");
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
@@ -932,7 +926,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
@@ -1092,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_aes_attr_group = {
- .attrs = omap_aes_attrs,
-};
+ATTRIBUTE_GROUPS(omap_aes);
static int omap_aes_probe(struct platform_device *pdev)
{
@@ -1150,7 +1141,7 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_aes_done_task);
err = omap_aes_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1221,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_aead_algs;
- }
-
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
@@ -1245,7 +1230,7 @@ err_engine:
omap_aes_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_pm_disable:
pm_runtime_disable(dev);
err_res:
@@ -1280,11 +1265,9 @@ static void omap_aes_remove(struct platform_device *pdev)
crypto_engine_exit(dd->engine);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
}
#ifdef CONFIG_PM_SLEEP
@@ -1305,11 +1288,12 @@ static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
- .remove_new = omap_aes_remove,
+ .remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
+ .dev_groups = omap_aes_groups,
},
};
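The tasklet-to-workqueue conversion above (repeated in omap-des and omap-sham below) swaps tasklet_init/tasklet_schedule/tasklet_kill for INIT_WORK, queue_work() on the BH workqueue, and cancel_work_sync(), recovering the containing device with from_work(). A minimal sketch, assuming a kernel that provides system_bh_wq and from_work(); demo_dev is illustrative:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct done_task;
};

static void demo_done_task(struct work_struct *t)
{
	struct demo_dev *dd = from_work(dd, t, done_task);

	/* runs in softirq context via system_bh_wq, like the old tasklet */
	(void)dd;
}

/*
 * setup:    INIT_WORK(&dd->done_task, demo_done_task);
 * schedule: queue_work(system_bh_wq, &dd->done_task);
 * teardown: cancel_work_sync(&dd->done_task);
 */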
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..99c36a777e97 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -161,7 +159,7 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct aead_queue aead_queue;
spinlock_t lock;
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 209d3dc03a9b..149ebd77710b 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -33,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
@@ -40,8 +40,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -133,7 +131,7 @@ struct omap_des_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct skcipher_request *req;
struct crypto_engine *engine;
@@ -152,8 +150,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -328,7 +326,7 @@ static void omap_des_dma_out_callback(void *data)
struct omap_des_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_des_dma_init(struct omap_des_dev *dd)
@@ -379,8 +377,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -492,7 +490,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
@@ -584,9 +581,9 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
omap_des_crypt_dma_start(dd);
}
-static void omap_des_done_task(unsigned long data)
+static void omap_des_done_task(struct work_struct *t)
{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ struct omap_des_dev *dd = from_work(dd, t, done_task);
int i;
pr_debug("enter done_task\n");
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
@@ -899,7 +891,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
@@ -995,7 +987,7 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_des_done_task);
err = omap_des_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1062,7 +1054,7 @@ err_engine:
omap_des_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1086,7 +1078,7 @@ static void omap_des_remove(struct platform_device *pdev)
crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
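Dropping scatter_walk from the PIO interrupt paths above leaves a plain (scatterlist, offset) cursor: bump the offset per 32-bit word and hop to sg_next() when the current entry is exhausted. A condensed sketch of the input side, with write_hw_word() as a hypothetical register write and sg_virt() assumed valid for every entry:

#include <linux/scatterlist.h>

static void demo_pio_in(struct scatterlist **sgp, unsigned int *offset,
			unsigned int words,
			void (*write_hw_word)(u32 v))
{
	u32 *src = sg_virt(*sgp) + *offset;

	while (words--) {
		write_hw_word(*src);
		*offset += 4;
		if (*offset == (*sgp)->length) {
			*sgp = sg_next(*sgp);
			*offset = 0;
			if (!*sgp)
				break;
			src = sg_virt(*sgp);
		} else {
			src++;
		}
	}
}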
@@ -1115,7 +1107,7 @@ static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
static struct platform_driver omap_des_driver = {
.probe = omap_des_probe,
- .remove_new = omap_des_remove,
+ .remove = omap_des_remove,
.driver = {
.name = "omap-des",
.pm = &omap_des_pm_ops,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 5bcd9ab0f72a..ff8aac02994a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@@ -217,7 +218,7 @@ struct omap_sham_dev {
int irq;
int err;
struct dma_chan *dma_lch;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
u8 polling_mode;
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
@@ -561,7 +562,7 @@ static void omap_sham_dma_callback(void *param)
struct omap_sham_dev *dd = param;
set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
@@ -1167,7 +1168,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
ctx->offset = 0;
@@ -1704,9 +1704,9 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
},
};
-static void omap_sham_done_task(unsigned long data)
+static void omap_sham_done_task(struct work_struct *t)
{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct omap_sham_dev *dd = from_work(dd, t, done_task);
int err = 0;
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
@@ -1740,7 +1740,7 @@ finish:
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
return IRQ_HANDLED;
}
@@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_sham_attr_group = {
- .attrs = omap_sham_attrs,
-};
+ATTRIBUTE_GROUPS(omap_sham);
static int omap_sham_probe(struct platform_device *pdev)
{
@@ -2063,7 +2060,7 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_sham_done_task);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
@@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_algs;
- }
-
return 0;
err_algs:
@@ -2204,22 +2195,21 @@ static void omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
}
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
- .remove_new = omap_sham_remove,
+ .remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
+ .dev_groups = omap_sham_groups,
},
};
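The sysfs conversion above (also applied to omap-aes) relies on ATTRIBUTE_GROUPS() expanding the attrs[] array into a groups[] array that the driver core installs and removes around probe/remove via .dev_groups, so the manual sysfs_create_group()/sysfs_remove_group() calls and their error paths disappear. A self-contained sketch with an illustrative read-only attribute:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t fallback_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(fallback);

static struct attribute *demo_attrs[] = {
	&dev_attr_fallback.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);		/* defines demo_groups[] */

static struct platform_driver demo_driver = {
	.driver = {
		.name       = "demo",
		.dev_groups = demo_groups,	/* core adds/removes the files */
	},
};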
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 6865c7f1fc1a..329f60ad422e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -7,59 +7,89 @@
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*/
+#include <asm/cpu_device_id.h>
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/cpufeature.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <linux/module.h>
-struct padlock_sha_desc {
- struct shash_desc fallback;
-};
+#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \
+ ~(CRYPTO_MINALIGN - 1)))
struct padlock_sha_ctx {
- struct crypto_shash *fallback;
+ struct crypto_ahash *fallback;
};
-static int padlock_sha_init(struct shash_desc *desc)
+static inline void *padlock_shash_desc_ctx(struct shash_desc *desc)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT);
+}
+
+static int padlock_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = padlock_shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+
+ return 0;
+}
+
+static int padlock_sha256_init(struct shash_desc *desc)
+{
+ struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_init(&dctx->fallback);
+ sha256_block_init(sctx);
+ return 0;
}
static int padlock_sha_update(struct shash_desc *desc,
const u8 *data, unsigned int length)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ u8 *state = padlock_shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ int err, remain;
+
+ remain = length - round_down(length, crypto_shash_blocksize(tfm));
+ {
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, NULL, length - remain);
+ err = crypto_ahash_import_core(req, state) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export_core(req, state);
+ HASH_REQUEST_ZERO(req);
+ }
- return crypto_shash_update(&dctx->fallback, data, length);
+ return err ?: remain;
}
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fallback, out);
+ memcpy(out, padlock_shash_desc_ctx(desc),
+ crypto_shash_coresize(desc->tfm));
+ return 0;
}
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ss = crypto_shash_coresize(desc->tfm);
+ u64 *state = padlock_shash_desc_ctx(desc);
+
+ memcpy(state, in, ss);
+
+ /* Stop evil imports from generating a fault. */
+ state[ss / 8 - 1] &= ~(bs - 1);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_import(&dctx->fallback, in);
+ return 0;
}
static inline void padlock_output_block(uint32_t *src,
@@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
+static int padlock_sha_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
+{
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, in, out, count);
+ return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?:
+ crypto_ahash_finup(req);
+}
+
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha1_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
- space = SHA1_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buffer + leftover, in, count);
- in = state.buffer;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA1_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
-
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
-out:
- return err;
-}
-
-static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
-{
- u8 buf[4];
-
- return padlock_sha1_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 5);
+ return 0;
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
@@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha256_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha256_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
- space = SHA256_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buf + leftover, in, count);
- in = state.buf;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA256_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
-
-out:
- return err;
-}
-
-static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
-{
- u8 buf[4];
-
- return padlock_sha256_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 8);
+ return 0;
}
static int padlock_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- struct crypto_shash *fallback_tfm;
+ struct crypto_ahash *fallback_tfm;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
return PTR_ERR(fallback_tfm);
}
+ if (crypto_shash_statesize(hash) !=
+ crypto_ahash_statesize(fallback_tfm)) {
+ crypto_free_ahash(fallback_tfm);
+ return -EINVAL;
+ }
+
ctx->fallback = fallback_tfm;
- hash->descsize += crypto_shash_descsize(fallback_tfm);
+
return 0;
}
@@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash)
{
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- crypto_free_shash(ctx->fallback);
+ crypto_free_ahash(ctx->fallback);
}
static struct shash_alg sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha1_init,
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
- .final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha1_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = {
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha256_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
- .final = padlock_sha256_final,
+ .init_tfm = padlock_init_tfm,
.export = padlock_sha_export,
.import = padlock_sha_import,
- .init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha256_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = {
/* Add two shash_alg instances for the hardware-implemented *
 * multiple-part hash supported by the VIA Nano processor. */
-static int padlock_sha1_init_nano(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
static int padlock_sha1_update_nano(struct shash_desc *desc,
- const u8 *data, unsigned int len)
+ const u8 *src, unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst) \
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from the input data */
- if (len - done >= SHA1_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha1_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
-
- return 0;
-}
-
-static int padlock_sha256_init_nano(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
- };
-
- return 0;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA1_BLOCK_SIZE;
+
+ len -= blocks * SHA1_BLOCK_SIZE;
+ state->count += blocks * SHA1_BLOCK_SIZE;
+
+ /* Process the remaining whole blocks of the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
-static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data,
- done + SHA256_BLOCK_SIZE);
- src = sctx->buf;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA256_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from input data*/
- if (len - done >= SHA256_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / 64)));
- done += ((len - done) - (len - done) % 64);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *state =
- (struct sha256_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha256_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
-
- return 0;
-}
-
-static int padlock_sha_export_nano(struct shash_desc *desc,
- void *out)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, statesize);
- return 0;
-}
-
-static int padlock_sha_import_nano(struct shash_desc *desc,
- const void *in)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, statesize);
- return 0;
+ struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA256_BLOCK_SIZE;
+
+ len -= blocks * SHA256_BLOCK_SIZE;
+ state->count += blocks * SHA256_BLOCK_SIZE;
+
+ /* Process the remaining whole blocks of the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
static struct shash_alg sha1_alg_nano = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha1_init_nano,
+ .init = padlock_sha1_init,
.update = padlock_sha1_update_nano,
- .final = padlock_sha1_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = padlock_sha1_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = {
static struct shash_alg sha256_alg_nano = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha256_init_nano,
+ .init = padlock_sha256_init,
.update = padlock_sha256_update_nano,
- .final = padlock_sha256_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .finup = padlock_sha256_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
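The padlock nano conversion above moves the driver onto the crypto core's block-only model: with CRYPTO_AHASH_ALG_BLOCK_ONLY set, the core buffers any partial block itself, so update() only ever consumes whole blocks and reports the leftover byte count as its return value (which is exactly what the new padlock_sha256_update_nano() does). A minimal sketch of that contract, with example_state, EXAMPLE_BLOCK_SIZE and compress_block() as illustrative stand-ins rather than padlock code:

static int example_block_only_update(struct shash_desc *desc,
				     const u8 *src, unsigned int len)
{
	struct example_state *state = shash_desc_ctx(desc);

	/* Consume whole blocks only; the core holds back partial data. */
	while (len >= EXAMPLE_BLOCK_SIZE) {
		compress_block(state, src);
		src += EXAMPLE_BLOCK_SIZE;
		len -= EXAMPLE_BLOCK_SIZE;
	}

	/* Leftover bytes are returned for the core to buffer. */
	return len;
}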
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 28b5fd823827..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ ops->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove_new = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
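The switch from spin_lock_irqsave() to scoped_guard(mutex, ...) leans on <linux/cleanup.h>: the guard holds the lock for the braced statement and drops it automatically on every exit path, which is why the early return in the queue-busy case above no longer needs an explicit unlock. An illustrative sketch with a hypothetical device structure:

static int example_enqueue(struct example_dev *dev,
			   struct crypto_async_request *req)
{
	scoped_guard(mutex, &dev->lock) {
		if (dev->busy)
			return -EBUSY;	/* mutex released automatically */
		crypto_enqueue_request(&dev->queue, req);
	}

	/* lock already dropped here */
	return 0;
}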
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
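devm_qce_dma_request() ties the DMA channels' lifetime to the device via devm_add_action_or_reset(): on success the release callback runs automatically at driver detach, and if registering the action fails it runs immediately and the error is propagated, which is what lets the manual unwind in remove() disappear. A generic sketch of the idiom, with illustrative resource names:

static void example_release(void *data)
{
	struct example_res *res = data;

	put_example_resource(res);	/* undo the acquire below */
}

static int devm_example_acquire(struct device *dev, struct example_res *res)
{
	int ret;

	ret = get_example_resource(res);
	if (ret)
		return ret;

	/* Runs example_release() at detach, or right now on failure. */
	return devm_add_action_or_reset(dev, example_release, res);
}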
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 09419e79e34c..0685ba122e8a 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -262,7 +262,7 @@ MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
static struct platform_driver qcom_rng_driver = {
.probe = qcom_rng_probe,
- .remove_new = qcom_rng_remove,
+ .remove = qcom_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(qcom_rng_of_match),
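The .remove_new to .remove renames throughout this series finish the platform-driver conversion: .remove_new was the transitional name for the void-returning remove callback, and once all drivers were converted, .remove itself regained the void signature, so the temporary alias can go. The resulting shape, sketched with hypothetical callbacks:

static void example_remove(struct platform_device *pdev)
{
	/* teardown; no error code to return anymore */
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,	/* formerly .remove_new */
	.driver = {
		.name = "example",
	},
};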
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index f74b3c81ba6d..b77bdce8e7fc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -433,7 +433,7 @@ static void rk_crypto_remove(struct platform_device *pdev)
static struct platform_driver crypto_driver = {
.probe = rk_crypto_probe,
- .remove_new = rk_crypto_remove,
+ .remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
.pm = &rk_crypto_pm_ops,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 69d6019d8abc..b9f5a8b42e66 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
algt->stat_fb++;
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -252,7 +254,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
- dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
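The rk3288 fallback paths above stop poking ahash_request fields by hand and instead use the ahash_request_set_callback()/ahash_request_set_crypt() helpers, which also forward the original completion callback and data instead of silently dropping them. Condensed into one illustrative fallback digest (the rctx/ctx layout is hypothetical):

static int example_fallback_digest(struct ahash_request *req)
{
	struct example_rctx *rctx = ahash_request_ctx(req);
	struct example_ctx *ctx =
		crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
				req->nbytes);

	return crypto_ahash_digest(&rctx->fallback_req);
}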
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8b6e3f5c94de..b829c84f60f2 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -9,11 +9,17 @@
//
// Hash part based on omap-sham.c driver.
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -22,17 +28,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-
-#include <crypto/ctr.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#define _SBF(s, v) ((v) << (s))
@@ -458,19 +456,6 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
*sg = NULL;
}
-static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
- unsigned int nbytes, int out)
-{
- struct scatter_walk walk;
-
- if (!nbytes)
- return;
-
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
-}
-
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
struct skcipher_request *req = dev->req;
@@ -480,8 +465,8 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
dev->req->cryptlen);
- s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->cryptlen, 1);
+ memcpy_to_sglist(dev->req->dst, 0, sg_virt(dev->sg_dst_cpy),
+ dev->req->cryptlen);
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
@@ -526,7 +511,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
return -ENOMEM;
}
- s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
+ memcpy_from_sglist(pages, src, 0, dev->req->cryptlen);
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
@@ -1035,8 +1020,7 @@ static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
if (ctx->bufcnt)
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
- scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
- new_len, 0);
+ memcpy_from_sglist(buf + ctx->bufcnt, sg, ctx->skip, new_len);
sg_init_table(ctx->sgl, 1);
sg_set_buf(ctx->sgl, buf, len);
ctx->sg = ctx->sgl;
@@ -1229,8 +1213,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
if (len > nbytes)
len = nbytes;
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, len, 0);
+ memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, 0, len);
ctx->bufcnt += len;
nbytes -= len;
ctx->skip = len;
@@ -1253,9 +1236,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
hash_later = ctx->total - xmit_len;
/* copy hash_later bytes from end of req->src */
/* previous bytes are in xmit_buf, so no overwrite */
- scatterwalk_map_and_copy(ctx->buffer, req->src,
- req->nbytes - hash_later,
- hash_later, 0);
+ memcpy_from_sglist(ctx->buffer, req->src,
+ req->nbytes - hash_later, hash_later);
}
if (xmit_len > BUFLEN) {
@@ -1267,8 +1249,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
/* have buffered data only */
if (unlikely(!ctx->bufcnt)) {
/* first update didn't fill up buffer */
- scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
- 0, xmit_len, 0);
+ memcpy_from_sglist(ctx->dd->xmit_buf, req->src,
+ 0, xmit_len);
}
sg_init_table(ctx->sgl, 1);
@@ -1506,8 +1488,8 @@ static int s5p_hash_update(struct ahash_request *req)
return 0;
if (ctx->bufcnt + req->nbytes <= BUFLEN) {
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, req->nbytes, 0);
+ memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes);
ctx->bufcnt += req->nbytes;
return 0;
}
@@ -2335,7 +2317,7 @@ static void s5p_aes_remove(struct platform_device *pdev)
static struct platform_driver s5p_aes_crypto = {
.probe = s5p_aes_probe,
- .remove_new = s5p_aes_remove,
+ .remove = s5p_aes_remove,
.driver = {
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
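The s5p-sss changes drop the open-coded scatter_walk loop in favour of the memcpy_from_sglist()/memcpy_to_sglist() helpers, which take an explicit start offset and have direction-specific names instead of the old numeric "out" flag. A minimal sketch of the pair:

static void example_sg_roundtrip(struct scatterlist *sg, void *buf,
				 unsigned int offset, unsigned int nbytes)
{
	/* Gather nbytes starting at offset from the sglist... */
	memcpy_from_sglist(buf, sg, offset, nbytes);

	/* ...and scatter them back to the same position. */
	memcpy_to_sglist(sg, offset, buf, nbytes);
}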
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 461eca40e878..fdc0b2486069 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -574,7 +574,7 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
/* Clear the command label */
memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
- /* Iniialize the command update structure */
+ /* Initialize the command update structure */
memzero_explicit(upd_info, sizeof(*upd_info));
if (cfg->enc_eng_id && cfg->auth_eng_id) {
@@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req)
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
- int ret = 0;
+ int ret;
ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- crypto_ahash_init(subreq);
-
- subreq->nbytes = auth_len;
- subreq->src = req->src;
- subreq->result = req->result;
-
- ret |= crypto_ahash_update(subreq);
-
- subreq->nbytes = 0;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
- ret |= crypto_ahash_final(subreq);
+ ret = crypto_ahash_digest(subreq);
return ret;
}
@@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return ret;
if (alg_base) {
- ctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->shash = crypto_alloc_shash(alg_base, 0, 0);
if (IS_ERR(ctx->shash)) {
dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
alg_base);
@@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
/* for fallback */
ctx->fallback.ahash =
- crypto_alloc_ahash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->fallback.ahash)) {
dev_err(ctx->dev_data->dev,
"Could not load fallback driver\n");
@@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0);
return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
static int sa_sha_export(struct ahash_request *req, void *out)
{
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = &rctx->fallback_req;
- ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_export(subreq, out);
}
@@ -2489,7 +2458,7 @@ static void sa_ul_remove(struct platform_device *pdev)
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
- .remove_new = sa_ul_remove,
+ .remove = sa_ul_remove,
.driver = {
.name = "saul-crypto",
.of_match_table = of_match,
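In sa_sha_run() the fallback previously chained crypto_ahash_init(), an update over auth_len bytes and a zero-length final, OR-ing the return codes together; since the request is freshly configured, the one-shot crypto_ahash_digest() is equivalent and propagates the first error cleanly. Sketched with illustrative request setup:

static int example_one_shot(struct ahash_request *subreq,
			    struct scatterlist *src, u8 *result,
			    unsigned int len, u32 flags)
{
	ahash_request_set_callback(subreq,
				   flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   NULL, NULL);
	ahash_request_set_crypt(subreq, src, result, len);

	/* digest == init + update + final in a single call */
	return crypto_ahash_digest(subreq);
}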
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 96d4af5d48a6..533080b0cddc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1421,7 +1421,7 @@ static void sahara_remove(struct platform_device *pdev)
static struct platform_driver sahara_driver = {
.probe = sahara_probe,
- .remove_new = sahara_remove,
+ .remove = sahara_remove,
.driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 86a1a1fa9f8f..426b24889af8 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -511,8 +511,7 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
dst_nents = src_nents;
len = min(sg_dma_len(stsg), remain);
@@ -528,13 +527,11 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
for (stsg = src, dtsg = dst;;) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg src error\n");
+ return -ENOMEM;
dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
if (dst_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg dst error\n");
+ return -ENOMEM;
len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
len = min(len, remain);
@@ -669,8 +666,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (cryp->assoclen) {
rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+ return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
rctx->adata, cryp->assoclen) != cryp->assoclen)
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index e4dfed7ee0b0..42114e9364f0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -151,7 +151,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
ret = starfive_aes_register_algs();
if (ret)
- goto err_algs_aes;
+ goto err_engine_start;
ret = starfive_hash_register_algs();
if (ret)
@@ -167,8 +167,6 @@ err_algs_rsa:
starfive_hash_unregister_algs();
err_algs_hash:
starfive_aes_unregister_algs();
-err_algs_aes:
- crypto_engine_stop(cryp->engine);
err_engine_start:
crypto_engine_exit(cryp->engine);
err_engine:
@@ -193,7 +191,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
starfive_dma_cleanup(cryp);
@@ -215,7 +212,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
static struct platform_driver starfive_cryp_driver = {
.probe = starfive_cryp_probe,
- .remove_new = starfive_cryp_remove,
+ .remove = starfive_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = starfive_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 2c60a1047bc3..e6839c7bfb73 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -229,8 +229,7 @@ static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
ret = starfive_hash_dma_xfer(cryp, tsg);
dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
@@ -493,25 +492,25 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha224-generic",
+ return starfive_hash_init_tfm(hash, "sha224-lib",
STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha256-generic",
+ return starfive_hash_init_tfm(hash, "sha256-lib",
STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha384-generic",
+ return starfive_hash_init_tfm(hash, "sha384-lib",
STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha512-generic",
+ return starfive_hash_init_tfm(hash, "sha512-lib",
STARFIVE_HASH_SHA512, 0);
}
@@ -523,25 +522,25 @@ static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha224-lib",
STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha256-lib",
STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha384-lib",
STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha512-lib",
STARFIVE_HASH_SHA512, 1);
}
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index a778c4846025..d109c743f076 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -565,8 +565,6 @@ static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg starfive_rsa = {
.encrypt = starfive_rsa_enc,
.decrypt = starfive_rsa_dec,
- .sign = starfive_rsa_dec,
- .verify = starfive_rsa_enc,
.set_pub_key = starfive_rsa_set_pub_key,
.set_priv_key = starfive_rsa_set_priv_key,
.max_size = starfive_rsa_max_size,
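Dropping .sign/.verify from starfive_rsa appears to track the split of signature operations out of akcipher into the separate crypto sig API, leaving struct akcipher_alg with only the encryption entry points. The reduced shape, with hypothetical callbacks:

static struct akcipher_alg example_rsa = {
	.encrypt      = example_rsa_enc,
	.decrypt      = example_rsa_dec,
	.set_pub_key  = example_rsa_set_pub_key,
	.set_priv_key = example_rsa_set_priv_key,
	.max_size     = example_rsa_max_size,
	/* no .sign/.verify: signatures go through the sig API */
};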
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 49dfd161e9b9..d6dc848c82ee 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_STM32_CRC
- tristate "Support for STM32 crc accelerators"
- depends on ARCH_STM32
- select CRYPTO_HASH
- select CRC32
- help
- This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronics STM32 SOC.
-
config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32 || ARCH_U8500
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 518e0e0b11a9..c63004026afb 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
deleted file mode 100644
index e0faddbf8990..000000000000
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Fabien Dessenne <fabien.dessenne@st.com>
- */
-
-#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <crypto/internal/hash.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "stm32-crc32"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-/* Registers */
-#define CRC_DR 0x00000000
-#define CRC_CR 0x00000008
-#define CRC_INIT 0x00000010
-#define CRC_POL 0x00000014
-
-/* Registers values */
-#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
-#define CRC_CR_REV_IN_BYTE BIT(5)
-#define CRC_CR_REV_OUT BIT(7)
-#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
-
-#define CRC_AUTOSUSPEND_DELAY 50
-
-static unsigned int burst_size;
-module_param(burst_size, uint, 0644);
-MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
-
-struct stm32_crc {
- struct list_head list;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- spinlock_t lock;
-};
-
-struct stm32_crc_list {
- struct list_head dev_list;
- spinlock_t lock; /* protect dev_list */
-};
-
-static struct stm32_crc_list crc_list = {
- .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
-};
-
-struct stm32_crc_ctx {
- u32 key;
- u32 poly;
-};
-
-struct stm32_crc_desc_ctx {
- u32 partial; /* crc32c: partial in first 4 bytes of that struct */
-};
-
-static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- mctx->poly = CRC32_POLY_LE;
- return 0;
-}
-
-static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = CRC32C_INIT_DEFAULT;
- mctx->poly = CRC32C_POLY_LE;
- return 0;
-}
-
-static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
- return 0;
-}
-
-static struct stm32_crc *stm32_crc_get_next_crc(void)
-{
- struct stm32_crc *crc;
-
- spin_lock_bh(&crc_list.lock);
- crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
- if (crc)
- list_move_tail(&crc->list, &crc_list.dev_list);
- spin_unlock_bh(&crc_list.lock);
-
- return crc;
-}
-
-static int stm32_crc_init(struct shash_desc *desc)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
- unsigned long flags;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- spin_lock_irqsave(&crc->lock, flags);
-
- /* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock_irqrestore(&crc->lock, flags);
-
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int burst_update(struct shash_desc *desc, const u8 *d8,
- size_t length)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- if (!spin_trylock(&crc->lock)) {
- /* Hardware is busy, calculate crc32 by software */
- if (mctx->poly == CRC32_POLY_LE)
- ctx->partial = crc32_le(ctx->partial, d8, length);
- else
- ctx->partial = __crc32c_le(ctx->partial, d8, length);
-
- goto pm_out;
- }
-
- /*
- * Restore previously calculated CRC for this context as init value
- * Restore polynomial configuration
- * Configure in register for word input data,
- * Configure out register in reversed bit mode data.
- */
- writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- length--;
- }
- /* Configure for word data */
- writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- }
-
- for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
- writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
-
- if (length) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (length--)
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- }
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock(&crc->lock);
-
-pm_out:
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
-{
- const unsigned int burst_sz = burst_size;
- unsigned int rem_sz;
- const u8 *cur;
- size_t size;
- int ret;
-
- if (!burst_sz)
- return burst_update(desc, d8, length);
-
- /* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min_t(size_t, length, burst_sz + (size_t)d8 -
- ALIGN_DOWN((size_t)d8, sizeof(u32)));
- for (rem_sz = length, cur = d8; rem_sz;
- rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
- ret = burst_update(desc, cur, size);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int stm32_crc_final(struct shash_desc *desc, u8 *out)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- /* Send computed CRC */
- put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
- ~ctx->partial : ctx->partial, out);
-
- return 0;
-}
-
-static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_update(desc, data, length) ?:
- stm32_crc_final(desc, out);
-}
-
-static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
-}
-
-static unsigned int refcnt;
-static DEFINE_MUTEX(refcnt_lock);
-static struct shash_alg algs[] = {
- /* CRC-32 */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "stm32-crc32-crc32",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32_cra_init,
- }
- },
- /* CRC-32Castagnoli */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "stm32-crc32-crc32c",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32c_cra_init,
- }
- }
-};
-
-static int stm32_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct stm32_crc *crc;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc)
- return -ENOMEM;
-
- crc->dev = dev;
-
- crc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(crc->regs)) {
- dev_err(dev, "Cannot map CRC IO\n");
- return PTR_ERR(crc->regs);
- }
-
- crc->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(crc->clk)) {
- dev_err(dev, "Could not get clock\n");
- return PTR_ERR(crc->clk);
- }
-
- ret = clk_prepare_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_irq_safe(dev);
- pm_runtime_enable(dev);
-
- spin_lock_init(&crc->lock);
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!refcnt) {
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- mutex_unlock(&refcnt_lock);
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
- }
- }
- refcnt++;
- mutex_unlock(&refcnt_lock);
-
- dev_info(dev, "Initialized\n");
-
- pm_runtime_put_sync(dev);
-
- return 0;
-}
-
-static void stm32_crc_remove(struct platform_device *pdev)
-{
- struct stm32_crc *crc = platform_get_drvdata(pdev);
- int ret = pm_runtime_get_sync(crc->dev);
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!--refcnt)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- mutex_unlock(&refcnt_lock);
-
- pm_runtime_disable(crc->dev);
- pm_runtime_put_noidle(crc->dev);
-
- if (ret >= 0)
- clk_disable(crc->clk);
- clk_unprepare(crc->clk);
-}
-
-static int __maybe_unused stm32_crc_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
-
- clk_unprepare(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to prepare clock\n");
- return ret;
- }
-
- return pm_runtime_force_resume(dev);
-}
-
-static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
-
- clk_disable(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
- stm32_crc_resume)
- SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
- stm32_crc_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f7-crc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_dt_ids);
-
-static struct platform_driver stm32_crc_driver = {
- .probe = stm32_crc_probe,
- .remove_new = stm32_crc_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &stm32_crc_pm_ops,
- .of_match_table = stm32_dt_ids,
- },
-};
-
-module_platform_driver(stm32_crc_driver);
-
-MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
-MODULE_LICENSE("GPL");
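With the stm32-crc32 shash driver deleted (along with its Kconfig and Makefile entries above), in-kernel CRC users are expected to call the CRC library directly rather than go through the crypto API; a Castagnoli helper, crc32c(), exists alongside it. A minimal sketch of the library interface the driver is retired in favour of:

#include <linux/crc32.h>

static u32 example_crc32(const u8 *buf, size_t len)
{
	/* bit-reversed (IEEE) CRC32 with seed 0, as the "crc32" shash used */
	return crc32_le(0, buf, len);
}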
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 937f6dab8955..5e82e8a1f71a 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -666,7 +666,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
- scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
+ memcpy_from_scatterwalk((char *)block + len, &cryp->in_walk, written);
writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
@@ -851,7 +851,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
if (is_gcm(cryp) || is_ccm(cryp))
@@ -993,7 +992,7 @@ static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp)
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize);
- scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
+ scatterwalk_skip(&cryp->in_walk, align_size);
cryp->header_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
@@ -1056,7 +1055,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp)
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
- scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
+ scatterwalk_skip(&cryp->in_walk, align_size);
cryp->payload_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
@@ -1067,7 +1066,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp)
dma_async_issue_pending(cryp->dma_lch_in);
/* Advance scatterwalk to not DMA'ed data */
- scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2);
+ scatterwalk_skip(&cryp->out_walk, align_size);
cryp->payload_out -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_out));
if (ret < 0) {
@@ -1737,9 +1736,9 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
out_sg = areq->dst;
scatterwalk_start(&cryp->in_walk, in_sg);
- scatterwalk_start(&cryp->out_walk, out_sg);
/* In output, jump after assoc data */
- scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
+ scatterwalk_start_at_pos(&cryp->out_walk, out_sg,
+ areq->assoclen);
ret = stm32_cryp_hw_init(cryp);
if (ret)
@@ -1873,12 +1872,12 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* Get and write tag */
readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
- scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, out_tag, cryp->authsize);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
- scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
+ memcpy_from_scatterwalk(in_tag, &cryp->in_walk, cryp->authsize);
readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
if (crypto_memneq(in_tag, out_tag, cryp->authsize))
@@ -1923,8 +1922,8 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32];
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
}
@@ -1933,8 +1932,8 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
u32 block[AES_BLOCK_32] = {0};
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_in), 0);
+ memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_in));
writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
@@ -1981,8 +1980,8 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
@@ -2079,8 +2078,8 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
@@ -2161,7 +2160,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
- scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
+ memcpy_from_scatterwalk(block, &cryp->in_walk, written);
writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
@@ -2771,7 +2770,7 @@ static const struct dev_pm_ops stm32_cryp_pm_ops = {
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
- .remove_new = stm32_cryp_remove,
+ .remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_cryp_pm_ops,
@@ -2782,5 +2781,5 @@ static struct platform_driver stm32_cryp_driver = {
module_platform_driver(stm32_cryp_driver);
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
MODULE_LICENSE("GPL");
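The stm32-cryp conversion replaces scatterwalk_copychunks(), whose last argument multiplexed copy-in (0), copy-out (1) and skip (2), with direction-specific helpers. The mapping, sketched over a hypothetical walk (assuming n <= sizeof(block)):

static void example_walk(struct scatterlist *sg, unsigned int pos,
			 unsigned int n)
{
	struct scatter_walk walk;
	u8 block[AES_BLOCK_SIZE];

	/* was scatterwalk_start() + scatterwalk_copychunks(NULL, .., pos, 2) */
	scatterwalk_start_at_pos(&walk, sg, pos);

	/* was scatterwalk_copychunks(block, &walk, n, 0) */
	memcpy_from_scatterwalk(block, &walk, n);

	/* was scatterwalk_copychunks(block, &walk, n, 1) */
	memcpy_to_scatterwalk(&walk, block, n);

	/* was scatterwalk_copychunks(NULL, &walk, n, 2) */
	scatterwalk_skip(&walk, n);
}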
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 351827372ea6..a4436728b0db 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1373,7 +1373,6 @@ static void stm32_hash_unprepare_request(struct ahash_request *req)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
pm_runtime:
- pm_runtime_mark_last_busy(hdev->dev);
pm_runtime_put_autosuspend(hdev->dev);
}
@@ -2532,7 +2531,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
- .remove_new = stm32_hash_remove,
+ .remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
.pm = &stm32_hash_pm_ops,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 511ddcb0efd4..e8c0db687c57 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3560,7 +3560,7 @@ static struct platform_driver talitos_driver = {
.of_match_table = talitos_match,
},
.probe = talitos_probe,
- .remove_new = talitos_remove,
+ .remove = talitos_remove,
};
module_platform_driver(talitos_driver);
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ae7a0f8435fc..0e07d0523291 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -28,6 +28,9 @@ struct tegra_aes_ctx {
u32 ivsize;
u32 key1_id;
u32 key2_id;
+ u32 keylen;
+ u8 key1[AES_MAX_KEY_SIZE];
+ u8 key2[AES_MAX_KEY_SIZE];
};
struct tegra_aes_reqctx {
@@ -43,8 +46,9 @@ struct tegra_aead_ctx {
struct tegra_se *se;
unsigned int authsize;
u32 alg;
- u32 keylen;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
};
struct tegra_aead_reqctx {
@@ -56,8 +60,8 @@ struct tegra_aead_reqctx {
unsigned int cryptlen;
unsigned int authsize;
bool encrypt;
- u32 config;
u32 crypto_config;
+ u32 config;
u32 key_id;
u32 iv[4];
u8 authdata[16];
@@ -67,6 +71,8 @@ struct tegra_cmac_ctx {
struct tegra_se *se;
unsigned int alg;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
struct crypto_shash *fallback_tfm;
};
@@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int cmdlen;
+ unsigned int cmdlen, key1_id, key2_id;
int ret;
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- return -ENOMEM;
-
- rctx->datbuf.size = SE_AES_BUFLEN;
- rctx->iv = (u32 *)req->iv;
+ rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
+ key1_id = ctx->key1_id;
+ key2_id = ctx->key2_id;
/* Pad input to AES Block size */
if (ctx->alg != SE_ALG_XTS) {
@@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
}
+ rctx->datbuf.size = rctx->len;
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+ rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+ if (!key1_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+ ctx->keylen, ctx->alg, &key1_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+ if (ctx->alg == SE_ALG_XTS) {
+ if (!key2_id) {
+ ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+ ctx->keylen, ctx->alg, &key2_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+ }
+
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+out:
/* Free the buffer */
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
+ if (tegra_key_is_reserved(key1_id))
+ tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+ if (tegra_key_is_reserved(key2_id))
+ tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
+out_finalize:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
@@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
ctx->se = se_alg->se_dev;
ctx->key1_id = 0;
ctx->key2_id = 0;
+ ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base);
ret = se_algname_to_algid(algname);
@@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key1, key, keylen);
+ }
+
+ return 0;
}
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
ret = tegra_key_submit(ctx->se, key, len,
ctx->alg, &ctx->key1_id);
- if (ret)
- return ret;
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key1, key, len);
+ }
- return tegra_key_submit(ctx->se, key + len, len,
+ ret = tegra_key_submit(ctx->se, key + len, len,
ctx->alg, &ctx->key2_id);
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key2, key + len, len);
+ }
return 0;
}
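
Both setkey paths above deliberately return 0 even when tegra_key_submit() fails: once every hardware keyslot is occupied, the key is cached in the transform context, programmed into a reserved slot from the request path, and wiped again when the request completes. A self-contained userspace sketch of that lifecycle; every name below is hypothetical, and the stub pretends all dedicated slots are taken:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define RESERVED_SLOT 14	/* mirrors TEGRA_AES_RESERVED_KSLT */

	struct example_ctx {
		uint32_t key_id;	/* 0: no dedicated hardware slot */
		uint32_t keylen;
		uint8_t key[32];
	};

	/* Stub: pretend every dedicated keyslot is already in use. */
	static int hw_slot_submit(const uint8_t *key, uint32_t len, uint32_t *id)
	{
		(void)key; (void)len; (void)id;
		return -1;
	}

	static int example_setkey(struct example_ctx *ctx, const uint8_t *key,
				  uint32_t keylen)
	{
		if (!hw_slot_submit(key, keylen, &ctx->key_id))
			return 0;	/* got a dedicated slot */

		/* Slots full: cache the key and still report success; the
		 * request path programs it into the reserved slot on demand.
		 */
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
		return 0;
	}

	static void example_request(struct example_ctx *ctx)
	{
		uint32_t key_id = ctx->key_id ? ctx->key_id : RESERVED_SLOT;

		/* ... program key_id, run the operation ... */
		if (key_id == RESERVED_SLOT)
			printf("wiping reserved slot %u after use\n", key_id);
	}

	int main(void)
	{
		struct example_ctx ctx = { 0 };
		uint8_t key[16] = { 0 };

		example_setkey(&ctx, key, sizeof(key));
		example_request(&ctx);
		return 0;
	}
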
@@ -444,12 +499,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
return 0;
rctx->encrypt = encrypt;
- rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
- rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
- rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
-
- if (ctx->key2_id)
- rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
@@ -715,11 +764,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -732,11 +781,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -755,11 +804,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -886,12 +935,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1073,7 +1122,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Copy authdata in the top of buffer for encryption/decryption */
if (rctx->encrypt)
@@ -1098,7 +1147,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1117,6 +1166,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
+ if (rctx->encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - rctx->authsize;
+
memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv);
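
The cryptlen split added above follows the AEAD convention that a decrypt request's cryptlen covers ciphertext plus the authentication tag. A trivial standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int req_cryptlen = 48, authsize = 16;

		/* encrypt: all input is payload; decrypt: strip the tag */
		printf("encrypt payload: %u bytes\n", req_cryptlen);
		printf("decrypt payload: %u bytes\n", req_cryptlen - authsize);
		return 0;
	}
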
@@ -1145,30 +1199,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret;
+ ret = tegra_ccm_crypt_init(req, se, rctx);
+ if (ret)
+ goto out_finalize;
+
+ rctx->key_id = ctx->key_id;
+
/* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
-	if (!rctx->inbuf.buf)
-		return -ENOMEM;
-
-	rctx->inbuf.size = SE_AES_BUFLEN;
+	if (!rctx->inbuf.buf) {
+		ret = -ENOMEM;
+		goto out_finalize;
+	}
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
- goto outbuf_err;
+ goto out_free_inbuf;
}
- rctx->outbuf.size = SE_AES_BUFLEN;
-
- ret = tegra_ccm_crypt_init(req, se, rctx);
- if (ret)
- goto out;
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->encrypt) {
- rctx->cryptlen = req->cryptlen;
-
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(ctx, rctx);
if (ret)
@@ -1179,10 +1238,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
if (ret)
goto out;
} else {
- rctx->cryptlen = req->cryptlen - ctx->authsize;
- if (ret)
- goto out;
-
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
if (ret)
@@ -1195,13 +1250,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1215,23 +1274,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
int ret;
- /* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
-
- rctx->inbuf.size = SE_AES_BUFLEN;
-
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->outbuf.addr, GFP_KERNEL);
- if (!rctx->outbuf.buf) {
- ret = -ENOMEM;
- goto outbuf_err;
- }
-
- rctx->outbuf.size = SE_AES_BUFLEN;
-
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
@@ -1245,6 +1287,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
rctx->iv[3] = (1 << 24);
+ rctx->key_id = ctx->key_id;
+
+ /* Allocate buffers required */
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free_inbuf;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
+
/* If there is associated data perform GMAC operation */
if (rctx->assoclen) {
ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1268,14 +1336,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
ret = tegra_gcm_do_verify(ctx->se, rctx);
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+ dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
- /* Finalize the request if there are no errors */
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1297,6 +1368,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1378,13 +1450,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
}
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
@@ -1458,6 +1537,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct
se->base + se->hw->regs->result + (i * 4));
}
+static int tegra_cmac_do_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ return -ENOMEM;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1485,7 +1593,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
- rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+ rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
/*
* Keep one block and residue bytes in residue and
@@ -1499,6 +1607,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1513,23 +1626,19 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->residue.size = nresidue;
/*
- * If this is not the first 'update' call, paste the previous copied
+	 * If this is not the first task, paste the previously copied
* intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
- ret = tegra_se_host1x_submit(se, cmdlen);
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_cmac_copy_result(ctx->se, rctx);
+ tegra_cmac_copy_result(ctx->se, rctx);
+
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -1545,17 +1654,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
return crypto_shash_tfm_digest(ctx->fallback_tfm,
- rctx->datbuf.buf, 0, req->result);
+ NULL, 0, req->result);
+ }
+
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ /*
+	 * If this is not the first task, paste the previously copied
+	 * intermediate results into the registers so that they get picked up.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_cmac_paste_result(ctx->se, rctx);
+
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
@@ -1567,8 +1693,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4));
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr);
return ret;
@@ -1581,17 +1709,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
- int ret;
+ int ret = 0;
+
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_cmac_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
@@ -1633,6 +1785,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1657,51 +1810,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_cmac_init(struct ahash_request *req)
-{
- struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
- int i;
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->task = SHA_FIRST;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->residue.size = 0;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- rctx->datbuf.size = 0;
-
- /* Clear any previous result */
- for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
- writel(0, se->base + se->hw->regs->result + (i * 4));
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1711,7 +1824,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ctx->fallback_tfm)
crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
+}
+
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
@@ -1753,8 +1883,7 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..4a298ace6e9f 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -34,6 +34,7 @@ struct tegra_sha_reqctx {
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
+ struct tegra_se_datbuf intr_res;
unsigned int alg;
unsigned int config;
unsigned int total_len;
@@ -116,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -129,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -144,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -158,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -175,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -192,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -205,15 +207,69 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
-static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ __be32 *res_be = (__be32 *)rctx->intr_res.buf;
+ u32 *res = (u32 *)rctx->intr_res.buf;
+ int i = 0, j;
+
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT);
+
+ for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
+ int idx = j;
+
+ /*
+ * The initial, intermediate and final hash value of SHA-384, SHA-512
+ * in SHA_HASH_RESULT registers follow the below layout of bytes.
+ *
+ * +---------------+------------+
+ * | HASH_RESULT_0 | B4...B7 |
+ * +---------------+------------+
+ * | HASH_RESULT_1 | B0...B3 |
+ * +---------------+------------+
+ * | HASH_RESULT_2 | B12...B15 |
+ * +---------------+------------+
+ * | HASH_RESULT_3 | B8...B11 |
+ * +---------------+------------+
+ * | ...... |
+ * +---------------+------------+
+ * | HASH_RESULT_14| B60...B63 |
+ * +---------------+------------+
+ * | HASH_RESULT_15| B56...B59 |
+ * +---------------+------------+
+ *
+ */
+ if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
+ idx = (j % 2) ? j - 1 : j + 1;
+
+		/*
+		 * For SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512, the
+		 * initial, intermediate and final hash values stored in the
+		 * SHA_HASH_RESULT registers are NOT in little-endian byte
+		 * order.
+		 */
+ if (ctx->alg <= SE_ALG_SHA512)
+ cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
+ else
+ cpuvaddr[i++] = res[idx];
+ }
+
+ return i;
+}
+
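
A standalone demonstration of the index swizzle used above may help: for SHA-384/SHA-512 the engine keeps each 64-bit result word with its 32-bit halves exchanged, so the command stream emits the words pairwise swapped (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		/* Same expression as in tegra_se_insert_hash_result():
		 * j: 0 1 2 3 4 5 ... maps to idx: 1 0 3 2 5 4 ...
		 */
		for (int j = 0; j < 8; j++)
			printf("HASH_RESULT_%d takes res[%d]\n",
			       j, (j % 2) ? j - 1 : j + 1);
		return 0;
	}
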
+static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
+ struct tegra_se *se = ctx->se;
u64 msg_len, msg_left;
int i = 0;
@@ -241,7 +297,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
- cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = host1x_opcode_setpayload(2);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
@@ -249,15 +305,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
rctx->task &= ~SHA_FIRST;
} else {
- cpuvaddr[i++] = 0;
+		/*
+		 * If it isn't the first task, program the HASH_RESULT
+		 * registers with the intermediate result from the
+		 * previous task.
+		 */
+ i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx);
}
+ cpuvaddr[i++] = host1x_opcode_setpayload(4);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR);
cpuvaddr[i++] = rctx->datbuf.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size));
- cpuvaddr[i++] = rctx->digest.addr;
- cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
- SE_ADDR_HI_SZ(rctx->digest.size));
+
+ if (rctx->task & SHA_UPDATE) {
+ cpuvaddr[i++] = rctx->intr_res.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) |
+ SE_ADDR_HI_SZ(rctx->intr_res.size));
+ } else {
+ cpuvaddr[i++] = rctx->digest.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
+ }
+
if (rctx->key_id) {
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
@@ -266,42 +336,73 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
- cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
- SE_SHA_OP_START |
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
- dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
- msg_len, msg_left, rctx->config);
+ dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x",
+ msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i;
}
-static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+static int tegra_sha_do_init(struct ahash_request *req)
{
- int i;
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
-}
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
-static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
-{
- int i;
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4;
+ rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size,
+ &rctx->intr_res.addr, GFP_KERNEL);
+ if (!rctx->intr_res.buf)
+ goto intr_res_fail;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- writel(rctx->result[i],
- se->base + se->hw->regs->result + (i * 4));
+ return 0;
+
+intr_res_fail:
+	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned int nblks, nresidue, size, ret;
- u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+ struct tegra_se *se = ctx->se;
+ unsigned int nblks, nresidue, size;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ int ret;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -317,7 +418,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
- rctx->total_len += rctx->datbuf.size;
/*
* If nbytes are less than a block size, copy it residue and
@@ -326,11 +426,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
-
rctx->residue.size += req->nbytes;
+
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -343,29 +448,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
+ rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
- SE_SHA_DST_HASH_REG;
+ SE_SHA_DST_MEMORY;
- /*
- * If this is not the first 'update' call, paste the previous copied
- * intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FIRST))
- tegra_sha_paste_hash_result(ctx->se, rctx);
-
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
-
- ret = tegra_se_host1x_submit(ctx->se, size);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_sha_copy_hash_result(ctx->se, rctx);
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
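
The update path above only ever submits whole blocks to the engine and carries the tail over as residue. A standalone sketch of the bookkeeping, assuming (as the CMAC update path above does) that a block-aligned total keeps one block back for the final operation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int blk_size = 64;	/* e.g. a SHA block */
		unsigned int residue = 24;	/* tail from the last update */
		unsigned int nbytes = 200;	/* new data in this call */

		unsigned int nresidue = (nbytes + residue) % blk_size;
		unsigned int nblks = (nbytes + residue) / blk_size;

		/* On a block-aligned total, hold one block back so the
		 * final operation still has data to close the hash with.
		 */
		if (!nresidue) {
			nresidue = blk_size;
			nblks--;
		}

		printf("submit %u blocks, carry %u bytes\n", nblks, nresidue);
		return 0;
	}
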
@@ -379,16 +471,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0;
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ }
+
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
-
- ret = tegra_se_host1x_submit(se, size);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
@@ -396,12 +497,18 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
+
+ dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf,
+ rctx->intr_res.addr);
+
return ret;
}
@@ -414,16 +521,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_sha_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -497,52 +619,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_sha_init(struct ahash_request *req)
-{
- struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
-
- if (ctx->fallback)
- return tegra_sha_fallback_init(req);
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->key_id = ctx->key_id;
- rctx->task = SHA_FIRST;
- rctx->alg = ctx->alg;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
- rctx->digest.size = crypto_ahash_digestsize(tfm);
-
- rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
- &rctx->digest.addr, GFP_KERNEL);
- if (!rctx->digest.buf)
- goto digbuf_fail;
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
- rctx->datbuf.addr);
-digbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -559,13 +635,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret)
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
ctx->fallback = false;
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ return 0;
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
@@ -619,8 +711,7 @@ static int tegra_sha_digest(struct ahash_request *req)
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
index ac14678dbd30..956fa9b4e9b1 100644
--- a/drivers/crypto/tegra/tegra-se-key.c
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
- u32 *addr = se->cmdbuf->addr, size;
+ u32 *addr = se->keybuf->addr, size;
+ int ret;
+
+ mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+ ret = tegra_se_host1x_submit(se, se->keybuf, size);
+
+ mutex_unlock(&kslt_lock);
- return tegra_se_host1x_submit(se, size);
+ return ret;
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
tegra_keyslot_free(keyid);
}
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+ u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+ if (!keyid)
+ return;
+
+ /* Overwrite the key with 0s */
+ tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
+
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
int ret;
@@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
if (!tegra_key_in_kslt(*keyid)) {
*keyid = tegra_keyslot_alloc();
if (!(*keyid)) {
- dev_err(se->dev, "failed to allocate key slot\n");
+ dev_dbg(se->dev, "failed to allocate key slot\n");
return -ENOMEM;
}
}
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index f94c0331b148..7237f14eaf5a 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
- se->cmdbuf->words = size;
+ cmdbuf->words = size;
- host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+ host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
+ se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->keybuf) {
+ ret = -ENOMEM;
+ goto cmdbuf_put;
+ }
+
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
- goto cmdbuf_put;
+ goto keybuf_put;
}
return 0;
+keybuf_put:
+ tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
@@ -302,7 +310,7 @@ static int tegra_se_probe(struct platform_device *pdev)
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine)
- return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+ return -ENOMEM;
ret = crypto_engine_start(se->engine);
if (ret) {
@@ -312,7 +320,6 @@ static int tegra_se_probe(struct platform_device *pdev)
ret = tegra_se_host1x_register(se);
if (ret) {
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to init host1x params\n");
}
@@ -324,7 +331,6 @@ static void tegra_se_remove(struct platform_device *pdev)
{
struct tegra_se *se = platform_get_drvdata(pdev);
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
host1x_client_unregister(&se->client);
}
@@ -387,7 +393,7 @@ static struct platform_driver tegra_se_driver = {
.of_match_table = tegra_se_of_match,
},
.probe = tegra_se_probe,
- .remove_new = tegra_se_remove,
+ .remove = tegra_se_remove,
};
static int tegra_se_host1x_probe(struct host1x_device *dev)
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
index b9dd7ceb8783..b6cac9384f66 100644
--- a/drivers/crypto/tegra/tegra-se.h
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -24,6 +24,7 @@
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
+#define SE_SHA_IN_ADDR 0x400c
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
@@ -340,12 +341,14 @@
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
-#define SE_AES_BUFLEN 0x8000
-#define SE_SHA_BUFLEN 0x2000
+
+#define TEGRA_AES_RESERVED_KSLT 14
+#define TEGRA_XTS_RESERVED_KSLT 15
#define SHA_FIRST BIT(0)
-#define SHA_UPDATE BIT(1)
-#define SHA_FINAL BIT(2)
+#define SHA_INIT BIT(1)
+#define SHA_UPDATE BIT(2)
+#define SHA_FINAL BIT(3)
/* Security Engine operation modes */
enum se_aes_alg {
@@ -420,6 +423,7 @@ struct tegra_se {
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
+ struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se);
void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
+
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid);
+
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_AES_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_XTS_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+ return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+ (keyid == TEGRA_XTS_RESERVED_KSLT));
+}
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
new file mode 100644
index 000000000000..d4f91c1e0cb5
--- /dev/null
+++ b/drivers/crypto/ti/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TI_DTHEV2
+ tristate "Support for TI DTHE V2 cryptography engine"
+ depends on ARCH_K3 || COMPILE_TEST
+ select CRYPTO_ENGINE
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ help
+	  This enables support for the TI DTHE V2 hardware cryptography
+	  engine found on TI K3 SoCs. Selecting this enables use
+ of hardware offloading for cryptographic algorithms on
+ these devices, providing enhanced resistance against side-channel
+ attacks.
diff --git a/drivers/crypto/ti/Makefile b/drivers/crypto/ti/Makefile
new file mode 100644
index 000000000000..b883078f203d
--- /dev/null
+++ b/drivers/crypto/ti/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_TI_DTHEV2) += dthev2.o
+dthev2-objs := dthev2-common.o dthev2-aes.o
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
new file mode 100644
index 000000000000..3547c41fa4ed
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+/* Registers */
+
+// AES Engine
+#define DTHE_P_AES_BASE 0x7000
+#define DTHE_P_AES_KEY1_0 0x0038
+#define DTHE_P_AES_KEY1_1 0x003C
+#define DTHE_P_AES_KEY1_2 0x0030
+#define DTHE_P_AES_KEY1_3 0x0034
+#define DTHE_P_AES_KEY1_4 0x0028
+#define DTHE_P_AES_KEY1_5 0x002C
+#define DTHE_P_AES_KEY1_6 0x0020
+#define DTHE_P_AES_KEY1_7 0x0024
+#define DTHE_P_AES_IV_IN_0 0x0040
+#define DTHE_P_AES_IV_IN_1 0x0044
+#define DTHE_P_AES_IV_IN_2 0x0048
+#define DTHE_P_AES_IV_IN_3 0x004C
+#define DTHE_P_AES_CTRL 0x0050
+#define DTHE_P_AES_C_LENGTH_0 0x0054
+#define DTHE_P_AES_C_LENGTH_1 0x0058
+#define DTHE_P_AES_AUTH_LENGTH 0x005C
+#define DTHE_P_AES_DATA_IN_OUT 0x0060
+
+#define DTHE_P_AES_SYSCONFIG 0x0084
+#define DTHE_P_AES_IRQSTATUS 0x008C
+#define DTHE_P_AES_IRQENABLE 0x0090
+
+/* Register write values and macros */
+
+enum aes_ctrl_mode_masks {
+ AES_CTRL_ECB_MASK = 0x00,
+ AES_CTRL_CBC_MASK = BIT(5),
+};
+
+#define DTHE_AES_CTRL_MODE_CLEAR_MASK (~GENMASK(28, 5))
+
+#define DTHE_AES_CTRL_DIR_ENC BIT(2)
+
+#define DTHE_AES_CTRL_KEYSIZE_16B BIT(3)
+#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
+#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+
+#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
+
+#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
+#define DTHE_AES_CTRL_INPUT_READY BIT_MASK(1)
+#define DTHE_AES_CTRL_SAVED_CTX_READY BIT_MASK(30)
+#define DTHE_AES_CTRL_CTX_READY BIT_MASK(31)
+
+#define DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN GENMASK(6, 5)
+#define DTHE_AES_IRQENABLE_EN_ALL GENMASK(3, 0)
+
+/* Misc */
+#define AES_IV_SIZE AES_BLOCK_SIZE
+#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
+#define AES_IV_WORDS AES_BLOCK_WORDS
+
+static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ return 0;
+}
+
+static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
+static int dthe_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_ECB;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_CBC;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
+ struct dthe_aes_req_ctx *rctx,
+ u32 *iv_in)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 ctrl_val = 0;
+
+ writel_relaxed(ctx->key[0], aes_base_reg + DTHE_P_AES_KEY1_0);
+ writel_relaxed(ctx->key[1], aes_base_reg + DTHE_P_AES_KEY1_1);
+ writel_relaxed(ctx->key[2], aes_base_reg + DTHE_P_AES_KEY1_2);
+ writel_relaxed(ctx->key[3], aes_base_reg + DTHE_P_AES_KEY1_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[4], aes_base_reg + DTHE_P_AES_KEY1_4);
+ writel_relaxed(ctx->key[5], aes_base_reg + DTHE_P_AES_KEY1_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[6], aes_base_reg + DTHE_P_AES_KEY1_6);
+ writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
+ }
+
+ if (rctx->enc)
+ ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
+
+ if (ctx->keylen == AES_KEYSIZE_128)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_16B;
+ else if (ctx->keylen == AES_KEYSIZE_192)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_24B;
+ else
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_32B;
+
+ // Write AES mode
+ ctrl_val &= DTHE_AES_CTRL_MODE_CLEAR_MASK;
+ switch (ctx->aes_mode) {
+ case DTHE_AES_ECB:
+ ctrl_val |= AES_CTRL_ECB_MASK;
+ break;
+ case DTHE_AES_CBC:
+ ctrl_val |= AES_CTRL_CBC_MASK;
+ break;
+ }
+
+ if (iv_in) {
+ ctrl_val |= DTHE_AES_CTRL_SAVE_CTX_SET;
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ writel_relaxed(iv_in[i],
+ aes_base_reg + DTHE_P_AES_IV_IN_0 + (DTHE_REG_SIZE * i));
+ }
+
+ writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
+}
+
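
As a concrete reading of the control word composed above: an AES-256-CBC encryption with context save resolves to the bits below. This is only the macro arithmetic spelled out, not additional driver code:

	/* DIR_ENC (bit 2) | KEYSIZE_32B (bits 3,4) | CBC (bit 5) | SAVE_CTX (bit 29) */
	u32 ctrl_val = DTHE_AES_CTRL_DIR_ENC | DTHE_AES_CTRL_KEYSIZE_32B |
		       AES_CTRL_CBC_MASK | DTHE_AES_CTRL_SAVE_CTX_SET;
	/* == BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(29) == 0x2000003c */
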
+static void dthe_aes_dma_in_callback(void *data)
+{
+ struct skcipher_request *req = (struct skcipher_request *)data;
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aes_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ unsigned int len = req->cryptlen;
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ int src_nents = sg_nents_for_len(src, len);
+ int dst_nents;
+
+ int src_mapped_nents;
+ int dst_mapped_nents;
+
+ bool diff_dst;
+ enum dma_data_direction src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out;
+
+ int ret;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
+ if (src == dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ ret = -EINVAL;
+ goto aes_err;
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ } else {
+ dst_nents = sg_nents_for_len(dst, len);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ ret = -EINVAL;
+ goto aes_err;
+ }
+ }
+
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst, dst_mapped_nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src, src_mapped_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_in->callback = dthe_aes_dma_in_callback;
+ desc_in->callback_param = req;
+
+ init_completion(&rctx->aes_compl);
+
+ if (ctx->aes_mode == DTHE_AES_ECB)
+ dthe_aes_set_ctrl_key(ctx, rctx, NULL);
+ else
+ dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
+
+ writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+
+ dmaengine_submit(desc_in);
+ dmaengine_submit(desc_out);
+
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+	// Use a timeout so the request still gets finalized if the DMA callback never fires
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
+		// Read back and discard one block from the data register to clear leftover output
+		for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+			readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + (DTHE_REG_SIZE * i));
+ } else {
+ ret = 0;
+ }
+
+ // For modes other than ECB, read IV_OUT
+ if (ctx->aes_mode != DTHE_AES_ECB) {
+ u32 *iv_out = (u32 *)req->iv;
+
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ iv_out[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_IV_IN_0 +
+ (DTHE_REG_SIZE * i));
+ }
+
+aes_prep_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ if (dst_dir != DMA_BIDIRECTIONAL)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+
+aes_err:
+ local_bh_disable();
+ crypto_finalize_skcipher_request(dev_data->engine, req, ret);
+ local_bh_enable();
+ return ret;
+}
+
+static int dthe_aes_crypt(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+
+ /*
+	 * Reject lengths that are not a multiple of AES_BLOCK_SIZE;
+	 * zero-length requests complete successfully with no work done.
+ */
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ engine = dev_data->engine;
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
+}
+
+static int dthe_aes_encrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aes_crypt(req);
+}
+
+static int dthe_aes_decrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aes_crypt(req);
+}
+
+static struct skcipher_engine_alg cipher_algs[] = {
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_ecb_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* ECB AES */
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_cbc_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ } /* CBC AES */
+};
+
+int dthe_register_aes_algs(void)
+{
+ return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
+
+void dthe_unregister_aes_algs(void)
+{
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
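
Once registered, these transforms are reached through the generic kernel crypto API; nothing below is part of the patch. A minimal in-kernel caller sketch, requesting "cbc(aes)" generically (which the allocator may resolve to cbc-aes-dthev2 at priority 299):

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* buf must be DMA-able (kmalloc'd, not on the stack) and len a
	 * multiple of AES_BLOCK_SIZE, as dthe_aes_crypt() enforces.
	 */
	static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
					const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (ret)
			goto free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		/* Wait synchronously for the async engine to finish. */
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	free_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}
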
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
new file mode 100644
index 000000000000..c39d37933b9e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#define DRIVER_NAME "dthev2"
+
+static struct dthe_list dthe_dev_list = {
+ .dev_list = LIST_HEAD_INIT(dthe_dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dthe_dev_list.lock),
+};
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
+{
+ struct dthe_data *dev_data;
+
+ if (ctx->dev_data)
+ return ctx->dev_data;
+
+ spin_lock_bh(&dthe_dev_list.lock);
+	dev_data = list_first_entry_or_null(&dthe_dev_list.dev_list,
+					    struct dthe_data, list);
+ if (dev_data)
+ list_move_tail(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock_bh(&dthe_dev_list.lock);
+
+ return dev_data;
+}
+
+static int dthe_dma_init(struct dthe_data *dev_data)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dev_data->dma_aes_rx = NULL;
+ dev_data->dma_aes_tx = NULL;
+ dev_data->dma_sha_tx = NULL;
+
+ dev_data->dma_aes_rx = dma_request_chan(dev_data->dev, "rx");
+ if (IS_ERR(dev_data->dma_aes_rx)) {
+ return dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_rx),
+ "Unable to request rx DMA channel\n");
+ }
+
+ dev_data->dma_aes_tx = dma_request_chan(dev_data->dev, "tx1");
+ if (IS_ERR(dev_data->dma_aes_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_tx),
+ "Unable to request tx1 DMA channel\n");
+ goto err_dma_aes_tx;
+ }
+
+ dev_data->dma_sha_tx = dma_request_chan(dev_data->dev, "tx2");
+ if (IS_ERR(dev_data->dma_sha_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_sha_tx),
+ "Unable to request tx2 DMA channel\n");
+ goto err_dma_sha_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_rx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure IN dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_tx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure OUT dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ return 0;
+
+err_dma_config:
+ dma_release_channel(dev_data->dma_sha_tx);
+err_dma_sha_tx:
+ dma_release_channel(dev_data->dma_aes_tx);
+err_dma_aes_tx:
+ dma_release_channel(dev_data->dma_aes_rx);
+
+ return ret;
+}
+
+static int dthe_register_algs(void)
+{
+ return dthe_register_aes_algs();
+}
+
+static void dthe_unregister_algs(void)
+{
+ dthe_unregister_aes_algs();
+}
+
+static int dthe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dthe_data *dev_data;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->dev = dev;
+ dev_data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->regs))
+ return PTR_ERR(dev_data->regs);
+
+ platform_set_drvdata(pdev, dev_data);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_add(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ ret = dthe_dma_init(dev_data);
+ if (ret)
+ goto probe_dma_err;
+
+ dev_data->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dev_data->engine) {
+ ret = -ENOMEM;
+ goto probe_engine_err;
+ }
+
+ ret = crypto_engine_start(dev_data->engine);
+ if (ret) {
+ dev_err(dev, "Failed to start crypto engine\n");
+ goto probe_engine_start_err;
+ }
+
+ ret = dthe_register_algs();
+ if (ret) {
+ dev_err(dev, "Failed to register algs\n");
+ goto probe_engine_start_err;
+ }
+
+ return 0;
+
+probe_engine_start_err:
+ crypto_engine_exit(dev_data->engine);
+probe_engine_err:
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+probe_dma_err:
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ return ret;
+}
+
+static void dthe_remove(struct platform_device *pdev)
+{
+ struct dthe_data *dev_data = platform_get_drvdata(pdev);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ dthe_unregister_algs();
+
+ crypto_engine_exit(dev_data->engine);
+
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+}
+
+static const struct of_device_id dthe_of_match[] = {
+ { .compatible = "ti,am62l-dthev2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dthe_of_match);
+
+static struct platform_driver dthe_driver = {
+ .probe = dthe_probe,
+ .remove = dthe_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dthe_of_match,
+ },
+};
+
+module_platform_driver(dthe_driver);
+
+MODULE_AUTHOR("T Pratham <t-pratham@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments DTHE V2 driver");
+MODULE_LICENSE("GPL");
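The probe path above allocates and starts a crypto_engine; the AES file funnels requests into it with crypto_transfer_skcipher_request_to_engine(). The other half of that contract is the .op.do_one_request hook (dthe_aes_run, defined elsewhere in this patch). A hedged skeleton of the hook's obligations, with the hardware programming elided:

#include <crypto/engine.h>
#include <crypto/skcipher.h>

static int demo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err;

	/* Program keys/IV, kick off DMA, wait for completion (elided). */
	err = 0;

	/*
	 * Hand the result back to the engine; only then does the engine
	 * dequeue and dispatch the next queued request.
	 */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}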
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
new file mode 100644
index 000000000000..68c94acda8aa
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#ifndef __TI_DTHEV2_H__
+#define __TI_DTHEV2_H__
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#define DTHE_REG_SIZE 4
+#define DTHE_DMA_TIMEOUT_MS 2000
+
+enum dthe_aes_mode {
+ DTHE_AES_ECB = 0,
+ DTHE_AES_CBC,
+};
+
+/* Driver specific struct definitions */
+
+/**
+ * struct dthe_data - DTHE_V2 driver instance data
+ * @dev: Device pointer
+ * @regs: Base address of the register space
+ * @list: list node for dev
+ * @engine: Crypto engine instance
+ * @dma_aes_rx: AES Rx DMA Channel
+ * @dma_aes_tx: AES Tx DMA Channel
+ * @dma_sha_tx: SHA Tx DMA Channel
+ */
+struct dthe_data {
+ struct device *dev;
+ void __iomem *regs;
+ struct list_head list;
+ struct crypto_engine *engine;
+
+ struct dma_chan *dma_aes_rx;
+ struct dma_chan *dma_aes_tx;
+
+ struct dma_chan *dma_sha_tx;
+};
+
+/**
+ * struct dthe_list - device data list head
+ * @dev_list: linked list head
+ * @lock: Spinlock protecting accesses to the list
+ */
+struct dthe_list {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+/**
+ * struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
+ * @dev_data: Device data struct pointer
+ * @keylen: AES key length
+ * @key: AES key
+ * @aes_mode: AES mode
+ */
+struct dthe_tfm_ctx {
+ struct dthe_data *dev_data;
+ unsigned int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ enum dthe_aes_mode aes_mode;
+};
+
+/**
+ * struct dthe_aes_req_ctx - AES engine req ctx struct
+ * @enc: flag indicating encryption or decryption operation
+ * @aes_compl: Completion used to manually complete the request if the DMA callback fails
+ */
+struct dthe_aes_req_ctx {
+ int enc;
+ struct completion aes_compl;
+};
+
+/* Struct definitions end */
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+
+int dthe_register_aes_algs(void);
+void dthe_unregister_aes_algs(void);
+
+#endif
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index cb92b7fa99c6..2e44915c9f23 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -21,12 +21,11 @@
#include "virtio_crypto_common.h"
struct virtio_crypto_rsa_ctx {
- MPI n;
+ unsigned int key_size;
};
struct virtio_crypto_akcipher_ctx {
struct virtio_crypto *vcrypto;
- struct crypto_akcipher *tfm;
bool session_valid;
__u64 session_id;
union {
@@ -36,8 +35,6 @@ struct virtio_crypto_akcipher_ctx {
struct virtio_crypto_akcipher_request {
struct virtio_crypto_request base;
- struct virtio_crypto_akcipher_ctx *akcipher_ctx;
- struct akcipher_request *akcipher_req;
void *src_buf;
void *dst_buf;
uint32_t opcode;
@@ -69,7 +66,9 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
{
struct virtio_crypto_akcipher_request *vc_akcipher_req =
container_of(vc_req, struct virtio_crypto_akcipher_request, base);
- struct akcipher_request *akcipher_req;
+ struct akcipher_request *akcipher_req =
+ container_of((void *)vc_akcipher_req, struct akcipher_request,
+ __ctx);
int error;
switch (vc_req->status) {
@@ -83,23 +82,15 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
-
- case VIRTIO_CRYPTO_KEY_REJECTED:
- error = -EKEYREJECTED;
- break;
-
default:
error = -EIO;
break;
}
- akcipher_req = vc_akcipher_req->akcipher_req;
- if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
- /* actuall length maybe less than dst buffer */
- akcipher_req->dst_len = len - sizeof(vc_req->status);
- sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
- vc_akcipher_req->dst_buf, akcipher_req->dst_len);
- }
+ /* actual length may be less than dst buffer */
+ akcipher_req->dst_len = len - sizeof(vc_req->status);
+ sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+ vc_akcipher_req->dst_buf, akcipher_req->dst_len);
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
@@ -220,7 +211,8 @@ out:
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, struct data_queue *data_vq)
{
- struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
@@ -230,36 +222,27 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret;
- bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
- unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;
/* src data */
- src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+ src_buf = kcalloc_node(req->src_len, 1, GFP_KERNEL, node);
if (!src_buf)
return -ENOMEM;
- if (verify) {
- /* for verify operation, both src and dst data work as OUT direction */
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
- } else {
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
+ sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, req->src_len);
+ sg_init_one(&srcdata_sg, src_buf, req->src_len);
+ sgs[num_out++] = &srcdata_sg;
- /* dst data */
- dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
- if (!dst_buf)
- goto free_src;
+ /* dst data */
+ dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+ if (!dst_buf)
+ goto free_src;
- sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
- sgs[num_out + num_in++] = &dstdata_sg;
- }
+ sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+ sgs[num_out + num_in++] = &dstdata_sg;
vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;
@@ -289,7 +272,8 @@ static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
- struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct data_queue *data_vq = vc_req->dataq;
struct virtio_crypto_op_header *header;
@@ -335,8 +319,6 @@ static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
- vc_akcipher_req->akcipher_ctx = ctx;
- vc_akcipher_req->akcipher_req = req;
vc_akcipher_req->opcode = opcode;
return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
@@ -352,16 +334,6 @@ static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
-static int virtio_crypto_rsa_sign(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
-}
-
-static int virtio_crypto_rsa_verify(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
-}
-
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
@@ -378,10 +350,7 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
int node = virtio_crypto_get_current_node();
uint32_t keytype;
int ret;
-
- /* mpi_free will test n, just free it. */
- mpi_free(rsa_ctx->n);
- rsa_ctx->n = NULL;
+ MPI n;
if (private) {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
@@ -394,10 +363,13 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
if (ret)
return ret;
- rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
- if (!rsa_ctx->n)
+ n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+ if (!n)
return -ENOMEM;
+ rsa_ctx->key_size = mpi_get_size(n);
+ mpi_free(n);
+
if (!ctx->vcrypto) {
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
VIRTIO_CRYPTO_AKCIPHER_RSA);
@@ -468,15 +440,11 @@ static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
- return mpi_get_size(rsa_ctx->n);
+ return rsa_ctx->key_size;
}
static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
- struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
-
- ctx->tfm = tfm;
-
akcipher_set_reqsize(tfm,
sizeof(struct virtio_crypto_akcipher_request));
@@ -486,12 +454,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
virtio_crypto_alg_akcipher_close_session(ctx);
virtcrypto_dev_put(ctx->vcrypto);
- mpi_free(rsa_ctx->n);
- rsa_ctx->n = NULL;
}
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
@@ -524,16 +489,19 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
- .sign = virtio_crypto_rsa_sign,
- .verify = virtio_crypto_rsa_verify,
+ /*
+ * Must specify an arbitrary hash algorithm upon
+ * set_{pub,priv}_key (even though it's not used
+ * by encrypt/decrypt) because qemu checks for it.
+ */
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.base = {
- .cra_name = "pkcs1pad(rsa,sha1)",
- .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+ .cra_name = "pkcs1pad(rsa)",
+ .cra_driver_name = "virtio-pkcs1-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
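The dropped akcipher_req back-pointer is recoverable from the driver context itself: akcipher_request_ctx() hands out req->__ctx, the area reserved by akcipher_set_reqsize() in the init path above, so container_of() inverts the mapping. A minimal sketch of that relationship (helper name illustrative):

#include <crypto/akcipher.h>

static struct akcipher_request *demo_req_from_ctx(void *req_ctx)
{
	/* req_ctx == akcipher_request_ctx(req) == req->__ctx */
	return container_of(req_ctx, struct akcipher_request, __ctx);
}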
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 7059bbe5a2eb..19c934af3df6 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -113,8 +113,6 @@ struct virtio_crypto_request {
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_devmgr_get_first(void);
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index d0278eb568b9..3d241446099c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
@@ -480,10 +480,8 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
for (i = 0; i < vcrypto->max_data_queues; i++) {
vq = vcrypto->data_vq[i].vq;
- while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
- kfree(vc_req->req_data);
- kfree(vc_req->sgs);
- }
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtcrypto_clear_request(vc_req);
cond_resched();
}
}
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index 70e778aac0f2..06c74fa132cd 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -82,42 +82,6 @@ void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
}
/*
- * virtcrypto_devmgr_get_first()
- *
- * Function returns the first virtio crypto device from the acceleration
- * framework.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: pointer to vcrypto_dev or NULL if not found.
- */
-struct virtio_crypto *virtcrypto_devmgr_get_first(void)
-{
- struct virtio_crypto *dev = NULL;
-
- mutex_lock(&table_lock);
- if (!list_empty(&virtio_crypto_table))
- dev = list_first_entry(&virtio_crypto_table,
- struct virtio_crypto,
- list);
- mutex_unlock(&table_lock);
- return dev;
-}
-
-/*
- * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
- * @vcrypto_dev: Pointer to virtio crypto device.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
-{
- return atomic_read(&vcrypto_dev->ref_count) != 0;
-}
-
-/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
@@ -256,7 +220,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
* @vcrypto: Pointer to virtio crypto device.
*
* Function notifies all the registered services that the virtio crypto device
- * is ready to be used.
+ * shall no longer be used.
* To be used by virtio crypto device specific drivers.
*
* Return: void
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index 23c41d87d835..1b3fb21a2a7d 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -17,7 +17,6 @@
struct virtio_crypto_skcipher_ctx {
struct virtio_crypto *vcrypto;
- struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
@@ -28,8 +27,6 @@ struct virtio_crypto_sym_request {
/* Cipher or aead */
uint32_t type;
- struct virtio_crypto_skcipher_ctx *skcipher_ctx;
- struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
@@ -57,7 +54,9 @@ static void virtio_crypto_dataq_sym_callback
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
- struct skcipher_request *ablk_req;
+ struct skcipher_request *ablk_req =
+ container_of((void *)vc_sym_req, struct skcipher_request,
+ __ctx);
int error;
/* Finish the encrypt or decrypt process */
@@ -77,7 +76,6 @@ static void virtio_crypto_dataq_sym_callback
error = -EIO;
break;
}
- ablk_req = vc_sym_req->skcipher_req;
virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
@@ -325,7 +323,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct data_queue *data_vq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
@@ -481,8 +479,6 @@ static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->skcipher_ctx = ctx;
- vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
@@ -506,8 +502,6 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->skcipher_ctx = ctx;
- vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
@@ -515,10 +509,7 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
- ctx->tfm = tfm;
return 0;
}
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 730feff5b5f2..9b51636ef75e 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_XILINX_TRNG) += xilinx-trng.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
new file mode 100644
index 000000000000..4e4700d68127
--- /dev/null
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal True Random Number Generator driver
+ * Copyright (c) 2024 - 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/aes.h>
+
+/* TRNG Registers Offsets */
+#define TRNG_STATUS_OFFSET 0x4U
+#define TRNG_CTRL_OFFSET 0x8U
+#define TRNG_EXT_SEED_OFFSET 0x40U
+#define TRNG_PER_STRNG_OFFSET 0x80U
+#define TRNG_CORE_OUTPUT_OFFSET 0xC0U
+#define TRNG_RESET_OFFSET 0xD0U
+#define TRNG_OSC_EN_OFFSET 0xD4U
+
+/* Mask values */
+#define TRNG_RESET_VAL_MASK BIT(0)
+#define TRNG_OSC_EN_VAL_MASK BIT(0)
+#define TRNG_CTRL_PRNGSRST_MASK BIT(0)
+#define TRNG_CTRL_EUMODE_MASK BIT(8)
+#define TRNG_CTRL_TRSSEN_MASK BIT(2)
+#define TRNG_CTRL_PRNGSTART_MASK BIT(5)
+#define TRNG_CTRL_PRNGXS_MASK BIT(3)
+#define TRNG_CTRL_PRNGMODE_MASK BIT(7)
+#define TRNG_STATUS_DONE_MASK BIT(0)
+#define TRNG_STATUS_QCNT_MASK GENMASK(11, 9)
+#define TRNG_STATUS_QCNT_16_BYTES 0x800
+
+/* Sizes in bytes */
+#define TRNG_SEED_LEN_BYTES 48U
+#define TRNG_ENTROPY_SEED_LEN_BYTES 64U
+#define TRNG_SEC_STRENGTH_SHIFT 5U
+#define TRNG_SEC_STRENGTH_BYTES BIT(TRNG_SEC_STRENGTH_SHIFT)
+#define TRNG_BYTES_PER_REG 4U
+#define TRNG_RESET_DELAY 10
+#define TRNG_NUM_INIT_REGS 12U
+#define TRNG_READ_4_WORD 4
+#define TRNG_DATA_READ_DELAY 8000
+
+struct xilinx_rng {
+ void __iomem *rng_base;
+ struct device *dev;
+ struct mutex lock; /* Protect access to TRNG device */
+ struct hwrng trng;
+};
+
+struct xilinx_rng_ctx {
+ struct xilinx_rng *rng;
+};
+
+static struct xilinx_rng *xilinx_rng_dev;
+
+static void xtrng_readwrite32(void __iomem *addr, u32 mask, u8 value)
+{
+ u32 val;
+
+ val = ioread32(addr);
+ val = (val & (~mask)) | (mask & value);
+ iowrite32(val, addr);
+}
+
+static void xtrng_trng_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, TRNG_RESET_VAL_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, 0);
+}
+
+static void xtrng_hold_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ iowrite32(TRNG_RESET_VAL_MASK, addr + TRNG_RESET_OFFSET);
+ udelay(TRNG_RESET_DELAY);
+}
+
+static void xtrng_softreset(struct xilinx_rng *rng)
+{
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK, 0);
+}
+
+/* Return no. of bytes read */
+static size_t xtrng_readblock32(void __iomem *rng_base, __be32 *buf, int blocks32, bool wait)
+{
+ int read = 0, ret;
+ int timeout = 1;
+ int i, idx;
+ u32 val;
+
+ if (wait)
+ timeout = TRNG_DATA_READ_DELAY;
+
+ for (i = 0; i < (blocks32 * 2); i++) {
+ /* The TRNG core generates data 16 bytes at a time; read twice to complete a 32-byte read */
+ ret = readl_poll_timeout(rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_QCNT_MASK) ==
+ TRNG_STATUS_QCNT_16_BYTES, !!wait, timeout);
+ if (ret)
+ break;
+
+ for (idx = 0; idx < TRNG_READ_4_WORD; idx++) {
+ *(buf + read) = cpu_to_be32(ioread32(rng_base + TRNG_CORE_OUTPUT_OFFSET));
+ read += 1;
+ }
+ }
+ return read * 4;
+}
+
+static int xtrng_collect_random_data(struct xilinx_rng *rng, u8 *rand_gen_buf,
+ int no_of_random_bytes, bool wait)
+{
+ u8 randbuf[TRNG_SEC_STRENGTH_BYTES];
+ int byteleft, blocks, count = 0;
+ int ret;
+
+ byteleft = no_of_random_bytes & (TRNG_SEC_STRENGTH_BYTES - 1);
+ blocks = no_of_random_bytes >> TRNG_SEC_STRENGTH_SHIFT;
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+ if (blocks) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)rand_gen_buf, blocks, wait);
+ if (!ret)
+ return 0;
+ count += ret;
+ }
+
+ if (byteleft) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)randbuf, 1, wait);
+ if (!ret)
+ return count;
+ memcpy(rand_gen_buf + (blocks * TRNG_SEC_STRENGTH_BYTES), randbuf, byteleft);
+ count += byteleft;
+ }
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return count;
+}
+
+static void xtrng_write_multiple_registers(void __iomem *base_addr, u32 *values, size_t n)
+{
+ void __iomem *reg_addr;
+ size_t i;
+
+ /* Write the seed value into the EXTERNAL_SEED registers in big-endian format */
+ for (i = 0; i < n; i++) {
+ reg_addr = (base_addr + ((n - 1 - i) * TRNG_BYTES_PER_REG));
+ iowrite32((u32 __force)(cpu_to_be32(values[i])), reg_addr);
+ }
+}
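A small worked example of the reversed write order, under the assumption that the hardware expects the seed as one contiguous big-endian byte image starting at the base register:

/*
 * For n = 2 and values = { 0x00112233, 0x44556677 }:
 *   i = 0 -> offset (2-1-0)*4 = 4, bytes 00 11 22 33
 *   i = 1 -> offset (2-1-1)*4 = 0, bytes 44 55 66 77
 * so the image at the base register reads 44 55 66 77 00 11 22 33:
 * the last word lands lowest, each word stored MSB-first.
 */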
+
+static void xtrng_enable_entropy(struct xilinx_rng *rng)
+{
+ iowrite32(TRNG_OSC_EN_VAL_MASK, rng->rng_base + TRNG_OSC_EN_OFFSET);
+ xtrng_softreset(rng);
+ iowrite32(TRNG_CTRL_EUMODE_MASK | TRNG_CTRL_TRSSEN_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+}
+
+static int xtrng_reseed_internal(struct xilinx_rng *rng)
+{
+ u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+ u32 val;
+ int ret;
+
+ memset(entropy, 0, sizeof(entropy));
+ xtrng_enable_entropy(rng);
+
+ /* Collect random data to use as entropy (input to the derivation function) */
+ ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
+ if (ret != TRNG_SEED_LEN_BYTES)
+ return -EINVAL;
+
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
+ (u32 *)entropy, TRNG_NUM_INIT_REGS);
+ /* select reseed operation */
+ iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+
+ /* Start the reseed operation with the above configuration and wait for the
+ * STATUS.DONE bit to be set. Monitor the STATUS.CERTF bit; if set, the
+ * SP800-90B entropy health test has failed.
+ */
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+
+ ret = readl_poll_timeout(rng->rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_DONE_MASK) == TRNG_STATUS_DONE_MASK,
+ 1U, 15000U);
+ if (ret)
+ return ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return 0;
+}
+
+static int xtrng_random_bytes_generate(struct xilinx_rng *rng, u8 *rand_buf_ptr,
+ u32 rand_buf_size, int wait)
+{
+ int nbytes;
+ int ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK);
+ nbytes = xtrng_collect_random_data(rng, rand_buf_ptr, rand_buf_size, wait);
+
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(rng->dev, "Re-seed fail\n");
+ return ret;
+ }
+
+ return nbytes;
+}
+
+static int xtrng_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 slen,
+ u8 *dst, u32 dlen)
+{
+ struct xilinx_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ int ret;
+
+ mutex_lock(&ctx->rng->lock);
+ ret = xtrng_random_bytes_generate(ctx->rng, dst, dlen, true);
+ mutex_unlock(&ctx->rng->lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int xtrng_trng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static int xtrng_trng_init(struct crypto_tfm *rtfm)
+{
+ struct xilinx_rng_ctx *ctx = crypto_tfm_ctx(rtfm);
+
+ ctx->rng = xilinx_rng_dev;
+
+ return 0;
+}
+
+static struct rng_alg xtrng_trng_alg = {
+ .generate = xtrng_trng_generate,
+ .seed = xtrng_trng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "xilinx-trng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct xilinx_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = xtrng_trng_init,
+ },
+};
+
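A hedged sketch (names illustrative) of pulling bytes from this RNG elsewhere in the kernel via the crypto RNG API; "stdrng" resolves to this driver when it is the highest-priority registered implementation:

#include <crypto/rng.h>
#include <linux/err.h>

static int demo_get_trng_bytes(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* .generate above runs under the device mutex. */
	err = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return err;
}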
+static int xtrng_hwrng_trng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ u8 buf[TRNG_SEC_STRENGTH_BYTES];
+ struct xilinx_rng *rng;
+ int ret = -EINVAL, i = 0;
+
+ rng = container_of(hwrng, struct xilinx_rng, trng);
+ /* Bail out if the lock is unavailable and the caller cannot wait. */
+ if (!mutex_trylock(&rng->lock)) {
+ if (!wait)
+ return 0;
+ mutex_lock(&rng->lock);
+ }
+
+ while (i < max) {
+ ret = xtrng_random_bytes_generate(rng, buf, TRNG_SEC_STRENGTH_BYTES, wait);
+ if (ret < 0)
+ break;
+
+ memcpy(data + i, buf, min_t(int, ret, (max - i)));
+ i += min_t(int, ret, (max - i));
+ }
+ mutex_unlock(&rng->lock);
+
+ return ret;
+}
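Once the hwrng registration below succeeds, the core also exposes the device as /dev/hwrng; a hedged user-space smoke test:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0)
		return 1;
	/* A short read here would mean the driver returned fewer bytes. */
	if (read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		close(fd);
		return 1;
	}
	close(fd);
	for (unsigned int i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}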
+
+static int xtrng_hwrng_register(struct hwrng *trng)
+{
+ int ret;
+
+ trng->name = "Xilinx Versal Crypto Engine TRNG";
+ trng->read = xtrng_hwrng_trng_read;
+
+ ret = hwrng_register(trng);
+ if (ret)
+ pr_err("Fail to register the TRNG\n");
+
+ return ret;
+}
+
+static void xtrng_hwrng_unregister(struct hwrng *trng)
+{
+ hwrng_unregister(trng);
+}
+
+static int xtrng_probe(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->dev = &pdev->dev;
+ rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng->rng_base)) {
+ dev_err(&pdev->dev, "Failed to map resource %ld\n", PTR_ERR(rng->rng_base));
+ return PTR_ERR(rng->rng_base);
+ }
+
+ xtrng_trng_reset(rng->rng_base);
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(&pdev->dev, "TRNG Seed fail\n");
+ return ret;
+ }
+
+ xilinx_rng_dev = rng;
+ mutex_init(&rng->lock);
+ ret = crypto_register_rng(&xtrng_trng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
+ return ret;
+ }
+ ret = xtrng_hwrng_register(&rng->trng);
+ if (ret) {
+ dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
+ goto crypto_rng_free;
+ }
+ platform_set_drvdata(pdev, rng);
+
+ return 0;
+
+crypto_rng_free:
+ crypto_unregister_rng(&xtrng_trng_alg);
+
+ return ret;
+}
+
+static void xtrng_remove(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ u32 zero[TRNG_NUM_INIT_REGS] = { };
+
+ rng = platform_get_drvdata(pdev);
+ xtrng_hwrng_unregister(&rng->trng);
+ crypto_unregister_rng(&xtrng_trng_alg);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_PER_STRNG_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_hold_reset(rng->rng_base);
+ xilinx_rng_dev = NULL;
+}
+
+static const struct of_device_id xtrng_of_match[] = {
+ { .compatible = "xlnx,versal-trng", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xtrng_of_match);
+
+static struct platform_driver xtrng_driver = {
+ .driver = {
+ .name = "xlnx,versal-trng",
+ .of_match_table = xtrng_of_match,
+ },
+ .probe = xtrng_probe,
+ .remove = xtrng_remove,
+};
+
+module_platform_driver(xtrng_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harsh Jain <h.jain@amd.com>");
+MODULE_AUTHOR("Mounika Botcha <mounika.botcha@amd.com>");
+MODULE_DESCRIPTION("True Random Number Generator Driver");
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 7f0ec6887a39..6e72d9229410 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -438,7 +438,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
static struct platform_driver zynqmp_aes_driver = {
.probe = zynqmp_aes_aead_probe,
- .remove_new = zynqmp_aes_aead_remove,
+ .remove = zynqmp_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
.of_match_table = zynqmp_aes_dt_ids,
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1bcec6f46c9c..5813017b6b79 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,18 @@
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
-#include <linux/cacheflush.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx {
struct crypto_shash *fbk_tfm;
};
-struct zynqmp_sha_desc_ctx {
- struct shash_desc fbk_req;
-};
-
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
+ if (crypto_shash_descsize(hash) <
+ crypto_shash_statesize(tfm_ctx->fbk_tfm)) {
+ crypto_free_shash(fallback_tfm);
+ return -EINVAL;
+ }
+
tfm_ctx->fbk_tfm = fallback_tfm;
- hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
return 0;
}
@@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
{
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- if (tfm_ctx->fbk_tfm) {
- crypto_free_shash(tfm_ctx->fbk_tfm);
- tfm_ctx->fbk_tfm = NULL;
- }
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+static int zynqmp_sha_continue(struct shash_desc *desc,
+ struct shash_desc *fbdesc, int err)
+{
+ err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
+ shash_desc_zero(fbdesc);
+ return err;
}
static int zynqmp_sha_init(struct shash_desc *desc)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_init(&dctx->fbk_req);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_init(fbdesc);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_update(&dctx->fbk_req, data, length);
-}
-
-static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- return crypto_shash_final(&dctx->fbk_req, out);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_update(fbdesc, data, length);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_finup(&dctx->fbk_req, data, length, out);
-}
-
-static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_import(&dctx->fbk_req, in);
+ fbdesc->tfm = fbtfm;
+ return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_finup(fbdesc, data, length, out);
}
-static int zynqmp_sha_export(struct shash_desc *desc, void *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fbk_req, out);
-}
-
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
@@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return ret;
}
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+ return __zynqmp_sha_digest(desc, data, len, out);
+}
+
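A hedged sketch of exercising this hash from another kernel user; a one-shot crypto_shash_tfm_digest() call lands in zynqmp_sha_digest() above, while multi-step init/update/finup users go through the export/import proxy helpers. out must hold SHA3_384_DIGEST_SIZE (48) bytes:

#include <crypto/hash.h>
#include <linux/err.h>

static int demo_sha3_384(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha3-384", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One-shot digest of a linear buffer. */
	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}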
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
- .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
.digest = zynqmp_sha_digest,
- .export = zynqmp_sha_export,
- .import = zynqmp_sha_import,
.init_tfm = zynqmp_sha_init_tfm,
.exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = sizeof(struct zynqmp_sha_desc_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = SHA3_384_EXPORT_SIZE,
.digestsize = SHA3_384_DIGEST_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "zynqmp-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
@@ -248,7 +246,7 @@ static void zynqmp_sha_remove(struct platform_device *pdev)
static struct platform_driver zynqmp_sha_driver = {
.probe = zynqmp_sha_probe,
- .remove_new = zynqmp_sha_remove,
+ .remove = zynqmp_sha_remove,
.driver = {
.name = "zynqmp-sha3-384",
},
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 29c192f20082..48b7314afdb8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -7,6 +7,7 @@ menuconfig CXL_BUS
select PCI_DOE
select FIRMWARE_TABLE
select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
+ select FWCTL if CXL_FEATURES
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -60,6 +61,7 @@ config CXL_ACPI
default CXL_BUS
select ACPI_TABLE_LIB
select ACPI_HMAT
+ select CXL_PORT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
@@ -101,6 +103,88 @@ config CXL_MEM
If unsure say 'm'.
+config CXL_FEATURES
+ bool "CXL: Features"
+ depends on CXL_PCI
+ help
+ Enable support for CXL Features. A CXL device that includes a mailbox
+ supports commands that allow listing, getting, and setting of
+ optionally defined features such as memory sparing or post package
+ sparing. Vendors may define custom features for the device.
+
+ If unsure say 'n'.
+
+config CXL_EDAC_MEM_FEATURES
+ bool "CXL: EDAC Memory Features"
+ depends on EXPERT
+ depends on CXL_MEM
+ depends on CXL_FEATURES
+ depends on EDAC >= CXL_BUS
+ help
+ The CXL EDAC memory feature is optional and allows the host to
+ control the EDAC memory feature configuration of CXL memory
+ expander devices.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory RAS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_SCRUB
+ bool "Enable CXL Patrol Scrub Control (Patrol Read)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_SCRUB
+ help
+ The CXL EDAC scrub control is optional and allows the host to
+ control the scrub feature configuration of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' and 'cxl_region' EDAC devices are
+ published with memory scrub control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-scrub.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory scrub feature established by the platform/device
+ (e.g. scrub rates for the patrol scrub feature).
+ Otherwise say 'n'.
+
+config CXL_EDAC_ECS
+ bool "Enable CXL Error Check Scrub (Repair)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_ECS
+ help
+ The CXL EDAC ECS control is optional and allows the host to
+ control the ECS feature configuration of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ ECS control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-ecs.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory ECS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_MEM_REPAIR
+ bool "Enable CXL Memory Repair"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_MEM_REPAIR
+ help
+ The CXL EDAC memory repair control is optional and allows the
+ host to control the memory repair feature (e.g. sparing, PPR)
+ configuration of CXL memory expander devices.
+
+ When enabled, the memory repair feature requires approximately
+ 43KB of additional memory to store CXL DRAM and CXL general
+ media event records.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ repair control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-memory-repair.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory repair feature established by the platform/device.
+ Otherwise say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
@@ -145,4 +229,8 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
+config CXL_MCE
+ def_bool y
+ depends on X86_MCE && MEMORY_FAILURE
+
endif
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index db321f48ba52..2caa90fa4bf2 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+
+# Order is important here for the built-in case:
+# - 'core' first for fundamental init
+# - 'port' before platform root drivers like 'acpi' so that CXL-root ports
+# are immediately enabled
+# - 'mem' and 'pmem' before endpoint drivers so that memdevs are
+# immediately enabled
+# - 'pci' last, also mirrors the hardware enumeration hierarchy
obj-y += core/
-obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-obj-$(CONFIG_CXL_PORT) += cxl_port.o
+obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-cxl_mem-y := mem.o
-cxl_pci-y := pci.o
+cxl_port-y := port.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o security.o
-cxl_port-y := port.o
+cxl_mem-y := mem.o
+cxl_pci-y := pci.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 82b78e331d8e..d7a5539d07d4 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,8 +11,6 @@
#include "cxlpci.h"
#include "cxl.h"
-#define CXL_RCRB_SIZE SZ_8K
-
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
@@ -22,8 +20,7 @@ static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
-
-static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
+static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
struct cxl_cxims_data *cximsd = cxlrd->platform_data;
int hbiw = cxlrd->cxlsd.nr_targets;
@@ -32,19 +29,23 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
/* No xormaps for host bridge interleave ways of 1 or 3 */
if (hbiw == 1 || hbiw == 3)
- return hpa;
+ return addr;
/*
- * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
- * the position bit to its value before the xormap was applied at
- * HPA->DPA translation.
+ * In regions using XOR interleave arithmetic the CXL HPA may not
+ * be the same as the SPA. This helper performs the SPA->CXL HPA
+ * or the CXL HPA->SPA translation. Since XOR is self-inverting,
+ * so is this function.
+ *
+ * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying the
+ * xormaps will toggle a position bit.
*
* pos is the lowest set bit in an XORMAP
- * val is the XORALLBITS(HPA & XORMAP)
+ * val is the XORALLBITS(addr & XORMAP)
*
* XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
* as an operation that outputs a single bit by XORing all the
- * bits in the input (hpa & xormap). Implement XORALLBITS using
+ * bits in the input (addr & xormap). Implement XORALLBITS using
* hweight64(). If the hamming weight is even the XOR of those
* bits results in val==0, if odd the XOR result is val==1.
*/
@@ -53,11 +54,11 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
if (!cximsd->xormaps[i])
continue;
pos = __ffs(cximsd->xormaps[i]);
- val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
- hpa = (hpa & ~(1ULL << pos)) | (val << pos);
+ val = (hweight64(addr & cximsd->xormaps[i]) & 1);
+ addr = (addr & ~(1ULL << pos)) | (val << pos);
}
- return hpa;
+ return addr;
}
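A worked example of the parity step, with made-up map values, showing why one helper serves both directions:

#include <linux/bitops.h>
#include <linux/types.h>

/* Apply a single xormap, exactly as in the loop above. */
static u64 demo_apply_one_map(u64 addr, u64 xormap)
{
	unsigned long pos = __ffs(xormap);
	u64 val = hweight64(addr & xormap) & 1;

	return (addr & ~(1ULL << pos)) | (val << pos);
}

/*
 * With xormap = BIT(8) | BIT(16) (pos = 8) and both bits set in addr:
 * hweight64(addr & xormap) = 2, val = 0, so bit 8 is cleared. Applying
 * the same map to the result gives hweight64 = 1, val = 1, and bit 8
 * is restored -- the operation is its own inverse.
 */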
struct cxl_cxims_context {
@@ -115,9 +116,9 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
flags |= CXL_DECODER_F_TYPE2;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
flags |= CXL_DECODER_F_TYPE3;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
flags |= CXL_DECODER_F_RAM;
@@ -337,13 +338,69 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r
return rc;
}
+static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct range *hpa = &cxld->hpa_range;
+ resource_size_t size = range_len(hpa);
+ resource_size_t start = hpa->start;
+ resource_size_t cache_size;
+ struct resource res;
+ int nid, rc;
+
+ res = DEFINE_RES(start, size, 0);
+ nid = phys_to_target_node(start);
+
+ rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
+ if (rc)
+ return rc;
+
+ /*
+ * The cache range is expected to be within the CFMWS.
+ * Currently only cache_size == cxl_size is supported, where the
+ * CXL size is half of the total CFMWS window size.
+ */
+ size = size >> 1;
+ if (cache_size && size != cache_size) {
+ dev_warn(&cxld->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ cxlrd->cache_size = cache_size;
+
+ return 0;
+}
+
+static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
+{
+ int rc;
+
+ rc = cxl_acpi_set_cache_size(cxlrd);
+ if (!rc)
+ return;
+
+ if (rc != -EOPNOTSUPP) {
+ /*
+ * Failing to set up the extended linear cache does not prevent
+ * the region from functioning; it only causes 'cxl list' to
+ * report an incorrect region size.
+ */
+ dev_warn(cxlrd->cxlsd.cxld.dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
+ /* Ignore the error and disable the extended linear cache */
+ cxlrd->cache_size = 0;
+}
+
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
- int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
@@ -361,8 +418,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
- for (i = 0; i < ways; i++)
- target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
@@ -388,6 +443,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
+ for (i = 0; i < ways; i++)
+ cxld->target_map[i] = cfmws->interleave_targets[i];
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
@@ -396,6 +453,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
ig = CXL_DECODER_MIN_GRANULARITY;
cxld->interleave_granularity = ig;
+ cxl_setup_extended_linear_cache(cxlrd);
+
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
if (ways != 1 && ways != 3) {
cxims_ctx = (struct cxl_cxims_context) {
@@ -415,13 +474,27 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->qos_class = cfmws->qtg_id;
- if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
- cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
+ if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
+ cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL);
+ if (!cxlrd->ops)
+ return -ENOMEM;
+
+ cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps;
+ cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
+ }
+
+ rc = cxl_decoder_add(cxld);
+ if (rc)
+ return rc;
- rc = cxl_decoder_add(cxld, target_map);
+ rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
if (rc)
return rc;
- return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+
+ dev_dbg(root_port->dev.parent, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&root_port->dev));
+
+ return 0;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -479,7 +552,11 @@ static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
- chbs->length != CXL_RCRB_SIZE)
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
+ return 0;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
return 0;
if (!chbs->base)
@@ -739,10 +816,10 @@ static void remove_cxl_resources(void *data)
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with a another window
* entry and require the window to be truncated or trimmed. Consider this
- * situation:
+ * situation::
*
- * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
- * |--------------- "System RAM" -------------|
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
*
* ...where platform firmware has established as System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
@@ -924,8 +1001,15 @@ static void __exit cxl_acpi_exit(void)
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);
+
+/*
+ * Arrange for host-bridge ports to be active synchronous with
+ * cxl_acpi_probe() exit.
+ */
+MODULE_SOFTDEP("pre: cxl_port");
+
module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
-MODULE_IMPORT_NS(ACPI);
+MODULE_IMPORT_NS("CXL");
+MODULE_IMPORT_NS("ACPI");
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9259bcc6773c..5ad8fef210b5 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -14,5 +14,9 @@ cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
+cxl_core-y += ras.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_MCE) += mce.o
+cxl_core-$(CONFIG_CXL_FEATURES) += features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index ef1621d40f05..c4bd6e8a0cf0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -28,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type)
*/
if (entry == 0xffff || !entry)
return 0;
- else if (base > (UINT_MAX / (entry)))
+ if (base > (UINT_MAX / (entry)))
return 0;
/*
@@ -247,8 +247,8 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
- "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
- dent->dpa_range.start, dpa_perf->qos_class,
+ "DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+ &dent->dpa_range, dpa_perf->qos_class,
dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
dent->coord[ACCESS_COORDINATE_CPU].read_latency,
@@ -258,29 +258,31 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct device *dev = cxlds->dev;
- struct range pmem_range = {
- .start = cxlds->pmem_res.start,
- .end = cxlds->pmem_res.end,
- };
- struct range ram_range = {
- .start = cxlds->ram_res.start,
- .end = cxlds->ram_res.end,
- };
struct dsmas_entry *dent;
unsigned long index;
xa_for_each(dsmas_xa, index, dent) {
- if (resource_size(&cxlds->ram_res) &&
- range_contains(&ram_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->ram_perf);
- else if (resource_size(&cxlds->pmem_res) &&
- range_contains(&pmem_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->pmem_perf);
- else
- dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
- dent->dpa_range.start);
+ bool found = false;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
+
+ if (range_contains(&range, &dent->dpa_range)) {
+ update_perf_entry(dev, dent,
+ &cxlds->part[i].perf);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
+ &dent->dpa_range);
}
}
@@ -334,45 +336,55 @@ static int match_cxlrd_hb(struct device *dev, void *data)
cxlrd = to_cxl_root_decoder(dev);
cxlsd = &cxlrd->cxlsd;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
- if (host_bridge == cxlsd->target[i]->dport_dev)
+ if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
return 0;
}
-static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+static void cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct cxl_port *root_port;
- int rc;
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
+ /*
+ * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to
+ * succeed when called in the cxl_endpoint_port_probe() path.
+ */
if (!cxl_root)
- return -ENODEV;
+ return;
root_port = &cxl_root->port;
- /* Check that the QTG IDs are all sane between end device and root decoders */
- if (!cxl_qos_match(root_port, &mds->ram_perf))
- reset_dpa_perf(&mds->ram_perf);
- if (!cxl_qos_match(root_port, &mds->pmem_perf))
- reset_dpa_perf(&mds->pmem_perf);
-
- /* Check to make sure that the device's host bridge is under a root decoder */
- rc = device_for_each_child(&root_port->dev,
- cxlmd->endpoint->host_bridge, match_cxlrd_hb);
- if (!rc) {
- reset_dpa_perf(&mds->ram_perf);
- reset_dpa_perf(&mds->pmem_perf);
+ /*
+ * Save userspace from needing to check if a qos class has any matches
+ * by hiding qos class info if the memdev is not mapped by a root
+ * decoder, or the partition class does not match any root decoder
+ * class.
+ */
+ if (!device_for_each_child(&root_port->dev,
+ cxlmd->endpoint->host_bridge,
+ match_cxlrd_hb)) {
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ reset_dpa_perf(perf);
+ }
+ return;
}
- return rc;
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ if (!cxl_qos_match(root_port, perf))
+ reset_dpa_perf(perf);
+ }
}
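
The rework above leans on device_for_each_child() semantics: the walk stops at the first callback returning nonzero and propagates that value, so with match_cxlrd_hb() returning 1 on a host-bridge hit, a zero overall return means no root decoder maps this memdev. The pattern reduced to its core, with hypothetical names:

	static int match_cb(struct device *dev, void *data)
	{
		/* nonzero stops the iteration and becomes the return value */
		return dev == data;
	}

	static bool has_child(struct device *parent, struct device *needle)
	{
		return device_for_each_child(parent, needle, match_cb) != 0;
	}
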
static void discard_dsmas(struct xarray *xa)
@@ -416,7 +428,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_qos_class_verify(cxlmd);
cxl_memdev_update_perf(cxlmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, "CXL");
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
@@ -428,8 +440,8 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
} *tbl = (struct acpi_cdat_sslbis_table *)header;
int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- struct cxl_port *port = arg;
- struct device *dev = &port->dev;
+ struct cxl_dport *dport = arg;
+ struct device *dev = &dport->port->dev;
int remain, entries, i;
u16 len;
@@ -455,8 +467,6 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
- struct cxl_dport *dport;
- unsigned long index;
u16 dsp_id;
u64 val;
@@ -487,33 +497,32 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
sslbis->data_type);
- xa_for_each(&port->dports, index, dport) {
- if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id) {
- cxl_access_coordinate_set(dport->coord,
- sslbis->data_type,
- val);
- }
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
+ sslbis->data_type, val);
+ return 0;
}
}
return 0;
}
-void cxl_switch_parse_cdat(struct cxl_port *port)
+void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
+ struct cxl_port *port = dport->port;
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table, port->cdat.length);
+ dport, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
-EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL");
static void __cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
@@ -545,7 +554,7 @@ void cxl_coordinates_combine(struct access_coordinate *out,
__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
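
The repeated CXL → "CXL" edits here and throughout the series track the kernel-wide conversion of symbol namespaces from preprocessor tokens to string literals; exporter and importer must spell the namespace identically or the consuming module fails to resolve the symbol at load time. The pairing in miniature:

	/* exporter side */
	EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL");

	/* importer side: required in any module calling into the namespace */
	MODULE_IMPORT_NS("CXL");
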
static void cxl_bandwidth_add(struct access_coordinate *coord,
struct access_coordinate *c1,
@@ -570,23 +579,18 @@ static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
return range_contains(&perf->dpa_range, &dpa);
}
-static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_dpa_perf *perf;
- switch (mode) {
- case CXL_DECODER_RAM:
- perf = &mds->ram_perf;
- break;
- case CXL_DECODER_PMEM:
- perf = &mds->pmem_perf;
- break;
- default:
+ if (cxled->part < 0)
+ return ERR_PTR(-EINVAL);
+ perf = &cxlds->part[cxled->part].perf;
+
+ if (!perf)
return ERR_PTR(-EINVAL);
- }
if (!dpa_perf_contains(perf, cxled->dpa_res))
return ERR_PTR(-EINVAL);
@@ -641,14 +645,16 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
void *ptr;
int rc;
+ if (!dev_is_pci(cxlds->dev))
+ return -ENODEV;
+
if (cxlds->rcd)
return -ENODEV;
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return PTR_ERR(perf);
- gp_port = to_cxl_port(parent_port->dev.parent);
*gp_is_root = is_cxl_root(gp_port);
/*
@@ -978,7 +984,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
bool is_root;
int rc;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
struct xarray *usp_xa __free(free_perf_xa) =
kzalloc(sizeof(*usp_xa), GFP_KERNEL);
@@ -1048,9 +1054,9 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
{
struct cxl_dpa_perf *perf;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return;
@@ -1066,14 +1072,3 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
-
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access)
-{
- return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
-}
-
-bool cxl_need_node_perf_attrs_update(int nid)
-{
- return !acpi_node_backed_by_real_pxm(nid);
-}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 0c62b4069ba0..1fb66132b777 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -4,12 +4,20 @@
#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__
+#include <cxl/mailbox.h>
+#include <linux/rwsem.h>
+
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern const struct device_type cxl_pmu_type;
extern struct attribute_group cxl_base_attribute_group;
+enum cxl_detach_mode {
+ DETACH_ONLY,
+ DETACH_INVALIDATE,
+};
+
#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
@@ -18,7 +26,11 @@ extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode);
+
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
@@ -46,8 +58,11 @@ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
}
-static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static inline int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ int pos, enum cxl_detach_mode mode)
{
+ return 0;
}
static inline int cxl_region_init(void)
{
@@ -65,19 +80,20 @@ static inline void cxl_region_exit(void)
struct cxl_send_command;
struct cxl_mem_query_commands;
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mem_query_commands __user *q);
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
struct dentry *cxl_debugfs_create_dir(const char *dir);
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode);
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);
enum cxl_rcrb {
CXL_RCRB_DOWNSTREAM,
@@ -89,8 +105,25 @@ resource_size_t __rcrb_to_component(struct device *dev,
enum cxl_rcrb which);
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+#define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
+#define PCI_CAP_EXP_SIZEOF 0x3c
+
+struct cxl_rwsem {
+ /*
+ * All changes to HPA (interleave configuration) occur with this
+ * lock held for write.
+ */
+ struct rw_semaphore region;
+ /*
+ * All changes to a device DPA space occur with this lock held
+ * for write.
+ */
+ struct rw_semaphore dpa;
+};
+
+extern struct cxl_rwsem cxl_rwsem;
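
Packaging the two semaphores into one struct makes lock scope explicit at the call site. A sketch of the two acquisition styles this series uses, both built on <linux/cleanup.h>: guard() for an unconditional scope-bound hold, and ACQUIRE()/ACQUIRE_ERR() when the interruptible variant's failure must be checked:

	int ret;

	/* held until end of scope, not interruptible */
	guard(rwsem_read)(&cxl_rwsem.region);

	/* interruptible: ACQUIRE_ERR() yields the acquisition error, if any */
	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
		return ret;
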
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
@@ -102,12 +135,35 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+enum poison_cmd_enabled_bits;
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd);
+
long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access);
-bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
+int cxl_ras_init(void);
+void cxl_ras_exit(void);
+int cxl_gpf_port_setup(struct cxl_dport *dport);
+
+struct cxl_hdm;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
+int cxl_port_get_possible_dports(struct cxl_port *port);
+
+#ifdef CONFIG_CXL_FEATURES
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ enum cxl_get_feat_selection selection,
+ void *feat_out, size_t feat_out_size, u16 offset,
+ u16 *return_code);
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ u8 feat_version, const void *feat_data,
+ size_t feat_data_size, u32 feat_flag, u16 offset,
+ u16 *return_code);
+#endif
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
new file mode 100644
index 000000000000..79994ca9bc9f
--- /dev/null
+++ b/drivers/cxl/core/edac.c
@@ -0,0 +1,2109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL EDAC memory feature driver.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ *
+ * - Supports functions to configure EDAC features of the
+ * CXL memory devices.
+ * - Registers with the EDAC device subsystem driver to expose
+ * the features sysfs attributes to the user for configuring
+ * CXL memory RAS feature.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/edac.h>
+#include <linux/limits.h>
+#include <linux/unaligned.h>
+#include <linux/xarray.h>
+#include <cxl/features.h>
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+#include "trace.h"
+
+#define CXL_NR_EDAC_DEV_FEATURES 7
+
+#define CXL_SCRUB_NO_REGION -1
+
+struct cxl_patrol_scrub_context {
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
+ * Feature Readable Attributes.
+ */
+struct cxl_scrub_rd_attrbs {
+ u8 scrub_cycle_cap;
+ __le16 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
+ * Feature Writable Attributes.
+ */
+struct cxl_scrub_wr_attrbs {
+ u8 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
+#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
+#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
+#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
+#define CXL_SCRUB_CONTROL_ENABLE BIT(0)
+
+#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
+#define CXL_GET_SCRUB_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)
+
+#define CXL_SET_SCRUB_CYCLE(cycle) \
+ FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)
+
+static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
+ u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
+ size_t data_size;
+ struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ *cap = rd_attrbs->scrub_cycle_cap;
+ *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
+ *flags = rd_attrbs->scrub_flags;
+ if (min_cycle)
+ *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);
+
+ return 0;
+}
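
The 16-bit scrub_cycle_hours readout packs two byte-wide fields, split apart by the FIELD_GET() wrappers defined above. A worked example:

	u16 cycle = 0x0c18;	/* illustrative device readout */

	CXL_GET_SCRUB_CYCLE(cycle);	/* low byte 0x18: current cycle, 24 hours */
	CXL_GET_SCRUB_MIN_CYCLE(cycle);	/* high byte 0x0c: device minimum, 12 hours */
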
+
+static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ u8 min_scrub_cycle = 0;
+ int i, ret;
+
+ if (!cxl_ps_ctx->cxlr) {
+ cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
+ return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
+ flags, min_cycle);
+ }
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
+ min_cycle);
+ if (ret)
+ return ret;
+
+ /*
+ * The min_scrub_cycle of a region is the max of minimum scrub
+ * cycles supported by memdevs that back the region.
+ */
+ if (min_cycle)
+ min_scrub_cycle = max(*min_cycle, min_scrub_cycle);
+ }
+
+ if (min_cycle)
+ *min_cycle = min_scrub_cycle;
+
+ return 0;
+}
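
Per the comment in the loop, a region can only honor a scrub cycle that every backing memdev supports, so its minimum is the largest per-device minimum. A worked instance of that fold, with illustrative values:

	u8 mins[] = { 12, 16 };	/* per-memdev minimum cycles, hours */
	u8 region_min = 0;

	for (int i = 0; i < ARRAY_SIZE(mins); i++)
		region_min = max(mins[i], region_min);	/* result: 16 */
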
+
+static int cxl_scrub_set_attrbs_region(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int ret, i;
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+					"Device scrub rate (%d hours) set by region%d overwritten by region%d scrub rate (%d hours)\n",
+ cxlmd->scrub_cycle,
+ cxlmd->scrub_region_id, cxlr->id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = cxlr->id;
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_device(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_memdev *cxlmd;
+ int ret;
+
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ cxlmd = cxl_ps_ctx->cxlmd;
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+				"Device scrub rate (%d hours) set by region%d overwritten by device-local scrub rate (%d hours)\n",
+ cxlmd->scrub_cycle, cxlmd->scrub_region_id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ if (cxl_ps_ctx->cxlr)
+ return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);
+
+ return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
+ bool *enabled)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *enabled = CXL_GET_SCRUB_EN_STS(flags);
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
+ bool enable)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, wr_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
+ flags = CXL_SET_SCRUB_EN(enable);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *min)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, min_cycle;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ *min = min_cycle * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *max)
+{
+ *max = U8_MAX * 3600; /* Max set by register size */
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
+ u32 *scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
+ u32 scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
+ u8 cap, wr_cycle, flags, min_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
+ return -EOPNOTSUPP;
+
+ if (scrub_cycle_hours < min_cycle) {
+ dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
+ scrub_cycle_hours);
+ dev_dbg(dev,
+			"Minimum supported CXL patrol scrub cycle is %d hours\n",
+ min_cycle);
+ return -EINVAL;
+ }
+ wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static const struct edac_scrub_ops cxl_ps_scrub_ops = {
+ .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
+ .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
+ .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
+ .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
+ .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
+ .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
+};
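
These ops are what the EDAC features framework calls into when userspace touches the scrub sysfs attributes. A sketch of how a feature array built by the init helpers below would be handed to the framework, assuming the edac_dev_register() entry point (parent device, instance name, private data, feature count, feature array); the instance name is illustrative:

	struct edac_dev_feature features[CXL_NR_EDAC_DEV_FEATURES];
	int num = 0;

	if (!cxl_memdev_scrub_init(cxlmd, &features[num], 0))
		num++;
	/* ... ECS, memory repair, etc. fill further slots ... */

	return edac_dev_register(&cxlmd->dev, "cxl_mem0", NULL, num, features);
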
+
+static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_feat_entry *feat_entry;
+ u8 cap, flags;
+ u16 cycle;
+ int rc;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlmd = cxlmd,
+ };
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
+ &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+static int cxl_region_scrub_init(struct cxl_region *cxlr,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_feat_entry *feat_entry = NULL;
+ struct cxl_memdev *cxlmd;
+ u8 cap, flags;
+ u16 cycle;
+ int i, rc;
+
+	/*
+	 * cxl_rwsem.region must be held if the code below runs in any
+	 * context other than region probe, which is the only caller here.
+	 */
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) &
+ CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
+ &cycle, &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlr = cxlr,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+struct cxl_ecs_context {
+ u16 num_media_frus;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
+ * Readable Attributes.
+ */
+struct cxl_ecs_fru_rd_attrbs {
+ u8 ecs_cap;
+ __le16 ecs_config;
+ u8 ecs_flags;
+} __packed;
+
+struct cxl_ecs_rd_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
+ * Writable Attributes.
+ */
+struct cxl_ecs_fru_wr_attrbs {
+ __le16 ecs_config;
+} __packed;
+
+struct cxl_ecs_wr_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
+} __packed;
+
+#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
+#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
+#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
+#define CXL_ECS_COUNT_MODE_MASK BIT(3)
+#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
+#define CXL_ECS_RESET_COUNTER 1
+
+enum {
+ ECS_THRESHOLD_256 = 256,
+ ECS_THRESHOLD_1024 = 1024,
+ ECS_THRESHOLD_4096 = 4096,
+};
+
+enum {
+ ECS_THRESHOLD_IDX_256 = 3,
+ ECS_THRESHOLD_IDX_1024 = 4,
+ ECS_THRESHOLD_IDX_4096 = 5,
+};
+
+static const u16 ecs_supp_threshold[] = {
+ [ECS_THRESHOLD_IDX_256] = 256,
+ [ECS_THRESHOLD_IDX_1024] = 1024,
+ [ECS_THRESHOLD_IDX_4096] = 4096,
+};
+
+enum {
+ ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
+ ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
+};
+
+enum cxl_ecs_count_mode {
+ ECS_MODE_COUNTS_ROWS = 0,
+ ECS_MODE_COUNTS_CODEWORDS = 1,
+};
+
+static int cxl_mem_ecs_get_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 *log_cap, u16 *config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ size_t rd_data_size;
+ size_t data_size;
+
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ *log_cap = rd_attrbs->ecs_log_cap;
+ *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);
+
+ return 0;
+}
+
+static int cxl_mem_ecs_set_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 log_cap, u16 config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
+ size_t rd_data_size, wr_data_size;
+ u16 num_media_frus, count;
+ size_t data_size;
+
+ num_media_frus = cxl_ecs_ctx->num_media_frus;
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+ wr_data_size = cxl_ecs_ctx->set_feat_size;
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
+ kvzalloc(wr_data_size, GFP_KERNEL);
+ if (!wr_attrbs)
+ return -ENOMEM;
+
+	/*
+	 * Populate the writable attributes for all media FRUs from the
+	 * current values just read, then override the target FRU below.
+	 */
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ fru_wr_attrbs = wr_attrbs->fru_attrbs;
+ wr_attrbs->ecs_log_cap = log_cap;
+ for (count = 0; count < num_media_frus; count++)
+ fru_wr_attrbs[count].ecs_config =
+ fru_rd_attrbs[count].ecs_config;
+
+ fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);
+
+ return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ cxl_ecs_ctx->set_version, wr_attrbs,
+ wr_data_size,
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+}
+
+static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
+}
+
+static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
+{
+ u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);
+
+ return ecs_supp_threshold[index];
+}
+
+static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
+}
+
+#define CXL_ECS_GET_ATTR(attrb) \
+ static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 *val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ *val = cxl_get_ecs_##attrb(log_cap, config); \
+ \
+ return 0; \
+ }
+
+CXL_ECS_GET_ATTR(log_entry_type)
+CXL_ECS_GET_ATTR(count_mode)
+CXL_ECS_GET_ATTR(threshold)
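
Each CXL_ECS_GET_ATTR() invocation stamps out one getter; the mailbox read is shared and only the final decode differs per attribute. CXL_ECS_GET_ATTR(threshold), for instance, expands to:

	static int cxl_ecs_get_threshold(struct device *dev, void *drv_data,
					 int fru_id, u32 *val)
	{
		struct cxl_ecs_context *ctx = drv_data;
		u8 log_cap;
		u16 config;
		int ret;

		ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, &config);
		if (ret)
			return ret;

		*val = cxl_get_ecs_threshold(log_cap, config);

		return 0;
	}
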
+
+static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
+ val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
+ return -EINVAL;
+
+ *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;
+
+ switch (val) {
+ case ECS_THRESHOLD_256:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_256);
+ break;
+ case ECS_THRESHOLD_1024:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_1024);
+ break;
+ case ECS_THRESHOLD_4096:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_4096);
+ break;
+ default:
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%u) to set\n",
+ val);
+ dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
+ ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
+ ECS_THRESHOLD_4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
+ dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
+ dev_dbg(dev,
+			"Supported ECS Modes: 0: ECS counts rows with errors, 1: ECS counts codewords with errors\n");
+ return -EINVAL;
+ }
+
+ *config &= ~CXL_ECS_COUNT_MODE_MASK;
+ *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != CXL_ECS_RESET_COUNTER)
+ return -EINVAL;
+
+ *config &= ~CXL_ECS_RESET_COUNTER_MASK;
+ *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);
+
+ return 0;
+}
+
+#define CXL_ECS_SET_ATTR(attrb) \
+ static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ if (!capable(CAP_SYS_RAWIO)) \
+ return -EPERM; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \
+ if (ret) \
+ return ret; \
+ \
+ return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \
+ config); \
+ }
+CXL_ECS_SET_ATTR(log_entry_type)
+CXL_ECS_SET_ATTR(count_mode)
+CXL_ECS_SET_ATTR(reset_counter)
+CXL_ECS_SET_ATTR(threshold)
+
+static const struct edac_ecs_ops cxl_ecs_ops = {
+ .get_log_entry_type = cxl_ecs_get_log_entry_type,
+ .set_log_entry_type = cxl_ecs_set_log_entry_type,
+ .get_mode = cxl_ecs_get_count_mode,
+ .set_mode = cxl_ecs_set_count_mode,
+ .reset = cxl_ecs_set_reset_counter,
+ .get_threshold = cxl_ecs_get_threshold,
+ .set_threshold = cxl_ecs_set_threshold,
+};
+
+static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature)
+{
+ struct cxl_ecs_context *cxl_ecs_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int num_media_frus;
+
+ feat_entry =
+ cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
+ sizeof(struct cxl_ecs_rd_attrbs)) /
+ sizeof(struct cxl_ecs_fru_rd_attrbs);
+ if (!num_media_frus)
+ return -EOPNOTSUPP;
+
+ cxl_ecs_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
+ if (!cxl_ecs_ctx)
+ return -ENOMEM;
+
+ *cxl_ecs_ctx = (struct cxl_ecs_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .num_media_frus = num_media_frus,
+ .cxlmd = cxlmd,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_ECS;
+ ras_feature->ecs_ops = &cxl_ecs_ops;
+ ras_feature->ctx = cxl_ecs_ctx;
+ ras_feature->ecs_info.num_media_frus = num_media_frus;
+
+ return 0;
+}
+
+/*
+ * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1
+ */
+
+/*
+ * Perform Maintenance input payload
+ * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
+ */
+struct cxl_mbox_maintenance_hdr {
+ u8 op_class;
+ u8 op_subclass;
+} __packed;
+
+static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
+ u8 subclass, void *data_in,
+ size_t data_in_size)
+{
+ struct cxl_memdev_maintenance_pi {
+ struct cxl_mbox_maintenance_hdr hdr;
+ u8 data[];
+ } __packed;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
+ kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->hdr.op_class = class;
+ pi->hdr.op_subclass = subclass;
+ hdr_size = sizeof(pi->hdr);
+	/*
+	 * Check that the mailbox payload is large enough for the
+	 * maintenance data transfer.
+	 */
+ if (hdr_size + data_in_size > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ memcpy(pi->data, data_in, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_DO_MAINTENANCE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+
+/*
+ * Support for determining whether the attributes of a memory
+ * operation record are from the current boot.
+ */
+
+struct cxl_mem_err_rec {
+ struct xarray rec_gen_media;
+ struct xarray rec_dram;
+};
+
+enum cxl_mem_repair_type {
+ CXL_PPR,
+ CXL_CACHELINE_SPARING,
+ CXL_ROW_SPARING,
+ CXL_BANK_SPARING,
+ CXL_RANK_SPARING,
+ CXL_REPAIR_MAX,
+};
+
+/**
+ * struct cxl_mem_repair_attrbs - CXL memory repair attributes
+ * @dpa: DPA of memory to repair
+ * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
+ * @row: row of memory to repair
+ * @column: column of memory to repair
+ * @channel: channel of memory to repair
+ * @sub_channel: sub channel of memory to repair
+ * @rank: rank of memory to repair
+ * @bank_group: bank group of memory to repair
+ * @bank: bank of memory to repair
+ * @repair_type: repair type, e.g. PPR or memory sparing
+ */
+struct cxl_mem_repair_attrbs {
+ u64 dpa;
+ u32 nibble_mask;
+ u32 row;
+ u16 column;
+ u8 channel;
+ u8 sub_channel;
+ u8 rank;
+ u8 bank_group;
+ u8 bank;
+ enum cxl_mem_repair_type repair_type;
+};
+
+static struct cxl_event_gen_media *
+cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ if (attrbs->repair_type == CXL_PPR)
+ return rec;
+
+ return NULL;
+}
+
+static struct cxl_event_dram *
+cxl_find_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ u16 validity_flags;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+ if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
+ !(validity_flags & CXL_DER_VALID_RANK))
+ return NULL;
+
+ switch (attrbs->repair_type) {
+ case CXL_PPR:
+ if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
+ return rec;
+ break;
+ case CXL_CACHELINE_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW) ||
+ !(validity_flags & CXL_DER_VALID_COLUMN))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ get_unaligned_le16(rec->column) == attrbs->column &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask) &&
+ (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
+ rec->sub_channel == attrbs->sub_channel))
+ return rec;
+ break;
+ case CXL_ROW_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_BANK_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_RANK_SPARING:
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
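
The switch above is easiest to audit as a table: each sparing granularity demands a minimum set of validity bits before a cached DRAM event record is trusted, while nibble mask and sub-channel are matched only when their own bits are valid. A restatement of those minimums, not driver code:

	static const u16 sparing_required_flags[] = {
		[CXL_CACHELINE_SPARING] = CXL_DER_VALID_CHANNEL | CXL_DER_VALID_RANK |
					  CXL_DER_VALID_BANK_GROUP | CXL_DER_VALID_BANK |
					  CXL_DER_VALID_ROW | CXL_DER_VALID_COLUMN,
		[CXL_ROW_SPARING]	= CXL_DER_VALID_CHANNEL | CXL_DER_VALID_RANK |
					  CXL_DER_VALID_BANK_GROUP | CXL_DER_VALID_BANK |
					  CXL_DER_VALID_ROW,
		[CXL_BANK_SPARING]	= CXL_DER_VALID_CHANNEL | CXL_DER_VALID_RANK |
					  CXL_DER_VALID_BANK_GROUP | CXL_DER_VALID_BANK,
		[CXL_RANK_SPARING]	= CXL_DER_VALID_CHANNEL | CXL_DER_VALID_RANK,
	};
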
+
+#define CXL_MAX_STORAGE_DAYS 10
+#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)
+
+static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
+ struct cxl_event_gen_media *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_gen_media *rec;
+ unsigned long index;
+ u64 delta_ts_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_ts_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
+ struct cxl_event_dram *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_dram *rec;
+ unsigned long index;
+ u64 delta_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+#define CXL_MAX_REC_STORAGE_COUNT 200
+
+static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
+{
+ void *err_rec;
+ unsigned long index, count = 0;
+
+ xa_for_each(rec_xarray, index, err_rec)
+ count++;
+
+ if (count <= CXL_MAX_REC_STORAGE_COUNT)
+ return;
+
+ count -= CXL_MAX_REC_STORAGE_COUNT;
+ xa_for_each(rec_xarray, index, err_rec) {
+ xa_erase(rec_xarray, index);
+ kfree(err_rec);
+ count--;
+ if (!count)
+ break;
+ }
+}
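
One subtlety in the overflow trim: xa_for_each() visits entries in ascending index order, and these xarrays are keyed by DPA, so the records dropped first are the lowest-addressed ones rather than the chronologically oldest; age-based cleanup is the job of the *_expired_* helpers above. Illustration:

	xa_store(&xa, 0x2000, newer_rec, GFP_KERNEL);
	xa_store(&xa, 0x1000, older_rec, GFP_KERNEL);
	/* xa_for_each() yields the 0x1000 entry first, regardless of
	 * insertion order */
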
+
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_gen_media,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
+ return xa_err(old_rec);
+ }
+
+ kfree(old_rec);
+
+ cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_gen_media);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");
+
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_dram,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
+ return xa_err(old_rec);
+ }
+
+ kfree(old_rec);
+
+ cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_dram);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
+
+static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port = cxlmd->endpoint;
+
+ if (port && cxl_num_decoders_committed(port))
+ return true;
+
+ return false;
+}
+
+/*
+ * CXL memory sparing control
+ */
+enum cxl_mem_sparing_granularity {
+ CXL_MEM_SPARING_CACHELINE,
+ CXL_MEM_SPARING_ROW,
+ CXL_MEM_SPARING_BANK,
+ CXL_MEM_SPARING_RANK,
+ CXL_MEM_SPARING_MAX
+};
+
+struct cxl_mem_sparing_context {
+ struct cxl_memdev *cxlmd;
+ uuid_t repair_uuid;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u16 effects;
+ u8 instance;
+ u8 get_version;
+ u8 set_version;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_safe_when_in_use;
+ bool cap_hard_sparing;
+ bool cap_soft_sparing;
+ u8 channel;
+ u8 rank;
+ u8 bank_group;
+ u32 nibble_mask;
+ u64 dpa;
+ u32 row;
+ u16 column;
+ u8 bank;
+ u8 sub_channel;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+};
+
+#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
+#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
+#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
+
+#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
+
+#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_SET_HARD_SPARING_FLAG BIT(1)
+#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
+#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
+
+#define CXL_GET_SPARING_SAFE_IN_USE(flags) \
+ (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
+ flags) ^ 1)
+#define CXL_GET_CAP_HARD_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
+ flags)
+#define CXL_GET_CAP_SOFT_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
+ flags)
+
+#define CXL_SET_SPARING_QUERY_RESOURCE(val) \
+ FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
+#define CXL_SET_HARD_SPARING(val) \
+ FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
+#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
+ FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
+#define CXL_SET_SPARING_NIB_MASK_VALID(val) \
+ FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
+ * Readable Attributes.
+ */
+struct cxl_memdev_repair_rd_attrbs_hdr {
+ u8 max_op_latency;
+ __le16 op_cap;
+ __le16 op_mode;
+ u8 op_class;
+ u8 op_subclass;
+ u8 rsvd[9];
+} __packed;
+
+struct cxl_memdev_sparing_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 rsvd;
+ __le16 restriction_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
+ */
+struct cxl_memdev_sparing_in_payload {
+ u8 flags;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 sub_channel;
+} __packed;
+
+static int
+cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+ struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_sparing_ctx->cap_safe_when_in_use =
+ CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
+ cxl_sparing_ctx->cap_hard_sparing =
+ CXL_GET_CAP_HARD_SPARING(restriction_flags);
+ cxl_sparing_ctx->cap_soft_sparing =
+ CXL_GET_CAP_SOFT_SPARING(restriction_flags);
+
+ return 0;
+}
+
+static struct cxl_event_dram *
+cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_sparing_context *ctx)
+{
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ attrbs.dpa = ctx->dpa;
+ attrbs.channel = ctx->channel;
+ attrbs.rank = ctx->rank;
+ attrbs.nibble_mask = ctx->nibble_mask;
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ attrbs.repair_type = CXL_CACHELINE_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ attrbs.column = ctx->column;
+ attrbs.sub_channel = ctx->sub_channel;
+ break;
+ case EDAC_REPAIR_ROW_SPARING:
+ attrbs.repair_type = CXL_ROW_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ break;
+ case EDAC_REPAIR_BANK_SPARING:
+ attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ break;
+ case EDAC_REPAIR_RANK_SPARING:
+ attrbs.repair_type = CXL_RANK_SPARING;
+ break;
+ default:
+ return NULL;
+ }
+
+ return cxl_find_rec_dram(cxlmd, &attrbs);
+}
+
+static int
+cxl_mem_perform_sparing(struct device *dev,
+ struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_memdev_sparing_in_payload sparing_pi;
+ struct cxl_event_dram *rec = NULL;
+ u16 validity_flags = 0;
+ int ret;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
+
+ if (!cxl_sparing_ctx->cap_safe_when_in_use) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
+ if (!rec)
+ return -EINVAL;
+
+ if (!get_unaligned_le16(rec->media_hdr.validity_flags))
+ return -EINVAL;
+ }
+ }
+
+ memset(&sparing_pi, 0, sizeof(sparing_pi));
+ sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
+ if (cxl_sparing_ctx->persist_mode)
+ sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
+
+ if (rec)
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+
+ switch (cxl_sparing_ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
+ if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
+ sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
+ sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
+ }
+ fallthrough;
+ case EDAC_REPAIR_ROW_SPARING:
+ put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
+ fallthrough;
+ case EDAC_REPAIR_BANK_SPARING:
+ sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
+ sparing_pi.bank = cxl_sparing_ctx->bank;
+ fallthrough;
+ case EDAC_REPAIR_RANK_SPARING:
+ sparing_pi.rank = cxl_sparing_ctx->rank;
+ fallthrough;
+ default:
+ sparing_pi.channel = cxl_sparing_ctx->channel;
+ if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
+ (!rec && (!cxl_sparing_ctx->nibble_mask ||
+ (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
+ sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
+ put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
+ sparing_pi.nibble_mask);
+ }
+ break;
+ }
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_sparing_ctx->op_class,
+ cxl_sparing_ctx->op_subclass,
+ &sparing_pi, sizeof(sparing_pi));
+}
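
The fallthrough ladder lets each granularity contribute its own fields and inherit everything coarser. Spelled out as a reading aid (all types also set channel, plus nibble_mask when it is valid or supplied):

	/*
	 * cacheline: column, sub_channel, row, bank_group, bank, rank
	 * row:                            row, bank_group, bank, rank
	 * bank:                                bank_group, bank, rank
	 * rank:                                                  rank
	 */
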
+
+static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ case EDAC_REPAIR_ROW_SPARING:
+ case EDAC_REPAIR_BANK_SPARING:
+ case EDAC_REPAIR_RANK_SPARING:
+ *repair_type = edac_repair_type[ctx->repair_type];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define CXL_SPARING_GET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_get_##attrb( \
+ struct device *dev, void *drv_data, data_type *val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ *val = ctx->attrb; \
+ \
+ return 0; \
+ }
+CXL_SPARING_GET_ATTR(persist_mode, bool)
+CXL_SPARING_GET_ATTR(dpa, u64)
+CXL_SPARING_GET_ATTR(nibble_mask, u32)
+CXL_SPARING_GET_ATTR(bank_group, u32)
+CXL_SPARING_GET_ATTR(bank, u32)
+CXL_SPARING_GET_ATTR(rank, u32)
+CXL_SPARING_GET_ATTR(row, u32)
+CXL_SPARING_GET_ATTR(column, u32)
+CXL_SPARING_GET_ATTR(channel, u32)
+CXL_SPARING_GET_ATTR(sub_channel, u32)
+
+#define CXL_SPARING_SET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_set_##attrb(struct device *dev, \
+ void *drv_data, data_type val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ ctx->attrb = val; \
+ \
+ return 0; \
+ }
+CXL_SPARING_SET_ATTR(nibble_mask, u32)
+CXL_SPARING_SET_ATTR(bank_group, u32)
+CXL_SPARING_SET_ATTR(bank, u32)
+CXL_SPARING_SET_ATTR(rank, u32)
+CXL_SPARING_SET_ATTR(row, u32)
+CXL_SPARING_SET_ATTR(column, u32)
+CXL_SPARING_SET_ATTR(channel, u32)
+CXL_SPARING_SET_ATTR(sub_channel, u32)
+
+static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
+ bool persist_mode)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if ((persist_mode && ctx->cap_hard_sparing) ||
+ (!persist_mode && ctx->cap_soft_sparing))
+ ctx->persist_mode = persist_mode;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
+ void *drv_data, bool *safe)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ *safe = ctx->cap_safe_when_in_use;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
+ u64 *min_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
+ u64 *max_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
+ return -EINVAL;
+
+ ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if (val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_sparing(dev, ctx);
+}
+
+#define RANK_OPS \
+ .get_repair_type = cxl_mem_sparing_get_repair_type, \
+ .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
+ .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
+ .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
+ .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
+ .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
+ .get_dpa = cxl_mem_sparing_get_dpa, \
+ .set_dpa = cxl_mem_sparing_set_dpa, \
+ .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
+ .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
+ .get_rank = cxl_mem_sparing_get_rank, \
+ .set_rank = cxl_mem_sparing_set_rank, \
+ .get_channel = cxl_mem_sparing_get_channel, \
+ .set_channel = cxl_mem_sparing_set_channel, \
+ .do_repair = cxl_do_mem_sparing
+
+#define BANK_OPS \
+ RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
+ .set_bank_group = cxl_mem_sparing_set_bank_group, \
+ .get_bank = cxl_mem_sparing_get_bank, \
+ .set_bank = cxl_mem_sparing_set_bank
+
+#define ROW_OPS \
+ BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
+ .set_row = cxl_mem_sparing_set_row
+
+#define CACHELINE_OPS \
+ ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
+ .set_column = cxl_mem_sparing_set_column, \
+ .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
+ .set_sub_channel = cxl_mem_sparing_set_sub_channel
+
+static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
+ RANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
+ BANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
+ ROW_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
+ CACHELINE_OPS,
+};
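
The ops tables compose by macro inclusion: RANK_OPS carries the plumbing every granularity needs (repair type, persist mode, DPA bounds, nibble mask, rank, channel, do_repair), and each finer level appends its accessors. cxl_row_sparing_ops, expanded one level, is therefore:

	static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
		RANK_OPS,
		.get_bank_group = cxl_mem_sparing_get_bank_group,
		.set_bank_group = cxl_mem_sparing_set_bank_group,
		.get_bank = cxl_mem_sparing_get_bank,
		.set_bank = cxl_mem_sparing_set_bank,
		.get_row = cxl_mem_sparing_get_row,
		.set_row = cxl_mem_sparing_set_row,
	};
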
+
+struct cxl_mem_sparing_desc {
+ const uuid_t repair_uuid;
+ enum edac_mem_repair_type repair_type;
+ const struct edac_mem_repair_ops *repair_ops;
+};
+
+static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
+ {
+ .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
+ .repair_ops = &cxl_cacheline_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_ROW_SPARING,
+ .repair_ops = &cxl_row_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_BANK_SPARING,
+ .repair_ops = &cxl_bank_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_RANK_SPARING,
+ .repair_ops = &cxl_rank_sparing_ops,
+ },
+};
+
+static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ const struct cxl_mem_sparing_desc *desc,
+ u8 repair_inst)
+{
+ struct cxl_mem_sparing_context *cxl_sparing_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &desc->repair_uuid);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
+ GFP_KERNEL);
+ if (!cxl_sparing_ctx)
+ return -ENOMEM;
+
+ *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = desc->repair_type,
+ .instance = repair_inst++,
+ };
+ uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
+
+ ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
+ if (ret)
+ return ret;
+
+	/* Prefer soft (non-persistent) sparing whenever it is supported */
+	if (cxl_sparing_ctx->cap_soft_sparing)
+		cxl_sparing_ctx->persist_mode = 0;
+ else if (cxl_sparing_ctx->cap_hard_sparing)
+ cxl_sparing_ctx->persist_mode = 1;
+ else
+ return -EOPNOTSUPP;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sparing_ctx->instance;
+ ras_feature->mem_repair_ops = desc->repair_ops;
+ ras_feature->ctx = cxl_sparing_ctx;
+
+ return 0;
+}
+
+/*
+ * CXL memory soft PPR & hard PPR control
+ */
+struct cxl_ppr_context {
+ uuid_t repair_uuid;
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_dpa;
+ bool cap_nib_mask;
+ bool media_accessible;
+ bool data_retained;
+ struct cxl_memdev *cxlmd;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+ u64 dpa;
+ u32 nibble_mask;
+};
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
+ *
+ * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
+ */
+
+#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
+#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
+
+#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
+#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
+#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
+#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
+
+#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
+#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
+
+#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
+#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
+
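+/*
+ * The restriction bits are asserted when media is inaccessible or data is
+ * lost during the repair, so XOR the extracted bit with 1 to report the
+ * positive sense (media accessible, data retained).
+ */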
+#define CXL_PPR_GET_CAP_DPA(flags) \
+ FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_CAP_NIB_MASK(flags) \
+ FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
+ restriction_flags) ^ 1)
+#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
+ restriction_flags) ^ 1)
+
+struct cxl_memdev_ppr_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 ppr_flags;
+ __le16 restriction_flags;
+ u8 ppr_op_mode;
+} __packed;
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
+ *
+ * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
+ */
+struct cxl_memdev_ppr_maintenance_attrbs {
+ u8 flags;
+ __le64 dpa;
+ u8 nibble_mask[3];
+} __packed;
+
+static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+
+ struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
+ kmalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
+ cxl_ppr_ctx->cap_nib_mask =
+ CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
+
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_ppr_ctx->media_accessible =
+ CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
+ cxl_ppr_ctx->data_retained =
+ CXL_PPR_GET_DATA_RETAINED(restriction_flags);
+
+ return 0;
+}
+
+static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+ int ret;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
+
+ if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ /* Check memory to repair is from the current boot */
+ attrbs.repair_type = CXL_PPR;
+ attrbs.dpa = cxl_ppr_ctx->dpa;
+ attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
+ if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
+ !cxl_find_rec_gen_media(cxlmd, &attrbs))
+ return -EINVAL;
+ }
+ }
+
+ memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
+ maintenance_attrbs.flags = 0;
+ maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
+ put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
+ maintenance_attrbs.nibble_mask);
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_ppr_ctx->op_class,
+ cxl_ppr_ctx->op_subclass,
+ &maintenance_attrbs,
+ sizeof(maintenance_attrbs));
+}
+
+static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
+
+ return 0;
+}
+
+static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
+ bool *persist_mode)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *persist_mode = cxl_ppr_ctx->persist_mode;
+
+ return 0;
+}
+
+static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
+ bool *safe)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *safe = cxl_ppr_ctx->media_accessible && cxl_ppr_ctx->data_retained;
+
+ return 0;
+}
+
+static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *dpa = cxl_ppr_ctx->dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
+ return -EINVAL;
+
+ cxl_ppr_ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
+ u32 *nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *nibble_mask = cxl_ppr_ctx->nibble_mask;
+
+ return 0;
+}
+
+static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
+ u32 nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ cxl_ppr_ctx->nibble_mask = nibble_mask;
+
+ return 0;
+}
+
+static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (val != EDAC_DO_MEM_REPAIR ||
+ !cxl_resource_contains_addr(&cxlds->dpa_res, cxl_ppr_ctx->dpa))
+ return -EINVAL;
+
+ return cxl_mem_perform_ppr(cxl_ppr_ctx);
+}
+
+static const struct edac_mem_repair_ops cxl_sppr_ops = {
+ .get_repair_type = cxl_ppr_get_repair_type,
+ .get_persist_mode = cxl_ppr_get_persist_mode,
+ .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
+ .get_min_dpa = cxl_ppr_get_min_dpa,
+ .get_max_dpa = cxl_ppr_get_max_dpa,
+ .get_dpa = cxl_ppr_get_dpa,
+ .set_dpa = cxl_ppr_set_dpa,
+ .get_nibble_mask = cxl_ppr_get_nibble_mask,
+ .set_nibble_mask = cxl_ppr_set_nibble_mask,
+ .do_repair = cxl_do_ppr,
+};
+
+static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 repair_inst)
+{
+ struct cxl_ppr_context *cxl_sppr_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_SPPR_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sppr_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
+ if (!cxl_sppr_ctx)
+ return -ENOMEM;
+
+ *cxl_sppr_ctx = (struct cxl_ppr_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = EDAC_REPAIR_PPR,
+ .persist_mode = false,
+ .instance = repair_inst,
+ };
+ uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
+
+ ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
+ if (ret)
+ return ret;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sppr_ctx->instance;
+ ras_feature->mem_repair_ops = &cxl_sppr_ops;
+ ras_feature->ctx = cxl_sppr_ctx;
+
+ return 0;
+}
+
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ u8 repair_inst = 0;
+ int rc;
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
+ rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
+ rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
+ for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
+ rc = cxl_memdev_sparing_init(cxlmd,
+ &ras_features[num_ras_features],
+ &mem_sparing_desc[i], repair_inst);
+ if (rc == -EOPNOTSUPP)
+ continue;
+ if (rc < 0)
+ return rc;
+
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
+ repair_inst);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP) {
+ repair_inst++;
+ num_ras_features++;
+ }
+
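+ /*
+ * Memory repair consults cached general media and DRAM event
+ * records to verify that a requested repair matches an error
+ * reported in the current boot.
+ */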
+ if (repair_inst) {
+ struct cxl_mem_err_rec *array_rec =
+ devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
+ GFP_KERNEL);
+ if (!array_rec)
+ return -ENOMEM;
+
+ xa_init(&array_rec->rec_gen_media);
+ xa_init(&array_rec->rec_dram);
+ cxlmd->err_rec_array = array_rec;
+ }
+ }
+
+ if (!num_ras_features)
+ return -EINVAL;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
+
+int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
+ return 0;
+
+ rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
+ if (rc < 0)
+ return rc;
+
+ num_ras_features++;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
+
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec_gen_media;
+ struct cxl_event_dram *rec_dram;
+ unsigned long index;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return;
+
+ xa_for_each(&array_rec->rec_dram, index, rec_dram)
+ kfree(rec_dram);
+ xa_destroy(&array_rec->rec_dram);
+
+ xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
+ kfree(rec_gen_media);
+ xa_destroy(&array_rec->rec_gen_media);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
new file mode 100644
index 000000000000..7c750599ea69
--- /dev/null
+++ b/drivers/cxl/core/features.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+#include <linux/fwctl.h>
+#include <linux/device.h>
+#include <cxl/mailbox.h>
+#include <cxl/features.h>
+#include <uapi/fwctl/cxl.h>
+#include "cxl.h"
+#include "core.h"
+#include "cxlmem.h"
+
+/**
+ * DOC: cxl features
+ *
+ * CXL Features:
+ * A CXL device that includes a mailbox supports commands that allow
+ * listing, getting, and setting of optionally defined features such
+ * as memory sparing or post package repair. Vendors may define custom
+ * features for the device.
+ */
+
+/* All the features below are exclusive to the kernel */
+static const uuid_t cxl_exclusive_feats[] = {
+ CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_FEAT_ECS_UUID,
+ CXL_FEAT_SPPR_UUID,
+ CXL_FEAT_HPPR_UUID,
+ CXL_FEAT_CACHELINE_SPARING_UUID,
+ CXL_FEAT_ROW_SPARING_UUID,
+ CXL_FEAT_BANK_SPARING_UUID,
+ CXL_FEAT_RANK_SPARING_UUID,
+};
+
+static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
+{
+ for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
+ if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
+{
+ return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
+}
+
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+{
+ return cxlds->cxlfs;
+}
+EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
+
+static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
+{
+ struct cxl_mbox_get_sup_feats_out mbox_out;
+ struct cxl_mbox_get_sup_feats_in mbox_in;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ memset(&mbox_in, 0, sizeof(mbox_in));
+ mbox_in.count = cpu_to_le32(sizeof(mbox_out));
+ memset(&mbox_out, 0, sizeof(mbox_out));
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+ .size_in = sizeof(mbox_in),
+ .payload_in = &mbox_in,
+ .size_out = sizeof(mbox_out),
+ .payload_out = &mbox_out,
+ .min_out = sizeof(mbox_out),
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0)
+ return rc;
+
+ return le16_to_cpu(mbox_out.supported_feats);
+}
+
+static struct cxl_feat_entries *
+get_supported_features(struct cxl_features_state *cxlfs)
+{
+ int remain_feats, max_size, max_feats, start, rc, hdr_size;
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ int feat_size = sizeof(struct cxl_feat_entry);
+ struct cxl_mbox_get_sup_feats_in mbox_in;
+ struct cxl_feat_entry *entry;
+ struct cxl_mbox_cmd mbox_cmd;
+ int user_feats = 0;
+ int count;
+
+ count = cxl_get_supported_features_count(cxl_mbox);
+ if (count <= 0)
+ return NULL;
+
+ struct cxl_feat_entries *entries __free(kvfree) =
+ kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
+ if (!entries)
+ return NULL;
+
+ struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
+ kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!mbox_out)
+ return NULL;
+
+ hdr_size = struct_size(mbox_out, ents, 0);
+ max_size = cxl_mbox->payload_size - hdr_size;
+ /* max feat entries that can fit in mailbox max payload size */
+ max_feats = max_size / feat_size;
+ entry = entries->ent;
+
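+ /* Retrieve the feature entries in mailbox-payload-sized batches */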
+ start = 0;
+ remain_feats = count;
+ do {
+ int retrieved, alloc_size, copy_feats;
+ int num_entries;
+
+ if (remain_feats > max_feats) {
+ alloc_size = struct_size(mbox_out, ents, max_feats);
+ remain_feats = remain_feats - max_feats;
+ copy_feats = max_feats;
+ } else {
+ alloc_size = struct_size(mbox_out, ents, remain_feats);
+ copy_feats = remain_feats;
+ remain_feats = 0;
+ }
+
+ memset(&mbox_in, 0, sizeof(mbox_in));
+ mbox_in.count = cpu_to_le32(alloc_size);
+ mbox_in.start_idx = cpu_to_le16(start);
+ memset(mbox_out, 0, alloc_size);
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+ .size_in = sizeof(mbox_in),
+ .payload_in = &mbox_in,
+ .size_out = alloc_size,
+ .payload_out = mbox_out,
+ .min_out = hdr_size,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0)
+ return NULL;
+
+ if (mbox_cmd.size_out <= hdr_size)
+ return NULL;
+
+ /*
+ * Make sure the retrieved output buffer is a whole multiple of
+ * the feature entry size.
+ */
+ retrieved = mbox_cmd.size_out - hdr_size;
+ if (retrieved % feat_size)
+ return NULL;
+
+ num_entries = le16_to_cpu(mbox_out->num_entries);
+ /*
+ * If the reported number of entries times the entry size does not
+ * match the number of retrieved bytes, the output payload is
+ * malformed.
+ */
+ if (num_entries * feat_size != retrieved)
+ return NULL;
+
+ memcpy(entry, mbox_out->ents, retrieved);
+ for (int i = 0; i < num_entries; i++) {
+ if (!is_cxl_feature_exclusive(entry + i))
+ user_feats++;
+ }
+ entry += num_entries;
+ /*
+ * If the number of output entries is less than expected, add the
+ * remaining entries to the next batch.
+ */
+ remain_feats += copy_feats - num_entries;
+ start += num_entries;
+ } while (remain_feats);
+
+ entries->num_features = count;
+ entries->num_user_features = user_feats;
+
+ return no_free_ptr(entries);
+}
+
+static void free_cxlfs(void *_cxlfs)
+{
+ struct cxl_features_state *cxlfs = _cxlfs;
+ struct cxl_dev_state *cxlds = cxlfs->cxlds;
+
+ cxlds->cxlfs = NULL;
+ kvfree(cxlfs->entries);
+ kfree(cxlfs);
+}
+
+/**
+ * devm_cxl_setup_features() - Allocate and initialize features context
+ * @cxlds: CXL device context
+ *
+ * Return: 0 on success or -errno on failure.
+ */
+int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
+
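+ /* Feature commands need at least read-only mailbox feature support */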
+ if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
+ return -ENODEV;
+
+ struct cxl_features_state *cxlfs __free(kfree) =
+ kzalloc(sizeof(*cxlfs), GFP_KERNEL);
+ if (!cxlfs)
+ return -ENOMEM;
+
+ cxlfs->cxlds = cxlds;
+
+ cxlfs->entries = get_supported_features(cxlfs);
+ if (!cxlfs->entries)
+ return -ENOMEM;
+
+ cxlds->cxlfs = cxlfs;
+
+ return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
+
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ enum cxl_get_feat_selection selection,
+ void *feat_out, size_t feat_out_size, u16 offset,
+ u16 *return_code)
+{
+ size_t data_to_rd_size, size_out;
+ struct cxl_mbox_get_feat_in pi;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t data_rcvd_size = 0;
+ int rc;
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+ if (!feat_out || !feat_out_size)
+ return 0;
+
+ size_out = min(feat_out_size, cxl_mbox->payload_size);
+ uuid_copy(&pi.uuid, feat_uuid);
+ pi.selection = selection;
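+ /*
+ * Read at most one mailbox payload per iteration until the
+ * requested number of bytes has been received.
+ */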
+ do {
+ data_to_rd_size = min(feat_out_size - data_rcvd_size,
+ cxl_mbox->payload_size);
+ pi.offset = cpu_to_le16(offset + data_rcvd_size);
+ pi.count = cpu_to_le16(data_to_rd_size);
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_FEATURE,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = size_out,
+ .payload_out = feat_out + data_rcvd_size,
+ .min_out = data_to_rd_size,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0 || !mbox_cmd.size_out) {
+ if (return_code)
+ *return_code = mbox_cmd.return_code;
+ return 0;
+ }
+ data_rcvd_size += mbox_cmd.size_out;
+ } while (data_rcvd_size < feat_out_size);
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+
+ return data_rcvd_size;
+}
+
+/*
+ * FEAT_DATA_MIN_PAYLOAD_SIZE - the minimum number of extra bytes that
+ * must be available in the mailbox payload for the actual feature data
+ * so that a feature data transfer can make progress.
+ */
+#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
+ const uuid_t *feat_uuid, u8 feat_version,
+ const void *feat_data, size_t feat_data_size,
+ u32 feat_flag, u16 offset, u16 *return_code)
+{
+ size_t data_in_size, data_sent_size = 0;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+ struct cxl_mbox_set_feat_in *pi __free(kfree) =
+ kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ uuid_copy(&pi->uuid, feat_uuid);
+ pi->version = feat_version;
+ feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
+ feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
+ hdr_size = sizeof(pi->hdr);
+ /*
+ * Check minimum mbox payload size is available for
+ * the feature data transfer.
+ */
+ if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
+ data_in_size = feat_data_size;
+ } else {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
+ data_in_size = cxl_mbox->payload_size - hdr_size;
+ }
+
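+ /*
+ * Send the data one payload-sized chunk at a time; the final chunk
+ * of a multi-part transfer is flagged FINISH_DATA_TRANSFER so the
+ * device applies the accumulated feature data.
+ */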
+ do {
+ int rc;
+
+ pi->offset = cpu_to_le16(offset + data_sent_size);
+ memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_FEATURE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0) {
+ if (return_code)
+ *return_code = mbox_cmd.return_code;
+ return rc;
+ }
+
+ data_sent_size += data_in_size;
+ if (data_sent_size >= feat_data_size) {
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+ return 0;
+ }
+
+ if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
+ data_in_size = feat_data_size - data_sent_size;
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
+ } else {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
+ }
+ } while (true);
+}
+
+/* FWCTL support */
+
+static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
+{
+ return to_cxl_memdev(fwctl_dev->dev.parent);
+}
+
+static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
+{
+ return 0;
+}
+
+static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
+{
+}
+
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs,
+ const uuid_t *uuid)
+{
+ struct cxl_feat_entry *feat;
+
+ for (int i = 0; i < cxlfs->entries->num_features; i++) {
+ feat = &cxlfs->entries->ent[i];
+ if (uuid_equal(uuid, &feat->uuid))
+ return feat;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ const struct cxl_mbox_get_sup_feats_in *feat_in;
+ struct cxl_mbox_get_sup_feats_out *feat_out;
+ struct cxl_feat_entry *pos;
+ size_t out_size;
+ int requested;
+ u32 count;
+ u16 start;
+ int i;
+
+ if (rpc_in->op_size != sizeof(*feat_in))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->get_sup_feats_in;
+ count = le32_to_cpu(feat_in->count);
+ start = le16_to_cpu(feat_in->start_idx);
+ requested = count / sizeof(*pos);
+
+ /*
+ * Make sure that the total requested number of entries is not greater
+ * than the total number of supported features allowed for userspace.
+ */
+ if (start >= cxlfs->entries->num_features)
+ return ERR_PTR(-EINVAL);
+
+ requested = min_t(int, requested, cxlfs->entries->num_features - start);
+
+ out_size = sizeof(struct fwctl_rpc_cxl_out) +
+ struct_size(feat_out, ents, requested);
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ rpc_out->size = struct_size(feat_out, ents, requested);
+ feat_out = &rpc_out->get_sup_feats_out;
+
+ for (i = start, pos = &feat_out->ents[0];
+ i < cxlfs->entries->num_features; i++, pos++) {
+ if (i - start == requested)
+ break;
+
+ memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
+ /*
+ * If the feature is exclusive, set the set_feat_size to 0 to
+ * indicate that the feature is not changeable.
+ */
+ if (is_cxl_feature_exclusive(pos)) {
+ u32 flags;
+
+ pos->set_feat_size = 0;
+ flags = le32_to_cpu(pos->flags);
+ flags &= ~CXL_FEATURE_F_CHANGEABLE;
+ pos->flags = cpu_to_le32(flags);
+ }
+ }
+
+ feat_out->num_entries = cpu_to_le16(requested);
+ feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len = out_size;
+
+ return no_free_ptr(rpc_out);
+}
+
+static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ const struct cxl_mbox_get_feat_in *feat_in;
+ u16 offset, count, return_code;
+ size_t out_size = *out_len;
+
+ if (rpc_in->op_size != sizeof(*feat_in))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->get_feat_in;
+ offset = le16_to_cpu(feat_in->offset);
+ count = le16_to_cpu(feat_in->count);
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
+ feat_in->selection, rpc_out->payload,
+ count, offset, &return_code);
+ *out_len = sizeof(struct fwctl_rpc_cxl_out);
+ if (!out_size) {
+ rpc_out->size = 0;
+ rpc_out->retval = return_code;
+ return no_free_ptr(rpc_out);
+ }
+
+ rpc_out->size = out_size;
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len += out_size;
+
+ return no_free_ptr(rpc_out);
+}
+
+static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ const struct cxl_mbox_set_feat_in *feat_in;
+ size_t out_size, data_size;
+ u16 offset, return_code;
+ u32 flags;
+ int rc;
+
+ if (rpc_in->op_size <= sizeof(feat_in->hdr))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->set_feat_in;
+
+ if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
+ return ERR_PTR(-EPERM);
+
+ offset = le16_to_cpu(feat_in->offset);
+ flags = le32_to_cpu(feat_in->flags);
+ out_size = *out_len;
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ rpc_out->size = 0;
+
+ data_size = rpc_in->op_size - sizeof(feat_in->hdr);
+ rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
+ feat_in->version, feat_in->feat_data,
+ data_size, flags, offset, &return_code);
+ *out_len = sizeof(*rpc_out);
+ if (rc) {
+ rpc_out->retval = return_code;
+ return no_free_ptr(rpc_out);
+ }
+
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+
+ return no_free_ptr(rpc_out);
+}
+
+static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ enum fwctl_rpc_scope scope)
+{
+ u16 effects, imm_mask, reset_mask;
+ struct cxl_feat_entry *feat;
+ u32 flags;
+
+ if (rpc_in->op_size < sizeof(uuid_t))
+ return false;
+
+ feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
+ if (IS_ERR(feat))
+ return false;
+
+ /* Ensure that the attribute is changeable */
+ flags = le32_to_cpu(feat->flags);
+ if (!(flags & CXL_FEATURE_F_CHANGEABLE))
+ return false;
+
+ effects = le16_to_cpu(feat->effects);
+
+ /*
+ * Reject when reserved bits are set, since the effects are not
+ * comprehended by the driver.
+ */
+ if (effects & CXL_CMD_EFFECTS_RESERVED) {
+ dev_warn_once(cxlfs->cxlds->dev,
+ "Reserved bits set in the Feature effects field!\n");
+ return false;
+ }
+
+ /* Currently no user background command support */
+ if (effects & CXL_CMD_BACKGROUND)
+ return false;
+
+ /* Effects cause immediate change, highest security scope is needed */
+ imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
+ CXL_CMD_DATA_CHANGE_IMMEDIATE |
+ CXL_CMD_POLICY_CHANGE_IMMEDIATE |
+ CXL_CMD_LOG_CHANGE_IMMEDIATE;
+
+ reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
+ CXL_CMD_CONFIG_CHANGE_CONV_RESET |
+ CXL_CMD_CONFIG_CHANGE_CXL_RESET;
+
+ /* If neither an immediate nor a reset effect is set, the hardware has a bug */
+ if (!(effects & imm_mask) && !(effects & reset_mask))
+ return false;
+
+ /*
+ * If the Feature setting causes immediate configuration change
+ * then we need the full write permission policy.
+ */
+ if ((effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
+ return true;
+
+ /*
+ * If the Feature setting only causes configuration change
+ * after a reset, then the lesser level of write permission
+ * policy is ok.
+ */
+ if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
+ return true;
+
+ return false;
+}
+
+static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ enum fwctl_rpc_scope scope,
+ u16 opcode)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ case CXL_MBOX_OP_GET_FEATURE:
+ return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
+ case CXL_MBOX_OP_SET_FEATURE:
+ if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
+ return false;
+ return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
+ default:
+ return false;
+ }
+}
+
+static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len, u16 opcode)
+{
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
+ case CXL_MBOX_OP_GET_FEATURE:
+ return cxlctl_get_feature(cxlfs, rpc_in, out_len);
+ case CXL_MBOX_OP_SET_FEATURE:
+ return cxlctl_set_feature(cxlfs, rpc_in, out_len);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *in, size_t in_len, size_t *out_len)
+{
+ struct fwctl_device *fwctl_dev = uctx->fwctl;
+ struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
+ struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
+ const struct fwctl_rpc_cxl *rpc_in = in;
+ u16 opcode = rpc_in->opcode;
+
+ if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
+ return ERR_PTR(-EINVAL);
+
+ return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
+}
+
+static const struct fwctl_ops cxlctl_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_CXL,
+ .uctx_size = sizeof(struct fwctl_uctx),
+ .open_uctx = cxlctl_open_uctx,
+ .close_uctx = cxlctl_close_uctx,
+ .fw_rpc = cxlctl_fw_rpc,
+};
+
+DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))
+
+static void free_memdev_fwctl(void *_fwctl_dev)
+{
+ struct fwctl_device *fwctl_dev = _fwctl_dev;
+
+ fwctl_unregister(fwctl_dev);
+ fwctl_put(fwctl_dev);
+}
+
+int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_features_state *cxlfs;
+ int rc;
+
+ cxlfs = to_cxlfs(cxlds);
+ if (!cxlfs)
+ return -ENODEV;
+
+ /* No need to setup FWCTL if there are no user allowed features found */
+ if (!cxlfs->entries->num_user_features)
+ return -ENODEV;
+
+ struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
+ _fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
+ if (!fwctl_dev)
+ return -ENOMEM;
+
+ rc = fwctl_register(fwctl_dev);
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(host, free_memdev_fwctl,
+ no_free_ptr(fwctl_dev));
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
+
+MODULE_IMPORT_NS("FWCTL");
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 3df10517a327..d3a094ca01ad 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -16,14 +16,16 @@
* for enumerating these registers and capabilities.
*/
-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+ .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+ .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -34,7 +36,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -46,12 +49,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
@@ -67,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -143,8 +138,8 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
* @port: cxl_port to map
* @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
@@ -193,13 +188,12 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
*/
if (should_emulate_decoders(info)) {
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
- info->ranges > 1 ? "s" : "");
+ str_plural(info->ranges));
cxlhdm->decoder_count = info->ranges;
}
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -213,15 +207,45 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
__cxl_dpa_debug(file, p2, 1);
}
- up_read(&cxl_dpa_rwsem);
}
-EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
+
+/* See request_skip() kernel-doc */
+static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len,
+ const char *requester)
+{
+ const resource_size_t skip_end = skip_base + skip_len - 1;
+
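+ /* Clamp the skip to each partition and request/release that piece */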
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *part_res = &cxlds->part[i].res;
+ resource_size_t adjust_start, adjust_end, size;
+
+ adjust_start = max(skip_base, part_res->start);
+ adjust_end = min(skip_end, part_res->end);
+
+ if (adjust_end < adjust_start)
+ continue;
+
+ size = adjust_end - adjust_start + 1;
+
+ if (!requester)
+ __release_region(&cxlds->dpa_res, adjust_start, size);
+ else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
+ requester, 0))
+ return adjust_start - skip_base;
+ }
+
+ return skip_len;
+}
+#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)
/*
* Must be called in a context that synchronizes against this decoder's
@@ -235,13 +259,13 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
/* save @skip_start, before @res is released */
skip_start = res->start - cxled->skip;
__release_region(&cxlds->dpa_res, res->start, resource_size(res));
if (cxled->skip)
- __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ release_skip(cxlds, skip_start, cxled->skip);
cxled->skip = 0;
cxled->dpa_res = NULL;
put_device(&cxled->cxld.dev);
@@ -250,9 +274,8 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
__cxl_dpa_release(cxled);
- up_write(&cxl_dpa_rwsem);
}
/*
@@ -263,11 +286,63 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
devm_remove_action(&port->dev, cxl_dpa_release, cxled);
__cxl_dpa_release(cxled);
}
+/**
+ * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
+ * @cxlds: CXL.mem device context that parents @cxled
+ * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
+ * @skip_base: DPA < start of new DPA allocation (DPAnew)
+ * @skip_len: @skip_base + @skip_len == DPAnew
+ *
+ * DPA 'skip' arises from out-of-sequence DPA allocation events relative
+ * to free capacity across multiple partitions. It is a wasteful event
+ * as usable DPA gets thrown away, but if a deployment has, for example,
+ * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
+ * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
+ * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
+ * Protection" for more details.
+ *
+ * A 'skip' always covers the last allocated DPA in a previous partition
+ * to the start of the current partition to allocate. Allocations never
+ * start in the middle of a partition, and allocations are always
+ * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
+ * unwind order from forced in-order allocation).
+ *
+ * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
+ * would always be contained to a single partition. Given
+ * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
+ * might span "tail capacity of partition[0], all of partition[1], ...,
+ * all of partition[N-1]" to support allocating from partition[N]. That
+ * in turn interacts with the partition 'struct resource' boundaries
+ * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
+ * partition. I.e. this is a quirk of using a 'struct resource' tree to
+ * detect range conflicts while also tracking partition boundaries in
+ * @cxlds->dpa_res.
+ */
+static int request_skip(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_decoder *cxled,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len)
+{
+ resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
+ dev_name(&cxled->cxld.dev));
+
+ if (skipped == skip_len)
+ return 0;
+
+ dev_dbg(cxlds->dev,
+ "%s: failed to reserve skipped space (%pa %pa %pa)\n",
+ dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);
+
+ release_skip(cxlds, skip_base, skipped);
+
+ return -EBUSY;
+}
+
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -277,8 +352,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &port->dev;
struct resource *res;
+ int rc;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
if (!len) {
dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
@@ -305,14 +381,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
}
if (skipped) {
- res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
- dev_name(&cxled->cxld.dev), 0);
- if (!res) {
- dev_dbg(dev,
- "decoder%d.%d: failed to reserve skipped space\n",
- port->id, cxled->cxld.id);
- return -EBUSY;
- }
+ rc = request_skip(cxlds, cxled, base - skipped, skipped);
+ if (rc)
+ return rc;
}
res = __request_region(&cxlds->dpa_res, base, len,
dev_name(&cxled->cxld.dev), 0);
@@ -320,28 +391,117 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
port->id, cxled->cxld.id);
if (skipped)
- __release_region(&cxlds->dpa_res, base - skipped,
- skipped);
+ release_skip(cxlds, base - skipped, skipped);
return -EBUSY;
}
cxled->dpa_res = res;
cxled->skip = skipped;
- if (resource_contains(&cxlds->pmem_res, res))
- cxled->mode = CXL_DECODER_PMEM;
- else if (resource_contains(&cxlds->ram_res, res))
- cxled->mode = CXL_DECODER_RAM;
- else {
- dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
- port->id, cxled->cxld.id, cxled->dpa_res);
- cxled->mode = CXL_DECODER_MIXED;
- }
+ /*
+ * When allocating new capacity, ->part is already set, when
+ * discovering decoder settings at initial enumeration, ->part
+ * is not set.
+ */
+ if (cxled->part < 0)
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (resource_contains(&cxlds->part[i].res, res)) {
+ cxled->part = i;
+ break;
+ }
+
+ if (cxled->part < 0)
+ dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
+ port->id, cxled->cxld.id, res);
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
}
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
+{
+ int rc;
+
+ *res = (struct resource) {
+ .name = type,
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
+ return 0;
+ }
+ rc = request_resource(parent, res);
+ if (rc) {
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
+ return rc;
+ }
+
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
+
+static const char *cxl_mode_name(enum cxl_partition_mode mode)
+{
+ switch (mode) {
+ case CXL_PARTMODE_RAM:
+ return "ram";
+ case CXL_PARTMODE_PMEM:
+ return "pmem";
+ default:
+ return "";
+ }
+}
+
+/* if this fails the caller must destroy @cxlds, there is no recovery */
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
+{
+ struct device *dev = cxlds->dev;
+
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+
+ if (cxlds->nr_partitions)
+ return -EBUSY;
+
+ if (!info->size || !info->nr_partitions) {
+ cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+ cxlds->nr_partitions = 0;
+ return 0;
+ }
+
+ cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
+
+ for (int i = 0; i < info->nr_partitions; i++) {
+ const struct cxl_dpa_part_info *part = &info->part[i];
+ int rc;
+
+ cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
+ cxlds->part[i].mode = part->mode;
+
+ /* Require ordered + contiguous partitions */
+ if (i) {
+ const struct cxl_dpa_part_info *prev = &info->part[i - 1];
+
+ if (prev->range.end + 1 != part->range.start)
+ return -EINVAL;
+ }
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
+ part->range.start, range_len(&part->range),
+ cxl_mode_name(part->mode));
+ if (rc)
+ return rc;
+ cxlds->nr_partitions++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_dpa_setup);
+
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -349,198 +509,177 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_port *port = cxled_to_port(cxled);
int rc;
- down_write(&cxl_dpa_rwsem);
- rc = __cxl_dpa_reserve(cxled, base, len, skipped);
- up_write(&cxl_dpa_rwsem);
+ scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
if (rc)
return rc;
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- resource_size_t size = 0;
-
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
if (cxled->dpa_res)
- size = resource_size(cxled->dpa_res);
- up_read(&cxl_dpa_rwsem);
+ return resource_size(cxled->dpa_res);
- return size;
+ return 0;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
return base;
}
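+/* Test whether @addr falls within @res by probing a 1-byte range */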
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
+{
+ struct resource _addr = DEFINE_RES_MEM(addr, 1);
+
+ return resource_contains(res, &_addr);
+}
+
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- int rc;
- down_write(&cxl_dpa_rwsem);
- if (!cxled->dpa_res) {
- rc = 0;
- goto out;
- }
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+ if (!cxled->dpa_res)
+ return 0;
if (cxled->cxld.region) {
dev_dbg(dev, "decoder assigned to: %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.id != port->hdm_end) {
dev_dbg(dev, "expected decoder%d.%d\n", port->id,
port->hdm_end);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
+
devm_cxl_dpa_release(cxled);
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
- return rc;
+ return 0;
}
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- int rc;
+ int part;
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
+ return -EBUSY;
+
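+ /* Find the first partition configured with the requested mode */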
+ for (part = 0; part < cxlds->nr_partitions; part++)
+ if (cxlds->part[part].mode == mode)
+ break;
+
+ if (part >= cxlds->nr_partitions) {
dev_dbg(dev, "unsupported mode: %d\n", mode);
return -EINVAL;
}
- down_write(&cxl_dpa_rwsem);
- if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
- rc = -EBUSY;
- goto out;
- }
-
- /*
- * Only allow modes that are supported by the current partition
- * configuration
- */
- if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
- dev_dbg(dev, "no available pmem capacity\n");
- rc = -ENXIO;
- goto out;
- }
- if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
- dev_dbg(dev, "no available ram capacity\n");
- rc = -ENXIO;
- goto out;
+ if (!resource_size(&cxlds->part[part].res)) {
+ dev_dbg(dev, "no available capacity for mode: %d\n", mode);
+ return -ENXIO;
}
- cxled->mode = mode;
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
-
- return rc;
+ cxled->part = part;
+ return 0;
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- resource_size_t free_ram_start, free_pmem_start;
- struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- resource_size_t start, avail, skip;
+ struct resource *res, *prev = NULL;
+ resource_size_t start, avail, skip, skip_start;
struct resource *p, *last;
- int rc;
+ int part;
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
- last = p;
- if (last)
- free_ram_start = last->end + 1;
- else
- free_ram_start = cxlds->ram_res.start;
+ part = cxled->part;
+ if (part < 0) {
+ dev_dbg(dev, "partition not set\n");
+ return -EBUSY;
+ }
- for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ res = &cxlds->part[part].res;
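+ /* New capacity starts after the last allocation in this partition */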
+ for (p = res->child, last = NULL; p; p = p->sibling)
last = p;
if (last)
- free_pmem_start = last->end + 1;
+ start = last->end + 1;
else
- free_pmem_start = cxlds->pmem_res.start;
-
- if (cxled->mode == CXL_DECODER_RAM) {
- start = free_ram_start;
- avail = cxlds->ram_res.end - start + 1;
- skip = 0;
- } else if (cxled->mode == CXL_DECODER_PMEM) {
- resource_size_t skip_start, skip_end;
+ start = res->start;
- start = free_pmem_start;
- avail = cxlds->pmem_res.end - start + 1;
- skip_start = free_ram_start;
-
- /*
- * If some pmem is already allocated, then that allocation
- * already handled the skip.
- */
- if (cxlds->pmem_res.child &&
- skip_start == cxlds->pmem_res.child->start)
- skip_end = skip_start - 1;
- else
- skip_end = start - 1;
- skip = skip_end - skip_start + 1;
- } else {
- dev_dbg(dev, "mode not set\n");
- rc = -EINVAL;
- goto out;
+ /*
+ * To allocate at partition N, a skip needs to be calculated for all
+ * unallocated space at lower partitions indices.
+ *
+ * If a partition has any allocations, the search can end because a
+ * previous cxl_dpa_alloc() invocation is assumed to have accounted for
+ * all previous partitions.
+ */
+ skip_start = CXL_RESOURCE_NONE;
+ for (int i = part; i; i--) {
+ prev = &cxlds->part[i - 1].res;
+ for (p = prev->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last) {
+ skip_start = last->end + 1;
+ break;
+ }
+ skip_start = prev->start;
}
+ avail = res->end - start + 1;
+ if (skip_start == CXL_RESOURCE_NONE)
+ skip = 0;
+ else
+ skip = res->start - skip_start;
+
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxl_decoder_mode_name(cxled->mode), &avail);
- rc = -ENOSPC;
- goto out;
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
+ return -ENOSPC;
}
- rc = __cxl_dpa_reserve(cxled, start, size, skip);
-out:
- up_write(&cxl_dpa_rwsem);
+ return __cxl_dpa_reserve(cxled, start, size, skip);
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+
+ rc = __cxl_dpa_alloc(cxled, size);
if (rc)
return rc;
@@ -624,46 +763,12 @@ static int cxld_await_commit(void __iomem *hdm, int id)
return -ETIMEDOUT;
}
-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
- struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- int id = cxld->id, rc;
+ int id = cxld->id;
u64 base, size;
u32 ctrl;
- if (cxld->flags & CXL_DECODER_F_ENABLE)
- return 0;
-
- if (cxl_num_decoders_committed(port) != id) {
- dev_dbg(&port->dev,
- "%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id,
- cxl_num_decoders_committed(port));
- return -EBUSY;
- }
-
- /*
- * For endpoint decoders hosted on CXL memory devices that
- * support the sanitize operation, make sure sanitize is not in-flight.
- */
- if (is_endpoint_decoder(&cxld->dev)) {
- struct cxl_endpoint_decoder *cxled =
- to_cxl_endpoint_decoder(&cxld->dev);
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
-
- if (mds && mds->security.sanitize_active) {
- dev_dbg(&cxlmd->dev,
- "attempted to commit %s during sanitize\n",
- dev_name(&cxld->dev));
- return -EBUSY;
- }
- }
-
- down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
cxld_set_interleave(cxld, &ctrl);
@@ -697,7 +802,47 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
}
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+ setup_hw_decoder(cxld, hdm);
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
@@ -712,7 +857,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_decoder_reset(struct cxl_decoder *cxld)
+static int commit_reap(struct device *dev, void *data)
+{
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_decoder *cxld;
+
+ if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if (port->commit_end == cxld->id &&
+ ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
+ dev_name(&cxld->dev), port->commit_end);
+ }
+
+ return 0;
+}
+
+void cxl_port_commit_reap(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ lockdep_assert_held_write(&cxl_rwsem.region);
+
+ /*
+ * Once the highest committed decoder is disabled, free any other
+ * decoders that remain pinned by an out-of-order release.
+ */
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
+ port->commit_end);
+ device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
+ commit_reap);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");
+
+static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@@ -721,16 +903,15 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
u32 ctrl;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
- return 0;
+ return;
- if (port->commit_end != id) {
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- return -EBUSY;
- }
- down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -739,9 +920,7 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
- port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
/* Userspace is now responsible for reconfiguring this decoder */
@@ -751,8 +930,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxled->state = CXL_DECODER_STATE_MANUAL;
}
-
- return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(
@@ -797,7 +974,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
+ void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
@@ -851,7 +1028,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_DEVMEM;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
@@ -916,7 +1093,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -981,8 +1158,8 @@ static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
* @cxlhdm: Structure to populate with HDM capabilities
* @info: cached DVSEC range register info
*/
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
@@ -992,7 +1169,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -1020,8 +1196,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
- &dpa_base, info);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
dev_warn(&port->dev,
"Failed to initialize decoder%d.%d\n",
@@ -1029,7 +1204,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
"Failed to add decoder%d.%d\n", port->id, i);
@@ -1039,4 +1214,71 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and set up switch decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and set up endpoint decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
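
For orientation, the two exported entry points above split decoder setup by
port type. A minimal sketch of how a caller might select between them during
port probe (example_port_probe() is hypothetical; the real cxl_port/cxl_mem
drivers wire these up through their own probe paths):

	static int example_port_probe(struct cxl_port *port)
	{
		/* Endpoints: DVSEC range decode + HDM init + enumeration */
		if (is_cxl_endpoint(port))
			return devm_cxl_endpoint_decoders_setup(port);

		/* Switch ports: HDM enumeration, passthrough fallback */
		return __devm_cxl_switch_port_decoders_setup(port);
	}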
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 5175138c4fb7..fa6dd0c94656 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -11,6 +11,7 @@
#include "core.h"
#include "trace.h"
+#include "mce.h"
static bool cxl_raw_allow_all;
@@ -281,7 +282,7 @@ int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
return -EIO;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
@@ -349,40 +350,39 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
return true;
}
-static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
- struct cxl_memdev_state *mds, u16 opcode,
+static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
+ struct cxl_mailbox *cxl_mbox, u16 opcode,
size_t in_size, size_t out_size, u64 in_payload)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- *mbox = (struct cxl_mbox_cmd) {
+ *mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = opcode,
.size_in = in_size,
};
if (in_size) {
- mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
- in_size);
- if (IS_ERR(mbox->payload_in))
- return PTR_ERR(mbox->payload_in);
+ mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+ in_size);
+ if (IS_ERR(mbox_cmd->payload_in))
+ return PTR_ERR(mbox_cmd->payload_in);
- if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
- dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
+ if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
+ dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
cxl_mem_opcode_to_name(opcode));
- kvfree(mbox->payload_in);
+ kvfree(mbox_cmd->payload_in);
return -EBUSY;
}
}
/* Prepare to handle a full payload for variable sized output */
if (out_size == CXL_VARIABLE_PAYLOAD)
- mbox->size_out = cxl_mbox->payload_size;
+ mbox_cmd->size_out = cxl_mbox->payload_size;
else
- mbox->size_out = out_size;
+ mbox_cmd->size_out = out_size;
- if (mbox->size_out) {
- mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
- if (!mbox->payload_out) {
- kvfree(mbox->payload_in);
+ if (mbox_cmd->size_out) {
+ mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
+ if (!mbox_cmd->payload_out) {
+ kvfree(mbox_cmd->payload_in);
return -ENOMEM;
}
}
@@ -397,10 +397,8 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_memdev_state *mds)
+ struct cxl_mailbox *cxl_mbox)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
-
if (send_cmd->raw.rsvd)
return -EINVAL;
@@ -415,7 +413,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
return -EPERM;
- dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
+ dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");
*mem_cmd = (struct cxl_mem_command) {
.info = {
@@ -431,7 +429,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_memdev_state *mds)
+ struct cxl_mailbox *cxl_mbox)
{
struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
const struct cxl_command_info *info = &c->info;
@@ -446,11 +444,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
return -EINVAL;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, mds->enabled_cmds))
+ if (!test_bit(info->id, cxl_mbox->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, mds->exclusive_cmds))
+ if (test_bit(info->id, cxl_mbox->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -479,7 +477,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
* @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
- * @mds: The driver data for the operation
+ * @cxl_mbox: CXL mailbox context
* @send_cmd: &struct cxl_send_command copied in from userspace.
*
* Return:
@@ -494,10 +492,9 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
* safe to send to the hardware.
*/
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
- struct cxl_memdev_state *mds,
+ struct cxl_mailbox *cxl_mbox,
const struct cxl_send_command *send_cmd)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mem_command mem_cmd;
int rc;
@@ -514,24 +511,23 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
/* Sanitize and construct a cxl_mem_command */
if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
- rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
+ rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
else
- rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
+ rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);
if (rc)
return rc;
/* Sanitize and construct a cxl_mbox_cmd */
- return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
+ return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
mem_cmd.info.size_in, mem_cmd.info.size_out,
send_cmd->in.payload);
}
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mem_query_commands __user *q)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct device *dev = &cxlmd->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_mem_command *cmd;
u32 n_commands;
int j = 0;
@@ -552,9 +548,9 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
cxl_for_each_cmd(cmd) {
struct cxl_command_info info = cmd->info;
- if (test_bit(info.id, mds->enabled_cmds))
+ if (test_bit(info.id, cxl_mbox->enabled_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
- if (test_bit(info.id, mds->exclusive_cmds))
+ if (test_bit(info.id, cxl_mbox->exclusive_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
@@ -569,7 +565,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @mds: The driver data for the operation
+ * @cxl_mbox: The mailbox context for the operation.
* @mbox_cmd: The validated mailbox command.
* @out_payload: Pointer to userspace's output payload.
* @size_out: (Input) Max payload size to copy out.
@@ -590,13 +586,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
+static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd,
u64 out_payload, s32 *size_out,
u32 *retval)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- struct device *dev = mds->cxlds.dev;
+ struct device *dev = cxl_mbox->host;
int rc;
dev_dbg(dev,
@@ -633,10 +628,9 @@ out:
return rc;
}
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct device *dev = &cxlmd->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_send_command send;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -646,11 +640,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
+ rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
if (rc)
return rc;
- rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
+ rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
&send.out.size, &send.retval);
if (rc)
return rc;
@@ -713,6 +707,35 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
return 0;
}
+static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
+{
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ case CXL_MBOX_OP_GET_FEATURE:
+ (*ro_cmds)++;
+ return 1;
+ case CXL_MBOX_OP_SET_FEATURE:
+ (*wr_cmds)++;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* 'Get Supported Features' and 'Get Feature' */
+#define MAX_FEATURES_READ_CMDS 2
+static void set_features_cap(struct cxl_mailbox *cxl_mbox,
+ int ro_cmds, int wr_cmds)
+{
+ /* Setting up Features capability while walking the CEL */
+ if (ro_cmds == MAX_FEATURES_READ_CMDS) {
+ if (wr_cmds)
+ cxl_mbox->feat_cap = CXL_FEATURES_RW;
+ else
+ cxl_mbox->feat_cap = CXL_FEATURES_RO;
+ }
+}
+
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
* @mds: The driver data for the operation
@@ -724,10 +747,11 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
*/
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
struct device *dev = mds->cxlds.dev;
- int i;
+ int i, ro_cmds = 0, wr_cmds = 0;
cel_entry = (struct cxl_cel_entry *) cel;
@@ -737,10 +761,13 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
int enabled = 0;
if (cmd) {
- set_bit(cmd->info.id, mds->enabled_cmds);
+ set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
enabled++;
}
+ enabled += check_features_opcodes(opcode, &ro_cmds,
+ &wr_cmds);
+
if (cxl_is_poison_command(opcode)) {
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
enabled++;
@@ -754,6 +781,8 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
enabled ? "enabled" : "unsupported by driver");
}
+
+ set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
}
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
@@ -807,6 +836,7 @@ static const uuid_t log_uuid[] = {
*/
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_supported_logs *gsl;
struct device *dev = mds->cxlds.dev;
struct cxl_mem_command *cmd;
@@ -845,7 +875,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, mds->enabled_cmds);
+ set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -854,7 +884,7 @@ out:
kvfree(gsl);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
@@ -869,9 +899,13 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
return;
}
+ if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
+ trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
+ return;
+ }
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
- u64 dpa, hpa = ULLONG_MAX;
+ u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
struct cxl_region *cxlr;
/*
@@ -879,22 +913,53 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
* translations. Take topology mutation locks and lookup
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
*/
- guard(rwsem_read)(&cxl_region_rwsem);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
- if (cxlr)
+ if (cxlr) {
+ u64 cache_size = cxlr->params.cache_size;
+
hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+ if (cache_size)
+ hpa_alias = hpa - cache_size;
+ }
+
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+ if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
+ if (evt->gen_media.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->gen_media.cme_count));
+ else
+ WARN_ON_ONCE(evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
- &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ hpa_alias, &evt->gen_media);
+ } else if (event_type == CXL_CPER_EVENT_DRAM) {
+ if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
+ if (evt->dram.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->dram.cvme_count));
+ else
+ WARN_ON_ONCE(evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
+ &evt->dram);
+ }
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
@@ -909,6 +974,8 @@ static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
ev_type = CXL_CPER_EVENT_DRAM;
else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
ev_type = CXL_CPER_EVENT_MEM_MODULE;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_SPARING;
cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
@@ -1063,7 +1130,7 @@ void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
if (status & CXLDEV_EVENT_STATUS_INFO)
cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
/**
* cxl_mem_get_partition_info - Get partition info
@@ -1097,10 +1164,6 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
mds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_volatile_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_persistent_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
@@ -1155,7 +1218,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
@@ -1222,74 +1285,54 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_port *endpoint;
- int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
*/
if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
- rc = __cxl_mem_sanitize(mds, cmd);
- else
- rc = -EBUSY;
- up_read(&cxl_region_rwsem);
+ return __cxl_mem_sanitize(mds, cmd);
- return rc;
+ return -EBUSY;
}
-static int add_dpa_res(struct device *dev, struct resource *parent,
- struct resource *res, resource_size_t start,
- resource_size_t size, const char *type)
+static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
{
- int rc;
-
- res->name = type;
- res->start = start;
- res->end = start + size - 1;
- res->flags = IORESOURCE_MEM;
- if (resource_size(res) == 0) {
- dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
- return 0;
- }
- rc = request_resource(parent, res);
- if (rc) {
- dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
- res, rc);
- return rc;
- }
+ int i = info->nr_partitions;
- dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+ if (size == 0)
+ return;
- return 0;
+ info->part[i].range = (struct range) {
+ .start = start,
+ .end = start + size - 1,
+ };
+ info->part[i].mode = mode;
+ info->nr_partitions++;
}
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
int rc;
if (!cxlds->media_ready) {
- cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
- cxlds->ram_res = DEFINE_RES_MEM(0, 0);
- cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
+ info->size = 0;
return 0;
}
- cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
+ info->size = mds->total_bytes;
if (mds->partition_align_bytes == 0) {
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->volatile_only_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->volatile_only_bytes,
- mds->persistent_only_bytes, "pmem");
+ add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->volatile_only_bytes,
+ mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
+ return 0;
}
rc = cxl_mem_get_partition_info(mds);
@@ -1298,15 +1341,52 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
return rc;
}
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->active_volatile_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->active_volatile_bytes,
- mds->active_persistent_bytes, "pmem");
+ add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
+ CXL_PARTMODE_PMEM);
+
+ return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
+
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_get_health_info_out hi;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
+ .size_out = sizeof(hi),
+ .payload_out = &hi,
+ };
+
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (!rc)
+ *count = le32_to_cpu(hi.dirty_shutdown_cnt);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
+
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_mbox_set_shutdown_state_in in = {
+ .state = 1
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+ .size_in = sizeof(in),
+ .payload_in = &in,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
@@ -1333,7 +1413,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr)
@@ -1345,8 +1425,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int nr_records = 0;
int rc;
- rc = mutex_lock_interruptible(&mds->poison.lock);
- if (rc)
+ ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
return rc;
po = mds->poison.list_out;
@@ -1381,10 +1461,9 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
}
} while (po->flags & CXL_POISON_FLAG_MORE);
- mutex_unlock(&mds->poison.lock);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
static void free_poison_buf(void *buf)
{
@@ -1417,10 +1496,10 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
return rc;
}
- mutex_init(&mds->poison.lock);
+ mutex_init(&mds->poison.mutex);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
@@ -1433,11 +1512,12 @@ int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
struct cxl_memdev_state *mds;
+ int rc;
mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
if (!mds) {
@@ -1448,14 +1528,19 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
mds->cxlds.reg_map.host = dev;
+ mds->cxlds.cxl_mbox.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
- mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
+
+ rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+ if (rc == -EOPNOTSUPP)
+ dev_warn(dev, "CXL MCE unsupported\n");
+ else if (rc)
+ return ERR_PTR(rc);
return mds;
}
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
void __init cxl_mbox_init(void)
{
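
The mbox.c conversion routes both the user (ioctl) and kernel command paths
through struct cxl_mailbox instead of struct cxl_memdev_state. The calling
convention for a fixed-size internal command mirrors the new
cxl_get_dirty_count() above (a sketch of the same pattern, not a new API):

	struct cxl_mbox_cmd cmd = {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,	/* any fixed-size cmd */
		.size_out = sizeof(hi),			/* caller-owned payload */
		.payload_out = &hi,
	};

	/* Returns 0 only when the hardware completed and sizes validated */
	rc = cxl_internal_send_cmd(cxl_mbox, &cmd);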
diff --git a/drivers/cxl/core/mce.c b/drivers/cxl/core/mce.c
new file mode 100644
index 000000000000..ff8d078c6ca1
--- /dev/null
+++ b/drivers/cxl/core/mce.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/set_memory.h>
+#include <asm/mce.h>
+#include <cxlmem.h>
+#include "mce.h"
+
+static int cxl_handle_mce(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state,
+ mce_notifier);
+ struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+ struct mce *mce = data;
+ u64 spa, spa_alias;
+ unsigned long pfn;
+
+ if (!mce || !mce_usable_address(mce))
+ return NOTIFY_DONE;
+
+ if (!endpoint)
+ return NOTIFY_DONE;
+
+ spa = mce->addr & MCI_ADDR_PHYSADDR;
+
+ pfn = spa >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return NOTIFY_DONE;
+
+ spa_alias = cxl_port_get_spa_cache_alias(endpoint, spa);
+ if (spa_alias == ~0ULL)
+ return NOTIFY_DONE;
+
+ pfn = spa_alias >> PAGE_SHIFT;
+
+ /*
+ * Take down the aliased memory page. The original memory page flagged
+ * by the MCE will be taken care of by the standard MCE handler.
+ */
+ dev_emerg(mds->cxlds.dev, "Offlining aliased SPA address: %#llx\n",
+ spa_alias);
+ if (!memory_failure(pfn, 0))
+ set_mce_nospec(pfn);
+
+ return NOTIFY_OK;
+}
+
+static void cxl_unregister_mce_notifier(void *mce_notifier)
+{
+ mce_unregister_decode_chain(mce_notifier);
+}
+
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ mce_notifier->notifier_call = cxl_handle_mce;
+ mce_notifier->priority = MCE_PRIO_UC;
+ mce_register_decode_chain(mce_notifier);
+
+ return devm_add_action_or_reset(dev, cxl_unregister_mce_notifier,
+ mce_notifier);
+}
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
new file mode 100644
index 000000000000..ca272e8db6c7
--- /dev/null
+++ b/drivers/cxl/core/mce.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#ifndef _CXL_CORE_MCE_H_
+#define _CXL_CORE_MCE_H_
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_CXL_MCE
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier);
+#else
+static inline int
+devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
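
The stub above returns -EOPNOTSUPP when CONFIG_CXL_MCE is disabled, so
callers can register unconditionally and treat only other errors as fatal;
this is the pattern cxl_memdev_state_create() adopts in the mbox.c hunk
earlier:

	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
	if (rc == -EOPNOTSUPP)
		dev_warn(dev, "CXL MCE unsupported\n");	/* soft failure */
	else if (rc)
		return ERR_PTR(rc);		/* real registration error */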
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 84fefb76dafa..e370d733e440 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
+ devm_cxl_memdev_edac_release(cxlmd);
kfree(cxlmd);
}
@@ -75,12 +76,20 @@ static ssize_t label_storage_size_show(struct device *dev,
}
static DEVICE_ATTR_RO(label_storage_size);
+static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
+{
+ /* Static RAM is only expected at partition 0. */
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return 0;
+ return resource_size(&cxlds->part[0].res);
+}
+
static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->ram_res);
+ unsigned long long len = cxl_ram_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -93,7 +102,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->pmem_res);
+ unsigned long long len = cxl_pmem_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -145,8 +154,8 @@ static ssize_t security_state_show(struct device *dev,
return sysfs_emit(buf, "frozen\n");
if (state & CXL_PMEM_SEC_STATE_LOCKED)
return sysfs_emit(buf, "locked\n");
- else
- return sysfs_emit(buf, "unlocked\n");
+
+ return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
__ATTR(state, 0444, security_state_show, NULL);
@@ -191,6 +200,14 @@ static ssize_t security_erase_store(struct device *dev,
static struct device_attribute dev_attr_security_erase =
__ATTR(erase, 0200, NULL, security_erase_store);
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ return test_bit(cmd, mds->poison.enabled_cmds);
+}
+
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -198,22 +215,17 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
int rc = 0;
/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
- if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc)
- return rc;
- }
- if (resource_size(&cxlds->ram_res)) {
- offset = cxlds->ram_res.start;
- length = resource_size(&cxlds->ram_res);
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *res = &cxlds->part[i].res;
+
+ offset = res->start;
+ length = resource_size(res);
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
/*
* Invalid Physical Address is not an error for
* volatile addresses. Device support is optional.
*/
- if (rc == -EFAULT)
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
rc = 0;
}
return rc;
@@ -228,15 +240,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
@@ -245,12 +255,10 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
/* Regions mapped, collect poison by endpoint */
rc = cxl_get_poison_by_endpoint(port);
}
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
@@ -263,7 +271,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
dev_dbg(cxlds->dev, "device has no dpa resource\n");
return -EINVAL;
}
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
dpa, &cxlds->dpa_res);
return -EINVAL;
@@ -276,7 +284,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
-int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
@@ -288,19 +296,12 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
- return rc;
- }
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
inject.address = cpu_to_le64(dpa);
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -310,7 +311,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
};
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -323,15 +324,27 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
-int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_inject_poison_locked(cxlmd, dpa);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
+
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
@@ -343,19 +356,12 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
- return rc;
- }
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
/*
* In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -374,7 +380,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -387,13 +393,25 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
+
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_clear_poison_locked(cxlmd, dpa);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
static struct attribute *cxl_memdev_attributes[] = {
&dev_attr_serial.attr,
@@ -404,14 +422,21 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
+{
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return &cxlds->part[i].perf;
+ return NULL;
+}
+
static ssize_t pmem_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_pmem_qos_class =
@@ -423,14 +448,20 @@ static struct attribute *cxl_memdev_pmem_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
+{
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return NULL;
+ return &cxlds->part[0].perf;
+}
+
static ssize_t ram_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_ram_qos_class =
@@ -466,11 +497,11 @@ static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);
- if (a == &dev_attr_ram_qos_class.attr)
- if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_ram_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -485,11 +516,11 @@ static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);
- if (a == &dev_attr_pmem_qos_class.attr)
- if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_pmem_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -537,7 +568,7 @@ void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
}
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
@@ -550,7 +581,7 @@ bool is_cxl_memdev(const struct device *dev)
{
return dev->type == &cxl_memdev_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
@@ -564,12 +595,13 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds)
{
- down_write(&cxl_memdev_rwsem);
- bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
- CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+ guard(rwsem_write)(&cxl_memdev_rwsem);
+ bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
+ cmds, CXL_MEM_COMMAND_ID_MAX);
}
-EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
+EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
@@ -579,20 +611,20 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds)
{
- down_write(&cxl_memdev_rwsem);
- bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
- CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+ guard(rwsem_write)(&cxl_memdev_rwsem);
+ bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
+ cmds, CXL_MEM_COMMAND_ID_MAX);
}
-EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
cxlmd->cxlds = NULL;
- up_write(&cxl_memdev_rwsem);
}
static void cxl_memdev_unregister(void *_cxlmd)
@@ -656,11 +688,14 @@ err:
static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
unsigned long arg)
{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
switch (cmd) {
case CXL_MEM_QUERY_COMMANDS:
- return cxl_query_cmd(cxlmd, (void __user *)arg);
+ return cxl_query_cmd(cxl_mbox, (void __user *)arg);
case CXL_MEM_SEND_COMMAND:
- return cxl_send_cmd(cxlmd, (void __user *)arg);
+ return cxl_send_cmd(cxl_mbox, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -671,15 +706,13 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
{
struct cxl_memdev *cxlmd = file->private_data;
struct cxl_dev_state *cxlds;
- int rc = -ENXIO;
- down_read(&cxl_memdev_rwsem);
+ guard(rwsem_read)(&cxl_memdev_rwsem);
cxlds = cxlmd->cxlds;
if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
- up_read(&cxl_memdev_rwsem);
+ return __cxl_memdev_ioctl(cxlmd, cmd, arg);
- return rc;
+ return -ENXIO;
}
static int cxl_memdev_open(struct inode *inode, struct file *file)
@@ -994,10 +1027,11 @@ static void cxl_remove_fw_upload(void *fwl)
int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct device *dev = &cxlds->cxlmd->dev;
struct fw_upload *fwl;
- if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds))
return 0;
fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
@@ -1006,7 +1040,7 @@ int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
return PTR_ERR(fwl);
return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
@@ -1060,7 +1094,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");
static void sanitize_teardown_notifier(void *data)
{
@@ -1105,7 +1139,7 @@ int devm_cxl_sanitize_setup_notifier(struct device *host,
return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");
__init int cxl_memdev_init(void)
{
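
The memdev.c conversions lean on the conditional scoped-lock helpers:
ACQUIRE() declares a guard that takes the lock interruptibly, ACQUIRE_ERR()
reports whether the acquisition succeeded, and the guard releases the lock
automatically at end of scope. Side by side (sketch; the nested region/dpa
ordering is preserved because guards release in reverse declaration order):

	/* Before: manual unwind, easy to leak on early return */
	rc = down_read_interruptible(&cxl_rwsem.region);
	if (rc)
		return rc;
	rc = down_read_interruptible(&cxl_rwsem.dpa);
	if (rc) {
		up_read(&cxl_rwsem.region);
		return rc;
	}
	/* ... */
	up_read(&cxl_rwsem.dpa);
	up_read(&cxl_rwsem.region);

	/* After: early returns drop both locks automatically */
	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
		return rc;
	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
		return rc;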
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 5b46bc46aaa9..18825e1505d6 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -24,6 +24,53 @@ static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
+static int pci_get_port_num(struct pci_dev *pdev)
+{
+ u32 lnkcap;
+ int type;
+
+ type = pci_pcie_type(pdev);
+ if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
+ return -EINVAL;
+
+ if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENXIO;
+
+ return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+}
+
+/**
+ * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * @port: cxl_port that hosts the dport
+ * @dport_dev: 'struct device' of the dport
+ *
+ * Returns the allocated dport on success or ERR_PTR() of -errno on error
+ */
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_register_map map;
+ struct pci_dev *pdev;
+ int port_num, rc;
+
+ if (!dev_is_pci(dport_dev))
+ return ERR_PTR(-EINVAL);
+
+ pdev = to_pci_dev(dport_dev);
+ port_num = pci_get_port_num(pdev);
+ if (port_num < 0)
+ return ERR_PTR(port_num);
+
+ rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ if (rc)
+ return ERR_PTR(rc);
+
+ device_lock_assert(&port->dev);
+ return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
+
struct cxl_walk_context {
struct pci_bus *bus;
struct cxl_port *port;
@@ -101,7 +148,7 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
return ctx.error;
return ctx.count;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
@@ -209,7 +256,7 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
@@ -252,9 +299,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
}
/* require dvsec ranges to be covered by a locked platform window */
-static int dvsec_range_allowed(struct device *dev, void *arg)
+static int dvsec_range_allowed(struct device *dev, const void *arg)
{
- struct range *dev_range = arg;
+ const struct range *dev_range = arg;
struct cxl_decoder *cxld;
if (!is_root_decoder(dev))
@@ -291,11 +338,11 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct device *dev = cxlds->dev;
int hdm_count, rc, i, ranges = 0;
int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
@@ -386,7 +433,7 @@ int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");
/**
* cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
@@ -415,17 +462,20 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
return devm_cxl_enable_mem(&port->dev, cxlds);
- else if (!hdm)
- return -ENODEV;
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ /*
+ * If the HDM Decoder Capability does not exist and DVSEC was
+ * not set up, the DVSEC-based emulation cannot be used.
+ */
+ if (!hdm)
return -ENODEV;
- }
+ /* The HDM Decoder Capability exists but is globally disabled. */
+
+ /*
+ * If the DVSEC CXL Range registers are not enabled, just
+ * enable and use the HDM Decoder Capability registers.
+ */
if (!info->mem_enabled) {
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
@@ -434,6 +484,26 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return devm_cxl_enable_mem(&port->dev, cxlds);
}
+ /*
+ * Per CXL 2.0 Sections 8.1.3.8.3 and 8.1.3.8.4 (DVSEC CXL Range 1
+ * Base [High,Low]), the device ignores the range register values
+ * when HDM operation is enabled, but the spec also recommends
+ * matching DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero
+ * info->ranges are expected even though Linux does not require or
+ * maintain that match. Check that at least one DVSEC range is
+ * enabled and allowed by the platform, i.e. covered by a locked
+ * platform window (CFMWS). Fail otherwise, as the endpoint's
+ * decoders cannot be used.
+ */
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
+ }
+
for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
@@ -453,18 +523,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENXIO;
}
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
@@ -648,7 +709,7 @@ err:
devm_kfree(dev, buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
-EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");
static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
void __iomem *ras_base)
@@ -805,7 +866,7 @@ void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
cxl_disable_rch_root_ints(dport);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL");
static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
struct cxl_dport *dport)
@@ -916,7 +977,7 @@ void cxl_cor_error_detected(struct pci_dev *pdev)
cxl_handle_endpoint_cor_ras(cxlds);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL");
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -966,7 +1027,7 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
}
return PCI_ERS_RESULT_NEED_RESET;
}
-EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_error_detected, "CXL");
static int cxl_flit_size(struct pci_dev *pdev)
{
@@ -1030,7 +1091,7 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
return device_for_each_child(&port->dev, port,
__cxl_endpoint_decoder_reset_detected);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
{
@@ -1054,3 +1115,146 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
return 0;
}
+
+/*
+ * Set max timeout such that platforms will optimize GPF flow to avoid
+ * the implied worst-case scenario delays. On a sane platform, all
+ * devices should always complete GPF within the energy budget of
+ * the GPF flow. The kernel does not have enough information to pick
+ * anything better than "maximize timeouts and hope it works".
+ *
+ * A misbehaving device could block forward progress of GPF for all
+ * the other devices, exhausting the energy budget of the platform.
+ * However, the spec seems to assume that moving on from slow-to-respond
+ * devices is a virtue. It is not possible to know whether, in actuality,
+ * the slow-to-respond device is *the* most critical device in the
+ * system to wait for.
+ */
+#define GPF_TIMEOUT_BASE_MAX 2
+#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
+
+u16 cxl_gpf_get_dvsec(struct device *dev)
+{
+ struct pci_dev *pdev;
+ bool is_port = true;
+ u16 dvsec;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
+ is_port = false;
+
+ dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
+ is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
+ if (!dvsec)
+ dev_warn(dev, "%s GPF DVSEC not present\n",
+ is_port ? "Port" : "Device");
+ return dvsec;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
+
+static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
+{
+ u64 base, scale;
+ int rc, offset;
+ u16 ctrl;
+
+ switch (phase) {
+ case 1:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK;
+ break;
+ case 2:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
+ if (rc)
+ return rc;
+
+ if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
+ FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
+ return 0;
+
+ ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
+ ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);
+
+ rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
+ if (!rc)
+ pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
+ phase, GPF_TIMEOUT_BASE_MAX);
+
+ return rc;
+}
+
+int cxl_gpf_port_setup(struct cxl_dport *dport)
+{
+ if (!dport)
+ return -EINVAL;
+
+ if (!dport->gpf_dvsec) {
+ struct pci_dev *pdev;
+ int dvsec;
+
+ dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
+ if (!dvsec)
+ return -EINVAL;
+
+ dport->gpf_dvsec = dvsec;
+ pdev = to_pci_dev(dport->dport_dev);
+ update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
+ update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);
+ }
+
+ return 0;
+}
+
+static int count_dports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ int type = pci_pcie_type(pdev);
+
+ if (pdev->bus != ctx->bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != ctx->type)
+ return 0;
+
+ ctx->count++;
+ return 0;
+}
+
+int cxl_port_get_possible_dports(struct cxl_port *port)
+{
+ struct pci_bus *bus = cxl_port_to_pci_bus(port);
+ struct cxl_walk_context ctx;
+ int type;
+
+ if (!bus) {
+ dev_err(&port->dev, "No PCI bus found for port %s\n",
+ dev_name(&port->dev));
+ return -ENXIO;
+ }
+
+ if (pci_is_root_bus(bus))
+ type = PCI_EXP_TYPE_ROOT_PORT;
+ else
+ type = PCI_EXP_TYPE_DOWNSTREAM;
+
+ ctx = (struct cxl_walk_context) {
+ .bus = bus,
+ .type = type,
+ };
+ pci_walk_bus(bus, count_dports, &ctx);
+
+ return ctx.count;
+}
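
A worked example of the GPF timeout encoding above: the control field packs
a base and a scale, where scale 7 selects the 10-second multiplier (per the
DVSEC definition the "/* 10 seconds */" comment refers to), so base 2 with
scale 7 programs 2 * 10 s = 20 s per phase, which is what the "%d0 secs"
debug format prints. The packing step in update_gpf_port_dvsec() reduces to:

	/* phase 1 shown; phase 2 uses the PHASE_2 masks */
	ctrl = FIELD_PREP(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK,
			  GPF_TIMEOUT_BASE_MAX);	/* base = 2 */
	ctrl |= FIELD_PREP(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK,
			   GPF_TIMEOUT_SCALE_MAX);	/* scale 7 => 10 s units */
	/* effective timeout: 2 * 10 s = 20 s per GPF phase */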
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index c00f3a933164..8853415c106a 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -49,18 +49,7 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
-
-bool is_cxl_nvdimm_bridge(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_bridge_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
-
-static int match_nvdimm_bridge(struct device *dev, void *data)
-{
- return is_cxl_nvdimm_bridge(dev);
-}
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL");
/**
* cxl_find_nvdimm_bridge() - find a bridge device relative to a port
@@ -75,14 +64,16 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
if (!cxl_root)
return NULL;
- dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
+ dev = device_find_child(&cxl_root->port.dev,
+ &cxl_nvdimm_bridge_type,
+ device_match_type);
if (!dev)
return NULL;
return to_cxl_nvdimm_bridge(dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, "CXL");
static struct lock_class_key cxl_nvdimm_bridge_key;
@@ -164,7 +155,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
static void cxl_nvdimm_release(struct device *dev)
{
@@ -188,7 +179,7 @@ bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, "CXL");
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
@@ -197,7 +188,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, "CXL");
static struct lock_class_key cxl_nvdimm_key;
@@ -293,4 +284,4 @@ err_alloc:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, "CXL");
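
The pmem.c cleanup replaces a single-purpose match callback with the generic
device_match_type() helper; any device_find_child() user that only compares
dev->type can make the same swap:

	/* Before: a bespoke matcher just to compare the device type */
	static int match_nvdimm_bridge(struct device *dev, void *data)
	{
		return dev->type == &cxl_nvdimm_bridge_type;
	}
	dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);

	/* After: pass the type as match data with the generic helper */
	dev = device_find_child(&cxl_root->port.dev, &cxl_nvdimm_bridge_type,
				device_match_type);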
diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
index 5d8e06b0ba6e..b3136d7664ab 100644
--- a/drivers/cxl/core/pmu.c
+++ b/drivers/cxl/core/pmu.c
@@ -65,4 +65,4 @@ err:
put_device(&pmu->dev);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index e666ec6a9085..d5f71eb1ade8 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -30,18 +30,21 @@
* instantiated by the core.
*/
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-DECLARE_RWSEM(cxl_region_rwsem);
-
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+/*
+ * The terminal device in a PCI topology is NULL; for platform
+ * devices (i.e. cxl_test) it is @platform_bus.
+ */
+static bool is_cxl_host_bridge(struct device *dev)
+{
+ return (!dev || dev == &platform_bus);
+}
+
int cxl_num_decoders_committed(struct cxl_port *port)
{
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
return port->commit_end + 1;
}
@@ -176,7 +179,7 @@ static ssize_t target_list_show(struct device *dev,
ssize_t offset;
int rc;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
@@ -194,25 +197,35 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* without @cxl_rwsem.dpa, make sure @part is not reloaded */
+ int part = READ_ONCE(cxled->part);
+ const char *desc;
+
+ if (part < 0)
+ desc = "none";
+ else
+ desc = cxlds->part[part].res.name;
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
+ return sysfs_emit(buf, "%s\n", desc);
}
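+
+/*
+ * A sketch of the hazard the READ_ONCE() above avoids: with a plain load
+ * the compiler may re-read cxled->part between the bounds check and the
+ * array index, letting a concurrent update slip an unchecked value through:
+ *
+ *     int part = cxled->part;         <-- plain load, may be reloaded
+ *     if (part < 0)                   <-- checks one copy of the value
+ *             ...
+ *     cxlds->part[part].res.name;     <-- may index with a newer copy
+ */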
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
ssize_t rc;
if (sysfs_streq(buf, "pmem"))
- mode = CXL_DECODER_PMEM;
+ mode = CXL_PARTMODE_PMEM;
else if (sysfs_streq(buf, "ram"))
- mode = CXL_DECODER_RAM;
+ mode = CXL_PARTMODE_RAM;
else
return -EINVAL;
- rc = cxl_dpa_set_mode(cxled, mode);
+ rc = cxl_dpa_set_part(cxled, mode);
if (rc)
return rc;
@@ -225,7 +238,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
@@ -437,7 +450,7 @@ struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, "CXL");
static void cxl_root_decoder_release(struct device *dev)
{
@@ -446,6 +459,7 @@ static void cxl_root_decoder_release(struct device *dev)
if (atomic_read(&cxlrd->region_id) >= 0)
memregion_free(atomic_read(&cxlrd->region_id));
__cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd->ops);
kfree(cxlrd);
}
@@ -471,19 +485,19 @@ bool is_endpoint_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_endpoint_type;
}
-EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, "CXL");
bool is_root_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_root_type;
}
-EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_root_decoder, "CXL");
bool is_switch_decoder(struct device *dev)
{
return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_switch_decoder, "CXL");
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
@@ -493,7 +507,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, "CXL");
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
@@ -502,7 +516,7 @@ struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, "CXL");
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
@@ -511,7 +525,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, "CXL");
static void cxl_ep_release(struct cxl_ep *ep)
{
@@ -549,13 +563,9 @@ static ssize_t decoders_committed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_port *port = to_cxl_port(dev);
- int rc;
-
- down_read(&cxl_region_rwsem);
- rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
- up_read(&cxl_region_rwsem);
- return rc;
+ guard(rwsem_read)(&cxl_rwsem.region);
+ return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
static DEVICE_ATTR_RO(decoders_committed);
@@ -585,7 +595,7 @@ bool is_cxl_port(const struct device *dev)
{
return dev->type == &cxl_port_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_port, "CXL");
struct cxl_port *to_cxl_port(const struct device *dev)
{
@@ -594,19 +604,21 @@ struct cxl_port *to_cxl_port(const struct device *dev)
return NULL;
return container_of(dev, struct cxl_port, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
+
+struct cxl_port *parent_port_of(struct cxl_port *port)
+{
+ if (!port || !port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
- struct cxl_port *parent;
+ struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
- if (is_cxl_root(port))
- parent = NULL;
- else
- parent = to_cxl_port(port->dev.parent);
-
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
@@ -738,6 +750,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
xa_init(&port->dports);
xa_init(&port->endpoints);
xa_init(&port->regions);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -856,9 +869,7 @@ static int cxl_port_add(struct cxl_port *port,
if (rc)
return rc;
- rc = cxl_port_setup_regs(port, component_reg_phys);
- if (rc)
- return rc;
+ port->component_reg_phys = component_reg_phys;
} else {
rc = dev_set_name(dev, "root%d", port->id);
if (rc)
@@ -942,7 +953,7 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
return port;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, "CXL");
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops)
@@ -958,7 +969,7 @@ struct cxl_root *devm_cxl_add_root(struct device *host,
cxl_root->ops = ops;
return cxl_root;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, "CXL");
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
@@ -974,7 +985,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, "CXL");
static void unregister_pci_bus(void *uport_dev)
{
@@ -995,7 +1006,7 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
return rc;
return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, "CXL");
static bool dev_is_cxl_root_child(struct device *dev)
{
@@ -1027,16 +1038,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
get_device(&iter->dev);
return to_cxl_root(iter);
}
-EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
-
-void put_cxl_root(struct cxl_root *cxl_root)
-{
- if (!cxl_root)
- return;
-
- put_device(&cxl_root->port.dev);
-}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
+EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
@@ -1198,6 +1200,18 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
cxl_debugfs_create_dport_dir(dport);
+ /*
+ * Set up the port registers when the first dport shows up. Having
+ * a dport also means that there is at least one active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
return dport;
}
@@ -1230,7 +1244,7 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
return dport;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, "CXL");
/**
* devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
@@ -1264,7 +1278,7 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
return dport;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, "CXL");
static int add_ep(struct cxl_ep *new)
{
@@ -1355,21 +1369,6 @@ static struct cxl_port *find_cxl_port(struct device *dport_dev,
return port;
}
-static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev,
- struct cxl_dport **dport)
-{
- struct cxl_find_port_ctx ctx = {
- .dport_dev = dport_dev,
- .parent_port = parent_port,
- .dport = dport,
- };
- struct cxl_port *port;
-
- port = __find_cxl_port(&ctx);
- return port;
-}
-
/*
* All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is composed of a bridge device representing the
@@ -1421,7 +1420,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
/*
* The natural end of life of a non-root 'cxl_port' is when its parent port goes
@@ -1430,7 +1429,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() not changing its
* devm action registration order, and for dports to have already been
- * destroyed by reap_dports().
+ * destroyed by del_dports().
*/
static void delete_switch_port(struct cxl_port *port)
{
@@ -1439,18 +1438,24 @@ static void delete_switch_port(struct cxl_port *port)
devm_release_action(port->dev.parent, unregister_port, port);
}
-static void reap_dports(struct cxl_port *port)
+static void del_dport(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ devm_release_action(&port->dev, cxl_dport_unlink, dport);
+ devm_release_action(&port->dev, cxl_dport_remove, dport);
+ devm_kfree(&port->dev, dport);
+}
+
+static void del_dports(struct cxl_port *port)
{
struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport) {
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
- }
+ xa_for_each(&port->dports, index, dport)
+ del_dport(dport);
}
struct detach_ctx {
@@ -1508,7 +1513,7 @@ static void cxl_detach_ep(void *data)
*/
died = true;
port->dead = true;
- reap_dports(port);
+ del_dports(port);
}
device_unlock(&port->dev);
@@ -1539,16 +1544,157 @@ static resource_size_t find_component_registers(struct device *dev)
return map.resource;
}
+static int match_port_by_uport(struct device *dev, const void *data)
+{
+ const struct device *uport_dev = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ return uport_dev == port->uport_dev;
+}
+
+/*
+ * Function takes a device reference on the port device. Caller should do a
+ * put_device() when done.
+ */
+static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport);
+ if (dev)
+ return to_cxl_port(dev);
+ return NULL;
+}
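+
+/*
+ * A minimal usage sketch, pairing the lookup with the scope-based put
+ * used elsewhere in this file so the reference is always dropped:
+ *
+ *     struct cxl_port *port __free(put_cxl_port) =
+ *             find_cxl_port_by_uport(uport_dev);
+ *     if (!port)
+ *             return -ENODEV;
+ *     ...use @port; put_device() runs automatically at scope exit...
+ */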
+
+static int update_decoder_targets(struct device *dev, void *data)
+{
+ struct cxl_dport *dport = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int i;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ cxld = &cxlsd->cxld;
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ if (cxld->target_map[i] == dport->port_id) {
+ cxlsd->target[i] = dport;
+ dev_dbg(dev, "dport%d found in target list, index %d\n",
+ dport->port_id, i);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T))
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ device_lock_assert(&port->dev);
+ if (!port->dev.driver)
+ return ERR_PTR(-ENXIO);
+
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (dport) {
+ dev_dbg(&port->dev, "dport%d:%s already exists\n",
+ dport->port_id, dev_name(dport_dev));
+ return ERR_PTR(-EBUSY);
+ }
+
+ struct cxl_dport *new_dport __free(del_cxl_dport) =
+ devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport))
+ return new_dport;
+
+ cxl_switch_parse_cdat(new_dport);
+
+ if (ida_is_empty(&port->decoder_ida)) {
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
+ new_dport->port_id, dev_name(dport_dev));
+ return no_free_ptr(new_dport);
+ }
+
+ /* New dport added, update the decoder targets */
+ device_for_each_child(&port->dev, new_dport, update_decoder_targets);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
+ dev_name(dport_dev));
+
+ return no_free_ptr(new_dport);
+}
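+
+/*
+ * The del_cxl_dport cleanup above follows the linux/cleanup.h idiom; a
+ * generic sketch with a hypothetical type (struct foo, foo_alloc()) for
+ * reference:
+ *
+ *     DEFINE_FREE(free_foo, struct foo *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+ *
+ *     struct foo *f __free(free_foo) = foo_alloc();
+ *     if (IS_ERR(f))
+ *             return f;          <-- IS_ERR_OR_NULL() makes cleanup a no-op
+ *     ...
+ *     return no_free_ptr(f);     <-- success: disarm cleanup, hand off
+ */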
+
+static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
+ struct cxl_port *parent_port,
+ struct cxl_dport *parent_dport,
+ struct device *uport_dev,
+ struct device *dport_dev)
+{
+ resource_size_t component_reg_phys;
+
+ device_lock_assert(&parent_port->dev);
+ if (!parent_port->dev.driver) {
+ dev_warn(ep_dev,
+ "port %s:%s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev),
+ dev_name(dport_dev));
+ }
+
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port_by_uport(uport_dev);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return ERR_CAST(port);
+
+ /*
+ * Retry to make sure a port is found; a port device
+ * reference is taken.
+ */
+ port = find_cxl_port_by_uport(uport_dev);
+ if (!port)
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(ep_dev, "created port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ } else {
+ /*
+ * Port was created right before this function was
+ * called. Signal the caller to deal with it.
+ */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ guard(device)(&port->dev);
+ return cxl_port_add_dport(port, dport_dev);
+}
+
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *uport_dev,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
struct cxl_dport *dport, *parent_dport;
- resource_size_t component_reg_phys;
int rc;
- if (!dparent) {
+ if (is_cxl_host_bridge(dparent)) {
/*
* The iteration reached the topology root without finding the
* CXL-root 'cxl_port' on a previous iteration, fail for now to
@@ -1560,42 +1706,31 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
}
struct cxl_port *parent_port __free(put_cxl_port) =
- find_cxl_port(dparent, &parent_dport);
+ find_cxl_port_by_uport(dparent->parent);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- /*
- * Definition with __free() here to keep the sequence of
- * dereferencing the device of the port before the parent_port releasing.
- */
- struct cxl_port *port __free(put_cxl_port) = NULL;
scoped_guard(device, &parent_port->dev) {
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- return -ENXIO;
+ parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
+ if (!parent_dport) {
+ parent_dport = cxl_port_add_dport(parent_port, dparent);
+ if (IS_ERR(parent_dport))
+ return PTR_ERR(parent_dport);
}
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- /* retry find to pick up the new dport information */
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port)
- return -ENXIO;
+ dport = devm_cxl_create_port(&cxlmd->dev, parent_port,
+ parent_dport, uport_dev,
+ dport_dev);
+ if (IS_ERR(dport)) {
+ /* Port already exists, restart iteration */
+ if (PTR_ERR(dport) == -EAGAIN)
+ return 0;
+ return PTR_ERR(dport);
}
}
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1608,6 +1743,25 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return rc;
}
+static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (!dport) {
+ dport = cxl_port_add_dport(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* New dport added, restart iteration */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return dport;
+}
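+
+/*
+ * The -EAGAIN convention mirrors devm_cxl_create_port(): the topology
+ * mutated under the caller, so the caller restarts its walk rather than
+ * treating the collision as an error. Sketch of the caller side:
+ *
+ *     dport = find_or_add_dport(port, dport_dev);
+ *     if (IS_ERR(dport) && PTR_ERR(dport) == -EAGAIN)
+ *             goto retry;        <-- re-walk with the new dport visible
+ */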
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
struct device *dev = &cxlmd->dev;
@@ -1636,11 +1790,7 @@ retry:
struct device *uport_dev;
struct cxl_dport *dport;
- /*
- * The terminal "grandparent" in PCI is NULL and @platform_bus
- * for platform devices
- */
- if (!dport_dev || dport_dev == &platform_bus)
+ if (is_cxl_host_bridge(dport_dev))
return 0;
uport_dev = dport_dev->parent;
@@ -1654,12 +1804,26 @@ retry:
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
struct cxl_port *port __free(put_cxl_port) =
- find_cxl_port(dport_dev, &dport);
+ find_cxl_port_by_uport(uport_dev);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev),
dev_name(port->uport_dev));
+
+ /*
+ * A root-port 'cxl_port' enumerated by cxl_acpi without a dport
+ * will have the dport added here.
+ */
+ scoped_guard(device, &port->dev) {
+ dport = find_or_add_dport(port, dport_dev);
+ if (IS_ERR(dport)) {
+ if (PTR_ERR(dport) == -EAGAIN)
+ goto retry;
+ return PTR_ERR(dport);
+ }
+ }
+
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1672,6 +1836,8 @@ retry:
if (rc && rc != -EBUSY)
return rc;
+ cxl_gpf_port_setup(dport);
+
/* Any more ports to add between this one and the root? */
if (!dev_is_cxl_root_child(&port->dev))
continue;
@@ -1692,41 +1858,41 @@ retry:
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, "CXL");
struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
struct cxl_dport **dport)
{
return find_cxl_port(pdev->dev.parent, dport);
}
-EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, "CXL");
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
struct cxl_dport **dport)
{
return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
- struct cxl_port *port, int *target_map)
+ struct cxl_port *port)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
int i;
- if (!target_map)
- return 0;
-
device_lock_assert(&port->dev);
if (xa_empty(&port->dports))
- return -EINVAL;
+ return 0;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
- struct cxl_dport *dport = find_dport(port, target_map[i]);
+ struct cxl_dport *dport = find_dport(port, cxld->target_map[i]);
- if (!dport)
- return -ENXIO;
+ if (!dport) {
+ /* dport may be activated later */
+ continue;
+ }
cxlsd->target[i] = dport;
}
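+
+/*
+ * Note the pairing: a position whose dport has not appeared yet is left
+ * NULL here rather than failing the add, and update_decoder_targets()
+ * fills it in later when cxl_port_add_dport() registers the late dport.
+ */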
@@ -1840,7 +2006,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
return cxlrd;
}
-EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, "CXL");
/**
* cxl_switch_decoder_alloc - Allocate a switch level decoder
@@ -1877,7 +2043,7 @@ struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
cxld->dev.type = &cxl_decoder_switch_type;
return cxlsd;
}
-EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, "CXL");
/**
* cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
@@ -1899,6 +2065,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
return ERR_PTR(-ENOMEM);
cxled->pos = -1;
+ cxled->part = -1;
cxld = &cxled->cxld;
rc = cxl_decoder_init(port, cxld);
if (rc) {
@@ -1909,14 +2076,11 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
cxld->dev.type = &cxl_decoder_endpoint_type;
return cxled;
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* Certain types of decoders may not have any targets. The main example of this
* is an endpoint device. A more awkward example is a hostbridge whose root
@@ -1930,7 +2094,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
* Return: Negative error code if the decoder wasn't properly configured; else
* returns 0.
*/
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add_locked(struct cxl_decoder *cxld)
{
struct cxl_port *port;
struct device *dev;
@@ -1951,7 +2115,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (!is_endpoint_decoder(dev)) {
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
- rc = decoder_populate_targets(cxlsd, port, target_map);
+ rc = decoder_populate_targets(cxlsd, port);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1965,14 +2129,11 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
return device_add(dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* This is the unlocked variant of cxl_decoder_add_locked().
* See cxl_decoder_add_locked().
@@ -1980,7 +2141,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
* Context: Process context. Takes and releases the device lock of the port that
* owns the @cxld.
*/
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld)
{
struct cxl_port *port;
@@ -1993,18 +2154,15 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
guard(device)(&port->dev);
- return cxl_decoder_add_locked(cxld, target_map);
+ return cxl_decoder_add_locked(cxld);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
static void cxld_unregister(void *dev)
{
- struct cxl_endpoint_decoder *cxled;
-
- if (is_endpoint_decoder(dev)) {
- cxled = to_cxl_endpoint_decoder(dev);
- cxl_decoder_kill_region(cxled);
- }
+ if (is_endpoint_decoder(dev))
+ cxl_decoder_detach(NULL, to_cxl_endpoint_decoder(dev), -1,
+ DETACH_INVALIDATE);
device_unregister(dev);
}
@@ -2013,7 +2171,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, "CXL");
/**
* __cxl_driver_register - register a driver for the cxl bus
@@ -2046,13 +2204,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
return driver_register(&cxl_drv->drv);
}
-EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
+EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, "CXL");
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
driver_unregister(&cxl_drv->drv);
}
-EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, "CXL");
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
@@ -2084,11 +2242,18 @@ static void cxl_bus_remove(struct device *dev)
static struct workqueue_struct *cxl_bus_wq;
-static void cxl_bus_rescan_queue(struct work_struct *w)
+static int cxl_rescan_attach(struct device *dev, void *data)
{
- int rc = bus_rescan_devices(&cxl_bus_type);
+ int rc = device_attach(dev);
+
+ dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");
+
+ return 0;
+}
- pr_debug("CXL bus rescan result: %d\n", rc);
+static void cxl_bus_rescan_queue(struct work_struct *w)
+{
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}
void cxl_bus_rescan(void)
@@ -2097,19 +2262,19 @@ void cxl_bus_rescan(void)
queue_work(cxl_bus_wq, &rescan_work);
}
-EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, "CXL");
void cxl_bus_drain(void)
{
drain_workqueue(cxl_bus_wq);
}
-EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, "CXL");
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
-EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, "CXL");
static void add_latency(struct access_coordinate *c, long latency)
{
@@ -2235,7 +2400,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, "CXL");
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c)
@@ -2284,7 +2449,7 @@ static const struct attribute_group *cxl_bus_attribute_groups[] = {
NULL,
};
-struct bus_type cxl_bus_type = {
+const struct bus_type cxl_bus_type = {
.name = "cxl",
.uevent = cxl_bus_uevent,
.match = cxl_bus_match,
@@ -2292,7 +2457,7 @@ struct bus_type cxl_bus_type = {
.remove = cxl_bus_remove,
.bus_groups = cxl_bus_attribute_groups,
};
-EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_type, "CXL");
static struct dentry *cxl_debugfs;
@@ -2300,7 +2465,7 @@ struct dentry *cxl_debugfs_create_dir(const char *dir)
{
return debugfs_create_dir(dir, cxl_debugfs);
}
-EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, "CXL");
static __init int cxl_core_init(void)
{
@@ -2332,8 +2497,14 @@ static __init int cxl_core_init(void)
if (rc)
goto err_region;
+ rc = cxl_ras_init();
+ if (rc)
+ goto err_ras;
+
return 0;
+err_ras:
+ cxl_region_exit();
err_region:
bus_unregister(&cxl_bus_type);
err_bus:
@@ -2345,6 +2516,7 @@ err_wq:
static void cxl_core_exit(void)
{
+ cxl_ras_exit();
cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
@@ -2356,4 +2528,4 @@ subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_DESCRIPTION("CXL: Core Compute Express Link support");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
new file mode 100644
index 000000000000..2731ba3a0799
--- /dev/null
+++ b/drivers/cxl/core/ras.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 AMD Corporation. All rights reserved. */
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <cxl/event.h>
+#include <cxlmem.h>
+#include "trace.h"
+
+static void cxl_cper_trace_corr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+
+ trace_cxl_port_aer_correctable_error(&pdev->dev, status);
+}
+
+static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ u32 fe;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_port_aer_uncorrectable_error(&pdev->dev, status, fe,
+ ras_cap.header_log);
+}
+
+static void cxl_cper_trace_corr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+
+ trace_cxl_aer_correctable_error(cxlmd, status);
+}
+
+static void
+cxl_cper_trace_uncorr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ u32 fe;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_aer_uncorrectable_error(cxlmd, status, fe,
+ ras_cap.header_log);
+}
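+
+/*
+ * Worked example with hypothetical register values: uncor_status = 0x14
+ * (two bits set) and a First Error pointer of 2 in cap_control yield
+ * fe = BIT(2) = 0x4, highlighting only the first-recorded error; with a
+ * single status bit set, fe is simply that bit.
+ */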
+
+static int match_memdev_by_parent(struct device *dev, const void *uport)
+{
+ if (is_cxl_memdev(dev) && dev->parent == uport)
+ return 1;
+ return 0;
+}
+
+static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
+{
+ unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
+ data->prot_err.agent_addr.function);
+ struct pci_dev *pdev __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
+ data->prot_err.agent_addr.bus,
+ devfn);
+ struct cxl_memdev *cxlmd;
+ int port_type;
+
+ if (!pdev)
+ return;
+
+ port_type = pci_pcie_type(pdev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
+ port_type == PCI_EXP_TYPE_DOWNSTREAM ||
+ port_type == PCI_EXP_TYPE_UPSTREAM) {
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_port_prot_err(pdev, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_port_prot_err(pdev, data->ras_cap);
+
+ return;
+ }
+
+ guard(device)(&pdev->dev);
+ if (!pdev->dev.driver)
+ return;
+
+ struct device *mem_dev __free(put_device) = bus_find_device(
+ &cxl_bus_type, NULL, pdev, match_memdev_by_parent);
+ if (!mem_dev)
+ return;
+
+ cxlmd = to_cxl_memdev(mem_dev);
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_prot_err(cxlmd, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_prot_err(cxlmd, data->ras_cap);
+}
+
+static void cxl_cper_prot_err_work_fn(struct work_struct *work)
+{
+ struct cxl_cper_prot_err_work_data wd;
+
+ while (cxl_cper_prot_err_kfifo_get(&wd))
+ cxl_cper_handle_prot_err(&wd);
+}
+static DECLARE_WORK(cxl_cper_prot_err_work, cxl_cper_prot_err_work_fn);
+
+int cxl_ras_init(void)
+{
+ return cxl_cper_register_prot_err_work(&cxl_cper_prot_err_work);
+}
+
+void cxl_ras_exit(void)
+{
+ cxl_cper_unregister_prot_err_work(&cxl_cper_prot_err_work);
+ cancel_work_sync(&cxl_cper_prot_err_work);
+}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e701e4b04032..e14c1d305b22 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
@@ -10,6 +11,7 @@
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
+#include <linux/string_choices.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -30,6 +32,12 @@
* 3. Decoder targets
*/
+/*
+ * Nodemask with a bit set per node once the access_coordinates for
+ * that node have been updated by the CXL memory hotplug notifier.
+ */
+static nodemask_t nodemask_region_seen = NODE_MASK_NONE;
+
static struct cxl_region *to_cxl_region(struct device *dev);
#define __ACCESS_ATTR_RO(_level, _name) { \
@@ -141,16 +149,12 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- if (cxlr->mode != CXL_DECODER_PMEM)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ if (cxlr->mode != CXL_PARTMODE_PMEM)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%pUb\n", &p->uuid);
}
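+
+/*
+ * The conversions in this file follow the conditional-guard idiom from
+ * linux/cleanup.h; a minimal sketch of its general shape (lock and
+ * variable names hypothetical):
+ *
+ *     ACQUIRE(rwsem_read_intr, lock)(&example_rwsem);
+ *     if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ *             return rc;         <-- interrupted before acquiring
+ *     ...read-side critical section, released at scope exit...
+ */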
static int is_dup(struct device *match, void *data)
@@ -162,7 +166,7 @@ static int is_dup(struct device *match, void *data)
if (!is_cxl_region(match))
return 0;
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
cxlr = to_cxl_region(match);
p = &cxlr->params;
@@ -192,27 +196,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
if (uuid_is_null(&temp))
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &region_rwsem)))
return rc;
if (uuid_equal(&p->uuid, &temp))
- goto out;
+ return len;
- rc = -EBUSY;
if (p->state >= CXL_CONFIG_ACTIVE)
- goto out;
+ return -EBUSY;
rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
if (rc < 0)
- goto out;
+ return rc;
uuid_copy(&p->uuid, &temp);
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
return len;
}
static DEVICE_ATTR_RW(uuid);
@@ -231,30 +230,27 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
- } else {
- dev_err(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
}
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
return 0;
}
-static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
- int i, rc = 0;
+ int i;
/*
- * Before region teardown attempt to flush, and if the flush
- * fails cancel the region teardown for data consistency
- * concerns
+ * Before region teardown, attempt to flush and evict any data cached
+ * for this region, or scream loudly about missing arch / platform
+ * support for CXL teardown.
*/
- rc = cxl_region_invalidate_memregion(cxlr);
- if (rc)
- return rc;
+ cxl_region_invalidate_memregion(cxlr);
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -277,23 +273,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
if (cxld->reset)
- rc = cxld->reset(cxld);
- if (rc)
- return rc;
+ cxld->reset(cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
- rc = cxled->cxld.reset(&cxled->cxld);
- if (rc)
- return rc;
+ cxled->cxld.reset(&cxled->cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
/* all decoders associated with this region have been torn down */
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
-
- return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
@@ -358,33 +348,40 @@ err:
return rc;
}
-static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static int queue_reset(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- bool commit;
- ssize_t rc;
+ int rc;
- rc = kstrtobool(buf, &commit);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ /* Already in the requested state? */
+ if (p->state < CXL_CONFIG_COMMIT)
+ return 0;
+
+ p->state = CXL_CONFIG_RESET_PENDING;
+
+ return 0;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
/* Already in the requested state? */
- if (commit && p->state >= CXL_CONFIG_COMMIT)
- goto out;
- if (!commit && p->state < CXL_CONFIG_COMMIT)
- goto out;
+ if (p->state >= CXL_CONFIG_COMMIT)
+ return 0;
/* Not ready to commit? */
- if (commit && p->state < CXL_CONFIG_ACTIVE) {
- rc = -ENXIO;
- goto out;
- }
+ if (p->state < CXL_CONFIG_ACTIVE)
+ return -ENXIO;
/*
* Invalidate caches before region setup to drop any speculative
@@ -392,41 +389,61 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
- goto out;
+ return rc;
- if (commit) {
- rc = cxl_region_decode_commit(cxlr);
- if (rc == 0)
- p->state = CXL_CONFIG_COMMIT;
- } else {
- p->state = CXL_CONFIG_RESET_PENDING;
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
+ rc = cxl_region_decode_commit(cxlr);
+ if (rc)
+ return rc;
- /*
- * The lock was dropped, so need to revalidate that the reset is
- * still pending.
- */
- if (p->state == CXL_CONFIG_RESET_PENDING) {
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- /*
- * Revert to committed since there may still be active
- * decoders associated with this region, or move forward
- * to active to mark the reset successful
- */
- if (rc)
- p->state = CXL_CONFIG_COMMIT;
- else
- p->state = CXL_CONFIG_ACTIVE;
- }
- }
+ p->state = CXL_CONFIG_COMMIT;
-out:
- up_write(&cxl_region_rwsem);
+ return 0;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ if (commit) {
+ rc = __commit(cxlr);
+ if (rc)
+ return rc;
+ return len;
+ }
+ rc = queue_reset(cxlr);
if (rc)
return rc;
+
+ /*
+ * Unmap the region and depend on the reset-pending state to ensure
+ * it does not go active again until after the reset
+ */
+ device_release_driver(&cxlr->dev);
+
+ /*
+ * With the reset pending take cxl_rwsem.region unconditionally
+ * to ensure the reset gets handled before returning.
+ */
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ /*
+ * Revalidate that the reset is still pending in case another
+ * thread already handled this reset.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
return len;
}
@@ -437,13 +454,10 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
}
static DEVICE_ATTR_RW(commit);
@@ -457,7 +471,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
* Support tooling that expects to find a 'uuid' attribute for all
* regions regardless of mode.
*/
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
return 0444;
return a->mode;
}
@@ -467,15 +481,12 @@ static ssize_t interleave_ways_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_ways);
}
static const struct attribute_group *get_cxl_region_target_group(void);
@@ -510,23 +521,21 @@ static ssize_t interleave_ways_store(struct device *dev,
return -EINVAL;
}
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
save = p->interleave_ways;
p->interleave_ways = val;
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
- if (rc)
+ if (rc) {
p->interleave_ways = save;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
return rc;
+ }
+
return len;
}
static DEVICE_ATTR_RW(interleave_ways);
@@ -537,15 +546,12 @@ static ssize_t interleave_granularity_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_granularity);
}
static ssize_t interleave_granularity_store(struct device *dev,
@@ -578,19 +584,15 @@ static ssize_t interleave_granularity_store(struct device *dev,
if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
p->interleave_granularity = val;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
+
return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
@@ -601,17 +603,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
u64 resource = -1ULL;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
+
if (p->res)
resource = p->res->start;
- rc = sysfs_emit(buf, "%#llx\n", resource);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", resource);
}
static DEVICE_ATTR_RO(resource);
@@ -619,8 +619,16 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_region *cxlr = to_cxl_region(dev);
+ const char *desc;
+
+ if (cxlr->mode == CXL_PARTMODE_RAM)
+ desc = "ram";
+ else if (cxlr->mode == CXL_PARTMODE_PMEM)
+ desc = "pmem";
+ else
+ desc = "";
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+ return sysfs_emit(buf, "%s\n", desc);
}
static DEVICE_ATTR_RO(mode);
@@ -631,7 +639,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct resource *res;
u64 remainder = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/* Nothing to do... */
if (p->res && resource_size(p->res) == size)
@@ -646,7 +654,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
/* ways, granularity and uuid (if PMEM) need to be set before HPA */
if (!p->interleave_ways || !p->interleave_granularity ||
- (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ (cxlr->mode == CXL_PARTMODE_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
@@ -673,7 +681,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
struct cxl_region_params *p = &cxlr->params;
if (device_is_registered(&cxlr->dev))
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (p->res) {
/*
* Autodiscovered regions may not have been able to insert their
@@ -690,7 +698,7 @@ static int free_hpa(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (!p->res)
return 0;
@@ -714,15 +722,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
if (val)
rc = alloc_hpa(cxlr, val);
else
rc = free_hpa(cxlr);
- up_write(&cxl_region_rwsem);
if (rc)
return rc;
@@ -738,15 +745,12 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
u64 size = 0;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (p->res)
size = resource_size(p->res);
- rc = sysfs_emit(buf, "%#llx\n", size);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RW(size);
@@ -772,53 +776,86 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
struct cxl_endpoint_decoder *cxled;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
p->interleave_ways);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
cxled = p->targets[pos];
if (!cxled)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
- up_read(&cxl_region_rwsem);
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
+}
- return rc;
+static int check_commit_order(struct device *dev, void *data)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ /*
+ * If port->commit_end is not the only free decoder, then an
+ * out-of-order shutdown has occurred; block further allocations
+ * until that is resolved
+ */
+ if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
+ return -EBUSY;
+ return 0;
}
-static int match_free_decoder(struct device *dev, void *data)
+static int match_free_decoder(struct device *dev, const void *data)
{
+ struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
- int *id = data;
+ int rc;
if (!is_switch_decoder(dev))
return 0;
cxld = to_cxl_decoder(dev);
- /* enforce ordered allocation */
- if (cxld->id != *id)
+ if (cxld->id != port->commit_end + 1)
return 0;
- if (!cxld->region)
- return 1;
+ if (cxld->region) {
+ dev_dbg(dev->parent,
+ "next decoder to commit (%s) is already reserved (%s)\n",
+ dev_name(dev), dev_name(&cxld->region->dev));
+ return 0;
+ }
+
+ rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
+ check_commit_order);
+ if (rc) {
+ dev_dbg(dev->parent,
+ "unable to allocate %s due to out of order shutdown\n",
+ dev_name(dev));
+ return 0;
+ }
+ return 1;
+}
- (*id)++;
+static bool region_res_match_cxl_range(const struct cxl_region_params *p,
+ struct range *range)
+{
+ if (!p->res)
+ return false;
- return 0;
+ /*
+ * For an extended linear cache region, the CXL range is assumed to
+ * be fronted by the DRAM range in all currently known
+ * implementations. This assumption holds until a variant
+ * implementation exists.
+ */
+ return p->res->start + p->cache_size == range->start &&
+ p->res->end == range->end;
}
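+
+/*
+ * Worked example with hypothetical sizes: a 64GB region fronted by a
+ * 32GB extended linear cache has p->res spanning [0, 64GB) and
+ * p->cache_size = 32GB, so the CXL HPA range must span
+ * [p->res->start + 32GB, p->res->end] for this check to pass.
+ */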
-static int match_auto_decoder(struct device *dev, void *data)
+static int match_auto_decoder(struct device *dev, const void *data)
{
- struct cxl_region_params *p = data;
+ const struct cxl_region_params *p = data;
struct cxl_decoder *cxld;
struct range *r;
@@ -828,19 +865,31 @@ static int match_auto_decoder(struct device *dev, void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (p->res && p->res->start == r->start && p->res->end == r->end)
+ if (region_res_match_cxl_range(p, r))
return 1;
return 0;
}
+/**
+ * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region
+ * @port: a port in the ancestry of the endpoint implied by @cxled
+ * @cxled: endpoint decoder to be, or currently, mapped by @port
+ * @cxlr: region whose decode is being established, or validated, at @port
+ *
+ * In the region creation path cxl_port_pick_region_decoder() is an
+ * allocator that finds a free decoder. In the region assembly path, it
+ * recalls the decoder that platform firmware picked, for validation
+ * purposes.
+ *
+ * The result is recorded in a 'struct cxl_region_ref' in @port.
+ */
static struct cxl_decoder *
-cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region *cxlr)
+cxl_port_pick_region_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
- int id = 0;
if (port == cxled_to_port(cxled))
return &cxled->cxld;
@@ -849,13 +898,13 @@ cxl_region_find_decoder(struct cxl_port *port,
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
else
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ dev = device_find_child(&port->dev, NULL, match_free_decoder);
if (!dev)
return NULL;
/*
* This decoder is pinned registered as long as the endpoint decoder is
* registered, and endpoint decoder unregistration holds the
- * cxl_region_rwsem over unregister events, so no need to hold on to
+ * cxl_rwsem.region over unregister events, so no need to hold on to
* this extra reference.
*/
put_device(dev);
@@ -886,7 +935,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_decoder *cxld)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -900,9 +950,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
continue;
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (auto_order_ok(port, iter->region, cxld))
continue;
}
@@ -984,19 +1031,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
return 0;
}
-static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region_ref *cxl_rr)
+static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region_ref *cxl_rr,
+ struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
- if (!cxld) {
- dev_dbg(&cxlr->dev, "%s: no decoder available\n",
- dev_name(&port->dev));
- return -EBUSY;
- }
-
if (cxld->region) {
dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
dev_name(&port->dev), dev_name(&cxld->dev),
@@ -1056,7 +1095,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
unsigned long index;
int rc = -EBUSY;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (cxl_rr) {
@@ -1087,7 +1126,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_port_pick_region_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ return -EBUSY;
+ }
+
+ cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
@@ -1096,7 +1144,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
nr_targets_inc = true;
- rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+ rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld);
if (rc)
goto out_erase;
}
@@ -1157,7 +1205,7 @@ static void cxl_port_detach_region(struct cxl_port *port,
struct cxl_region_ref *cxl_rr;
struct cxl_ep *ep = NULL;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (!cxl_rr)
@@ -1288,6 +1336,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region_params *p = &cxlr->params;
struct cxl_decoder *cxld = cxl_rr->decoder;
struct cxl_switch_decoder *cxlsd;
+ struct cxl_port *iter = port;
u16 eig, peig;
u8 eiw, peiw;
@@ -1304,16 +1353,26 @@ static int cxl_port_setup_targets(struct cxl_port *port,
cxlsd = to_cxl_switch_decoder(&cxld->dev);
if (cxl_rr->nr_targets_set) {
- int i, distance;
+ int i, distance = 1;
+ struct cxl_region_ref *cxl_rr_iter;
/*
- * Passthrough decoders impose no distance requirements between
- * peers
+ * The "distance" between peer downstream ports represents which
+ * endpoint positions in the region interleave a given port can
+ * host.
+ *
+ * For example, at the root of a hierarchy the distance is
+ * always 1 as every index targets a different host-bridge. At
+ * each subsequent switch level those ports map every Nth region
+ * position where N is the width of the switch == distance.
*/
- if (cxl_rr->nr_targets == 1)
- distance = 0;
- else
- distance = p->nr_targets / cxl_rr->nr_targets;
+ do {
+ cxl_rr_iter = cxl_rr_load(iter, cxlr);
+ distance *= cxl_rr_iter->nr_targets;
+ iter = to_cxl_port(iter->dev.parent);
+ } while (!is_cxl_root(iter));
+ distance *= cxlrd->cxlsd.cxld.interleave_ways;
+
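+ /*
+ * Worked example (hypothetical topology): a 2-way root interleave
+ * across two host bridges, one switch per host bridge, two dports
+ * per switch. For a switch-level region reference the loop computes
+ * distance = 2 (switch) * 1 (host bridge) * 2 (root) = 4, so a given
+ * switch dport hosts every 4th endpoint position in the region.
+ */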
for (i = 0; i < cxl_rr->nr_targets_set; i++)
if (ep->dport == cxlsd->target[i]) {
rc = check_last_peer(cxled, ep, cxl_rr,
@@ -1405,9 +1464,8 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
- cxld->hpa_range.start != p->res->start ||
- cxld->hpa_range.end != p->res->end ||
+ (iw > 1 && cxld->interleave_granularity != ig) ||
+ !region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1418,9 +1476,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
dev_name(port->uport_dev), dev_name(&port->dev),
__func__, cxld->interleave_ways,
cxld->interleave_granularity,
- (cxld->flags & CXL_DECODER_F_ENABLE) ?
- "enabled" :
- "disabled",
+ str_enabled_disabled(cxld->flags & CXL_DECODER_F_ENABLE),
cxld->hpa_range.start, cxld->hpa_range.end);
return -ENXIO;
}
@@ -1460,8 +1516,10 @@ add_target:
cxl_rr->nr_targets_set);
return -ENXIO;
}
- } else
+ } else {
cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id;
+ }
inc = 1;
out_target_set:
cxl_rr->nr_targets_set += inc;
@@ -1708,17 +1766,12 @@ static int cmp_interleave_pos(const void *a, const void *b)
return cxled_a->pos - cxled_b->pos;
}
-static struct cxl_port *next_port(struct cxl_port *port)
-{
- if (!port->parent_dport)
- return NULL;
- return port->parent_dport->port;
-}
-
-static int match_switch_decoder_by_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev,
+ const void *data)
{
struct cxl_switch_decoder *cxlsd;
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
+
if (!is_switch_decoder(dev))
return 0;
@@ -1739,7 +1792,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
struct device *dev;
int rc = -ENXIO;
- parent = next_port(port);
+ parent = parent_port_of(port);
if (!parent)
return rc;
@@ -1763,6 +1816,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
+ if (rc)
+ dev_err(port->uport_dev,
+ "failed to find %s:%s in target list of %s\n",
+ dev_name(&port->dev),
+ dev_name(port->parent_dport->dport_dev),
+ dev_name(&cxlsd->cxld.dev));
+
return rc;
}
@@ -1819,7 +1879,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
*/
/* Iterate from endpoint to root_port refining the position */
- for (iter = port; iter; iter = next_port(iter)) {
+ for (iter = port; iter; iter = parent_port_of(iter)) {
if (is_cxl_root(iter))
break;
@@ -1868,6 +1928,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_region_params *p = &cxlr->params;
struct cxl_port *ep_port, *root_port;
struct cxl_dport *dport;
@@ -1882,22 +1943,24 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return rc;
}
- if (cxled->mode != cxlr->mode) {
- dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
- dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
- return -EINVAL;
- }
-
- if (cxled->mode == CXL_DECODER_DEAD) {
+ if (cxled->part < 0) {
dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
return -ENODEV;
}
+ if (cxlds->part[cxled->part].mode != cxlr->mode) {
+ dev_dbg(&cxlr->dev, "%s region mode: %d mismatch\n",
+ dev_name(&cxled->cxld.dev), cxlr->mode);
+ return -EINVAL;
+ }
+
/* all full of members, or interleave config not established? */
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ }
+
+ if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
@@ -1931,13 +1994,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
- if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size !=
resource_size(p->res)) {
dev_dbg(&cxlr->dev,
- "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ "%s:%s-size-%#llx * ways-%d + cache-%#llx != region-size-%#llx\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
(u64)resource_size(cxled->dpa_res), p->interleave_ways,
- (u64)resource_size(p->res));
+ (u64)p->cache_size, (u64)resource_size(p->res));
return -EINVAL;
}
@@ -2038,33 +2101,43 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return 0;
}
-static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+__cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
- struct cxl_region *cxlr = cxled->cxld.region;
struct cxl_region_params *p;
- int rc = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
- if (!cxlr)
- return 0;
+ if (!cxled) {
+ p = &cxlr->params;
- p = &cxlr->params;
- get_device(&cxlr->dev);
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n",
+ pos, p->interleave_ways);
+ return NULL;
+ }
+
+ if (!p->targets[pos])
+ return NULL;
+ cxled = p->targets[pos];
+ } else {
+ cxlr = cxled->cxld.region;
+ if (!cxlr)
+ return NULL;
+ p = &cxlr->params;
+ }
+
+ if (mode == DETACH_INVALIDATE)
+ cxled->part = -1;
if (p->state > CXL_CONFIG_ACTIVE) {
- /*
- * TODO: tear down all impacted regions if a device is
- * removed out of order
- */
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- if (rc)
- goto out;
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
- for (iter = ep_port; !is_cxl_root(iter);
+ for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
@@ -2075,7 +2148,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
cxled->pos);
- goto out;
+ return NULL;
}
if (p->state == CXL_CONFIG_ACTIVE) {
@@ -2089,68 +2162,79 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
.end = -1,
};
- /* notify the region driver that one of its targets has departed */
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
-out:
- put_device(&cxlr->dev);
- return rc;
+ get_device(&cxlr->dev);
+ return cxlr;
}
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+/*
+ * Clean up a decoder's interest in a region. There are two cases to
+ * handle: removing an unknown @cxled from a known position in a region
+ * (detach_target()), or removing a known @cxled from an unknown @cxlr
+ * (cxld_unregister()).
+ *
+ * When the detachment finds a region, release the region driver.
+ */
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- down_write(&cxl_region_rwsem);
- cxled->mode = CXL_DECODER_DEAD;
- cxl_region_detach(cxled);
- up_write(&cxl_region_rwsem);
+ struct cxl_region *detach;
+
+ /* when the decoder is being destroyed lock unconditionally */
+ if (mode == DETACH_INVALIDATE) {
+ guard(rwsem_write)(&cxl_rwsem.region);
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ } else {
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ }
+
+ if (detach) {
+ device_release_driver(&detach->dev);
+ put_device(&detach->dev);
+ }
+ return 0;
+}
+
+static int __attach_target(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ unsigned int state)
+{
+ int rc;
+
+ if (state == TASK_INTERRUPTIBLE) {
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
+ }
+ guard(rwsem_write)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
}
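/*
 * A rough userspace analogue of the ACQUIRE()/ACQUIRE_ERR() pattern
 * used above: a scoped acquisition object that records an error
 * instead of succeeding unconditionally. pthread_rwlock_trywrlock()
 * merely stands in for the killable variant, whose real semantics are
 * "sleep, but abort on a fatal signal".
 */
#include <pthread.h>
#include <stdio.h>

struct scoped_wrlock {
	pthread_rwlock_t *lock;
	int err;
};

static struct scoped_wrlock acquire_wr(pthread_rwlock_t *lock, int killable)
{
	struct scoped_wrlock s = { .lock = lock };

	s.err = killable ? pthread_rwlock_trywrlock(lock)
			 : pthread_rwlock_wrlock(lock);
	if (s.err)
		s.lock = NULL;
	return s;
}

static void release_wr(struct scoped_wrlock *s)
{
	if (s->lock)
		pthread_rwlock_unlock(s->lock);
}

int main(void)
{
	pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;
	struct scoped_wrlock s = acquire_wr(&l, 1);

	if (s.err)
		return s.err;	/* caller sees the acquisition failure */
	printf("write lock held, attach would proceed here\n");
	release_wr(&s);
	return 0;
}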
static int attach_target(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos,
unsigned int state)
{
- int rc = 0;
+ int rc = __attach_target(cxlr, cxled, pos, state);
- if (state == TASK_INTERRUPTIBLE)
- rc = down_write_killable(&cxl_region_rwsem);
- else
- down_write(&cxl_region_rwsem);
- if (rc)
- return rc;
+ if (rc == 0)
+ return 0;
- down_read(&cxl_dpa_rwsem);
- rc = cxl_region_attach(cxlr, cxled, pos);
- up_read(&cxl_dpa_rwsem);
- up_write(&cxl_region_rwsem);
+ dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
- struct cxl_region_params *p = &cxlr->params;
- int rc;
-
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- if (pos >= p->interleave_ways) {
- dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
- p->interleave_ways);
- rc = -ENXIO;
- goto out;
- }
-
- if (!p->targets[pos]) {
- rc = 0;
- goto out;
- }
-
- rc = cxl_region_detach(p->targets[pos]);
-out:
- up_write(&cxl_region_rwsem);
- return rc;
+ return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY);
}
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
@@ -2298,7 +2382,7 @@ bool is_cxl_region(struct device *dev)
{
return dev->type == &cxl_region_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, "CXL");
static struct cxl_region *to_cxl_region(struct device *dev)
{
@@ -2366,14 +2450,8 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
if (cxlr->coord[i].read_bandwidth) {
- rc = 0;
- if (cxl_need_node_perf_attrs_update(nid))
- node_set_perf_attrs(nid, &cxlr->coord[i], i);
- else
- rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
-
- if (rc == 0)
- cset++;
+ node_update_perf_attrs(nid, &cxlr->coord[i], i);
+ cset++;
}
}
@@ -2395,22 +2473,26 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct cxl_region *cxlr = container_of(nb, struct cxl_region,
- memory_notifier);
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
+ node_notifier);
+ struct node_notify *nn = arg;
+ int nid = nn->nid;
int region_nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_DONE;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_DONE;
+ /* No action needed if node bit already set */
+ if (node_test_and_set(nid, nodemask_region_seen))
+ return NOTIFY_DONE;
+
if (!cxl_region_update_coordinates(cxlr, nid))
return NOTIFY_DONE;
@@ -2427,7 +2509,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int region_nid;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2457,7 +2539,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
*/
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
int id,
- enum cxl_decoder_mode mode,
+ enum cxl_partition_mode mode,
enum cxl_decoder_type type)
{
struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
@@ -2511,13 +2593,13 @@ static ssize_t create_ram_region_show(struct device *dev,
}
static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
- enum cxl_decoder_mode mode, int id)
+ enum cxl_partition_mode mode, int id)
{
int rc;
switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_RAM:
+ case CXL_PARTMODE_PMEM:
break;
default:
dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
@@ -2536,9 +2618,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
-static ssize_t create_pmem_region_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t create_region_store(struct device *dev, const char *buf,
+ size_t len, enum cxl_partition_mode mode)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_region *cxlr;
@@ -2548,31 +2629,26 @@ static ssize_t create_pmem_region_store(struct device *dev,
if (rc != 1)
return -EINVAL;
- cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+ cxlr = __create_region(cxlrd, mode, id);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
return len;
}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return create_region_store(dev, buf, len, CXL_PARTMODE_PMEM);
+}
DEVICE_ATTR_RW(create_pmem_region);
static ssize_t create_ram_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
- struct cxl_region *cxlr;
- int rc, id;
-
- rc = sscanf(buf, "region%d\n", &id);
- if (rc != 1)
- return -EINVAL;
-
- cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
- if (IS_ERR(cxlr))
- return PTR_ERR(cxlr);
-
- return len;
+ return create_region_store(dev, buf, len, CXL_PARTMODE_RAM);
}
DEVICE_ATTR_RW(create_ram_region);
@@ -2582,17 +2658,13 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (cxld->region)
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
- else
- rc = sysfs_emit(buf, "\n");
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ return sysfs_emit(buf, "\n");
}
DEVICE_ATTR_RO(region);
@@ -2657,7 +2729,7 @@ bool is_cxl_pmem_region(struct device *dev)
{
return dev->type == &cxl_pmem_region_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
@@ -2666,11 +2738,11 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
return NULL;
return container_of(dev, struct cxl_pmem_region, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
struct cxl_poison_context {
struct cxl_port *port;
- enum cxl_decoder_mode mode;
+ int part;
u64 offset;
};
@@ -2678,47 +2750,45 @@ static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
struct cxl_poison_context *ctx)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ const struct resource *res;
+ struct resource *p, *last;
u64 offset, length;
int rc = 0;
+ if (ctx->part < 0)
+ return 0;
+
/*
- * Collect poison for the remaining unmapped resources
- * after poison is collected by committed endpoints.
- *
- * Knowing that PMEM must always follow RAM, get poison
- * for unmapped resources based on the last decoder's mode:
- * ram: scan remains of ram range, then any pmem range
- * pmem: scan remains of pmem range
+ * Collect poison for the remaining unmapped resources after
+	 * poison has been collected by the committed endpoint decoders.
*/
-
- if (ctx->mode == CXL_DECODER_RAM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->ram_res) - offset;
+ for (int i = ctx->part; i < cxlds->nr_partitions; i++) {
+ res = &cxlds->part[i].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ offset = last->end + 1;
+ else
+ offset = res->start;
+ length = res->end - offset + 1;
+ if (!length)
+ break;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT)
- rc = 0;
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
+ continue;
if (rc)
- return rc;
- }
- if (ctx->mode == CXL_DECODER_PMEM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->dpa_res) - offset;
- if (!length)
- return 0;
- } else if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- } else {
- return 0;
+ break;
}
- return cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ return rc;
}
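/*
 * A toy model of the "remaining unmapped tail" computation above, with
 * a hand-rolled singly-linked child list standing in for struct
 * resource (field and helper names are illustrative only):
 */
#include <stdint.h>
#include <stdio.h>

struct res {
	uint64_t start, end;	/* inclusive range */
	struct res *child;	/* mapped sub-ranges, in address order */
	struct res *sibling;
};

static void unmapped_tail(const struct res *res, uint64_t *offset,
			  uint64_t *len)
{
	const struct res *p, *last = NULL;

	for (p = res->child; p; p = p->sibling)
		last = p;	/* highest mapped sub-range, if any */
	*offset = last ? last->end + 1 : res->start;
	*len = res->end - *offset + 1;	/* 0 when fully mapped */
}

int main(void)
{
	struct res mapped = { 0x0, 0xfff, NULL, NULL };
	struct res part = { 0x0, 0x3fff, &mapped, NULL };
	uint64_t off, len;

	unmapped_tail(&part, &off, &len);
	printf("poison scan tail: offset %#llx len %#llx\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}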
static int poison_by_decoder(struct device *dev, void *arg)
{
struct cxl_poison_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
+ enum cxl_partition_mode mode;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
u64 offset, length;
int rc = 0;
@@ -2727,27 +2797,18 @@ static int poison_by_decoder(struct device *dev, void *arg)
return rc;
cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return rc;
-
- /*
- * Regions are only created with single mode decoders: pmem or ram.
- * Linux does not support mixed mode decoders. This means that
- * reading poison per endpoint decoder adheres to the requirement
- * that poison reads of pmem and ram must be separated.
- * CXL 3.0 Spec 8.2.9.8.4.1
- */
- if (cxled->mode == CXL_DECODER_MIXED) {
- dev_dbg(dev, "poison list read unsupported in mixed mode\n");
+ if (!cxled->dpa_res)
return rc;
- }
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+ mode = cxlds->part[cxled->part].mode;
+
if (cxled->skip) {
offset = cxled->dpa_res->start - cxled->skip;
length = cxled->skip;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2756,7 +2817,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
offset = cxled->dpa_res->start;
length = cxled->dpa_res->end - offset + 1;
rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2764,7 +2825,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
/* Iterate until commit_end is reached */
if (cxled->cxld.id == ctx->port->commit_end) {
ctx->offset = cxled->dpa_res->end + 1;
- ctx->mode = cxled->mode;
+ ctx->part = cxled->part;
return 1;
}
@@ -2777,7 +2838,8 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
int rc = 0;
ctx = (struct cxl_poison_context) {
- .port = port
+ .port = port,
+ .part = -1,
};
rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
@@ -2807,7 +2869,7 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ if (!cxl_resource_contains_addr(cxled->dpa_res, dpa))
return 0;
/*
@@ -2862,6 +2924,16 @@ static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
return false;
}
+static bool has_hpa_to_spa(struct cxl_root_decoder *cxlrd)
+{
+ return cxlrd->ops && cxlrd->ops->hpa_to_spa;
+}
+
+static bool has_spa_to_hpa(struct cxl_root_decoder *cxlrd)
+{
+ return cxlrd->ops && cxlrd->ops->spa_to_hpa;
+}
+
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
@@ -2913,25 +2985,120 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
/* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
+ hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
- if (cxlrd->hpa_to_spa)
- hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
+ if (has_hpa_to_spa(cxlrd))
+ hpa = cxlrd->ops->hpa_to_spa(cxlrd, hpa);
- if (hpa < p->res->start || hpa > p->res->end) {
+ if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
return ULLONG_MAX;
}
/* Simple chunk check, by pos & gran, only applies to modulo decodes */
- if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
+ if (!has_hpa_to_spa(cxlrd) && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
return ULLONG_MAX;
return hpa;
}
+struct dpa_result {
+ struct cxl_memdev *cxlmd;
+ u64 dpa;
+};
+
+static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
+ struct dpa_result *result)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_endpoint_decoder *cxled;
+ u64 hpa, hpa_offset, dpa_offset;
+ u64 bits_upper, bits_lower;
+ u64 shifted, rem, temp;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ lockdep_assert_held(&cxl_rwsem.region);
+ lockdep_assert_held(&cxl_rwsem.dpa);
+
+ /* Input validation ensures valid ways and gran */
+ granularity_to_eig(p->interleave_granularity, &eig);
+ ways_to_eiw(p->interleave_ways, &eiw);
+
+ /*
+	 * If the root decoder has an SPA to CXL HPA translation callback,
+	 * use it. Otherwise CXL HPA is assumed to equal SPA.
+ */
+ if (has_spa_to_hpa(cxlrd)) {
+ hpa = cxlrd->ops->spa_to_hpa(cxlrd, p->res->start + offset);
+ hpa_offset = hpa - p->res->start;
+ } else {
+ hpa_offset = offset;
+ }
+ /*
+ * Interleave position: CXL Spec 3.2 Section 8.2.4.20.13
+ * eiw < 8
+ * Position is in the IW bits at HPA_OFFSET[IG+8+IW-1:IG+8].
+ * Per spec "remove IW bits starting with bit position IG+8"
+ * eiw >= 8
+ * Position is not explicitly stored in HPA_OFFSET bits. It is
+ * derived from the modulo operation of the upper bits using
+ * the total number of interleave ways.
+ */
+ if (eiw < 8) {
+ pos = (hpa_offset >> (eig + 8)) & GENMASK(eiw - 1, 0);
+ } else {
+ shifted = hpa_offset >> (eig + 8);
+ div64_u64_rem(shifted, p->interleave_ways, &rem);
+ pos = rem;
+ }
+ if (pos < 0 || pos >= p->nr_targets) {
+ dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
+ pos, p->nr_targets);
+ return -ENXIO;
+ }
+
+ /*
+ * DPA offset: CXL Spec 3.2 Section 8.2.4.20.13
+ * Lower bits [IG+7:0] pass through unchanged
+ * (eiw < 8)
+ * Per spec: DPAOffset[51:IG+8] = (HPAOffset[51:IG+IW+8] >> IW)
+ * Clear the position bits to isolate upper section, then
+ * reverse the left shift by eiw that occurred during DPA->HPA
+ * (eiw >= 8)
+ * Per spec: DPAOffset[51:IG+8] = HPAOffset[51:IG+IW] / 3
+ * Extract upper bits from the correct bit range and divide by 3
+ * to recover the original DPA upper bits
+ */
+ bits_lower = hpa_offset & GENMASK_ULL(eig + 7, 0);
+ if (eiw < 8) {
+		temp = hpa_offset & ~GENMASK_ULL(eig + eiw + 8 - 1, 0);
+ dpa_offset = temp >> eiw;
+ } else {
+ bits_upper = div64_u64(hpa_offset >> (eig + eiw), 3);
+ dpa_offset = bits_upper << (eig + 8);
+ }
+ dpa_offset |= bits_lower;
+
+ /* Look-up and return the result: a memdev and a DPA */
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxled->pos != pos)
+ continue;
+ result->cxlmd = cxled_to_memdev(cxled);
+ result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
+
+ return 0;
+ }
+ dev_err(&cxlr->dev, "No device found for position %d\n", pos);
+
+ return -ENXIO;
+}
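/*
 * The modulo-decode arithmetic above is easiest to sanity check in
 * isolation. A self-contained round trip for the eiw < 8 case per CXL
 * 3.2 Section 8.2.4.20.13: position bits are inserted at bit IG+8 on
 * the way to an HPA offset and stripped again on the way back.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t dpa_to_hpa_offset(uint64_t dpa_offset, int pos, int eig, int eiw)
{
	uint64_t lower = dpa_offset & ((1ULL << (eig + 8)) - 1);
	uint64_t upper = dpa_offset >> (eig + 8);

	return (upper << (eig + 8 + eiw)) | ((uint64_t)pos << (eig + 8)) | lower;
}

static uint64_t hpa_to_dpa_offset(uint64_t hpa_offset, int *pos, int eig, int eiw)
{
	uint64_t lower = hpa_offset & ((1ULL << (eig + 8)) - 1);

	*pos = (hpa_offset >> (eig + 8)) & ((1 << eiw) - 1);
	return ((hpa_offset >> (eig + 8 + eiw)) << (eig + 8)) | lower;
}

int main(void)
{
	int eig = 0, eiw = 2, pos;	/* 256B granularity, 4-way */
	uint64_t dpa = 0x12345;
	uint64_t hpa = dpa_to_hpa_offset(dpa, 3, eig, eiw);

	assert(hpa_to_dpa_offset(hpa, &pos, eig, eiw) == dpa && pos == 3);
	printf("dpa_offset %#llx <-> hpa_offset %#llx pos %d\n",
	       (unsigned long long)dpa, (unsigned long long)hpa, pos);
	return 0;
}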
+
static struct lock_class_key cxl_pmem_region_key;
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
@@ -2941,7 +3108,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
struct device *dev;
int i;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return -ENXIO;
@@ -2953,7 +3120,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
- /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ /* Snapshot the region configuration underneath the cxl_rwsem.region */
cxlr_pmem->nr_mappings = p->nr_targets;
for (i = 0; i < p->nr_targets; i++) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -3020,7 +3187,7 @@ struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
return NULL;
return container_of(dev, struct cxl_dax_region, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, "CXL");
static struct lock_class_key cxl_dax_region_key;
@@ -3030,17 +3197,13 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_dax = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_rwsem.region);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return ERR_PTR(-ENXIO);
cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
- if (!cxlr_dax) {
- cxlr_dax = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!cxlr_dax)
+ return ERR_PTR(-ENOMEM);
cxlr_dax->hpa_range.start = p->res->start;
cxlr_dax->hpa_range.end = p->res->end;
@@ -3053,8 +3216,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_dax_region_type;
-out:
- up_read(&cxl_region_rwsem);
return cxlr_dax;
}
@@ -3181,25 +3342,54 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev, void *data)
+static int match_decoder_by_range(struct device *dev, const void *data)
{
- struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
+ const struct range *r1, *r2 = data;
+ struct cxl_decoder *cxld;
- if (!is_root_decoder(dev))
+ if (!is_switch_decoder(dev))
return 0;
- cxlrd = to_cxl_root_decoder(dev);
- r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ cxld = to_cxl_decoder(dev);
+ r1 = &cxld->hpa_range;
return range_contains(r1, r2);
}
-static int match_region_by_range(struct device *dev, void *data)
+static struct cxl_decoder *
+cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa)
+{
+ struct device *cxld_dev = device_find_child(&port->dev, hpa,
+ match_decoder_by_range);
+
+ return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL;
+}
+
+static struct cxl_root_decoder *
+cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ struct cxl_decoder *root, *cxld = &cxled->cxld;
+ struct range *hpa = &cxld->hpa_range;
+
+ root = cxl_port_find_switch_decoder(&cxl_root->port, hpa);
+ if (!root) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return NULL;
+ }
+
+ return to_cxl_root_decoder(&root->dev);
+}
+
+static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
struct cxl_region *cxlr;
- struct range *r = data;
- int rc = 0;
+ const struct range *r = data;
if (!is_cxl_region(dev))
return 0;
@@ -3207,60 +3397,91 @@ static int match_region_by_range(struct device *dev, void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->res && p->res->start == r->start && p->res->end == r->end)
- rc = 1;
- up_read(&cxl_region_rwsem);
+ return 1;
- return rc;
+ return 0;
}
-/* Establish an empty region covering the given HPA range */
-static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
- struct cxl_endpoint_decoder *cxled)
+static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
+ struct resource *res)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ resource_size_t size = resource_size(res);
+ resource_size_t cache_size, start;
+
+ cache_size = cxlrd->cache_size;
+ if (!cache_size)
+ return 0;
+
+ if (size != cache_size) {
+ dev_warn(&cxlr->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ /*
+ * Move the start of the range to where the cache range starts. The
+ * implementation assumes that the cache range is in front of the
+	 * CXL range. This is not dictated by the HMAT spec, but it matches
+	 * how currently known implementations are configured.
+ *
+ * The cache range is expected to be within the CFMWS. The adjusted
+ * res->start should not be less than cxlrd->res->start.
+ */
+ start = res->start - cache_size;
+ if (start < cxlrd->res->start)
+ return -ENXIO;
+
+ res->start = start;
+ p->cache_size = cache_size;
+
+ return 0;
+}
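/*
 * Worked example of the resize above, with hypothetical addresses: a
 * 4G direct-mapped cache sits immediately below a 4G CXL range inside
 * the CFMWS, so the region's resource grows downward by cache_size.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cache_size  = 0x100000000ULL;	/* 4G alias in front */
	uint64_t res_start   = 0x200000000ULL;	/* CXL part of the window */
	uint64_t cfmws_start = 0x100000000ULL;	/* root decoder base */
	uint64_t start = res_start - cache_size;

	if (start < cfmws_start) {
		printf("cache range escapes the CFMWS, reject\n");
		return 1;
	}
	printf("region start moves to %#llx, cache_size %#llx recorded\n",
	       (unsigned long long)start, (unsigned long long)cache_size);
	return 0;
}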
+
+static int __construct_region(struct cxl_region *cxlr,
+ struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlrd_to_port(cxlrd);
struct range *hpa = &cxled->cxld.hpa_range;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
struct resource *res;
int rc;
- do {
- cxlr = __create_region(cxlrd, cxled->mode,
- atomic_read(&cxlrd->region_id));
- } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
-
- if (IS_ERR(cxlr)) {
- dev_err(cxlmd->dev.parent,
- "%s:%s: %s failed assign region: %ld\n",
- dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
- __func__, PTR_ERR(cxlr));
- return cxlr;
- }
-
- down_write(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
"%s:%s: %s autodiscovery interrupted\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
__func__);
- rc = -EBUSY;
- goto err;
+ return -EBUSY;
}
set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!res)
+ return -ENOMEM;
*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
dev_name(&cxlr->dev));
+
+ rc = cxl_extended_linear_cache_resize(cxlr, res);
+ if (rc && rc != -EOPNOTSUPP) {
+ /*
+		 * Failing to resize for the extended linear cache does not
+		 * prevent the region from functioning; it only causes 'cxl
+		 * list' to report an incorrect region size.
+ */
+ dev_warn(cxlmd->dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
@@ -3280,7 +3501,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
if (rc)
- goto err;
+ return rc;
dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
@@ -3289,64 +3510,88 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
/* ...to match put_device() in cxl_add_to_region() */
get_device(&cxlr->dev);
- up_write(&cxl_region_rwsem);
+
+ return 0;
+}
+
+/* Establish an empty region covering the given HPA range */
+static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxlrd_to_port(cxlrd);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ int rc, part = READ_ONCE(cxled->part);
+ struct cxl_region *cxlr;
+
+ do {
+ cxlr = __create_region(cxlrd, cxlds->part[part].mode,
+ atomic_read(&cxlrd->region_id));
+ } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
+
+ if (IS_ERR(cxlr)) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s: %s failed assign region: %ld\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__, PTR_ERR(cxlr));
+ return cxlr;
+ }
+
+ rc = __construct_region(cxlr, cxlrd, cxled);
+ if (rc) {
+ devm_release_action(port->uport_dev, unregister_region, cxlr);
+ return ERR_PTR(rc);
+ }
return cxlr;
+}
-err:
- up_write(&cxl_region_rwsem);
- devm_release_action(port->uport_dev, unregister_region, cxlr);
- return ERR_PTR(rc);
+static struct cxl_region *
+cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa)
+{
+ struct device *region_dev;
+
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev)
+ return NULL;
+
+ return to_cxl_region(region_dev);
}
-int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct range *hpa = &cxled->cxld.hpa_range;
- struct cxl_decoder *cxld = &cxled->cxld;
- struct device *cxlrd_dev, *region_dev;
- struct cxl_root_decoder *cxlrd;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
- dev_name(&cxlmd->dev), dev_name(&cxld->dev),
- cxld->hpa_range.start, cxld->hpa_range.end);
+ struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) =
+ cxl_find_root_decoder(cxled);
+ if (!cxlrd)
return -ENXIO;
- }
-
- cxlrd = to_cxl_root_decoder(cxlrd_dev);
/*
* Ensure that if multiple threads race to construct_region() for @hpa
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
- match_region_by_range);
- if (!region_dev) {
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_find_region_by_range(cxlrd, hpa);
+ if (!cxlr)
cxlr = construct_region(cxlrd, cxled);
- region_dev = &cxlr->dev;
- } else
- cxlr = to_cxl_region(region_dev);
mutex_unlock(&cxlrd->range_lock);
rc = PTR_ERR_OR_ZERO(cxlr);
if (rc)
- goto out;
+ return rc;
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
- down_read(&cxl_region_rwsem);
- p = &cxlr->params;
- attach = p->state == CXL_CONFIG_COMMIT;
- up_read(&cxl_region_rwsem);
+ scoped_guard(rwsem_read, &cxl_rwsem.region) {
+ p = &cxlr->params;
+ attach = p->state == CXL_CONFIG_COMMIT;
+ }
if (attach) {
/*
@@ -3359,12 +3604,37 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
p->res);
}
- put_device(region_dev);
-out:
- put_device(cxlrd_dev);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
+
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
+{
+ struct cxl_region_ref *iter;
+ unsigned long index;
+
+ if (!endpoint)
+ return ~0ULL;
+
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ xa_for_each(&endpoint->regions, index, iter) {
+ struct cxl_region_params *p = &iter->region->params;
+
+ if (cxl_resource_contains_addr(p->res, spa)) {
+ if (!p->cache_size)
+ return ~0ULL;
+
+ if (spa >= p->res->start + p->cache_size)
+ return spa - p->cache_size;
+
+ return spa + p->cache_size;
+ }
+ }
+
+ return ~0ULL;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_get_spa_cache_alias, "CXL");
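/*
 * The aliasing rule above, reduced to a sketch: within an extended
 * linear cache region the cache half and the CXL half mirror each
 * other at a fixed cache_size stride (addresses hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t spa_cache_alias(uint64_t res_start, uint64_t cache_size,
				uint64_t spa)
{
	if (!cache_size)
		return ~0ULL;			/* no alias to report */
	if (spa >= res_start + cache_size)
		return spa - cache_size;	/* CXL half -> cache half */
	return spa + cache_size;		/* cache half -> CXL half */
}

int main(void)
{
	uint64_t start = 0x100000000ULL, cache = 0x100000000ULL;

	/* 0x180000000 lands in the cache half, alias is 0x280000000 */
	printf("alias %#llx\n", (unsigned long long)
	       spa_cache_alias(start, cache, 0x180000000ULL));
	return 0;
}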
static int is_system_ram(struct resource *res, void *arg)
{
@@ -3379,48 +3649,153 @@ static void shutdown_notifiers(void *_cxlr)
{
struct cxl_region *cxlr = _cxlr;
- unregister_memory_notifier(&cxlr->memory_notifier);
+ unregister_node_notifier(&cxlr->node_notifier);
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
-static int cxl_region_probe(struct device *dev)
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
+
+static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
+ resource_size_t region_size;
+ u64 hpa;
+
+ if (offset < p->cache_size) {
+ dev_err(&cxlr->dev,
+			"Offset %#llx is within extended linear cache %pa\n",
+ offset, &p->cache_size);
+ return -EINVAL;
+ }
+
+ region_size = resource_size(p->res);
+ if (offset >= region_size) {
+		dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n",
+ offset, &region_size);
+ return -EINVAL;
+ }
+
+ hpa = p->res->start + offset;
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_err(&cxlr->dev, "HPA %#llx not in region %pr\n", hpa,
+ p->res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc) {
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_inject_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+ cxl_region_debugfs_poison_inject, "%llx\n");
+
+static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_clear_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+ cxl_region_debugfs_poison_clear, "%llx\n");
+
+static int cxl_region_can_probe(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) {
dev_dbg(&cxlr->dev, "probe interrupted\n");
return rc;
}
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
dev_err(&cxlr->dev,
"failed to activate, re-commit region and retry\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
+ return 0;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool poison_supported = true;
+ int rc;
+
+ rc = cxl_region_can_probe(cxlr);
+ if (rc)
+ return rc;
+
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
-out:
- up_read(&cxl_region_rwsem);
-
- if (rc)
- return rc;
- cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
- cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
- register_memory_notifier(&cxlr->memory_notifier);
+ cxlr->node_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->node_notifier.priority = CXL_CALLBACK_PRI;
+ register_node_notifier(&cxlr->node_notifier);
cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
cxlr->adist_notifier.priority = 100;
@@ -3430,10 +3805,45 @@ out:
if (rc)
return rc;
+ /* Create poison attributes if all memdevs support the capabilities */
+ for (int i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ if (!cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_INJECT) ||
+ !cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_CLEAR)) {
+ poison_supported = false;
+ break;
+ }
+ }
+
+ if (poison_supported) {
+ struct dentry *dentry;
+
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_file("inject_poison", 0200, dentry, cxlr,
+ &cxl_poison_inject_fops);
+ debugfs_create_file("clear_poison", 0200, dentry, cxlr,
+ &cxl_poison_clear_fops);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+ }
+
switch (cxlr->mode) {
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_PMEM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
return devm_cxl_add_pmem_region(cxlr);
- case CXL_DECODER_RAM:
+ case CXL_PARTMODE_RAM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
/*
	 * The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
@@ -3467,6 +3877,6 @@ void cxl_region_exit(void)
cxl_driver_unregister(&cxl_region_driver);
}
-MODULE_IMPORT_NS(CXL);
-MODULE_IMPORT_NS(DEVMEM);
+MODULE_IMPORT_NS("CXL");
+MODULE_IMPORT_NS("DEVMEM");
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index e1082e749c69..5ca7b0eed568 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -52,7 +52,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);
if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
- dev_err(dev,
+ dev_dbg(dev,
"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
return;
}
@@ -106,7 +106,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
rmap->size = length;
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, "CXL");
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
@@ -174,7 +174,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
rmap->size = length;
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, "CXL");
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length)
@@ -232,7 +232,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, "CXL");
int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs)
@@ -266,7 +266,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, "CXL");
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
@@ -289,21 +289,17 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
return true;
}
-/**
- * cxl_find_regblock_instance() - Locate a register block by type / index
- * @pdev: The CXL PCI device to enumerate.
- * @type: Register Block Indicator id
- * @map: Enumeration output, clobbered on error
- * @index: Index into which particular instance of a regblock wanted in the
- * order found in register locator DVSEC.
- *
- * Return: 0 if register block enumerated, negative error code otherwise
+/*
+ * __cxl_find_regblock_instance() - Locate a register block or count instances by type / index
+ * Use CXL_INSTANCES_COUNT for @index if counting instances.
*
- * A CXL DVSEC may point to one or more register blocks, search for them
- * by @type and @index.
+ * __cxl_find_regblock_instance() may return:
+ * 0 - if a register block was enumerated.
+ * >= 0 - the instance count, when @index is CXL_INSTANCES_COUNT.
+ * < 0 - negative error code otherwise.
*/
-int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index)
+static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index)
{
u32 regloc_size, regblocks;
int instance = 0;
@@ -342,9 +338,31 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
}
map->resource = CXL_RESOURCE_NONE;
+ if (index == CXL_INSTANCES_COUNT)
+ return instance;
+
return -ENODEV;
}
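/*
 * The "find or count in one walk" folding above is a generic pattern;
 * a standalone analogue, with a negative sentinel index standing in
 * for CXL_INSTANCES_COUNT (the value here is illustrative):
 */
#include <stdio.h>

#define COUNT_SENTINEL	(-1)

static int find_or_count(const int *vals, int n, int want, int index)
{
	int instance = 0;

	for (int i = 0; i < n; i++) {
		if (vals[i] != want)
			continue;
		if (index == instance)
			return i;	/* the requested instance */
		instance++;
	}
	if (index == COUNT_SENTINEL)
		return instance;	/* total matches seen */
	return -1;			/* requested instance not found */
}

int main(void)
{
	int v[] = { 3, 7, 3, 3 };

	printf("instance 2 of '3' at index %d, count %d\n",
	       find_or_count(v, 4, 3, 2),
	       find_or_count(v, 4, 3, COUNT_SENTINEL));
	return 0;
}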
-EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL);
+
+/**
+ * cxl_find_regblock_instance() - Locate a register block by type / index
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ * @map: Enumeration output, clobbered on error
+ * @index: Index of the particular regblock instance wanted, in the
+ *	   order found in the register locator DVSEC.
+ *
+ * Return: 0 if register block enumerated, negative error code otherwise
+ *
+ * A CXL DVSEC may point to one or more register blocks, search for them
+ * by @type and @index.
+ */
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, unsigned int index)
+{
+ return __cxl_find_regblock_instance(pdev, type, map, index);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
/**
* cxl_find_regblock() - Locate register blocks by type
@@ -360,9 +378,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
- return cxl_find_regblock_instance(pdev, type, map, 0);
+ return __cxl_find_regblock_instance(pdev, type, map, 0);
}
-EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
/**
* cxl_count_regblock() - Count instances of a given regblock type.
@@ -371,21 +389,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
*
* Some regblocks may be repeated. Count how many instances.
*
- * Return: count of matching regblocks.
+ * Return: non-negative count of matching regblocks, negative error code otherwise.
*/
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
{
struct cxl_register_map map;
- int rc, count = 0;
- while (1) {
- rc = cxl_find_regblock_instance(pdev, type, &map, count);
- if (rc)
- return count;
- count++;
- }
+ return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
}
-EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL");
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
{
@@ -399,7 +411,7 @@ int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, "CXL");
static int cxl_map_regblock(struct cxl_register_map *map)
{
@@ -468,7 +480,7 @@ int cxl_setup_regs(struct cxl_register_map *map)
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, "CXL");
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
{
@@ -506,6 +518,62 @@ out:
return offset;
}
+static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport)
+{
+ resource_size_t rcrb = dport->rcrb.base;
+ void __iomem *addr;
+ u32 cap_hdr;
+ u16 offset;
+
+ if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB"))
+ return CXL_RESOURCE_NONE;
+
+ addr = ioremap(rcrb, SZ_4K);
+ if (!addr) {
+		dev_err(dev, "Failed to map RCRB region %pa\n", &rcrb);
+ release_mem_region(rcrb, SZ_4K);
+ return CXL_RESOURCE_NONE;
+ }
+
+ offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST));
+ cap_hdr = readl(addr + offset);
+ while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) {
+ offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr);
+ if (offset == 0 || offset > SZ_4K) {
+ offset = 0;
+ break;
+ }
+ cap_hdr = readl(addr + offset);
+ }
+
+ iounmap(addr);
+ release_mem_region(rcrb, SZ_4K);
+ if (!offset)
+ return CXL_RESOURCE_NONE;
+
+ return offset;
+}
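/*
 * The loop above is an ordinary PCI capability-list walk, just via
 * MMIO rather than config space. The same traversal over a plain byte
 * buffer (layout simplified: byte 0 of a capability is the ID, byte 1
 * the next pointer; the real code reads 32-bit headers with FIELD_GET):
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_ID_EXP 0x10		/* PCI_CAP_ID_EXP */

static int find_cap(const uint8_t *cfg, size_t len, uint8_t first, uint8_t id)
{
	uint8_t off = first;

	while (off && off + 1u < len) {
		if (cfg[off] == id)
			return off;
		off = cfg[off + 1];	/* 0 terminates the list */
	}
	return 0;	/* not found */
}

int main(void)
{
	uint8_t cfg[256] = { 0 };

	cfg[0x40] = 0x01; cfg[0x41] = 0x50;	/* PM cap, next at 0x50 */
	cfg[0x50] = CAP_ID_EXP; cfg[0x51] = 0;	/* PCIe cap, end of list */

	printf("PCIe cap at %#x\n", find_cap(cfg, sizeof(cfg), 0x40, CAP_ID_EXP));
	return 0;
}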
+
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport)
+{
+ void __iomem *dport_pcie_cap = NULL;
+ resource_size_t pos;
+ struct cxl_rcrb_info *ri;
+
+ ri = &dport->rcrb;
+ pos = cxl_rcrb_to_linkcap(&pdev->dev, dport);
+ if (pos == CXL_RESOURCE_NONE)
+ return -ENXIO;
+
+ dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev,
+ ri->base + pos,
+ PCI_CAP_EXP_SIZEOF);
+ dport->regs.rcd_pcie_cap = dport_pcie_cap;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, "CXL");
+
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
enum cxl_rcrb which)
{
@@ -513,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
- u16 cmd;
u32 id;
if (which == CXL_RCRB_UPSTREAM)
@@ -535,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
}
id = readl(addr + PCI_VENDOR_ID);
- cmd = readw(addr + PCI_COMMAND);
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
iounmap(addr);
@@ -550,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
dev_err(dev, "Failed to access Downstream Port RCRB\n");
return CXL_RESOURCE_NONE;
}
- if (!(cmd & PCI_COMMAND_MEMORY))
- return CXL_RESOURCE_NONE;
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
return CXL_RESOURCE_NONE;
@@ -577,4 +641,4 @@ resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
return CXL_RESOURCE_NONE;
return __rcrb_to_component(dev, &dport->rcrb, CXL_RCRB_UPSTREAM);
}
-EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, "CXL");
diff --git a/drivers/cxl/core/suspend.c b/drivers/cxl/core/suspend.c
index a5984d96ea1d..29aa5cc5e565 100644
--- a/drivers/cxl/core/suspend.c
+++ b/drivers/cxl/core/suspend.c
@@ -15,10 +15,10 @@ void cxl_mem_active_inc(void)
{
atomic_inc(&mem_active);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, "CXL");
void cxl_mem_active_dec(void)
{
atomic_dec(&mem_active);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, "CXL");
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 8672b42ee4d1..a53ec4798b12 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -48,6 +48,34 @@
{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)
+TRACE_EVENT(cxl_port_aer_uncorrectable_error,
+ TP_PROTO(struct device *dev, u32 status, u32 fe, u32 *hl),
+ TP_ARGS(dev, status, fe, hl),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ __field(u32, first_error)
+ __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ __entry->first_error = fe;
+ /*
+		 * Embed the 512B header log data for userspace retrieval and
+		 * parsing; there is no need to print it in the trace buffer.
+ */
+ memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
+ ),
+ TP_printk("device=%s host=%s status: '%s' first_error: '%s'",
+ __get_str(device), __get_str(host),
+ show_uc_errs(__entry->status),
+ show_uc_errs(__entry->first_error)
+ )
+);
+
TRACE_EVENT(cxl_aer_uncorrectable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
TP_ARGS(cxlmd, status, fe, hl),
@@ -96,6 +124,25 @@ TRACE_EVENT(cxl_aer_uncorrectable_error,
{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)
+TRACE_EVENT(cxl_port_aer_correctable_error,
+ TP_PROTO(struct device *dev, u32 status),
+ TP_ARGS(dev, status),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ ),
+ TP_printk("device=%s host=%s status='%s'",
+ __get_str(device), __get_str(host),
+ show_ce_errs(__entry->status)
+ )
+);
+
TRACE_EVENT(cxl_aer_correctable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
TP_ARGS(cxlmd, status),
@@ -166,11 +213,17 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
+#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
+#define CXL_EVENT_RECORD_FLAG_LD_ID_VALID BIT(7)
+#define CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID BIT(8)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
+ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_LD_ID_VALID, "LD_ID_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID, "HEAD_ID_VALID" } \
)
/*
@@ -197,7 +250,10 @@ TRACE_EVENT(cxl_overflow,
__field(u16, hdr_related_handle) \
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
- __field(u8, hdr_maint_op_class)
+ __field(u8, hdr_maint_op_class) \
+ __field(u8, hdr_maint_op_sub_class) \
+ __field(u16, hdr_ld_id) \
+ __field(u8, hdr_head_id)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -209,17 +265,23 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
- __entry->hdr_maint_op_class = (hdr).maint_op_class
+ __entry->hdr_maint_op_class = (hdr).maint_op_class; \
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class; \
+ __entry->hdr_ld_id = le16_to_cpu((hdr).ld_id); \
+ __entry->hdr_head_id = (hdr).head_id
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u " \
+ "ld_id=%x head_id=%x : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
+ __entry->hdr_maint_op_sub_class, \
+ __entry->hdr_ld_id, __entry->hdr_head_id, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -264,8 +326,30 @@ TRACE_EVENT(cxl_generic_event,
)
/*
+ * Component ID Format
+ * CXL 3.1 section 8.2.9.2.1; Table 8-44
+ */
+#define CXL_PLDM_COMPONENT_ID_ENTITY_VALID BIT(0)
+#define CXL_PLDM_COMPONENT_ID_RES_VALID BIT(1)
+
+#define show_comp_id_pldm_flags(flags) __print_flags(flags, " | ", \
+ { CXL_PLDM_COMPONENT_ID_ENTITY_VALID, "PLDM Entity ID" }, \
+ { CXL_PLDM_COMPONENT_ID_RES_VALID, "Resource ID" } \
+)
+
+#define show_pldm_entity_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_ENTITY_VALID) ? \
+ __print_hex(&comp_id[1], 6) : "0x00" : "0x00"
+
+#define show_pldm_resource_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_RES_VALID) ? \
+ __print_hex(&comp_id[7], 4) : "0x00" : "0x00"
+
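/*
 * The nested conditionals in the two macros above reduce to one rule:
 * only present the PLDM bytes when both validity flags are set and the
 * matching bit in comp_id[0] is set. Flattened as a sketch:
 */
#include <stdint.h>
#include <stdio.h>

#define VALID_COMPONENT		(1 << 3)	/* CXL_GMER_VALID_COMPONENT */
#define VALID_COMPONENT_FMT	(1 << 4)	/* ..._COMPONENT_ID_FORMAT */
#define ID_ENTITY_VALID		(1 << 0)	/* comp_id[0] flag */

static int pldm_entity_valid(uint16_t vflags, const uint8_t *comp_id)
{
	return (vflags & VALID_COMPONENT) && (vflags & VALID_COMPONENT_FMT) &&
	       (comp_id[0] & ID_ENTITY_VALID);
}

int main(void)
{
	uint8_t comp_id[16] = { ID_ENTITY_VALID, 0xaa, 0xbb };

	printf("entity id %s\n",
	       pldm_entity_valid(VALID_COMPONENT | VALID_COMPONENT_FMT, comp_id)
	       ? "valid (comp_id bytes 1..6)" : "not reported");
	return 0;
}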
+/*
* General Media Event Record - GMER
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
*/
#define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1)
@@ -279,10 +363,18 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
-#define show_mem_event_type(type) __print_symbolic(type, \
- { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x03
+#define CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x04
+#define CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_gmer_mem_event_type(type) __print_symbolic(type, \
+ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_GMER_TRANS_UNKNOWN 0x00
@@ -292,6 +384,8 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_ECS 0x07
+#define CXL_GMER_TRANS_MEDIA_INITIALIZATION 0x08
#define show_trans_type(type) __print_symbolic(type, \
{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
@@ -299,26 +393,66 @@ TRACE_EVENT(cxl_generic_event,
{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
- { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_ECS, "Internal Media Error Check Scrub" }, \
+ { CXL_GMER_TRANS_MEDIA_INITIALIZATION, "Media Initialization" } \
)
#define CXL_GMER_VALID_CHANNEL BIT(0)
#define CXL_GMER_VALID_RANK BIT(1)
#define CXL_GMER_VALID_DEVICE BIT(2)
#define CXL_GMER_VALID_COMPONENT BIT(3)
+#define CXL_GMER_VALID_COMPONENT_ID_FORMAT BIT(4)
#define show_valid_flags(flags) __print_flags(flags, "|", \
{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
{ CXL_GMER_VALID_RANK, "RANK" }, \
{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
- { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
+ { CXL_GMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_GMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+
+#define CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA BIT(0)
+#define CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED BIT(1)
+#define show_cme_threshold_ev_flags(flags) __print_flags(flags, "|", \
+ { \
+ CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA, \
+ "Corrected Memory Errors in Multiple Media Components" \
+ }, { \
+ CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED, \
+ "Exceeded Programmable Threshold" \
+ } \
+)
+
+#define CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR 0x01
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR 0x02
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR 0x03
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR 0x04
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR 0x05
+#define show_mem_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR, "Internal Datapath Error" }, \
+ { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR, \
+ "Media Link Command Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR, \
+ "Media Link Control Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR, \
+ "Media Link Data Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR, "Media Link CRC Error" \
+ } \
)
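
At trace read-out, __print_symbolic() resolves the recorded value against the pairs above. A minimal open-coded equivalent, assuming a hypothetical helper name and the kernel's ARRAY_SIZE() macro, would be:

	static const char *mem_event_sub_type_name(u8 sub_type)
	{
		static const char * const names[] = {
			[0x00] = "Not Reported",
			[0x01] = "Internal Datapath Error",
			[0x02] = "Media Link Command Training Error",
			[0x03] = "Media Link Control Training Error",
			[0x04] = "Media Link Data Training Error",
			[0x05] = "Media Link CRC Error",
		};

		if (sub_type < ARRAY_SIZE(names) && names[sub_type])
			return names[sub_type];
		return "Unknown";	/* __print_symbolic() shows the raw value instead */
	}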
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -332,10 +466,14 @@ TRACE_EVENT(cxl_general_media,
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
/* Following are out of order to pack trace record */
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __field(u32, cme_count)
+ __field(u8, sub_type)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -350,6 +488,7 @@ TRACE_EVENT(cxl_general_media,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->channel = rec->media_hdr.channel;
__entry->rank = rec->media_hdr.rank;
@@ -358,6 +497,7 @@ TRACE_EVENT(cxl_general_media,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -365,32 +505,65 @@ TRACE_EVENT(cxl_general_media,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ else
+ __entry->cme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
- "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "descriptor='%s' type='%s' sub_type='%s' " \
+ "transaction_type='%s' channel=%u rank=%u " \
+ "device=%x validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
+ "cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_gmer_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
- __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
show_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
+ show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
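
The cme_count field assigned above is a 3-byte little-endian counter; get_unaligned_le24() assembles it byte-wise. A minimal sketch of the equivalent conversion, with a hypothetical helper name:

	static inline u32 le24_sketch(const u8 *p)
	{
		/* byte 0 is least significant, matching get_unaligned_le24() */
		return p[0] | (p[1] << 8) | ((u32)p[2] << 16);
	}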
/*
* DRAM Event Record - DER
*
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
*/
/*
* DRAM Event Record defines many fields the same as the General Media Event
* Record. Reuse those definitions as appropriate.
*/
+#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00
+#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
+#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
+#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
+#define CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x04
+#define CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_dram_mem_event_type(type) __print_symbolic(type, \
+ { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
+)
+
#define CXL_DER_VALID_CHANNEL BIT(0)
#define CXL_DER_VALID_RANK BIT(1)
#define CXL_DER_VALID_NIBBLE BIT(2)
@@ -399,23 +572,30 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_VALID_ROW BIT(5)
#define CXL_DER_VALID_COLUMN BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK BIT(7)
-#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
- { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
- { CXL_DER_VALID_RANK, "RANK" }, \
- { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
- { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
- { CXL_DER_VALID_BANK, "BANK" }, \
- { CXL_DER_VALID_ROW, "ROW" }, \
- { CXL_DER_VALID_COLUMN, "COLUMN" }, \
- { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
+#define CXL_DER_VALID_COMPONENT BIT(8)
+#define CXL_DER_VALID_COMPONENT_ID_FORMAT BIT(9)
+#define CXL_DER_VALID_SUB_CHANNEL BIT(10)
+#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_DER_VALID_RANK, "RANK" }, \
+ { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_DER_VALID_BANK, "BANK" }, \
+ { CXL_DER_VALID_ROW, "ROW" }, \
+ { CXL_DER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" }, \
+ { CXL_DER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_DER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" }, \
+ { CXL_DER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
)
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -431,11 +611,18 @@ TRACE_EVENT(cxl_dram,
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ /* Following are out of order to pack trace record */
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u32, cvme_count)
+ __field(u8, sub_type)
+ __field(u8, sub_channel)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -449,6 +636,7 @@ TRACE_EVENT(cxl_dram,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
__entry->channel = rec->media_hdr.channel;
@@ -461,6 +649,7 @@ TRACE_EVENT(cxl_dram,
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -468,30 +657,50 @@ TRACE_EVENT(cxl_dram,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ __entry->sub_channel = rec->sub_channel;
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ else
+ __entry->cvme_count = 0;
),
- CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
"validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
+ "sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_dram_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
show_dram_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
+ __entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
+ __entry->cvme_count
)
);
/*
* Memory Module Event Record - MMER
*
- * CXL res 3.0 section 8.2.9.2.1.3; Table 8-45
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
*/
#define CXL_MMER_HEALTH_STATUS_CHANGE 0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE 0x01
@@ -499,27 +708,35 @@ TRACE_EVENT(cxl_dram,
#define CXL_MMER_TEMP_CHANGE 0x03
#define CXL_MMER_DATA_PATH_ERROR 0x04
#define CXL_MMER_LSA_ERROR 0x05
+#define CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR 0x06
+#define CXL_MMER_MEMORY_MEDIA_FRU_ERROR 0x07
+#define CXL_MMER_POWER_MANAGEMENT_FAULT 0x08
#define show_dev_evt_type(type) __print_symbolic(type, \
{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
- { CXL_MMER_LSA_ERROR, "LSA Error" } \
+ { CXL_MMER_LSA_ERROR, "LSA Error" }, \
+ { CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR, "Unrecoverable Internal Sideband Bus Error" }, \
+ { CXL_MMER_MEMORY_MEDIA_FRU_ERROR, "Memory Media FRU Error" }, \
+ { CXL_MMER_POWER_MANAGEMENT_FAULT, "Power Management Fault" } \
)
/*
* Device Health Information - DHI
*
- * CXL res 3.0 section 8.2.9.8.3.1; Table 8-100
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
*/
#define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2)
+#define CXL_DHI_HS_MEM_CAPACITY_DEGRADED BIT(3)
#define show_health_status_flags(flags) __print_flags(flags, "|", \
{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
+ { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" }, \
+ { CXL_DHI_HS_MEM_CAPACITY_DEGRADED, "MEM_CAPACITY_DEGRADED" } \
)
#define CXL_DHI_MS_NORMAL 0x00
@@ -573,6 +790,26 @@ TRACE_EVENT(cxl_dram,
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5)
+#define CXL_MMER_VALID_COMPONENT BIT(0)
+#define CXL_MMER_VALID_COMPONENT_ID_FORMAT BIT(1)
+#define show_mem_module_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_MMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+#define CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA 0x01
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA 0x02
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU 0x03
+#define show_dev_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA, "Invalid Config Data" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA, "Unsupported Config Data" }, \
+ { \
+ CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU, \
+ "Unsupported Memory Media FRU" \
+ } \
+)
+
TRACE_EVENT(cxl_memory_module,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
@@ -595,6 +832,9 @@ TRACE_EVENT(cxl_memory_module,
__field(u32, cor_per_err_cnt)
__field(s16, device_temp)
__field(u8, add_status)
+ __field(u8, event_sub_type)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u16, validity_flags)
),
TP_fast_assign(
@@ -603,6 +843,7 @@ TRACE_EVENT(cxl_memory_module,
/* Memory Module Event */
__entry->event_type = rec->event_type;
+ __entry->event_sub_type = rec->event_sub_type;
/* Device Health Info */
__entry->health_status = rec->info.health_status;
@@ -613,13 +854,20 @@ TRACE_EVENT(cxl_memory_module,
__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
__entry->add_status = rec->info.add_status;
+ __entry->validity_flags = get_unaligned_le16(rec->validity_flags);
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
),
- CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
- "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
+ CXL_EVT_TP_printk("event_type='%s' event_sub_type='%s' health_status='%s' " \
+ "media_status='%s' as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
- "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
+ "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u " \
+ "validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
show_dev_evt_type(__entry->event_type),
+ show_dev_event_sub_type(__entry->event_sub_type),
show_health_status_flags(__entry->health_status),
show_media_status(__entry->media_status),
show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
@@ -628,7 +876,119 @@ TRACE_EVENT(cxl_memory_module,
show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
__entry->life_used, __entry->device_temp,
__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
- __entry->cor_per_err_cnt
+ __entry->cor_per_err_cnt,
+ show_mem_module_valid_flags(__entry->validity_flags),
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
+ )
+);
+
+/*
+ * Memory Sparing Event Record - MSER
+ *
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+#define CXL_MSER_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_MSER_HARD_SPARING_FLAG BIT(1)
+#define CXL_MSER_DEV_INITED_FLAG BIT(2)
+#define show_mem_sparing_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_QUERY_RESOURCE_FLAG, "Query Resources" }, \
+ { CXL_MSER_HARD_SPARING_FLAG, "Hard Sparing" }, \
+ { CXL_MSER_DEV_INITED_FLAG, "Device Initiated Sparing" } \
+)
+
+#define CXL_MSER_VALID_CHANNEL BIT(0)
+#define CXL_MSER_VALID_RANK BIT(1)
+#define CXL_MSER_VALID_NIBBLE BIT(2)
+#define CXL_MSER_VALID_BANK_GROUP BIT(3)
+#define CXL_MSER_VALID_BANK BIT(4)
+#define CXL_MSER_VALID_ROW BIT(5)
+#define CXL_MSER_VALID_COLUMN BIT(6)
+#define CXL_MSER_VALID_COMPONENT_ID BIT(7)
+#define CXL_MSER_VALID_COMPONENT_ID_FORMAT BIT(8)
+#define CXL_MSER_VALID_SUB_CHANNEL BIT(9)
+#define show_mem_sparing_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_MSER_VALID_RANK, "RANK" }, \
+ { CXL_MSER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_MSER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_MSER_VALID_BANK, "BANK" }, \
+ { CXL_MSER_VALID_ROW, "ROW" }, \
+ { CXL_MSER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_MSER_VALID_COMPONENT_ID, "COMPONENT ID" }, \
+ { CXL_MSER_VALID_COMPONENT_ID_FORMAT, "COMPONENT ID PLDM FORMAT" }, \
+ { CXL_MSER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
+)
+
+TRACE_EVENT(cxl_memory_sparing,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_mem_sparing *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+
+ /* Memory Sparing Event */
+ __field(u8, flags)
+ __field(u8, result)
+ __field(u16, validity_flags)
+ __field(u16, res_avail)
+ __field(u8, channel)
+ __field(u8, rank)
+ __field(u32, nibble_mask)
+ __field(u8, bank_group)
+ __field(u8, bank)
+ __field(u32, row)
+ __field(u16, column)
+ __field(u8, sub_channel)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_SPARING_UUID;
+
+ /* Memory Sparing Event */
+ __entry->flags = rec->flags;
+ __entry->result = rec->result;
+ __entry->validity_flags = le16_to_cpu(rec->validity_flags);
+ __entry->res_avail = le16_to_cpu(rec->res_avail);
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
+ __entry->bank_group = rec->bank_group;
+ __entry->bank = rec->bank;
+ __entry->row = get_unaligned_le24(rec->row);
+ __entry->column = le16_to_cpu(rec->column);
+ __entry->sub_channel = rec->sub_channel;
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ ),
+
+ CXL_EVT_TP_printk("flags='%s' result=%u validity_flags='%s' " \
+ "spare resource avail=%u channel=%u rank=%u " \
+ "nibble_mask=%x bank_group=%u bank=%u " \
+ "row=%u column=%u sub_channel=%u " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
+ show_mem_sparing_flags(__entry->flags),
+ __entry->result,
+ show_mem_sparing_valid_flags(__entry->validity_flags),
+ __entry->res_avail, __entry->channel, __entry->rank,
+ __entry->nibble_mask, __entry->bank_group, __entry->bank,
+ __entry->row, __entry->column, __entry->sub_channel,
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
)
);
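
Each positional field in this record is meaningful only when its validity bit is set. A hypothetical consumer-side sketch (userspace, with the bit values mirrored locally) of gating on those bits:

	#include <stdio.h>

	#define MSER_VALID_CHANNEL (1 << 0)	/* mirrors CXL_MSER_VALID_CHANNEL */
	#define MSER_VALID_RANK    (1 << 1)	/* mirrors CXL_MSER_VALID_RANK */

	static void report_sparing_location(unsigned int validity_flags,
					    unsigned int channel, unsigned int rank)
	{
		if (validity_flags & MSER_VALID_CHANNEL)
			printf("channel=%u ", channel);
		if (validity_flags & MSER_VALID_RANK)
			printf("rank=%u ", rank);
		printf("\n");
	}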
@@ -684,6 +1044,7 @@ TRACE_EVENT(cxl_poison,
__string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field(u64, dpa)
__field(u32, dpa_length)
__array(char, uuid, 16)
@@ -706,16 +1067,22 @@ TRACE_EVENT(cxl_poison,
memcpy(__entry->uuid, &cxlr->params.uuid, 16);
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
+ if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
+ __entry->hpa_alias0 = __entry->hpa +
+ cxlr->params.cache_size;
+ else
+ __entry->hpa_alias0 = ULLONG_MAX;
} else {
__assign_str(region);
memset(__entry->uuid, 0, 16);
__entry->hpa = ULLONG_MAX;
+ __entry->hpa_alias0 = ULLONG_MAX;
}
),
TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
- "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
- "source=%s flags=%s overflow_time=%llu",
+ "region_uuid=%pU hpa=0x%llx hpa_alias0=0x%llx dpa=0x%llx " \
+ "dpa_length=0x%x source=%s flags=%s overflow_time=%llu",
__get_str(memdev),
__get_str(host),
__entry->serial,
@@ -723,6 +1090,7 @@ TRACE_EVENT(cxl_poison,
__get_str(region),
__entry->uuid,
__entry->hpa,
+ __entry->hpa_alias0,
__entry->dpa,
__entry->dpa_length,
show_poison_source(__entry->source),
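
The hpa_alias0 computation in TP_fast_assign() above reduces to a simple offset: when a region has an extended linear cache, the alias of a valid HPA is that HPA plus the region's cache_size, and ULLONG_MAX otherwise. As a sketch (hypothetical helper name):

	static u64 poison_hpa_alias0(u64 hpa, u64 cache_size)
	{
		if (hpa != ULLONG_MAX && cache_size)
			return hpa + cache_size;
		return ULLONG_MAX;	/* no region, or no extended linear cache */
	}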
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 0d8b810a51f0..231ddccf8977 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>
+#include <linux/range.h>
extern const struct nvdimm_security_ops *cxl_security_ops;
@@ -235,6 +236,14 @@ struct cxl_regs {
struct_group_tagged(cxl_rch_regs, rch_regs,
void __iomem *dport_aer;
);
+
+ /*
+ * RCD upstream port specific PCIe cap register
+	 * @rcd_pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB
+ */
+ struct_group_tagged(cxl_rcd_regs, rcd_regs,
+ void __iomem *rcd_pcie_cap;
+ );
};
struct cxl_reg_map {
@@ -294,16 +303,18 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
+#define CXL_INSTANCES_COUNT -1
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index);
+ struct cxl_register_map *map, unsigned int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map);
int cxl_setup_regs(struct cxl_register_map *map);
struct cxl_dport;
resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
struct cxl_dport *dport);
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport);
#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20
@@ -346,6 +357,9 @@ enum cxl_decoder_type {
* @target_type: accelerator vs expander (type2 vs type3) selector
* @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
+ * @target_map: cached copy of hardware port-id list, available at init
+ *		before all @dport objects have been instantiated. While
+ *		dport ids are 8-bit, CFMWS interleave targets are 32-bit.
* @commit: device/decoder-type specific callback to commit settings to hw
* @reset: device/decoder-type specific callback to reset hw settings
*/
@@ -358,36 +372,11 @@ struct cxl_decoder {
enum cxl_decoder_type target_type;
struct cxl_region *region;
unsigned long flags;
+ u32 target_map[CXL_DECODER_MAX_INTERLEAVE];
int (*commit)(struct cxl_decoder *cxld);
- int (*reset)(struct cxl_decoder *cxld);
-};
-
-/*
- * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
- * while cxld_unregister() is running
- */
-enum cxl_decoder_mode {
- CXL_DECODER_NONE,
- CXL_DECODER_RAM,
- CXL_DECODER_PMEM,
- CXL_DECODER_MIXED,
- CXL_DECODER_DEAD,
+ void (*reset)(struct cxl_decoder *cxld);
};
-static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
-{
- static const char * const names[] = {
- [CXL_DECODER_NONE] = "none",
- [CXL_DECODER_RAM] = "ram",
- [CXL_DECODER_PMEM] = "pmem",
- [CXL_DECODER_MIXED] = "mixed",
- };
-
- if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
- return names[mode];
- return "mixed";
-}
-
/*
* Track whether this decoder is reserved for region autodiscovery, or
* free for userspace provisioning.
@@ -402,16 +391,16 @@ enum cxl_decoder_state {
* @cxld: base cxl_decoder_object
* @dpa_res: actively claimed DPA span of this decoder
* @skip: offset into @dpa_res where @cxld.hpa_range maps
- * @mode: which memory type / access-mode-partition this decoder targets
* @state: autodiscovery state
+ * @part: partition index this decoder maps
* @pos: interleave position in @cxld.region
*/
struct cxl_endpoint_decoder {
struct cxl_decoder cxld;
struct resource *dpa_res;
resource_size_t skip;
- enum cxl_decoder_mode mode;
enum cxl_decoder_state state;
+ int part;
int pos;
};
@@ -434,25 +423,35 @@ struct cxl_switch_decoder {
};
struct cxl_root_decoder;
-typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
+/**
+ * struct cxl_rd_ops - CXL root decoder callback operations
+ * @hpa_to_spa: Convert host physical address to system physical address
+ * @spa_to_hpa: Convert system physical address to host physical address
+ */
+struct cxl_rd_ops {
+ u64 (*hpa_to_spa)(struct cxl_root_decoder *cxlrd, u64 hpa);
+ u64 (*spa_to_hpa)(struct cxl_root_decoder *cxlrd, u64 spa);
+};
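
Grouping the translation callbacks behind an ops pointer lets callers dispatch through a NULL-tolerant wrapper. A minimal sketch, assuming an absent callback means identity translation (the wrapper name is hypothetical):

	static u64 cxl_rd_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
	{
		if (cxlrd->ops && cxlrd->ops->hpa_to_spa)
			return cxlrd->ops->hpa_to_spa(cxlrd, hpa);
		return hpa;	/* no platform-specific translation registered */
	}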
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
* @region_id: region id for next region provisioning event
- * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
* @qos_class: QoS performance class cookie
+ * @ops: CXL root decoder operations
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
struct resource *res;
+ resource_size_t cache_size;
atomic_t region_id;
- cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
struct mutex range_lock;
int qos_class;
+ struct cxl_rd_ops *ops;
struct cxl_switch_decoder cxlsd;
};
@@ -483,8 +482,9 @@ enum cxl_config_state {
* @res: allocated iomem capacity for this region
* @targets: active ordered targets in current decoder configuration
* @nr_targets: number of targets
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
*
- * State transitions are protected by the cxl_region_rwsem
+ * State transitions are protected by cxl_rwsem.region
*/
struct cxl_region_params {
enum cxl_config_state state;
@@ -494,6 +494,12 @@ struct cxl_region_params {
struct resource *res;
struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
int nr_targets;
+ resource_size_t cache_size;
+};
+
+enum cxl_partition_mode {
+ CXL_PARTMODE_RAM,
+ CXL_PARTMODE_PMEM,
};
/*
@@ -515,27 +521,27 @@ struct cxl_region_params {
* struct cxl_region - CXL region
* @dev: This region's device
* @id: This region's id. Id is globally unique across all regions
- * @mode: Endpoint decoder allocation / access mode
+ * @mode: Operational mode of the mapped capacity
* @type: Endpoint decoder target type
* @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
* @flags: Region state flags
* @params: active + config params for the region
* @coord: QoS access coordinates for the region
- * @memory_notifier: notifier for setting the access coordinates to node
+ * @node_notifier: notifier for setting the access coordinates to node
* @adist_notifier: notifier for calculating the abstract distance of node
*/
struct cxl_region {
struct device dev;
int id;
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
enum cxl_decoder_type type;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;
unsigned long flags;
struct cxl_region_params params;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
- struct notifier_block memory_notifier;
+ struct notifier_block node_notifier;
struct notifier_block adist_notifier;
};
@@ -553,6 +559,7 @@ struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
};
struct cxl_pmem_region_mapping {
@@ -600,6 +607,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @component_reg_phys: Physical address of component register
*/
struct cxl_port {
struct device dev;
@@ -623,6 +631,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ resource_size_t component_reg_phys;
};
/**
@@ -670,6 +679,7 @@ struct cxl_rcrb_info {
* @regs: Dport parsed register blocks
* @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
+ * @gpf_dvsec: Cached GPF port DVSEC
*/
struct cxl_dport {
struct device *dport_dev;
@@ -681,6 +691,7 @@ struct cxl_dport {
struct cxl_regs regs;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
+ int gpf_dvsec;
};
/**
@@ -730,6 +741,8 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+struct cxl_port *parent_port_of(struct cxl_port *port);
+void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus);
@@ -741,10 +754,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
-void put_cxl_root(struct cxl_root *cxl_root);
-DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_device(&_T->port.dev))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+DEFINE_FREE(put_cxl_root_decoder, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(put_cxl_region, struct cxl_region *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
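These DEFINE_FREE() entries pair with the __free() attribute from linux/cleanup.h: the named release routine runs automatically when the annotated variable leaves scope, which is how the RCRB path in pci.c below avoids explicit put_device() calls. A minimal usage sketch (the caller function is hypothetical):

	static int example_lookup(struct cxl_memdev *cxlmd)
	{
		struct cxl_dport *dport;
		struct cxl_port *port __free(put_cxl_port) =
			cxl_mem_find_port(cxlmd, &dport);

		if (!port)
			return -ENXIO;
		/* use port; the reference drops on every return path */
		return 0;
	}
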
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -780,9 +795,9 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add(struct cxl_decoder *cxld);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add_locked(struct cxl_decoder *cxld);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
struct cxl_root_decoder *cxlrd)
@@ -805,18 +820,17 @@ struct cxl_endpoint_dvsec_info {
struct range dvsec_range[2];
};
-struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+
+struct cxl_dev_state;
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
-extern struct bus_type cxl_bus_type;
+extern const struct bus_type cxl_bus_type;
struct cxl_driver {
const char *name;
@@ -854,16 +868,15 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
-bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
-int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled);
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@@ -873,8 +886,7 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
-static inline int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled)
+static inline int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
return 0;
}
@@ -882,10 +894,15 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
return NULL;
}
+static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
+ u64 spa)
+{
+ return 0;
+}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
-void cxl_switch_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_dport *dport);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
@@ -900,6 +917,10 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c2);
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -909,4 +930,22 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif
+u16 cxl_gpf_get_dvsec(struct device *dev);
+
+/*
+ * Declarations for functions that are mocked by cxl_test and called by
+ * cxl_core. The respective functions are defined as __foo() and called by
+ * cxl_core as foo(). The macros below ensure that those functions exist
+ * as foo(). See tools/testing/cxl/cxl_core_exports.c and
+ * tools/testing/cxl/exports.h for setting up the mock functions. The dance
+ * avoids a circular dependency where cxl_core calls a function that ends up
+ * being a mock function in cxl_test, which in turn calls back into a
+ * cxl_core function.
+ */
+#ifndef CXL_TEST_ENABLE
+#define DECLARE_TESTABLE(x) __##x
+#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
+#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
+#endif
+
#endif /* __CXL_H__ */
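
Concretely: without CXL_TEST_ENABLE, a call spelled devm_cxl_add_dport_by_dev() compiles into a call to the real __devm_cxl_add_dport_by_dev(); with cxl_test built, the unprefixed symbol comes from the test harness, which can route fake topology before falling back to the real implementation. A hedged sketch of such a harness-side wrapper (is_mock_port() and mock_add_dport() are illustrative names, not claims about the actual cxl_test internals):

	struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
						    struct device *dport_dev)
	{
		if (is_mock_port(port))
			return mock_add_dport(port, dport_dev);
		return __devm_cxl_add_dport_by_dev(port, dport_dev);
	}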
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 2a25d1957ddb..434031a0c1f7 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -45,6 +45,11 @@
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
+ * @scrub_cycle: current scrub cycle set for this device
+ * @scrub_region_id: id of the region (if any) for which the current scrub cycle is set
+ * @err_rec_array: List of xarrays that store the memdev error records, used
+ *		to check that attributes for a memory repair operation are
+ *		from the current boot.
*/
struct cxl_memdev {
struct device dev;
@@ -56,6 +61,9 @@ struct cxl_memdev {
struct cxl_port *endpoint;
int id;
int depth;
+ u8 scrub_cycle;
+ int scrub_region_id;
+ void *err_rec_array;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -97,6 +105,19 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
+#define CXL_NR_PARTITIONS_MAX 2
+
+struct cxl_dpa_info {
+ u64 size;
+ struct cxl_dpa_part_info {
+ struct range range;
+ enum cxl_partition_mode mode;
+ } part[CXL_NR_PARTITIONS_MAX];
+ int nr_partitions;
+};
+
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info);
+
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
@@ -106,42 +127,6 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
-/**
- * struct cxl_mbox_cmd - A command to be submitted to hardware.
- * @opcode: (input) The command set and command submitted to hardware.
- * @payload_in: (input) Pointer to the input payload.
- * @payload_out: (output) Pointer to the output payload. Must be allocated by
- * the caller.
- * @size_in: (input) Number of bytes to load from @payload_in.
- * @size_out: (input) Max number of bytes loaded into @payload_out.
- * (output) Number of bytes generated by the device. For fixed size
- * outputs commands this is always expected to be deterministic. For
- * variable sized output commands, it tells the exact number of bytes
- * written.
- * @min_out: (input) internal command output payload size validation
- * @poll_count: (input) Number of timeouts to attempt.
- * @poll_interval_ms: (input) Time between mailbox background command polling
- * interval timeouts.
- * @return_code: (output) Error code returned from hardware.
- *
- * This is the primary mechanism used to send commands to the hardware.
- * All the fields except @payload_* correspond exactly to the fields described in
- * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and
- * @payload_out are written to, and read from the Command Payload Registers
- * defined in CXL 2.0 8.2.8.4.8.
- */
-struct cxl_mbox_cmd {
- u16 opcode;
- void *payload_in;
- void *payload_out;
- size_t size_in;
- size_t size_out;
- size_t min_out;
- int poll_count;
- int poll_interval_ms;
- u16 return_code;
-};
-
/*
* Per CXL 3.0 Section 8.2.8.4.5.1
*/
@@ -269,7 +254,7 @@ enum security_cmd_enabled_bits {
* @max_errors: Maximum media error records held in device cache
* @enabled_cmds: All poison commands enabled in the CEL
* @list_out: The poison list payload returned by device
- * @lock: Protect reads of the poison list
+ * @mutex: Protect reads of the poison list
*
* Reads of the poison list are synchronized to ensure that a reader
* does not get an incomplete list because their request overlapped
@@ -280,7 +265,7 @@ struct cxl_poison_state {
u32 max_errors;
DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
struct cxl_mbox_poison_out *list_out;
- struct mutex lock; /* Protect reads of poison list */
+ struct mutex mutex; /* Protect reads of poison list */
};
/*
@@ -409,6 +394,18 @@ struct cxl_dpa_perf {
};
/**
+ * struct cxl_dpa_partition - DPA partition descriptor
+ * @res: shortcut to the partition in the DPA resource tree (cxlds->dpa_res)
+ * @perf: performance attributes of the partition from CDAT
+ * @mode: operation mode for the DPA capacity, e.g. ram, pmem, dynamic...
+ */
+struct cxl_dpa_partition {
+ struct resource res;
+ struct cxl_dpa_perf perf;
+ enum cxl_partition_mode mode;
+};
+
+/**
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
@@ -423,11 +420,12 @@ struct cxl_dpa_perf {
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
* @dpa_res: Overall DPA resource tree for the device
- * @pmem_res: Active Persistent memory capacity configuration
- * @ram_res: Active Volatile memory capacity configuration
+ * @part: DPA partition array
+ * @nr_partitions: Number of DPA partitions
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
* @cxl_mbox: CXL mailbox context
+ * @cxlfs: CXL features context
*/
struct cxl_dev_state {
struct device *dev;
@@ -438,13 +436,28 @@ struct cxl_dev_state {
bool rcd;
bool media_ready;
struct resource dpa_res;
- struct resource pmem_res;
- struct resource ram_res;
+ struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX];
+ unsigned int nr_partitions;
u64 serial;
enum cxl_devtype type;
struct cxl_mailbox cxl_mbox;
+#ifdef CONFIG_CXL_FEATURES
+ struct cxl_features_state *cxlfs;
+#endif
};
+static inline resource_size_t cxl_pmem_size(struct cxl_dev_state *cxlds)
+{
+ /*
+ * Static PMEM may be at partition index 0 when there is no static RAM
+ * capacity.
+ */
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return resource_size(&cxlds->part[i].res);
+ return 0;
+}
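
The same walk over cxlds->part[] generalizes to other partition queries; for example, a hypothetical helper (not in this patch) summing all partition capacities:

	static inline resource_size_t cxl_total_part_size(struct cxl_dev_state *cxlds)
	{
		resource_size_t total = 0;

		for (int i = 0; i < cxlds->nr_partitions; i++)
			total += resource_size(&cxlds->part[i].res);
		return total;
	}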
+
static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
{
return dev_get_drvdata(cxl_mbox->host);
@@ -461,22 +474,17 @@ static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
* @lsa_size: Size of Label Storage Area
* (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
* @firmware_version: Firmware version for the memory device.
- * @enabled_cmds: Hardware commands found enabled in CEL.
- * @exclusive_cmds: Commands that are kernel-internal only
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
* @partition_align_bytes: alignment size for partition-able capacity
* @active_volatile_bytes: sum of hard + soft volatile
* @active_persistent_bytes: sum of hard + soft persistent
- * @next_volatile_bytes: volatile capacity change pending device reset
- * @next_persistent_bytes: persistent capacity change pending device reset
- * @ram_perf: performance data entry matched to RAM partition
- * @pmem_perf: performance data entry matched to PMEM partition
* @event: event log driver state
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
+ * @mce_notifier: MCE notifier
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -485,24 +493,18 @@ struct cxl_memdev_state {
struct cxl_dev_state cxlds;
size_t lsa_size;
char firmware_version[0x10];
- DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
- DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
u64 partition_align_bytes;
u64 active_volatile_bytes;
u64 active_persistent_bytes;
- u64 next_volatile_bytes;
- u64 next_persistent_bytes;
-
- struct cxl_dpa_perf ram_perf;
- struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
+ struct notifier_block mce_notifier;
};
static inline struct cxl_memdev_state *
@@ -530,6 +532,10 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_LOG_CAPS = 0x0402,
CXL_MBOX_OP_CLEAR_LOG = 0x0403,
CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405,
+ CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
+ CXL_MBOX_OP_GET_FEATURE = 0x0501,
+ CXL_MBOX_OP_SET_FEATURE = 0x0502,
+ CXL_MBOX_OP_DO_MAINTENANCE = 0x0600,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
@@ -628,6 +634,14 @@ struct cxl_mbox_identify {
0x13, 0xb7, 0x74)
/*
+ * Memory Sparing Event Record UUID
+ * CXL rev 3.2 section 8.2.10.2.1.4: Table 8-60
+ */
+#define CXL_EVENT_MEM_SPARING_UUID \
+ UUID_INIT(0xe71f3a40, 0x2d29, 0x4092, 0x8a, 0x39, 0x4d, 0x1c, 0x96, \
+ 0x6c, 0x7c, 0x65)
+
+/*
* Get Event Records output payload
* CXL rev 3.0 section 8.2.9.2.2; Table 8-50
*/
@@ -693,6 +707,23 @@ struct cxl_mbox_set_partition_info {
#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+/* Get Health Info Output Payload CXL 3.2 Spec 8.2.10.9.3.1 Table 8-148 */
+struct cxl_mbox_get_health_info_out {
+ u8 health_status;
+ u8 media_status;
+ u8 additional_status;
+ u8 life_used;
+ __le16 device_temperature;
+ __le32 dirty_shutdown_cnt;
+ __le32 corrected_volatile_error_cnt;
+ __le32 corrected_persistent_error_cnt;
+} __packed;
+
+/* Set Shutdown State Input Payload CXL 3.2 Spec 8.2.10.9.3.5 Table 8-152 */
+struct cxl_mbox_set_shutdown_state_in {
+ u8 state;
+} __packed;
+
/* Set Timestamp CXL 3.0 Spec 8.2.9.4.2 */
struct cxl_mbox_set_timestamp_in {
__le64 timestamp;
@@ -818,7 +849,7 @@ int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info);
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
@@ -829,6 +860,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count);
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
@@ -836,6 +869,29 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
+
+#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
+int devm_cxl_region_edac_register(struct cxl_region *cxlr);
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt);
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt);
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd);
+#else
+static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{ return 0; }
+static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{ return 0; }
+static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{ return; }
+#endif
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 4da07727ab9c..7ae621e618e7 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -40,6 +40,12 @@
/* CXL 2.0 8.1.6: GPF DVSEC for CXL Port */
#define CXL_DVSEC_PORT_GPF 4
+#define CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET 0x0C
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK GENMASK(11, 8)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET 0xE
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK GENMASK(11, 8)
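
These masks are laid out for FIELD_GET() from linux/bitfield.h. A sketch of decoding a Phase 2 control value that was already read via pci_read_config_word() (the function name is hypothetical; combining base and scale into an absolute time follows the CXL GPF DVSEC encoding and is not shown here):

	static void decode_gpf_phase2(u16 ctrl)
	{
		u8 base  = FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK, ctrl);
		u8 scale = FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK, ctrl);

		/* base and scale together encode the phase 2 timeout per the spec */
		pr_debug("GPF phase 2 timeout: base=%u scale=%u\n", base, scale);
	}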
/* CXL 2.0 8.1.7: GPF DVSEC for CXL Device */
#define CXL_DVSEC_DEVICE_GPF 5
@@ -123,8 +129,6 @@ static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a9fd5cd5a0d2..6e6777b7bafb 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -152,7 +152,7 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
- if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
+ if (cxl_pmem_size(cxlds) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
if (rc) {
if (rc == -ENODEV)
@@ -180,6 +180,10 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
+ rc = devm_cxl_memdev_edac_register(cxlmd);
+ if (rc)
+ dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
+
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
@@ -252,7 +256,7 @@ module_cxl_driver(cxl_mem_driver);
MODULE_DESCRIPTION("CXL: Memory Expansion");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
/*
* create_endpoint() wants to validate port driver attach immediately after
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 188412d45e0d..bd100ac31672 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -379,7 +379,7 @@ static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
{
int rc;
- mutex_lock_io(&cxl_mbox->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
mutex_unlock(&cxl_mbox->mbox_mutex);
@@ -475,9 +475,9 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
}
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
- struct cxl_register_map *map)
+ struct cxl_register_map *map,
+ struct cxl_dport *dport)
{
- struct cxl_dport *dport;
resource_size_t component_reg_phys;
*map = (struct cxl_register_map) {
@@ -513,11 +513,24 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
* is an RCH and try to extract the Component Registers from
* an RCRB.
*/
- if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
- rc = cxl_rcrb_get_comp_regs(pdev, map);
-
- if (rc)
+ if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) {
+ struct cxl_dport *dport;
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
+ if (!port)
+ return -EPROBE_DEFER;
+
+ rc = cxl_rcrb_get_comp_regs(pdev, map, dport);
+ if (rc)
+ return rc;
+
+ rc = cxl_dport_map_rcd_linkcap(pdev, dport);
+ if (rc)
+ return rc;
+
+ } else if (rc) {
return rc;
+ }
return cxl_setup_regs(map);
}
@@ -764,10 +777,6 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return 0;
}
- rc = cxl_mem_alloc_event_buf(mds);
- if (rc)
- return rc;
-
rc = cxl_event_get_int_policy(mds, &policy);
if (rc)
return rc;
@@ -781,6 +790,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return -EBUSY;
}
+ rc = cxl_mem_alloc_event_buf(mds);
+ if (rc)
+ return rc;
+
rc = cxl_event_irqsetup(mds);
if (rc)
return rc;
@@ -807,14 +820,96 @@ static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds)
return 0;
}
+static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width)
+{
+ struct cxl_dev_state *cxlds = dev_get_drvdata(dev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct device *root_dev;
+ struct cxl_dport *dport;
+ struct cxl_port *root __free(put_cxl_port) =
+ cxl_mem_find_port(cxlmd, &dport);
+
+ if (!root)
+ return -ENXIO;
+
+ root_dev = root->uport_dev;
+ if (!root_dev)
+ return -ENXIO;
+
+ if (!dport->regs.rcd_pcie_cap)
+ return -ENXIO;
+
+ guard(device)(root_dev);
+ if (!root_dev->driver)
+ return -ENXIO;
+
+ switch (width) {
+ case 2:
+ return sysfs_emit(buf, "%#x\n",
+ readw(dport->regs.rcd_pcie_cap + offset));
+ case 4:
+ return sysfs_emit(buf, "%#x\n",
+ readl(dport->regs.rcd_pcie_cap + offset));
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t rcd_link_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32));
+}
+static DEVICE_ATTR_RO(rcd_link_cap);
+
+static ssize_t rcd_link_ctrl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_ctrl);
+
+static ssize_t rcd_link_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_status);
+
+static struct attribute *cxl_rcd_attrs[] = {
+ &dev_attr_rcd_link_cap.attr,
+ &dev_attr_rcd_link_ctrl.attr,
+ &dev_attr_rcd_link_status.attr,
+ NULL
+};
+
+static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (is_cxl_restricted(pdev))
+ return a->mode;
+
+ return 0;
+}
+
+static struct attribute_group cxl_rcd_group = {
+ .attrs = cxl_rcd_attrs,
+ .is_visible = cxl_rcd_visible,
+};
+__ATTRIBUTE_GROUPS(cxl_rcd);
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+ struct cxl_dpa_info range_info = { 0 };
struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- int i, rc, pmu_count;
+ int rc, pmu_count;
+ unsigned int i;
bool irq_avail;
/*
@@ -899,10 +994,18 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_mem_create_range_info(mds);
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
if (rc)
return rc;
+ rc = devm_cxl_setup_features(cxlds);
+ if (rc)
+ dev_dbg(&pdev->dev, "No CXL Features discovered\n");
+
cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
@@ -915,7 +1018,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
+ rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
+ if (rc)
+ dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
+
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+ if (pmu_count < 0)
+ return pmu_count;
+
for (i = 0; i < pmu_count; i++) {
struct cxl_pmu_regs pmu_regs;
@@ -942,8 +1052,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_pci_ras_unmask(pdev);
- if (rc)
+ if (cxl_pci_ras_unmask(pdev))
dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
pci_save_state(pdev);
@@ -1016,6 +1125,7 @@ static struct pci_driver cxl_pci_driver = {
.id_table = cxl_mem_pci_tbl,
.probe = cxl_pci_probe,
.err_handler = &cxl_error_handlers,
+ .dev_groups = cxl_rcd_groups,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
@@ -1093,4 +1203,4 @@ module_init(cxl_pci_driver_init);
module_exit(cxl_pci_driver_exit);
MODULE_DESCRIPTION("CXL: PCI manageability");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index d2d43a4fc053..e197883690ef 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -42,15 +42,44 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ return sysfs_emit(buf, "%llu\n", cxl_nvd->dirty_shutdowns);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *cxl_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_provider.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL
};
+#define CXL_INVALID_DIRTY_SHUTDOWN_COUNT ULLONG_MAX
+static umode_t cxl_dimm_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ if (a == &dev_attr_dirty_shutdown.attr) {
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ if (cxl_nvd->dirty_shutdowns ==
+ CXL_INVALID_DIRTY_SHUTDOWN_COUNT)
+ return 0;
+ }
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_dimm_attribute_group = {
.name = "cxl",
.attrs = cxl_dimm_attributes,
+ .is_visible = cxl_dimm_visible
};
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
@@ -58,6 +87,38 @@ static const struct attribute_group *cxl_dimm_attribute_groups[] = {
NULL
};
+static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd)
+{
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = &cxl_nvd->dev;
+ u32 count;
+
+ /*
+	 * Dirty tracking is enabled and exposed to the user only when:
+	 * - dirty shutdown on the device can be set, and
+	 * - the device has a Device GPF DVSEC (albeit unused), and
+ * - the Get Health Info cmd can retrieve the device's dirty count.
+ */
+ cxl_nvd->dirty_shutdowns = CXL_INVALID_DIRTY_SHUTDOWN_COUNT;
+
+ if (cxl_arm_dirty_shutdown(mds)) {
+ dev_warn(dev, "GPF: could not set dirty shutdown state\n");
+ return;
+ }
+
+ if (!cxl_gpf_get_dvsec(cxlds->dev))
+ return;
+
+ if (cxl_get_dirty_count(mds, &count)) {
+ dev_warn(dev, "GPF: could not retrieve dirty count\n");
+ return;
+ }
+
+ cxl_nvd->dirty_shutdowns = count;
+}
+
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -78,6 +139,14 @@ static int cxl_nvdimm_probe(struct device *dev)
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+
+ /*
+ * Set dirty shutdown now, with the expectation that the device
+ * clear it upon a successful GPF flow. The exception to this
+ * is upon Viral detection, per CXL 3.2 section 12.4.2.
+ */
+ cxl_nvdimm_arm_dirty_shutdown_tracking(cxl_nvd);
+
nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
cxl_dimm_attribute_groups, flags,
cmd_mask, 0, NULL, cxl_nvd->dev_id,
@@ -375,6 +444,16 @@ static int cxl_pmem_region_probe(struct device *dev)
goto out_nvd;
}
+ if (cxlds->serial == 0) {
+		/* the error message covers both missing and invalid serials */
+ dev_err(dev, "%s: invalid or missing serial number\n",
+ dev_name(&cxlmd->dev));
+ rc = -ENXIO;
+ goto out_nvd;
+ }
+ info[i].serial = cxlds->serial;
+ info[i].offset = m->start;
+
m->cxl_nvd = cxl_nvd;
mappings[i] = (struct nd_mapping_desc) {
.nvdimm = nvdimm,
@@ -382,8 +461,6 @@ static int cxl_pmem_region_probe(struct device *dev)
.size = m->size,
.position = i,
};
- info[i].offset = m->start;
- info[i].serial = cxlds->serial;
}
ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
ndr_desc.mapping = mappings;
@@ -459,7 +536,7 @@ MODULE_DESCRIPTION("CXL PMEM: Persistent Memory Support");
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
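The dirty_shutdown counter surfaces through the "cxl" attribute group registered above; when tracking could not be armed, cxl_dimm_visible() hides the file via the ULLONG_MAX sentinel. A minimal userspace sketch of consuming it (the nmem0 path is an assumption; pick the DIMM of interest):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long long count;
	FILE *f = fopen("/sys/bus/nd/devices/nmem0/cxl/dirty_shutdown", "r");

	if (!f) {
		/* file hidden: dirty shutdown tracking is unavailable */
		perror("dirty_shutdown");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%llu", &count) == 1)
		printf("dirty shutdowns: %llu\n", count);
	fclose(f);
	return 0;
}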
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 861dde65768f..51c8f2f84717 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,7 +30,7 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int discover_region(struct device *dev, void *root)
+static int discover_region(struct device *dev, void *unused)
{
struct cxl_endpoint_decoder *cxled;
int rc;
@@ -49,7 +49,7 @@ static int discover_region(struct device *dev, void *root)
* Region enumeration is opportunistic, if this add-event fails,
* continue to the next endpoint decoder.
*/
- rc = cxl_add_to_region(root, cxled);
+ rc = cxl_add_to_region(cxled);
if (rc)
dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
@@ -59,56 +59,20 @@ static int discover_region(struct device *dev, void *root)
static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_hdm *cxlhdm;
- int rc;
+ /* Reset nr_dports for rebind of driver */
+ port->nr_dports = 0;
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
-
- cxl_switch_parse_cdat(port);
-
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- if (!IS_ERR(cxlhdm))
- return devm_cxl_enumerate_decoders(cxlhdm, NULL);
-
- if (PTR_ERR(cxlhdm) != -ENODEV) {
- dev_err(&port->dev, "Failed to map HDM decoder capability\n");
- return PTR_ERR(cxlhdm);
- }
-
- if (rc == 1) {
- dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
- return devm_cxl_add_passthrough_decoder(port);
- }
-
- dev_err(&port->dev, "HDM decoder capability not found\n");
- return -ENXIO;
+ return 0;
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
- struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_hdm *cxlhdm;
- struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
- if (rc < 0)
- return rc;
-
- cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm)) {
- if (PTR_ERR(cxlhdm) == -ENODEV)
- dev_err(&port->dev, "HDM decoder registers not found\n");
- return PTR_ERR(cxlhdm);
- }
-
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
@@ -118,27 +82,15 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
if (rc)
return rc;
- rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
- if (rc)
- return rc;
-
- /*
- * This can't fail in practice as CXL root exit unregisters all
- * descendant ports and that in turn synchronizes with cxl_port_probe()
- */
- struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
-
- root = &cxl_root->port;
-
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
- device_for_each_child(&port->dev, root, discover_region);
+ device_for_each_child(&port->dev, NULL, discover_region);
return 0;
}
@@ -153,7 +105,7 @@ static int cxl_port_probe(struct device *dev)
}
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -170,10 +122,10 @@ static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
port->cdat.length);
}
-static BIN_ATTR_ADMIN_RO(CDAT, 0);
+static const BIN_ATTR_ADMIN_RO(CDAT, 0);
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+ const struct bin_attribute *attr, int i)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_port *port = to_cxl_port(dev);
@@ -184,12 +136,12 @@ static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
return 0;
}
-static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+static const struct bin_attribute *const cxl_cdat_bin_attributes[] = {
&bin_attr_CDAT,
NULL,
};
-static struct attribute_group cxl_cdat_attribute_group = {
+static const struct attribute_group cxl_cdat_attribute_group = {
.bin_attrs = cxl_cdat_bin_attributes,
.is_bin_visible = cxl_port_bin_attr_is_visible,
};
@@ -208,8 +160,23 @@ static struct cxl_driver cxl_port_driver = {
},
};
-module_cxl_driver(cxl_port_driver);
+static int __init cxl_port_init(void)
+{
+ return cxl_driver_register(&cxl_port_driver);
+}
+/*
+ * Be ready to immediately enable ports emitted by the platform CXL root
+ * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
+ */
+subsys_initcall(cxl_port_init);
+
+static void __exit cxl_port_exit(void)
+{
+ cxl_driver_unregister(&cxl_port_driver);
+}
+module_exit(cxl_port_exit);
+
MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
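discover_region() now passes NULL as the iterator cookie because cxl_add_to_region() resolves the root on its own. For reference, the shape of the device_for_each_child() idiom used here (callback body illustrative, not the CXL implementation):

/* The walk stops early if the callback returns non-zero. */
static int visit_child(struct device *dev, void *data)
{
	dev_dbg(dev, "visited\n");
	return 0;
}

static void walk_children(struct device *parent)
{
	/* the second argument is an opaque cookie handed to each call */
	device_for_each_child(parent, NULL, visit_child);
}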
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index 9b29e732b39a..13cd94d32ff7 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -46,4 +46,4 @@ MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
MODULE_DESCRIPTION("CXL DAX: direct access to CXL regions");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 446617b73aea..0867115aeef2 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -40,6 +40,12 @@ struct dax_region {
struct device *youngest;
};
+/**
+ * struct dax_mapping - device to display mapping range attributes
+ * @dev: device representing this range
+ * @range_id: index within dev_dax ranges array
+ * @id: ida of this mapping
+ */
struct dax_mapping {
struct device dev;
int range_id;
@@ -47,6 +53,18 @@ struct dax_mapping {
};
/**
+ * struct dev_dax_range - tuple representing a range of memory used by dev_dax
+ * @pgoff: page offset
+ * @range: resource-span
+ * @mapping: reference to the dax_mapping for this range
+ */
+struct dev_dax_range {
+ unsigned long pgoff;
+ struct range range;
+ struct dax_mapping *mapping;
+};
+
+/**
* struct dev_dax - instance data for a subdivision of a dax region, and
* data while the device is activated in the driver.
* @region - parent region
@@ -58,7 +76,7 @@ struct dax_mapping {
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @nr_range: size of @ranges
- * @ranges: resource-span + pgoff tuples for the instance
+ * @ranges: range tuples of memory used
*/
struct dev_dax {
struct dax_region *region;
@@ -72,11 +90,7 @@ struct dev_dax {
struct dev_pagemap *pgmap;
bool memmap_on_memory;
int nr_range;
- struct dev_dax_range {
- unsigned long pgoff;
- struct range range;
- struct dax_mapping *mapping;
- } *ranges;
+ struct dev_dax_range *ranges;
};
/*
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6d74e62bbee0..2bb40a6060af 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -4,7 +4,6 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
@@ -73,7 +72,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
unsigned long fault_size)
{
unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -89,14 +88,13 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
ALIGN_DOWN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
- struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+ struct folio *folio = pfn_folio(pfn + i);
- page = compound_head(page);
- if (page->mapping)
+ if (folio->mapping)
continue;
- page->mapping = filp->f_mapping;
- page->index = pgoff + i;
+ folio->mapping = filp->f_mapping;
+ folio->index = pgoff + i;
}
}
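All three fault handlers below follow the same conversion: the pfn_t wrapper and its PFN_DEV|PFN_MAP flags are gone, a bare PFN is derived from the physical address, and insertion goes through the folio-based helpers. A short sketch of the helper relationships, as used in this file:

/* Kernel helpers, simplified:
 *   PHYS_PFN(phys)   == phys >> PAGE_SHIFT
 *   pfn_to_page(pfn) -> struct page *
 *   page_folio(page) -> the containing struct folio *
 *   pfn_folio(pfn)   == page_folio(pfn_to_page(pfn))
 */
unsigned long pfn = PHYS_PFN(phys);
struct folio *folio = pfn_folio(pfn);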
@@ -105,7 +103,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -126,11 +124,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
+ vmf->flags & FAULT_FLAG_WRITE);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
@@ -140,7 +139,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +168,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -184,7 +184,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PUD_SIZE;
@@ -214,11 +214,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5e7c53f18491..c18451a37e4f 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -2,7 +2,6 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index e97d47f42ee2..c036e4d0b610 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -5,7 +5,6 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
@@ -13,6 +12,7 @@
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
+#include <linux/string_helpers.h>
#include "dax-private.h"
#include "bus.h"
@@ -68,7 +68,7 @@ static void kmem_put_memory_types(void)
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
- unsigned long total_len = 0;
+ unsigned long total_len = 0, orig_len = 0;
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
@@ -97,6 +97,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
+ orig_len += range_len(&dev_dax->ranges[i].range);
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
@@ -109,6 +110,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
+ } else if (total_len != orig_len) {
+ char buf[16];
+
+ string_get_size(orig_len - total_len, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
init_node_memory_type(numa_node, mtype);
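string_get_size() (from <linux/string_helpers.h>) renders a byte count into a human-readable string. A sketch of the call as used above, with an illustrative value:

	char buf[16];

	/* 3 MiB of truncation; buf ends up holding something like "3.00 MiB" */
	string_get_size(3UL << 20, 1, STRING_UNITS_2, buf, sizeof(buf));
	dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);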
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index c8ebf4e281f2..bee93066a849 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "bus.h"
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
deleted file mode 100644
index 191c31f0d4f0..000000000000
--- a/drivers/dax/pmem/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
-
-dax_pmem-y := pmem.o
-dax_pmem_core-y := core.o
-dax_pmem_compat-y := compat.o
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
deleted file mode 100644
index dfe91a2990fe..000000000000
--- a/drivers/dax/pmem/pmem.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
-#include <linux/percpu-refcount.h>
-#include <linux/memremap.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/nd.h>
-#include "../bus.h"
-
-
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e16d1d40d773..d7714d8afb0f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,7 +7,6 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -148,7 +147,7 @@ enum dax_device_flags {
* pages accessible at the device relative @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
{
long avail;
@@ -389,7 +388,7 @@ static const struct super_operations dax_sops = {
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int dax_init_fs_context(struct fs_context *fc)
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c4862a752b5..c999c4a1e567 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -90,6 +90,17 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_HISI_UNCORE_DEVFREQ
+ tristate "HiSilicon uncore DEVFREQ Driver"
+ depends on ACPI && ACPI_PPTT && PCC
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_USERSPACE
+ help
+	  This adds a DEVFREQ driver that manages uncore frequency scaling for
+	  HiSilicon Kunpeng SoCs, enabling runtime control from both kernel
+	  and userspace. The uncore domain contains the system interconnects
+	  and the L3 cache.
+
config ARM_IMX_BUS_DEVFREQ
tristate "i.MX Generic Bus DEVFREQ Driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index bf40d04928d0..404179d79a9d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_HISI_UNCORE_DEVFREQ) += hisi_uncore_freq.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3ebac2496679..70219099c604 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -244,13 +244,9 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
-
- if (!edev) {
- of_node_put(node);
- return ERR_PTR(-ENODEV);
- }
-
of_node_put(node);
+ if (!edev)
+ return ERR_PTR(-ENODEV);
return edev;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 98657d3b9435..2e8d01d47f69 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -152,11 +152,8 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
(unsigned long)HZ_PER_KHZ * qos_max_freq);
/* Apply constraints from OPP interface */
- *min_freq = max(*min_freq, devfreq->scaling_min_freq);
- *max_freq = min(*max_freq, devfreq->scaling_max_freq);
-
- if (*min_freq > *max_freq)
- *min_freq = *max_freq;
+ *max_freq = clamp(*max_freq, devfreq->scaling_min_freq, devfreq->scaling_max_freq);
+ *min_freq = clamp(*min_freq, devfreq->scaling_min_freq, *max_freq);
}
EXPORT_SYMBOL(devfreq_get_freq_range);
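Clamping *max_freq into the OPP window first, then clamping *min_freq against the already-clamped *max_freq, reproduces the old max()/min() pair plus the min>max fixup in two statements and guarantees min <= max. A standalone demonstration with illustrative numbers (the local clamp macro stands in for the kernel's <linux/minmax.h> version):

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	/* QoS requests 900..1500, but the OPPs only allow 400..800 */
	unsigned long min_freq = 900, max_freq = 1500;
	unsigned long scaling_min = 400, scaling_max = 800;

	max_freq = clamp(max_freq, scaling_min, scaling_max);	/* -> 800 */
	min_freq = clamp(min_freq, scaling_min, max_freq);	/* -> 800 */
	printf("effective range: %lu..%lu\n", min_freq, max_freq);
	return 0;
}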
@@ -807,7 +804,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- unsigned long min_freq, max_freq;
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -835,6 +831,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
+ devfreq->dev.groups = profile->dev_groups;
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
@@ -875,8 +872,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
-
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
if (IS_ERR(devfreq->opp_table))
@@ -1382,15 +1377,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
int ret;
struct device *dev = devfreq->dev.parent;
+ if (!devfreq->governor)
+ continue;
+
if (!strncmp(devfreq->governor->name, governor->name,
DEVFREQ_NAME_LEN)) {
- /* we should have a devfreq governor! */
- if (!devfreq->governor) {
- dev_warn(dev, "%s: Governor %s NOT present\n",
- __func__, governor->name);
- continue;
- /* Fall through */
- }
ret = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
@@ -1743,7 +1734,7 @@ static ssize_t trans_stat_show(struct device *dev,
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
- if (df->freq_table[2] == df->previous_freq)
+ if (df->freq_table[i] == df->previous_freq)
len += sysfs_emit_at(buf, len, "*");
else
len += sysfs_emit_at(buf, len, " ");
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5edc522f715c..6a3efd782ad0 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -284,7 +284,7 @@ static void exynos_nocp_remove(struct platform_device *pdev)
static struct platform_driver exynos_nocp_driver = {
.probe = exynos_nocp_probe,
- .remove_new = exynos_nocp_remove,
+ .remove = exynos_nocp_remove,
.driver = {
.name = "exynos-nocp",
.of_match_table = exynos_nocp_id_match,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 7002df20a49e..88cd4dfe87e1 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -701,7 +701,7 @@ static void exynos_ppmu_remove(struct platform_device *pdev)
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
- .remove_new = exynos_ppmu_remove,
+ .remove = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index e2a1e4463b6f..5a2c9badcc64 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -34,15 +34,18 @@
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
+#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \
+ DDRMON_CTRL_DDR4 | \
DDRMON_CTRL_LPDDR4 | \
DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +119,60 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl,
+ u32 *mask)
+{
+ u32 ddrmon_ver;
+
+ *mask = DDRMON_CTRL_DDR_TYPE_MASK;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = DDRMON_CTRL_LPDDR23;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = DDRMON_CTRL_LPDDR4;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode;
+ *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK;
+ break;
+ }
+
+		/*
+		 * It is unknown whether the vendor kernel's unpleasant
+		 * special-case behaviour is needed on any shipping
+		 * hardware, so ask users to report it if they have
+		 * such hardware.
+		 */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
+ u32 ctrl_mask;
mutex_lock(&dfi->mutex);
@@ -135,8 +186,11 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
@@ -146,21 +200,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
+ writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
/* enable count, use software mode */
@@ -435,7 +475,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -642,8 +682,7 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
if (ret)
return ret;
- hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dfi->timer.function = rockchip_dfi_timer;
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
switch (dfi->ddr_type) {
case ROCKCHIP_DDRTYPE_LPDDR2:
@@ -652,10 +691,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -727,7 +770,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -752,6 +795,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
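HIWORD_UPDATE() is the usual Rockchip register convention: the upper 16 bits carry a per-bit write-enable mask for the lower 16, so selected fields can be updated without a read-modify-write. A sketch of the idiom, assuming the driver's usual definition of the macro:

/* Assumed definition (Rockchip convention; check the driver header):
 *   #define HIWORD_UPDATE(val, mask)  ((val) | ((mask) << 16))
 */
u32 ctrl = DDRMON_CTRL_LPDDR5;		/* value for the type field */
u32 mask = DDRMON_CTRL_DDR_TYPE_MASK;	/* bits being written */

/* only the bits set in mask change; the rest are ignored by hardware */
writel_relaxed(HIWORD_UPDATE(ctrl, mask), dfi_regs + DDRMON_CTRL);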
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 7d06c476d8e9..b9ea7ad2e51b 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -236,8 +236,7 @@ err_regulator:
return ret;
}
-static int exynos_bus_parse_of(struct device_node *np,
- struct exynos_bus *bus)
+static int exynos_bus_parse_of(struct exynos_bus *bus)
{
struct device *dev = bus->dev;
struct dev_pm_opp *opp;
@@ -408,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
/* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
+ ret = exynos_bus_parse_of(bus);
if (ret < 0)
goto err_reg;
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index d1aa6806b683..175de0c0b50e 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/kstrtox.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long wanted;
int err = 0;
+ err = kstrtoul(buf, 0, &wanted);
+ if (err)
+ return err;
+
mutex_lock(&devfreq->lock);
data = devfreq->governor_data;
- sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
data->valid = true;
err = update_devfreq(devfreq);
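Unlike the sscanf() it replaces, kstrtoul() reports bad input (-EINVAL) and overflow (-ERANGE) instead of silently leaving 'wanted' unset, and parsing before taking devfreq->lock keeps the failure path lock-free. The general store-handler shape (illustrative sketch, not the full governor):

static ssize_t freq_store_sketch(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	unsigned long wanted;
	int err;

	err = kstrtoul(buf, 0, &wanted);  /* base 0: decimal, 0x.., 0.. */
	if (err)
		return err;

	/* ... take the lock and apply 'wanted' here ... */
	return count;
}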
diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c
new file mode 100644
index 000000000000..96d1815059e3
--- /dev/null
+++ b/drivers/devfreq/hisi_uncore_freq.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon uncore frequency scaling driver
+ *
+ * Copyright (c) 2025 HiSilicon Co., Ltd
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/devfreq.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+#include <linux/topology.h>
+#include <linux/units.h>
+#include <acpi/pcc.h>
+
+#include "governor.h"
+
+struct hisi_uncore_pcc_data {
+ u16 status;
+ u16 resv;
+ u32 data;
+};
+
+struct hisi_uncore_pcc_shmem {
+ struct acpi_pcct_shared_memory head;
+ struct hisi_uncore_pcc_data pcc_data;
+};
+
+enum hisi_uncore_pcc_cmd_type {
+ HUCF_PCC_CMD_GET_CAP = 0,
+ HUCF_PCC_CMD_GET_FREQ,
+ HUCF_PCC_CMD_SET_FREQ,
+ HUCF_PCC_CMD_GET_MODE,
+ HUCF_PCC_CMD_SET_MODE,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ HUCF_PCC_CMD_MAX = 256
+};
+
+static int hisi_platform_gov_usage;
+static DEFINE_MUTEX(hisi_platform_gov_usage_lock);
+
+enum hisi_uncore_freq_mode {
+ HUCF_MODE_PLATFORM = 0,
+ HUCF_MODE_OS,
+ HUCF_MODE_MAX
+};
+
+#define HUCF_CAP_PLATFORM_CTRL BIT(0)
+
+/**
+ * struct hisi_uncore_freq - hisi uncore frequency scaling device data
+ * @dev: device of this frequency scaling driver
+ * @cl: mailbox client object
+ * @pchan: PCC mailbox channel
+ * @chan_id: PCC channel ID
+ * @last_cmd_cmpl_time: timestamp of the last completed PCC command
+ * @pcc_lock: PCC channel lock
+ * @devfreq: devfreq data of this hisi_uncore_freq device
+ * @related_cpus: CPUs whose performance is majorly affected by this
+ * uncore frequency domain
+ * @cap: capability flag
+ */
+struct hisi_uncore_freq {
+ struct device *dev;
+ struct mbox_client cl;
+ struct pcc_mbox_chan *pchan;
+ int chan_id;
+ ktime_t last_cmd_cmpl_time;
+ struct mutex pcc_lock;
+ struct devfreq *devfreq;
+ struct cpumask related_cpus;
+ u32 cap;
+};
+
+/* PCC channel timeout = PCC nominal latency * NUM */
+#define HUCF_PCC_POLL_TIMEOUT_NUM 1000
+#define HUCF_PCC_POLL_INTERVAL_US 5
+
+/* Default polling interval in ms for devfreq governors */
+#define HUCF_DEFAULT_POLLING_MS 100
+
+static void hisi_uncore_free_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ guard(mutex)(&uncore->pcc_lock);
+ pcc_mbox_free_channel(uncore->pchan);
+ uncore->pchan = NULL;
+}
+
+static void devm_hisi_uncore_free_pcc_chan(void *data)
+{
+ hisi_uncore_free_pcc_chan(data);
+}
+
+static int hisi_uncore_request_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ struct pcc_mbox_chan *pcc_chan;
+
+ uncore->cl = (struct mbox_client) {
+ .dev = dev,
+ .tx_block = false,
+ .knows_txdone = true,
+ };
+
+ pcc_chan = pcc_mbox_request_channel(&uncore->cl, uncore->chan_id);
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(dev, PTR_ERR(pcc_chan),
+ "Failed to request PCC channel %u\n", uncore->chan_id);
+
+ if (!pcc_chan->shmem_base_addr) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory address\n");
+ }
+
+ if (pcc_chan->shmem_size < sizeof(struct hisi_uncore_pcc_shmem)) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory size (%lluB)\n",
+ pcc_chan->shmem_size);
+ }
+
+ uncore->pchan = pcc_chan;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_free_pcc_chan, uncore);
+}
+
+static acpi_status hisi_uncore_pcc_reg_scan(struct acpi_resource *res,
+ void *ctx)
+{
+ struct acpi_resource_generic_register *reg;
+ struct hisi_uncore_freq *uncore;
+
+ if (!res || res->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &res->data.generic_reg;
+ if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
+ return AE_OK;
+
+ if (!ctx)
+ return AE_ERROR;
+
+ uncore = ctx;
+ /* PCC subspace ID stored in Access Size */
+ uncore->chan_id = reg->access_size;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static int hisi_uncore_init_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ acpi_handle handle = ACPI_HANDLE(uncore->dev);
+ acpi_status status;
+ int rc;
+
+ uncore->chan_id = -1;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ hisi_uncore_pcc_reg_scan, uncore);
+ if (ACPI_FAILURE(status) || uncore->chan_id < 0)
+ return dev_err_probe(uncore->dev, -ENODEV,
+ "Failed to get a PCC channel\n");
+
+ rc = devm_mutex_init(uncore->dev, &uncore->pcc_lock);
+ if (rc)
+ return rc;
+
+ return hisi_uncore_request_pcc_chan(uncore);
+}
+
+static int hisi_uncore_cmd_send(struct hisi_uncore_freq *uncore,
+ u8 cmd, u32 *data)
+{
+ struct hisi_uncore_pcc_shmem __iomem *addr;
+ struct hisi_uncore_pcc_shmem shmem;
+ struct pcc_mbox_chan *pchan;
+ unsigned int mrtt;
+ s64 time_delta;
+ u16 status;
+ int rc;
+
+ guard(mutex)(&uncore->pcc_lock);
+
+ pchan = uncore->pchan;
+ if (!pchan)
+ return -ENODEV;
+
+ addr = (struct hisi_uncore_pcc_shmem __iomem *)pchan->shmem;
+ if (!addr)
+ return -EINVAL;
+
+ /* Handle the Minimum Request Turnaround Time (MRTT) */
+ mrtt = pchan->min_turnaround_time;
+ time_delta = ktime_us_delta(ktime_get(), uncore->last_cmd_cmpl_time);
+ if (mrtt > time_delta)
+ udelay(mrtt - time_delta);
+
+ /* Copy data */
+ shmem.head = (struct acpi_pcct_shared_memory) {
+ .signature = PCC_SIGNATURE | uncore->chan_id,
+ .command = cmd,
+ };
+ shmem.pcc_data.data = *data;
+ memcpy_toio(addr, &shmem, sizeof(shmem));
+
+ /* Ring doorbell */
+ rc = mbox_send_message(pchan->mchan, &cmd);
+ if (rc < 0) {
+ dev_err(uncore->dev, "Failed to send mbox message, %d\n", rc);
+ return rc;
+ }
+
+ /* Wait status */
+ rc = readw_poll_timeout(&addr->head.status, status,
+ status & (PCC_STATUS_CMD_COMPLETE |
+ PCC_STATUS_ERROR),
+ HUCF_PCC_POLL_INTERVAL_US,
+ pchan->latency * HUCF_PCC_POLL_TIMEOUT_NUM);
+ if (rc) {
+ dev_err(uncore->dev, "PCC channel response timeout, cmd=%u\n", cmd);
+ } else if (status & PCC_STATUS_ERROR) {
+ dev_err(uncore->dev, "PCC cmd error, cmd=%u\n", cmd);
+ rc = -EIO;
+ }
+
+ uncore->last_cmd_cmpl_time = ktime_get();
+
+ /* Copy data back */
+ memcpy_fromio(data, &addr->pcc_data.data, sizeof(*data));
+
+ /* Clear mailbox active req */
+ mbox_client_txdone(pchan->mchan, rc);
+
+ return rc;
+}
+
+static int hisi_uncore_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed to get opp for freq %lu hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+	/* read the frequency before dropping the OPP reference */
+	data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ);
+	dev_pm_opp_put(opp);
+
+ return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data);
+}
+
+static int hisi_uncore_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ /* Not used */
+ return 0;
+}
+
+static int hisi_uncore_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ u32 data = 0;
+ int rc;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_FREQ, &data);
+
+ /*
+ * Upon a failure, 'data' remains 0 and 'freq' is set to 0 rather than a
+ * random value. devfreq shouldn't use 'freq' in that case though.
+ */
+ *freq = data * HZ_PER_MHZ;
+
+ return rc;
+}
+
+static void devm_hisi_uncore_remove_opp(void *data)
+{
+ struct hisi_uncore_freq *uncore = data;
+
+ dev_pm_opp_remove_all_dynamic(uncore->dev);
+}
+
+static int hisi_uncore_init_opp(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ unsigned long freq_mhz;
+ u32 num, index;
+ u32 data = 0;
+ int rc;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat freq num\n");
+
+ num = data;
+
+ for (index = 0; index < num; index++) {
+ data = index;
+ rc = hisi_uncore_cmd_send(uncore,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ &data);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Failed to get plat freq at index %u\n", index);
+ }
+ freq_mhz = data;
+
+		/* OPP voltage is not used; take 1V as a default */
+ rc = dev_pm_opp_add(dev, freq_mhz * HZ_PER_MHZ, 1000000);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Add OPP %lu failed\n", freq_mhz);
+ }
+ }
+
+ return devm_add_action_or_reset(dev, devm_hisi_uncore_remove_opp,
+ uncore);
+}
+
+static int hisi_platform_gov_func(struct devfreq *df, unsigned long *freq)
+{
+ /*
+ * Platform-controlled mode doesn't care the frequency issued from
+ * devfreq, so just pick the max freq.
+ */
+ *freq = DEVFREQ_MAX_FREQ;
+
+ return 0;
+}
+
+static int hisi_platform_gov_handler(struct devfreq *df, unsigned int event,
+ void *val)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(df->dev.parent);
+ int rc = 0;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ data = HUCF_MODE_PLATFORM;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode (%d)\n", rc);
+ break;
+ case DEVFREQ_GOV_STOP:
+ data = HUCF_MODE_OS;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set os mode (%d)\n", rc);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * In the platform-controlled mode, the platform decides the uncore frequency
+ * and ignores the frequency issued from the driver.
+ * Thus, create a pseudo 'hisi_platform' governor that stops devfreq monitor
+ * from working so as to save meaningless overhead.
+ */
+static struct devfreq_governor hisi_platform_governor = {
+ .name = "hisi_platform",
+ /*
+ * Set interrupt_driven to skip the devfreq monitor mechanism, though
+ * this governor is not interrupt-driven.
+ */
+ .flags = DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
+ .get_target_freq = hisi_platform_gov_func,
+ .event_handler = hisi_platform_gov_handler,
+};
+
+static void hisi_uncore_remove_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ u32 data = HUCF_MODE_PLATFORM;
+ int rc;
+
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (--hisi_platform_gov_usage == 0) {
+ rc = devfreq_remove_governor(&hisi_platform_governor);
+ if (rc)
+ dev_err(uncore->dev, "Failed to remove hisi_platform gov (%d)\n", rc);
+ }
+
+ /*
+ * Set to the platform-controlled mode on exit if supported, so as to
+ * have a certain behaviour when the driver is detached.
+ */
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode on exit (%d)\n", rc);
+}
+
+static void devm_hisi_uncore_remove_platform_gov(void *data)
+{
+ hisi_uncore_remove_platform_gov(data);
+}
+
+static int hisi_uncore_add_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return 0;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (hisi_platform_gov_usage == 0) {
+ int rc = devfreq_add_governor(&hisi_platform_governor);
+ if (rc)
+ return rc;
+ }
+ hisi_platform_gov_usage++;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_remove_platform_gov,
+ uncore);
+}
+
+/*
+ * Returns:
+ * 0 if success, uncore->related_cpus is set.
+ * -EINVAL if property not found, or property found but without elements in it,
+ * or invalid arguments received in any of the subroutine.
+ * Other error codes if it goes wrong.
+ */
+static int hisi_uncore_mark_related_cpus(struct hisi_uncore_freq *uncore,
+ char *property, int (*get_topo_id)(int cpu),
+ const struct cpumask *(*get_cpumask)(int cpu))
+{
+ unsigned int i, cpu;
+ size_t len;
+ int rc;
+
+ rc = device_property_count_u32(uncore->dev, property);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -EINVAL;
+
+ len = rc;
+ u32 *num __free(kfree) = kcalloc(len, sizeof(*num), GFP_KERNEL);
+ if (!num)
+ return -ENOMEM;
+
+ rc = device_property_read_u32_array(uncore->dev, property, num, len);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < len; i++) {
+ for_each_possible_cpu(cpu) {
+ if (get_topo_id(cpu) != num[i])
+ continue;
+
+ cpumask_or(&uncore->related_cpus,
+ &uncore->related_cpus, get_cpumask(cpu));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int get_package_id(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static const struct cpumask *get_package_cpumask(int cpu)
+{
+ return topology_core_cpumask(cpu);
+}
+
+static int get_cluster_id(int cpu)
+{
+ return topology_cluster_id(cpu);
+}
+
+static const struct cpumask *get_cluster_cpumask(int cpu)
+{
+ return topology_cluster_cpumask(cpu);
+}
+
+static int hisi_uncore_mark_related_cpus_wrap(struct hisi_uncore_freq *uncore)
+{
+ int rc;
+
+ cpumask_clear(&uncore->related_cpus);
+
+ rc = hisi_uncore_mark_related_cpus(uncore, "related-package",
+ get_package_id,
+ get_package_cpumask);
+	/* done on success; errors other than -EINVAL suggest broken firmware */
+ if (!rc || rc != -EINVAL)
+ return rc;
+
+ /* Try another property name if rc == -EINVAL */
+ return hisi_uncore_mark_related_cpus(uncore, "related-cluster",
+ get_cluster_id,
+ get_cluster_cpumask);
+}
+
+static ssize_t related_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev->parent);
+
+ return cpumap_print_to_pagebuf(true, buf, &uncore->related_cpus);
+}
+
+static DEVICE_ATTR_RO(related_cpus);
+
+static struct attribute *hisi_uncore_freq_attrs[] = {
+ &dev_attr_related_cpus.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(hisi_uncore_freq);
+
+static int hisi_uncore_devfreq_register(struct hisi_uncore_freq *uncore)
+{
+ struct devfreq_dev_profile *profile;
+ struct device *dev = uncore->dev;
+ unsigned long freq;
+ u32 data;
+ int rc;
+
+ rc = hisi_uncore_get_cur_freq(dev, &freq);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat init freq\n");
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ *profile = (struct devfreq_dev_profile) {
+ .initial_freq = freq,
+ .polling_ms = HUCF_DEFAULT_POLLING_MS,
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .target = hisi_uncore_target,
+ .get_dev_status = hisi_uncore_get_dev_status,
+ .get_cur_freq = hisi_uncore_get_cur_freq,
+ .dev_groups = hisi_uncore_freq_groups,
+ };
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_MODE, &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get operate mode\n");
+
+ if (data == HUCF_MODE_PLATFORM)
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ hisi_platform_governor.name, NULL);
+ else
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_PERFORMANCE, NULL);
+ if (IS_ERR(uncore->devfreq))
+ return dev_err_probe(dev, PTR_ERR(uncore->devfreq),
+ "Failed to add devfreq device\n");
+
+ return 0;
+}
+
+static int hisi_uncore_freq_probe(struct platform_device *pdev)
+{
+ struct hisi_uncore_freq *uncore;
+ struct device *dev = &pdev->dev;
+ u32 cap;
+ int rc;
+
+ uncore = devm_kzalloc(dev, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->dev = dev;
+ platform_set_drvdata(pdev, uncore);
+
+ rc = hisi_uncore_init_pcc_chan(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init PCC channel\n");
+
+ rc = hisi_uncore_init_opp(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init OPP\n");
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_CAP, &cap);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get capability\n");
+
+ uncore->cap = cap;
+
+ rc = hisi_uncore_add_platform_gov(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to add hisi_platform governor\n");
+
+ rc = hisi_uncore_mark_related_cpus_wrap(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to mark related cpus\n");
+
+ rc = hisi_uncore_devfreq_register(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to register devfreq\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_uncore_freq_acpi_match[] = {
+ { "HISI04F1", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_uncore_freq_acpi_match);
+
+static struct platform_driver hisi_uncore_freq_drv = {
+ .probe = hisi_uncore_freq_probe,
+ .driver = {
+ .name = "hisi_uncore_freq",
+ .acpi_match_table = hisi_uncore_freq_acpi_match,
+ },
+};
+module_platform_driver(hisi_uncore_freq_drv);
+
+MODULE_DESCRIPTION("HiSilicon uncore frequency scaling driver");
+MODULE_AUTHOR("Jie Zhan <zhanjie9@hisilicon.com>");
+MODULE_LICENSE("GPL");
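Teardown throughout this driver is delegated to devm_add_action_or_reset() (PCC channel release, dynamic OPP removal, governor refcounting), so probe needs no manual unwind ladder. The idiom, sketched with illustrative names:

static void my_cleanup(void *data)
{
	/* runs on probe failure and at unbind, in reverse registration order */
}

static int my_probe_step(struct device *dev, void *res)
{
	/*
	 * If registration itself fails, the action runs immediately
	 * (the "or_reset" part) and the error is returned.
	 */
	return devm_add_action_or_reset(dev, my_cleanup, res);
}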
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 7ad5225b0381..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
@@ -430,7 +431,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
static struct platform_driver mtk_ccifreq_platdrv = {
.probe = mtk_ccifreq_probe,
- .remove_new = mtk_ccifreq_remove,
+ .remove = mtk_ccifreq_remove,
.driver = {
.name = "mtk-ccifreq",
.of_match_table = mtk_ccifreq_machines,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index d405cee92c25..dbdce7636ca5 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -474,7 +474,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
- .remove_new = rk3399_dmcfreq_remove,
+ .remove = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index bcf654f4ff96..4bd5657558d6 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -360,7 +360,7 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
if (IS_ERR(priv->reg_mbus))
return PTR_ERR(priv->reg_mbus);
- priv->clk_bus = devm_clk_get(dev, "bus");
+ priv->clk_bus = devm_clk_get_enabled(dev, "bus");
if (IS_ERR(priv->clk_bus))
return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
"failed to get bus clock\n");
@@ -375,24 +375,15 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
"failed to get mbus clock\n");
- ret = clk_prepare_enable(priv->clk_bus);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to enable bus clock\n");
-
/* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
- ret = clk_rate_exclusive_get(priv->clk_dram);
- if (ret) {
- err = "failed to lock dram clock rate\n";
- goto err_disable_bus;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_dram);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock dram clock rate\n");
/* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
- ret = clk_rate_exclusive_get(priv->clk_mbus);
- if (ret) {
- err = "failed to lock mbus clock rate\n";
- goto err_unlock_dram;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_mbus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock mbus clock rate\n");
priv->gov_data.upthreshold = 10;
priv->gov_data.downdifferential = 5;
@@ -405,10 +396,8 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
priv->profile.max_state = max_state;
ret = devm_pm_opp_set_clkname(dev, "dram");
- if (ret) {
- err = "failed to add OPP table\n";
- goto err_unlock_mbus;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
for (i = 0; i < max_state; ++i) {
@@ -448,12 +437,6 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
err_remove_opps:
dev_pm_opp_remove_all_dynamic(dev);
-err_unlock_mbus:
- clk_rate_exclusive_put(priv->clk_mbus);
-err_unlock_dram:
- clk_rate_exclusive_put(priv->clk_dram);
-err_disable_bus:
- clk_disable_unprepare(priv->clk_bus);
return dev_err_probe(dev, ret, err);
}
@@ -472,9 +455,6 @@ static void sun8i_a33_mbus_remove(struct platform_device *pdev)
dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
dev_pm_opp_remove_all_dynamic(dev);
- clk_rate_exclusive_put(priv->clk_mbus);
- clk_rate_exclusive_put(priv->clk_dram);
- clk_disable_unprepare(priv->clk_bus);
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
@@ -495,7 +475,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
static struct platform_driver sun8i_a33_mbus_driver = {
.probe = sun8i_a33_mbus_probe,
- .remove_new = sun8i_a33_mbus_remove,
+ .remove = sun8i_a33_mbus_remove,
.driver = {
.name = "sun8i-a33-mbus",
.of_match_table = sun8i_a33_mbus_of_match,
diff --git a/drivers/dibs/Kconfig b/drivers/dibs/Kconfig
new file mode 100644
index 000000000000..5dc347b9b235
--- /dev/null
+++ b/drivers/dibs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config DIBS
+ tristate "DIBS support"
+ default n
+ help
+	  Direct Internal Buffer Sharing (DIBS) is a communication method
+	  that uses common physical (internal) memory for synchronous
+	  direct access into a remote buffer.
+
+ Select this option to provide the abstraction layer between
+ dibs devices and dibs clients like the SMC protocol.
+ The module name is dibs.
+
+config DIBS_LO
+ bool "intra-OS shortcut with dibs loopback"
+ depends on DIBS
+ default n
+ help
+	  DIBS_LO enables the creation of a software-emulated dibs device
+	  named lo, which can be used for transferring data when
+	  communication occurs within the same OS. This allows convenient
+	  testing of dibs clients, since dibs loopback is independent of
+	  architecture or hardware.
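As a rough picture of the layering this option describes, a dibs client drives a device through its dibs_dev_ops table. A hypothetical sketch against the ops defined later in this series (error handling trimmed; assumes 'dibs' and 'client' come from the dibs core):

static int demo_send(struct dibs_dev *dibs, struct dibs_client *client)
{
	struct dibs_dmb dmb = { .dmb_len = 4096 };
	char msg[] = "hello";
	int rc;

	/* allocate a buffer and obtain its token */
	rc = dibs->ops->register_dmb(dibs, &dmb, client);
	if (rc)
		return rc;

	/* write into the buffer named by its token; sf=true signals the peer */
	rc = dibs->ops->move_data(dibs, dmb.dmb_tok, 0, true, 0,
				  msg, sizeof(msg));

	dibs->ops->unregister_dmb(dibs, &dmb);
	return rc;
}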
diff --git a/drivers/dibs/Makefile b/drivers/dibs/Makefile
new file mode 100644
index 000000000000..85805490c77f
--- /dev/null
+++ b/drivers/dibs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DIBS class module
+#
+
+dibs-y += dibs_main.o
+obj-$(CONFIG_DIBS) += dibs.o
+dibs-$(CONFIG_DIBS_LO) += dibs_loopback.o
\ No newline at end of file
diff --git a/drivers/dibs/dibs_loopback.c b/drivers/dibs/dibs_loopback.c
new file mode 100644
index 000000000000..aa029e29c6b2
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for dibs loopback/loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dibs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "dibs_loopback.h"
+
+#define DIBS_LO_SUPPORT_NOCOPY 0x1
+#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char dibs_lo_dev_name[] = "lo";
+/* global loopback device */
+static struct dibs_lo_dev *lo_dev;
+
+static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
+{
+ return DIBS_LOOPBACK_FABRIC;
+}
+
+static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
+{
+ /* rgid should be the same as lgid */
+ if (!uuid_equal(rgid, &dibs->gid))
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int dibs_lo_max_dmbs(void)
+{
+ return DIBS_LO_MAX_DMBS;
+}
+
+static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
+{
+ struct dibs_lo_dmb_node *dmb_node, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ struct folio *folio;
+ unsigned long flags;
+ int sba_idx, rc;
+
+ ldev = dibs->drv_priv;
+ sba_idx = dmb->idx;
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == DIBS_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+
+ /* not critical; fail under memory pressure and fallback to TCP */
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_ZERO,
+ get_order(dmb_node->len));
+ if (!folio) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->cpu_addr = folio_address(folio);
+ dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[sba_idx] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
+ struct dibs_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ folio_put(virt_to_folio(dmb_node->cpu_addr));
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ unsigned long flags;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ if (!dmb_node)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&dmb_node->refcnt)) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ }
+ return 0;
+}
+
+static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
+{
+ return DIBS_LO_SUPPORT_NOCOPY;
+}
+
+static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!refcount_inc_not_zero(&dmb_node->refcnt))
+ /* the dmb is being unregistered, but has
+ * not been removed from the hash table.
+ */
+ return -EINVAL;
+
+ /* provide dmb information */
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ u16 s_mask;
+ u8 client_id;
+ u32 sba_idx;
+
+ ldev = dibs->drv_priv;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ sba_idx = rmb_node->sba_idx;
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!sf)
+ return 0;
+
+ spin_lock(&dibs->lock);
+ client_id = dibs->dmb_clientid_arr[sba_idx];
+ s_mask = ror16(0x1000, idx);
+ if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
+ dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
+ spin_unlock(&dibs->lock);
+
+ return 0;
+}
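
As a worked illustration of the signaling mask computed above: ror16() (the generic rotate-right helper from <linux/bitops.h>) moves bit 12 (0x1000) right by idx positions, so each DMB index selects one distinct signaling bit:

	ror16(0x1000, 0) == 0x1000	/* bit 12 */
	ror16(0x1000, 1) == 0x0800	/* bit 11 */
	ror16(0x1000, 3) == 0x0200	/* bit 9  */
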
+
+static const struct dibs_dev_ops dibs_lo_ops = {
+ .get_fabric_id = dibs_lo_get_fabric_id,
+ .query_remote_gid = dibs_lo_query_rgid,
+ .max_dmbs = dibs_lo_max_dmbs,
+ .register_dmb = dibs_lo_register_dmb,
+ .unregister_dmb = dibs_lo_unregister_dmb,
+ .move_data = dibs_lo_move_data,
+ .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
+ .attach_dmb = dibs_lo_attach_dmb,
+ .detach_dmb = dibs_lo_detach_dmb,
+};
+
+static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
+{
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+}
+
+static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
+{
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static int dibs_lo_dev_probe(void)
+{
+ struct dibs_lo_dev *ldev;
+ struct dibs_dev *dibs;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ kfree(ldev);
+ return -ENOMEM;
+ }
+
+ ldev->dibs = dibs;
+ dibs->drv_priv = ldev;
+ dibs_lo_dev_init(ldev);
+ uuid_gen(&dibs->gid);
+ dibs->ops = &dibs_lo_ops;
+
+ dibs->dev.parent = NULL;
+ dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_reg;
+ lo_dev = ldev;
+ return 0;
+
+err_reg:
+ kfree(dibs->dmb_clientid_arr);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
+ kfree(ldev);
+
+ return ret;
+}
+
+static void dibs_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ dibs_dev_del(lo_dev->dibs);
+ dibs_lo_dev_exit(lo_dev);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&lo_dev->dibs->dev);
+ kfree(lo_dev);
+ lo_dev = NULL;
+}
+
+int dibs_loopback_init(void)
+{
+ return dibs_lo_dev_probe();
+}
+
+void dibs_loopback_exit(void)
+{
+ dibs_lo_dev_remove();
+}
diff --git a/drivers/dibs/dibs_loopback.h b/drivers/dibs/dibs_loopback.h
new file mode 100644
index 000000000000..0664f6a8e662
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dibs loopback (aka loopback-ism) device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _DIBS_LOOPBACK_H
+#define _DIBS_LOOPBACK_H
+
+#include <linux/dibs.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#if IS_ENABLED(CONFIG_DIBS_LO)
+#define DIBS_LO_DMBS_HASH_BITS 12
+#define DIBS_LO_MAX_DMBS 5000
+
+struct dibs_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct dibs_lo_dev {
+ struct dibs_dev *dibs;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, DIBS_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, DIBS_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int dibs_loopback_init(void);
+void dibs_loopback_exit(void);
+#else
+static inline int dibs_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void dibs_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _DIBS_LOOPBACK_H */
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
new file mode 100644
index 000000000000..0374f8350ff7
--- /dev/null
+++ b/drivers/dibs/dibs_main.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIBS - Direct Internal Buffer Sharing
+ *
+ * Implementation of the DIBS class module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#define KMSG_COMPONENT "dibs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dibs.h>
+
+#include "dibs_loopback.h"
+
+MODULE_DESCRIPTION("Direct Internal Buffer Sharing class");
+MODULE_LICENSE("GPL");
+
+static struct class *dibs_class;
+
+/* use an array rather than a list for fast mapping: */
+static struct dibs_client *clients[MAX_DIBS_CLIENTS];
+static u8 max_client;
+static DEFINE_MUTEX(clients_lock);
+struct dibs_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects dibs device list */
+};
+
+static struct dibs_dev_list dibs_dev_list = {
+ .list = LIST_HEAD_INIT(dibs_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(dibs_dev_list.mutex),
+};
+
+static void dibs_setup_forwarding(struct dibs_client *client,
+ struct dibs_dev *dibs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->subs[client->id] = client;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+}
+
+int dibs_register_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clients_lock);
+
+ if (i < MAX_DIBS_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ dibs->priv[i] = NULL;
+ client->ops->add_dev(dibs);
+ dibs_setup_forwarding(client, dibs);
+ }
+ }
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_register_client);
+
+int dibs_unregister_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ unsigned long flags;
+ int max_dmbs;
+ int rc = 0;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ max_dmbs = dibs->ops->max_dmbs();
+ for (int i = 0; i < max_dmbs; ++i) {
+ if (dibs->dmb_clientid_arr[i] == client->id) {
+ WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+ __func__, client->name);
+ rc = -EBUSY;
+ goto err_reg_dmb;
+ }
+ }
+ /* Stop forwarding IRQs and events */
+ dibs->subs[client->id] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ clients[client->id]->ops->del_dev(dibs);
+ dibs->priv[client->id] = NULL;
+ }
+
+ mutex_lock(&clients_lock);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ mutex_unlock(&clients_lock);
+
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+
+err_reg_dmb:
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_unregister_client);
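
For orientation, a minimal consumer of this registration API could look like the sketch below. It relies only on behaviour visible in this patch (dibs_register_client(), dibs_unregister_client(), and the add_dev, del_dev and handle_irq hooks invoked above); the precise layout of struct dibs_client and its ops table lives in <linux/dibs.h>, which is outside this hunk, so the field names here are assumptions.

	/* Hypothetical minimal dibs client; struct layout assumed from usage above. */
	static void demo_add_dev(struct dibs_dev *dibs)
	{
		/* called once for each existing and future dibs device */
	}

	static void demo_del_dev(struct dibs_dev *dibs)
	{
		/* device going away; release per-device state */
	}

	static void demo_handle_irq(struct dibs_dev *dibs, unsigned int idx, u16 mask)
	{
		/* forwarded by dibs_lo_move_data() when sf is set */
	}

	static const struct dibs_client_ops demo_ops = {
		.add_dev    = demo_add_dev,
		.del_dev    = demo_del_dev,
		.handle_irq = demo_handle_irq,
	};

	static struct dibs_client demo_client = {
		.name = "demo",
		.ops  = &demo_ops,
	};

	static int __init demo_init(void)
	{
		return dibs_register_client(&demo_client);
	}

	static void __exit demo_exit(void)
	{
		/* fails with -EBUSY while the client still owns DMBs, as enforced above */
		dibs_unregister_client(&demo_client);
	}
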
+
+static void dibs_dev_release(struct device *dev)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ kfree(dibs);
+}
+
+struct dibs_dev *dibs_dev_alloc(void)
+{
+ struct dibs_dev *dibs;
+
+ dibs = kzalloc(sizeof(*dibs), GFP_KERNEL);
+ if (!dibs)
+ return dibs;
+ dibs->dev.release = dibs_dev_release;
+ dibs->dev.class = dibs_class;
+ device_initialize(&dibs->dev);
+
+ return dibs;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_alloc);
+
+static ssize_t gid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ return sysfs_emit(buf, "%pUb\n", &dibs->gid);
+}
+static DEVICE_ATTR_RO(gid);
+
+static ssize_t fabric_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+ u16 fabric_id;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+ fabric_id = dibs->ops->get_fabric_id(dibs);
+
+ return sysfs_emit(buf, "0x%04x\n", fabric_id);
+}
+static DEVICE_ATTR_RO(fabric_id);
+
+static struct attribute *dibs_dev_attrs[] = {
+ &dev_attr_gid.attr,
+ &dev_attr_fabric_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dibs_dev_attr_group = {
+ .attrs = dibs_dev_attrs,
+};
+
+int dibs_dev_add(struct dibs_dev *dibs)
+{
+ int max_dmbs;
+ int i, ret;
+
+ max_dmbs = dibs->ops->max_dmbs();
+ spin_lock_init(&dibs->lock);
+ dibs->dmb_clientid_arr = kzalloc(max_dmbs, GFP_KERNEL);
+ if (!dibs->dmb_clientid_arr)
+ return -ENOMEM;
+ memset(dibs->dmb_clientid_arr, NO_DIBS_CLIENT, max_dmbs);
+
+ ret = device_add(&dibs->dev);
+ if (ret)
+ goto free_client_arr;
+
+ ret = sysfs_create_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+ if (ret) {
+ dev_err(&dibs->dev, "sysfs_create_group failed for dibs_dev\n");
+ goto err_device_del;
+ }
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->ops->add_dev(dibs);
+ dibs_setup_forwarding(clients[i], dibs);
+ }
+ }
+ mutex_unlock(&clients_lock);
+ list_add(&dibs->list, &dibs_dev_list.list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return 0;
+
+err_device_del:
+ device_del(&dibs->dev);
+free_client_arr:
+ kfree(dibs->dmb_clientid_arr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_add);
+
+void dibs_dev_del(struct dibs_dev *dibs)
+{
+ unsigned long flags;
+ int i;
+
+ sysfs_remove_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i)
+ dibs->subs[i] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->ops->del_dev(dibs);
+ }
+ mutex_unlock(&clients_lock);
+ list_del_init(&dibs->list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ device_del(&dibs->dev);
+ kfree(dibs->dmb_clientid_arr);
+}
+EXPORT_SYMBOL_GPL(dibs_dev_del);
+
+static int __init dibs_init(void)
+{
+ int rc;
+
+ memset(clients, 0, sizeof(clients));
+ max_client = 0;
+
+ dibs_class = class_create("dibs");
+ if (IS_ERR(dibs_class))
+ return PTR_ERR(dibs_class);
+
+ rc = dibs_loopback_init();
+ if (rc)
+ pr_err("%s fails with %d\n", __func__, rc);
+
+ return rc;
+}
+
+static void __exit dibs_exit(void)
+{
+ dibs_loopback_exit();
+ class_destroy(dibs_class);
+}
+
+module_init(dibs_init);
+module_exit(dibs_exit);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8892bc701a66..2bcf9ceca997 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -19,7 +19,9 @@
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
@@ -35,35 +37,91 @@
static inline int is_dma_buf_file(struct file *);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static DEFINE_MUTEX(debugfs_list_mutex);
-static LIST_HEAD(debugfs_list);
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
- mutex_lock(&debugfs_list_mutex);
- list_add(&dmabuf->list_node, &debugfs_list);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
if (!dmabuf)
return;
- mutex_lock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
list_del(&dmabuf->list_node);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
}
-#else
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
{
+ struct dma_buf *ret = NULL, *dmabuf;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-static void __dma_buf_debugfs_list_del(struct file *file)
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
if (!is_dma_buf_file(file))
return -EINVAL;
- __dma_buf_debugfs_list_del(file->private_data);
+ __dma_buf_list_del(file->private_data);
return 0;
}
@@ -176,8 +234,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
dmabuf = file->private_data;
/* only support discovering the end of the buffer,
- but also allow SEEK_SET to maintain the idiomatic
- SEEK_END(0), SEEK_CUR(0) pattern */
+ * but also allow SEEK_SET to maintain the idiomatic
+ * SEEK_END(0), SEEK_CUR(0) pattern.
+ */
if (whence == SEEK_END)
base = dmabuf->size;
else if (whence == SEEK_SET)
@@ -558,7 +617,7 @@ static struct file *dma_buf_getfile(size_t size, int flags)
* Override ->i_ino with the unique and dmabuffs specific
* value.
*/
- inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ inode->i_ino = atomic64_inc_return(&dmabuf_inode);
flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
@@ -635,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -688,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- __dma_buf_debugfs_list_add(dmabuf);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
@@ -702,7 +757,7 @@ err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
/**
* dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -726,7 +781,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
/**
* dma_buf_get - returns the struct dma_buf related to an fd
@@ -752,7 +807,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");
/**
* dma_buf_put - decreases refcount of the buffer
@@ -771,7 +826,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
static void mangle_sg_table(struct sg_table *sg_table)
{
@@ -781,36 +836,27 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
- * before passing the sgt back to the exporter. */
+ * not corrupt the sgt. The mixing is undone on unmap
+ * before passing the sgt back to the exporter.
+ */
for_each_sgtable_sg(sg_table, sg, i)
sg->page_link ^= ~0xffUL;
#endif
}
-static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
-
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
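
The helper condenses the previous special cases into one predicate. Assuming the exporter implements pin(), the resulting behaviour is:

	importer_ops set?   CONFIG_DMABUF_MOVE_NOTIFY   pin on map?
	no (static)         either                      yes
	yes (dynamic)       disabled                    yes
	yes (dynamic)       enabled                     no
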
/**
@@ -933,50 +979,13 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -991,17 +1000,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
-
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
@@ -1018,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1035,7 +1025,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
/**
* dma_buf_pin - Lock down the DMA-buf
@@ -1056,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1065,7 +1055,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
/**
* dma_buf_unpin - Unpin a DMA-buf
@@ -1079,14 +1069,14 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -1113,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1122,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters making buffers inaccessible even when
+ * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+ WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+ * Importers with static attachments don't wait for fences.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1173,8 +1159,18 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
/**
* dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
@@ -1202,7 +1198,7 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1225,16 +1221,13 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
/**
* dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might
@@ -1259,7 +1252,7 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
dma_buf_unmap_attachment(attach, sg_table, direction);
dma_resv_unlock(attach->dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1279,7 +1272,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
/**
* DOC: cpu access
@@ -1296,10 +1289,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* vmap interface is introduced. Note that on very old 32-bit architectures
* vmalloc space might be limited and result in vmap calls failing.
*
- * Interfaces::
+ * Interfaces:
+ *
+ * .. code-block:: c
*
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
* it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
@@ -1356,10 +1351,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* enough, since adding interfaces to intercept pagefaults and allow pte
* shootdowns would increase the complexity quite a bit.
*
- * Interface::
+ * Interface:
+ *
+ * .. code-block:: c
*
- * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
- * unsigned long);
+ * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
* set up a mapping in userspace, calling do_mmap with &dma_buf.file will
@@ -1424,7 +1420,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1452,7 +1448,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
/**
@@ -1494,7 +1490,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1547,7 +1543,7 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
/**
* dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
@@ -1574,7 +1570,7 @@ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1598,7 +1594,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
iosys_map_clear(&dmabuf->vmap_ptr);
}
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
/**
* dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
@@ -1614,7 +1610,7 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
dma_buf_vunmap(dmabuf, map);
dma_resv_unlock(dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
@@ -1625,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&debugfs_list_mutex);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
@@ -1634,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &debugfs_list, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1671,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
@@ -1694,7 +1690,7 @@ static int dma_buf_init_debugfs(void)
dma_buf_debugfs_dir = d;
- d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
NULL, &dma_buf_debug_fops);
if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 8a08ffde31e7..6657d4b30af9 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
+ int num_pending;
+ unsigned int i;
- if (atomic_read(&array->num_pending) > 0)
+ /*
+ * We need to read num_pending before checking the enable_signal bit
+ * to avoid racing with the enable_signaling() implementation, which
+ * might decrement the counter, and cause a partial check.
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * dma_fence_array_enable_signaling()
+ *
+ * The !--num_pending check is here to account for the any_signaled case
+ * if we race with enable_signaling(), that means the !num_pending check
+ * in the is_signalling_enabled branch might be outdated (num_pending
+ * might have been decremented), but that's fine. The user will get the
+ * right value when testing again later.
+ */
+ num_pending = atomic_read_acquire(&array->num_pending);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
+ if (num_pending <= 0)
+ goto signal;
return false;
+ }
+
+ for (i = 0; i < array->num_fences; ++i) {
+ if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+ goto signal;
+ }
+ return false;
+signal:
dma_fence_array_clear_pending_error(array);
return true;
}
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
}
const struct dma_fence_ops dma_fence_chain_ops = {
- .use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
.get_timeline_name = dma_fence_chain_get_timeline_name,
.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
chain->prev_seqno = 0;
/* Try to reuse the context of the previous chain node. */
- if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
context = prev->context;
chain->prev_seqno = prev->seqno;
} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno);
}
- dma_fence_init(&chain->base, &dma_fence_chain_ops,
- &chain->lock, context, seqno);
+ dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+ context, seqno);
/*
* Chaining dma_fence_chain container together is only allowed through
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 628af51c81af..a495d8a6c2e3 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -12,6 +12,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
+#include <linux/sort.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
@@ -59,22 +60,77 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+static int fence_cmp(const void *_a, const void *_b)
+{
+ struct dma_fence *a = *(struct dma_fence **)_a;
+ struct dma_fence *b = *(struct dma_fence **)_b;
+
+ if (a->context < b->context)
+ return -1;
+ else if (a->context > b->context)
+ return 1;
+
+ if (dma_fence_is_later(b, a))
+ return 1;
+ else if (dma_fence_is_later(a, b))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers
+ * @fences: Array of dma_fence pointers to be deduplicated
+ * @num_fences: Number of entries in the @fences array
+ *
+ * Sorts the input array by context, then removes duplicate
+ * fences with the same context, keeping only the most recent one.
+ *
+ * The array is modified in-place and unreferenced duplicate fences are released
+ * via dma_fence_put(). The function returns the new number of fences after
+ * deduplication.
+ *
+ * Return: Number of unique fences remaining in the array.
+ */
+int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)
+{
+ int i, j;
+
+ sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);
+
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < num_fences; i++) {
+ if (fences[i]->context == fences[j]->context)
+ dma_fence_put(fences[i]);
+ else
+ fences[++j] = fences[i];
+ }
+
+ return ++j;
+}
+EXPORT_SYMBOL_GPL(dma_fence_dedup_array);
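
A small usage sketch for the helper above (fences a, b and c are hypothetical; every array entry must carry its own reference, since the helper puts the duplicates it discards):

	struct dma_fence *fences[] = {
		dma_fence_get(a),	/* context 1, seqno 1 */
		dma_fence_get(b),	/* context 1, seqno 2 */
		dma_fence_get(c),	/* context 2, seqno 1 */
	};
	int n = dma_fence_dedup_array(fences, 3);
	/* n == 2: fences[0] is b (latest fence of context 1), fences[1] is c;
	 * the reference on a has been dropped by the helper.
	 */
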
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *iter)
{
+ struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
- struct dma_fence *tmp, **array;
ktime_t timestamp;
- unsigned int i;
- size_t count;
+ int i, count;
count = 0;
timestamp = ns_to_ktime(0);
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
+ dma_fence_put(unsignaled);
+ unsignaled = dma_fence_get(tmp);
++count;
} else {
ktime_t t = dma_fence_timestamp(tmp);
@@ -88,86 +144,58 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
/*
* If we couldn't find a pending fence just return a private signaled
* fence with the timestamp of the last signaled one.
+ *
+ * Or if there was a single unsignaled fence left we can return it
+ * directly and early since that is a major path on many workloads.
*/
if (count == 0)
return dma_fence_allocate_private_stub(timestamp);
+ else if (count == 1)
+ return unsignaled;
+
+ dma_fence_put(unsignaled);
array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
return NULL;
- /*
- * This trashes the input fence array and uses it as position for the
- * following merge loop. This works because the dma_fence_merge()
- * wrapper macro is creating this temporary array on the stack together
- * with the iterators.
- */
- for (i = 0; i < num_fences; ++i)
- fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
-
count = 0;
- do {
- unsigned int sel;
-
-restart:
- tmp = NULL;
- for (i = 0; i < num_fences; ++i) {
- struct dma_fence *next;
-
- while (fences[i] && dma_fence_is_signaled(fences[i]))
- fences[i] = dma_fence_unwrap_next(&iter[i]);
-
- next = fences[i];
- if (!next)
- continue;
-
- /*
- * We can't guarantee that inpute fences are ordered by
- * context, but it is still quite likely when this
- * function is used multiple times. So attempt to order
- * the fences by context as we pass over them and merge
- * fences with the same context.
- */
- if (!tmp || tmp->context > next->context) {
- tmp = next;
- sel = i;
-
- } else if (tmp->context < next->context) {
- continue;
-
- } else if (dma_fence_is_later(tmp, next)) {
- fences[i] = dma_fence_unwrap_next(&iter[i]);
- goto restart;
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ if (!dma_fence_is_signaled(tmp)) {
+ array[count++] = dma_fence_get(tmp);
} else {
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- goto restart;
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
}
}
+ }
- if (tmp) {
- array[count++] = dma_fence_get(tmp);
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- }
- } while (tmp);
+ if (count == 0 || count == 1)
+ goto return_fastpath;
- if (count == 0) {
- tmp = dma_fence_allocate_private_stub(ktime_get());
- goto return_tmp;
- }
+ count = dma_fence_dedup_array(array, count);
- if (count == 1) {
- tmp = array[0];
- goto return_tmp;
+ if (count > 1) {
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ for (i = 0; i < count; i++)
+ dma_fence_put(array[i]);
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
}
- result = dma_fence_array_create(count, array,
- dma_fence_context_alloc(1),
- 1, false);
- if (!result) {
- tmp = NULL;
- goto return_tmp;
- }
- return &result->base;
+return_fastpath:
+ if (count == 0)
+ tmp = dma_fence_allocate_private_stub(timestamp);
+ else
+ tmp = array[0];
return_tmp:
kfree(array);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0393a9bba3a8..3f78c56b58dc 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -309,8 +309,8 @@ bool dma_fence_begin_signalling(void)
if (in_atomic())
return true;
- /* ... and non-recursive readlock */
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
+ /* ... and non-recursive successful read_trylock */
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);
return false;
}
@@ -341,7 +341,7 @@ void __dma_fence_might_wait(void)
lock_map_acquire(&dma_fence_lockdep_map);
lock_map_release(&dma_fence_lockdep_map);
if (tmp)
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif
@@ -412,7 +412,7 @@ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
unsigned long flags;
int ret;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
spin_lock_irqsave(fence->lock, flags);
@@ -464,7 +464,7 @@ int dma_fence_signal(struct dma_fence *fence)
int ret;
bool tmp;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
tmp = dma_fence_begin_signalling();
@@ -511,12 +511,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
dma_fence_enable_sw_signaling(fence);
- trace_dma_fence_wait_start(fence);
+ if (trace_dma_fence_wait_start_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_start(fence);
+ rcu_read_unlock();
+ }
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
else
ret = dma_fence_default_wait(fence, intr, timeout);
- trace_dma_fence_wait_end(fence);
+ if (trace_dma_fence_wait_end_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_end(fence);
+ rcu_read_unlock();
+ }
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
@@ -533,16 +541,23 @@ void dma_fence_release(struct kref *kref)
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
+ rcu_read_lock();
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
- "Fence %s:%s:%llx:%llx released with pending signals!\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->context, fence->seqno)) {
+ if (!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ const char __rcu *timeline;
+ const char __rcu *driver;
unsigned long flags;
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ WARN(1,
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ rcu_dereference(driver), rcu_dereference(timeline),
+ fence->context, fence->seqno);
+
/*
* Failed to signal before release, likely a refcounting issue.
*
@@ -556,6 +571,8 @@ void dma_fence_release(struct kref *kref)
spin_unlock_irqrestore(fence->lock, flags);
}
+ rcu_read_unlock();
+
if (fence->ops->release)
fence->ops->release(fence);
else
@@ -982,13 +999,43 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ timeline = dma_fence_timeline_name(fence);
+ driver = dma_fence_driver_name(fence);
+
seq_printf(seq, "%s %s seq %llu %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence), fence->seqno,
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
+ fence->seqno,
dma_fence_is_signaled(fence) ? "" : "un");
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+ BUG_ON(!lock);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+ kref_init(&fence->refcount);
+ fence->ops = ops;
+ INIT_LIST_HEAD(&fence->cb_list);
+ fence->lock = lock;
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = flags;
+ fence->error = 0;
+
+ trace_dma_fence_init(fence);
+}
+
/**
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
@@ -1008,18 +1055,94 @@ void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno)
{
- BUG_ON(!lock);
- BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+ __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
+}
+EXPORT_SYMBOL(dma_fence_init);
- kref_init(&fence->refcount);
- fence->ops = ops;
- INIT_LIST_HEAD(&fence->cb_list);
- fence->lock = lock;
- fence->context = context;
- fence->seqno = seqno;
- fence->flags = 0UL;
- fence->error = 0;
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno)
+{
+ __dma_fence_init(fence, ops, lock, context, seqno,
+ BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
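
For contrast with plain dma_fence_init(), a driver-side sketch (the embedding struct and my_fence_ops are hypothetical; this mirrors the dma_fence_chain_init() conversion above):

	struct my_fence {
		struct dma_fence base;
		spinlock_t lock;
	};

	struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	spin_lock_init(&f->lock);
	dma_fence_init64(&f->base, &my_fence_ops, &f->lock,
			 dma_fence_context_alloc(1), 1);
	/* DMA_FENCE_FLAG_SEQNO64_BIT is now set, so seqno comparisons use the
	 * full 64-bit value instead of the 32-bit wraparound heuristic.
	 */
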
- trace_dma_fence_init(fence);
+/**
+ * dma_fence_driver_name - Access the driver name
+ * @fence: the fence to query
+ *
+ * Returns a driver name backing the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the driver name is only valid inside an RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_driver_name(fence);
+ else
+ return "detached-driver";
}
-EXPORT_SYMBOL(dma_fence_init);
+EXPORT_SYMBOL(dma_fence_driver_name);
+
+/**
+ * dma_fence_timeline_name - Access the timeline name
+ * @fence: the fence to query
+ *
+ * Returns a timeline name provided by the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the timeline name is only valid inside an RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_timeline_name(fence);
+ else
+ return "signaled-timeline";
+}
+EXPORT_SYMBOL(dma_fence_timeline_name);
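
Putting the contract above into practice, both the query and every use of the returned pointer must sit inside a single RCU read-side section, the same pattern dma_fence_describe() adopts in this patch:

	const char __rcu *driver, *timeline;

	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	pr_info("fence %s:%s\n",
		rcu_dereference(driver), rcu_dereference(timeline));
	rcu_read_unlock();
	/* neither pointer may be used after rcu_read_unlock() */
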
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 3cbe87d4a464..8ab49924f8b7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
{
return heap->priv;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
/**
* dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
/**
* dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ err0:
kfree(heap);
return err_ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5f8d010516f0..bea3e9858aca 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
- /* pointer update must be visible before we extend the num_fences */
- smp_store_mb(fobj->num_fences, count);
+ /* fence update must be visible before we extend the num_fences */
+ smp_wmb();
+ fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);
@@ -684,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- ret = dma_fence_wait_timeout(fence, intr, ret);
- if (ret <= 0) {
- dma_resv_iter_end(&cursor);
- return ret;
- }
+ ret = dma_fence_wait_timeout(fence, intr, timeout);
+ if (ret <= 0)
+ break;
+
+ /* Even for zero timeout the return value is 1 */
+ if (timeout)
+ timeout = ret;
}
dma_resv_iter_end(&cursor);
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..bb369b38b001 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,13 @@ config DMABUF_HEAPS_CMA
Choose this option to enable dma-buf CMA heap. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
+
+config DMABUF_HEAPS_CMA_LEGACY
+ bool "Legacy DMA-BUF CMA Heap"
+ default y
+ depends on DMABUF_HEAPS_CMA
+ help
+ Add a duplicate CMA-backed dma-buf heap with legacy naming derived
+ from the CMA area's devicetree node, or "reserved" if the area is not
+ defined in the devicetree. This uses the same underlying allocator as
+ CONFIG_DMABUF_HEAPS_CMA.
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 93be88b805fe..0df007111975 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -9,6 +9,9 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
+
+#define pr_fmt(fmt) "cma_heap: " fmt
+
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
@@ -22,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#define DEFAULT_CMA_NAME "default_cma_region"
struct cma_heap {
struct dma_heap *heap;
@@ -309,13 +313,13 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
- void *vaddr = kmap_atomic(page);
+ void *vaddr = kmap_local_page(page);
memset(vaddr, 0, PAGE_SIZE);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
/*
* Avoid wasting time zeroing memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL.
*/
if (fatal_signal_pending(current))
goto free_cma;
@@ -366,17 +370,17 @@ static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
-static int __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, const char *name)
{
- struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
+ struct cma_heap *cma_heap;
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
if (!cma_heap)
return -ENOMEM;
cma_heap->cma = cma;
- exp_info.name = cma_get_name(cma);
+ exp_info.name = name;
exp_info.ops = &cma_heap_ops;
exp_info.priv = cma_heap;
@@ -391,15 +395,33 @@ static int __add_cma_heap(struct cma *cma, void *data)
return 0;
}
-static int add_default_cma_heap(void)
+static int __init add_default_cma_heap(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
- int ret = 0;
+ const char *legacy_cma_name;
+ int ret;
- if (default_cma)
- ret = __add_cma_heap(default_cma, NULL);
+ if (!default_cma)
+ return 0;
- return ret;
+ ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
+ legacy_cma_name = cma_get_name(default_cma);
+ if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
+ pr_warn("legacy name and default name are the same, skipping legacy heap\n");
+ return 0;
+ }
+
+ ret = __add_cma_heap(default_cma, legacy_cma_name);
+ if (ret)
+ pr_warn("failed to add legacy heap: %pe\n",
+ ERR_PTR(ret));
+ }
+
+ return 0;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
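
With CONFIG_DMABUF_HEAPS_CMA_LEGACY=y the default CMA area is therefore exposed twice under /dev/dma_heap. The legacy name shown below is illustrative only; it comes from the devicetree node, or is "reserved" when the area is not declared there:

	/dev/dma_heap/default_cma_region	<- new, stable name
	/dev/dma_heap/linux,cma			<- legacy, firmware-derived name (example)
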
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index d78cdb9d01e5..bbe7881f1360 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -35,7 +33,7 @@ struct system_heap_buffer {
struct dma_heap_attachment {
struct device *dev;
- struct sg_table *table;
+ struct sg_table table;
struct list_head list;
bool mapped;
};
@@ -54,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
- struct sg_table *new_table;
- int ret, i;
struct scatterlist *sg, *new_sg;
+ int ret, i;
- new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
- if (ret) {
- kfree(new_table);
- return ERR_PTR(-ENOMEM);
- }
+ ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+ if (ret)
+ return ret;
- new_sg = new_table->sgl;
- for_each_sgtable_sg(table, sg, i) {
+ new_sg = to->sgl;
+ for_each_sgtable_sg(from, sg, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
new_sg = sg_next(new_sg);
}
- return new_table;
+ return 0;
}
static int system_heap_attach(struct dma_buf *dmabuf,
@@ -84,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf,
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
- struct sg_table *table;
+ int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
- table = dup_sg_table(&buffer->sg_table);
- if (IS_ERR(table)) {
+ ret = dup_sg_table(&buffer->sg_table, &a->table);
+ if (ret) {
kfree(a);
- return -ENOMEM;
+ return ret;
}
- a->table = table;
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
@@ -120,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf,
list_del(&a->list);
mutex_unlock(&buffer->lock);
- sg_free_table(a->table);
- kfree(a->table);
+ sg_free_table(&a->table);
kfree(a);
}
@@ -129,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
- struct sg_table *table = a->table;
+ struct sg_table *table = &a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -164,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
@@ -185,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
@@ -421,9 +410,10 @@ static const struct dma_heap_ops system_heap_ops = {
.allocate = system_heap_allocate,
};
-static int system_heap_create(void)
+static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index f0cee984b6c7..a3be888ae2e8 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -28,7 +28,7 @@ static const struct dma_fence_ops mock_ops = {
.get_timeline_name = mock_name,
};
-static struct dma_fence *mock_fence(void)
+static struct dma_fence *__mock_fence(u64 context, u64 seqno)
{
struct mock_fence *f;
@@ -37,12 +37,16 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &mock_ops, &f->lock,
- dma_fence_context_alloc(1), 1);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, context, seqno);
return &f->base;
}
+static struct dma_fence *mock_fence(void)
+{
+ return __mock_fence(dma_fence_context_alloc(1), 1);
+}
+
static struct dma_fence *mock_array(unsigned int num_fences, ...)
{
struct dma_fence_array *array;
@@ -304,6 +308,177 @@ error_put_f1:
return err;
}
+static int unwrap_merge_duplicate(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = dma_fence_unwrap_merge(f1, f1);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f2) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[1], 1);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 2);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = dma_fence_unwrap_merge(f1, f2, f3);
+ if (!f4) {
+ err = -ENOMEM;
+ goto error_put_f3;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f4) {
+ if (fence == f3 && f2) {
+ dma_fence_put(f3);
+ f3 = NULL;
+ } else if (fence == f2 && !f3) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f2 || f3) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_order(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *a1, *a2, *c1, *c2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ a1 = mock_array(2, f1, f2);
+ if (!a1)
+ return -ENOMEM;
+
+ c1 = mock_chain(NULL, dma_fence_get(f1));
+ if (!c1)
+ goto error_put_a1;
+
+ c2 = mock_chain(c1, dma_fence_get(f2));
+ if (!c2)
+ goto error_put_a1;
+
+ /*
+ * The fences in the chain are the same as in a1 but in opposite order;
+ * the dma_fence_unwrap_merge() function should be able to handle that.
+ */
+ a2 = dma_fence_unwrap_merge(a1, c2);
+
+ dma_fence_unwrap_for_each(fence, &iter, a2) {
+ if (fence == f1) {
+ f1 = NULL;
+ if (!f2)
+ pr_err("Unexpected order!\n");
+ } else if (fence == f2) {
+ f2 = NULL;
+ if (f1)
+ pr_err("Unexpected order!\n");
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(a2);
+ return err;
+
+error_put_a1:
+ dma_fence_put(a1);
+ return -ENOMEM;
+}
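The tests above all rely on dma_fence_unwrap_for_each() walking chains and arrays and visiting only the leaf fences. A minimal usage sketch of the iterator itself (assuming a valid, referenced composite fence):

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Count the leaf fences behind a possibly wrapped (chain/array) fence. */
static unsigned int count_leaf_fences(struct dma_fence *composite)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *fence;
	unsigned int n = 0;

	dma_fence_unwrap_for_each(fence, &iter, composite)
		n++;	/* chains and arrays are traversed transparently */

	return n;
}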
+
static int unwrap_merge_complex(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
@@ -327,7 +502,7 @@ static int unwrap_merge_complex(void *arg)
goto error_put_f2;
/* The resulting array has the fences in reverse */
- f4 = dma_fence_unwrap_merge(f2, f1);
+ f4 = mock_array(2, dma_fence_get(f2), dma_fence_get(f1));
if (!f4)
goto error_put_f3;
@@ -367,6 +542,87 @@ error_put_f1:
return err;
}
+static int unwrap_merge_complex_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5, *f6, *f7;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[0], 2);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 1);
+ if (!f2)
+ goto error_put_f1;
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3)
+ goto error_put_f2;
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = __mock_fence(ctx[1], 2);
+ if (!f4)
+ goto error_put_f3;
+
+ dma_fence_enable_sw_signaling(f4);
+
+ f5 = mock_array(2, dma_fence_get(f1), dma_fence_get(f2));
+ if (!f5)
+ goto error_put_f4;
+
+ f6 = mock_array(2, dma_fence_get(f3), dma_fence_get(f4));
+ if (!f6)
+ goto error_put_f5;
+
+ f7 = dma_fence_unwrap_merge(f5, f6);
+ if (!f7)
+ goto error_put_f6;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f7) {
+ if (fence == f1 && f4) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f4 && !f1) {
+ dma_fence_put(f4);
+ f4 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f4) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f7);
+error_put_f6:
+ dma_fence_put(f6);
+error_put_f5:
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
int dma_fence_unwrap(void)
{
static const struct subtest tests[] = {
@@ -375,7 +631,11 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_duplicate),
+ SUBTEST(unwrap_merge_seqno),
+ SUBTEST(unwrap_merge_order),
SUBTEST(unwrap_merge_complex),
+ SUBTEST(unwrap_merge_complex_seqno),
};
return subtests(tests, NULL);
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index cf2ce3744ce6..27a36045410b 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -375,7 +375,7 @@ struct wait_timer {
static void wait_timer(struct timer_list *timer)
{
- struct wait_timer *wt = from_timer(wt, timer, timer);
+ struct wait_timer *wt = timer_container_of(wt, timer, timer);
dma_fence_signal(wt->f);
}
@@ -412,8 +412,8 @@ static int test_wait_timeout(void *arg)
err = 0;
err_free:
- del_timer_sync(&wt.timer);
- destroy_timer_on_stack(&wt.timer);
+ timer_delete_sync(&wt.timer);
+ timer_destroy_on_stack(&wt.timer);
dma_fence_signal(wt.f);
dma_fence_put(wt.f);
return err;
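For context, these hunks are part of the tree-wide timer API renames; the new spelling of the idiom, as a standalone sketch:

#include <linux/timer.h>

struct example_ctx {
	struct timer_list timer;
};

static void example_timer_fn(struct timer_list *t)
{
	/* timer_container_of() is the successor of from_timer() */
	struct example_ctx *ctx = timer_container_of(ctx, t, timer);

	(void)ctx;	/* ... handle expiry ... */
}

static void example_teardown(struct example_ctx *ctx)
{
	/* timer_delete_sync() is the successor of del_timer_sync() */
	timer_delete_sync(&ctx->timer);
}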
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index c353029789cf..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,26 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
-}
-
-static bool timeline_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
+ return !__dma_fence_is_later(fence, fence->seqno, parent->value);
}
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
@@ -211,11 +192,8 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
- .enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
@@ -444,15 +422,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EINVAL;
pt = dma_fence_to_sync_pt(fence);
- if (!pt)
- return -EINVAL;
+ if (!pt) {
+ ret = -EINVAL;
+ goto put_fence;
+ }
spin_lock_irqsave(fence->lock, flags);
- if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
- data.deadline_ns = ktime_to_ns(pt->deadline);
- } else {
+ if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
+ goto unlock;
}
+ data.deadline_ns = ktime_to_ns(pt->deadline);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_put(fence);
@@ -464,6 +444,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EFAULT;
return 0;
+
+unlock:
+ spin_unlock_irqrestore(fence->lock, flags);
+put_fence:
+ dma_fence_put(fence);
+
+ return ret;
}
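The rework above converts the early returns into the standard kernel unwind shape: acquire in order, release in reverse through labels, so every exit path drops exactly what it took. A generic sketch of that shape (helpers hypothetical):

static int unwind_example(void)
{
	int ret;

	ret = acquire_a();	/* hypothetical */
	if (ret)
		return ret;

	ret = acquire_b();	/* hypothetical */
	if (ret)
		goto put_a;

	ret = do_work();	/* hypothetical */
	if (ret)
		goto put_b;

	release_b();
	release_a();
	return 0;

put_b:
	release_b();
put_a:
	release_a();
	return ret;
}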
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..67cd69551e42 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -12,8 +12,6 @@ static struct dentry *dbgfs;
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
static const char *sync_status_str(int status)
{
if (status < 0)
@@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
@@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_unlock(&obj->lock);
}
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- char buf[128];
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file,
- sync_file_get_name(sync_file, buf, sizeof(buf)),
- sync_status_str(dma_fence_get_status(sync_file->fence)));
-
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- for (i = 0; i < array->num_fences; ++i)
- sync_print_fence(s, array->fences[i], true);
- } else {
- sync_print_fence(s, sync_file->fence, true);
- }
-}
-
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irq(&sync_file_list_lock);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_putc(s, '\n');
- }
- spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index a1bdd62efccd..02af347293d0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops;
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..747e377fb954 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
snprintf(buf, len, "%s-%s%llu-%lld",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
fence->context,
fence->seqno);
+ rcu_read_unlock();
}
return buf;
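dma_fence_driver_name() and dma_fence_timeline_name() return strings whose lifetime is only guaranteed under RCU, so the lookups and every use of the dereferenced pointers must stay inside a single RCU read-side critical section, as above. Condensed, with use() standing in for the snprintf()/strscpy() consumers, the pattern is:

	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	/* rcu_dereference() results are only valid until rcu_read_unlock() */
	use(rcu_dereference(driver), rcu_dereference(timeline));
	rcu_read_unlock();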
@@ -262,9 +268,17 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ strscpy(info->obj_name, rcu_dereference(timeline),
sizeof(info->obj_name));
- strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, rcu_dereference(driver),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
@@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence,
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
+ rcu_read_unlock();
+
return info->status;
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 047c3cd2ceff..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
struct udmabuf {
pgoff_t pagecount;
struct folio **folios;
+
+ /**
+ * Unlike folios, pinned_folios is only used for unpinning.
+ * Therefore, nr_pinned is not necessarily equal to pagecount:
+ * pinned_folios records only the folios that were pinned during
+ * udmabuf_create. Note that, since a folio may be pinned multiple
+ * times, it can be added to pinned_folios multiple times, once for
+ * each time it was pinned during creation.
+ */
+ pgoff_t nr_pinned;
+ struct folio **pinned_folios;
+
struct sg_table *sg;
struct miscdevice *device;
pgoff_t *offsets;
- struct list_head unpin_list;
-};
-
-struct udmabuf_folio {
- struct folio *folio;
- struct list_head list;
};
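A worked illustration of the distinction drawn in the comment above, with hypothetical numbers:

/*
 * Two create-list entries of 1 MiB each, both backed by the same 2 MiB
 * hugetlb folio, pin that folio twice:
 *
 *   nr_pinned = 2    (the folio appears twice in pinned_folios[])
 *   pagecount = 512  (one folios[] slot per 4 KiB subpage)
 *
 * Release therefore calls unpin_folio() once per pinned_folios[] entry,
 * not once per distinct folio.
 */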
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -43,7 +49,8 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
- unsigned long pfn;
+ unsigned long addr, pfn;
+ vm_fault_t ret;
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
@@ -51,7 +58,35 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
pfn = folio_pfn(ubuf->folios[pgoff]);
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
- return vmf_insert_pfn(vma, vmf->address, pfn);
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ if (ret & VM_FAULT_ERROR)
+ return ret;
+
+ /* pre fault */
+ pgoff = vma->vm_pgoff;
+ addr = vma->vm_start;
+
+ for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
+ if (addr == vmf->address)
+ continue;
+
+ if (WARN_ON(pgoff >= ubuf->pagecount))
+ break;
+
+ pfn = folio_pfn(ubuf->folios[pgoff]);
+ pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+
+ /**
+ * If the vmf_insert_pfn() below fails, we do not return an
+ * error here during this pre-fault step; an error will be
+ * returned when the failing address is actually accessed.
+ */
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
}
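From userspace, the effect of the pre-fault loop is that a single touch populates the whole mapping; a hedged sketch of that access pattern (buf_fd assumed to be a dma-buf fd exported by udmabuf):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

static int touch_buffer(int buf_fd, size_t size)
{
	uint8_t *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, buf_fd, 0);

	if (p == MAP_FAILED)
		return -1;

	/* First access faults once; the handler pre-populates the rest. */
	memset(p, 0, size);

	munmap(p, size);
	return 0;
}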
static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -80,15 +115,16 @@ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
dma_resv_assert_held(buf->resv);
- pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (pg = 0; pg < ubuf->pagecount; pg++)
- pages[pg] = &ubuf->folios[pg]->page;
+ pages[pg] = folio_page(ubuf->folios[pg],
+ ubuf->offsets[pg] >> PAGE_SHIFT);
vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
- kfree(pages);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
@@ -159,34 +195,42 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
return put_sg_table(at->dev, sg, direction);
}
-static void unpin_all_folios(struct list_head *unpin_list)
+static void unpin_all_folios(struct udmabuf *ubuf)
{
- struct udmabuf_folio *ubuf_folio;
+ pgoff_t i;
- while (!list_empty(unpin_list)) {
- ubuf_folio = list_first_entry(unpin_list,
- struct udmabuf_folio, list);
- unpin_folio(ubuf_folio->folio);
+ for (i = 0; i < ubuf->nr_pinned; ++i)
+ unpin_folio(ubuf->pinned_folios[i]);
- list_del(&ubuf_folio->list);
- kfree(ubuf_folio);
- }
+ kvfree(ubuf->pinned_folios);
}
-static int add_to_unpin_list(struct list_head *unpin_list,
- struct folio *folio)
+static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
- struct udmabuf_folio *ubuf_folio;
+ ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
+ if (!ubuf->folios)
+ return -ENOMEM;
- ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
- if (!ubuf_folio)
+ ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
+ if (!ubuf->offsets)
+ return -ENOMEM;
+
+ ubuf->pinned_folios = kvmalloc_array(pgcnt,
+ sizeof(*ubuf->pinned_folios),
+ GFP_KERNEL);
+ if (!ubuf->pinned_folios)
return -ENOMEM;
- ubuf_folio->folio = folio;
- list_add_tail(&ubuf_folio->list, unpin_list);
return 0;
}
+static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
+{
+ unpin_all_folios(ubuf);
+ kvfree(ubuf->offsets);
+ kvfree(ubuf->folios);
+}
+
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
@@ -195,9 +239,7 @@ static void release_udmabuf(struct dma_buf *buf)
if (ubuf->sg)
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
kfree(ubuf);
}
@@ -215,8 +257,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -231,12 +272,11 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
@@ -248,15 +288,12 @@ static const struct dma_buf_ops udmabuf_ops = {
};
#define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
static int check_memfd_seals(struct file *memfd)
{
int seals;
- if (!memfd)
- return -EBADFD;
-
if (!shmem_file(memfd) && !is_file_hugepages(memfd))
return -EBADFD;
@@ -271,12 +308,10 @@ static int check_memfd_seals(struct file *memfd)
return 0;
}
-static int export_udmabuf(struct udmabuf *ubuf,
- struct miscdevice *device,
- u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+ struct miscdevice *device)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *buf;
ubuf->device = device;
exp_info.ops = &udmabuf_ops;
@@ -284,123 +319,151 @@ static int export_udmabuf(struct udmabuf *ubuf,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
- buf = dma_buf_export(&exp_info);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ return dma_buf_export(&exp_info);
+}
+
+static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
+ loff_t start, loff_t size, struct folio **folios)
+{
+ pgoff_t nr_pinned = ubuf->nr_pinned;
+ pgoff_t upgcnt = ubuf->pagecount;
+ u32 cur_folio, cur_pgcnt;
+ pgoff_t pgoff, pgcnt;
+ long nr_folios;
+ loff_t end;
+
+ pgcnt = size >> PAGE_SHIFT;
+ end = start + (pgcnt << PAGE_SHIFT) - 1;
+ nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
+ if (nr_folios <= 0)
+ return nr_folios ? nr_folios : -EINVAL;
+
+ cur_pgcnt = 0;
+ for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
+ pgoff_t subpgoff = pgoff;
+ size_t fsize = folio_size(folios[cur_folio]);
+
+ ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
+
+ for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
+ ubuf->folios[upgcnt] = folios[cur_folio];
+ ubuf->offsets[upgcnt] = subpgoff;
+ ++upgcnt;
- return dma_buf_fd(buf, flags);
+ if (++cur_pgcnt >= pgcnt)
+ goto end;
+ }
+
+ /**
+ * In a given range, only the first subpage of the first folio
+ * has an offset, which is returned by memfd_pin_folios().
+ * The first subpages of the other folios in the range have an
+ * offset of 0.
+ */
+ pgoff = 0;
+ }
+end:
+ ubuf->pagecount = upgcnt;
+ ubuf->nr_pinned = nr_pinned;
+ return 0;
}
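A worked example of the offset bookkeeping above, with hypothetical numbers:

/*
 * Pinning 3 pages starting 4 KiB into a 16 KiB folio F makes
 * memfd_pin_folios() return pgoff = 4096, so the loop records:
 *
 *   folios[0] = F, offsets[0] = 4096
 *   folios[1] = F, offsets[1] = 8192
 *   folios[2] = F, offsets[2] = 12288
 *
 * If the range continued into a second folio, its first subpage would
 * start at offset 0, per the comment above.
 */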
static long udmabuf_create(struct miscdevice *device,
struct udmabuf_create_list *head,
struct udmabuf_create_item *list)
{
- pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
- long nr_folios, ret = -EINVAL;
- struct file *memfd = NULL;
- struct folio **folios;
+ unsigned long max_nr_folios = 0;
+ struct folio **folios = NULL;
+ pgoff_t pgcnt = 0, pglimit;
struct udmabuf *ubuf;
- u32 i, j, k, flags;
- loff_t end;
+ struct dma_buf *dmabuf;
+ long ret = -EINVAL;
+ u32 i, flags;
ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
if (!ubuf)
return -ENOMEM;
- INIT_LIST_HEAD(&ubuf->unpin_list);
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
- if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
- goto err;
- if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
- goto err;
- ubuf->pagecount += list[i].size >> PAGE_SHIFT;
- if (ubuf->pagecount > pglimit)
- goto err;
+ pgoff_t subpgcnt;
+
+ if (!PAGE_ALIGNED(list[i].offset))
+ goto err_noinit;
+ if (!PAGE_ALIGNED(list[i].size))
+ goto err_noinit;
+
+ subpgcnt = list[i].size >> PAGE_SHIFT;
+ pgcnt += subpgcnt;
+ if (pgcnt > pglimit)
+ goto err_noinit;
+
+ max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
}
- if (!ubuf->pagecount)
- goto err;
+ if (!pgcnt)
+ goto err_noinit;
- ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
- GFP_KERNEL);
- if (!ubuf->folios) {
- ret = -ENOMEM;
+ ret = init_udmabuf(ubuf, pgcnt);
+ if (ret)
goto err;
- }
- ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
- GFP_KERNEL);
- if (!ubuf->offsets) {
+
+ folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
+ if (!folios) {
ret = -ENOMEM;
goto err;
}
- pgbuf = 0;
for (i = 0; i < head->count; i++) {
- memfd = fget(list[i].memfd);
- ret = check_memfd_seals(memfd);
- if (ret < 0)
- goto err;
+ struct file *memfd = fget(list[i].memfd);
- pgcnt = list[i].size >> PAGE_SHIFT;
- folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
- if (!folios) {
- ret = -ENOMEM;
+ if (!memfd) {
+ ret = -EBADFD;
goto err;
}
- end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
- ret = memfd_pin_folios(memfd, list[i].offset, end,
- folios, pgcnt, &pgoff);
- if (ret <= 0) {
- kfree(folios);
- if (!ret)
- ret = -EINVAL;
- goto err;
- }
-
- nr_folios = ret;
- pgoff >>= PAGE_SHIFT;
- for (j = 0, k = 0; j < pgcnt; j++) {
- ubuf->folios[pgbuf] = folios[k];
- ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
-
- if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
- ret = add_to_unpin_list(&ubuf->unpin_list,
- folios[k]);
- if (ret < 0) {
- kfree(folios);
- goto err;
- }
- }
-
- pgbuf++;
- if (++pgoff == folio_nr_pages(folios[k])) {
- pgoff = 0;
- if (++k == nr_folios)
- break;
- }
- }
+ /*
+ * Take the inode lock to protect against concurrent
+ * memfd_add_seals(), which takes this lock in write mode.
+ */
+ inode_lock_shared(file_inode(memfd));
+ ret = check_memfd_seals(memfd);
+ if (ret)
+ goto out_unlock;
- kfree(folios);
+ ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
+ list[i].size, folios);
+out_unlock:
+ inode_unlock_shared(file_inode(memfd));
fput(memfd);
- memfd = NULL;
+ if (ret)
+ goto err;
}
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
- ret = export_udmabuf(ubuf, device, flags);
- if (ret < 0)
+ dmabuf = export_udmabuf(ubuf, device);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
goto err;
+ }
+ /*
+ * Ownership of ubuf is held by the dmabuf from here.
+ * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+ * dmabuf and the ubuf (through udmabuf_ops.release).
+ */
+ ret = dma_buf_fd(dmabuf, flags);
+ if (ret < 0)
+ dma_buf_put(dmabuf);
+
+ kvfree(folios);
return ret;
err:
- if (memfd)
- fput(memfd);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
+err_noinit:
kfree(ubuf);
+ kvfree(folios);
return ret;
}
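The ownership comment above is the crux of the reworked error handling; condensed, the handoff looks like this:

	/* Before export: ubuf freed via the err/err_noinit labels. */
	dmabuf = export_udmabuf(ubuf, device);
	if (IS_ERR(dmabuf))
		goto err;		/* ubuf still ours, clean up manually */

	/* After export: ubuf owned by dmabuf; never free it directly. */
	ret = dma_buf_fd(dmabuf, flags);
	if (ret < 0)
		dma_buf_put(dmabuf);	/* ->release tears down ubuf for us */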
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d9ec1e69e428..b8a74b1798ba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,10 +89,17 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
- default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
+config ARM_DMA350
+ tristate "Arm DMA-350 support"
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Arm DMA-350 controller.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
@@ -103,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
- depends on ARCH_AT91
+ depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@@ -162,8 +169,8 @@ config DMA_SA11X0
config DMA_SUN4I
tristate "Allwinner A10 DMA SoCs support"
- depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
- default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV
+ default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -378,6 +385,20 @@ config LOONGSON1_APB_DMA
This selects support for the APB DMA controller in Loongson1 SoCs,
which is required by Loongson1 NAND and audio support.
+config LOONGSON2_APB_DMA
+ tristate "Loongson2 APB DMA support"
+ depends on LOONGARCH || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Loongson2 APB DMA controller driver. The
+ controller has a single DMA channel that can be configured
+ for different peripherals on the APB bus, such as audio,
+ NAND and SDIO.
+
+ This DMA controller transfers data from memory to a peripheral
+ FIFO. It does not support memory-to-memory transfers.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -396,20 +417,6 @@ config LPC32XX_DMAMUX
Support for PL080 multiplexed DMA request lines on
LPC32XX platrofm.
-config LS2X_APB_DMA
- tristate "Loongson LS2X APB DMA support"
- depends on LOONGARCH || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support for the Loongson LS2X APB DMA controller driver. The
- DMA controller is having single DMA channel which can be
- configured for different peripherals like audio, nand, sdio
- etc which is in APB bus.
-
- This DMA controller transfers data from memory to peripheral fifo.
- It does not support memory to memory data transfer.
-
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
@@ -443,7 +450,7 @@ config MILBEAUT_XDMAC
config MMP_PDMA
tristate "MMP PDMA support"
- depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
+ depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
@@ -546,7 +553,7 @@ config PL330_DMA
config PXA_DMA
bool "PXA DMA support"
- depends on (ARCH_MMP || ARCH_PXA)
+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -564,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
+config SOPHGO_CV1800B_DMAMUX
+ tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
+ depends on MFD_SYSCON
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the DMA multiplexer on Sophgo CV1800/SG2000
+ series SoCs.
+ Say Y here if your board has this SoC.
+
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
@@ -740,8 +756,6 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
-source "drivers/dma/ptdma/Kconfig"
-
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ad6a03c052ec..a54d7688392b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
+obj-$(CONFIG_ARM_DMA350) += arm-dma350.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
@@ -50,9 +50,9 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
+obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
-obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
@@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
+obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index a58a1600dd65..2abbe11e797e 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -9,18 +9,21 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
static LIST_HEAD(acpi_dma_list);
static DEFINE_MUTEX(acpi_dma_lock);
@@ -236,7 +239,7 @@ int acpi_dma_controller_free(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
-static void devm_acpi_dma_release(struct device *dev, void *res)
+static void devm_acpi_dma_free(void *dev)
{
acpi_dma_controller_free(dev);
}
@@ -259,37 +262,15 @@ int devm_acpi_dma_controller_register(struct device *dev,
(struct acpi_dma_spec *, struct acpi_dma *),
void *data)
{
- void *res;
int ret;
- res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
- if (ret) {
- devres_free(res);
+ if (ret)
return ret;
- }
- devres_add(dev, res);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
-/**
- * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
- * @dev: device that is unregistering as DMA controller
- *
- * Unregister a DMA controller registered with
- * devm_acpi_dma_controller_register(). Normally this function will not need to
- * be called and the resource management code will ensure that the resource is
- * freed.
- */
-void devm_acpi_dma_controller_free(struct device *dev)
-{
- WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
+ return devm_add_action_or_reset(dev, devm_acpi_dma_free, dev);
}
-EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
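The conversion above trades a bespoke devres_alloc()/devres_add() pair for the generic action API, which runs the teardown automatically on driver detach and, in the _or_reset variant, immediately on registration failure. A minimal sketch of the idiom (acquire/release helpers hypothetical):

#include <linux/device.h>

static void example_release(void *data)
{
	release_example(data);		/* hypothetical teardown */
}

static int example_register(struct device *dev, void *res)
{
	int ret;

	ret = acquire_example(res);	/* hypothetical setup */
	if (ret)
		return ret;

	/* On failure, example_release(res) is called before returning. */
	return devm_add_action_or_reset(dev, example_release, res);
}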
/**
* acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index e6a6566b309e..a203fdd84950 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -954,7 +954,7 @@ static struct platform_driver msgdma_driver = {
.of_match_table = of_match_ptr(msgdma_match),
},
.probe = msgdma_probe,
- .remove_new = msgdma_remove,
+ .remove = msgdma_remove,
};
module_platform_driver(msgdma_driver);
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
index 7d1f51d69675..00d874872a8f 100644
--- a/drivers/dma/amd/Kconfig
+++ b/drivers/dma/amd/Kconfig
@@ -1,4 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+ tristate "AMD AE4DMA Engine"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+ depends on AMD_PTDMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD AE4DMA controller. This controller
+ provides DMA capabilities to perform high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general-purpose peripheral DMA.
+
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general-purpose peripheral DMA.
config AMD_QDMA
tristate "AMD Queue-based DMA"
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
index 37212be9364f..11278c06374d 100644
--- a/drivers/dma/amd/Makefile
+++ b/drivers/dma/amd/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/ae4dma/Makefile b/drivers/dma/amd/ae4dma/Makefile
new file mode 100644
index 000000000000..e918f85a80ec
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
diff --git a/drivers/dma/amd/ae4dma/ae4dma-dev.c b/drivers/dma/amd/ae4dma/ae4dma-dev.c
new file mode 100644
index 000000000000..8de3bef41b58
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-dev.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
+static void ae4_pending_work(struct work_struct *work)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct pt_cmd *cmd;
+ u32 cridx;
+
+ for (;;) {
+ wait_event_interruptible(ae4cmd_q->q_w,
+ ((atomic64_read(&ae4cmd_q->done_cnt)) <
+ atomic64_read(&ae4cmd_q->intr_cnt)));
+
+ atomic64_inc(&ae4cmd_q->done_cnt);
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+ while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+ cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+
+ ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+ ae4cmd_q->q_cmd_count--;
+ ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+ complete_all(&ae4cmd_q->cmp);
+ }
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+}
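In outline, the interrupt handler and this worker form a simple producer/consumer pair; a sketch of one round trip:

/*
 * irq handler:  intr_cnt++, ack AE4_INTR_STS_OFF, wake_up(&q_w)
 * worker:       wait until done_cnt < intr_cnt, then done_cnt++,
 *               reap descriptors while dridx != AE4_RD_IDX_OFF,
 *               invoking each pt_cmd_callback() in submission order
 *
 * The counters only gate wakeups; the authoritative progress marker is
 * the hardware read index read under cmd_lock.
 */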
+
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+ struct ae4_cmd_queue *ae4cmd_q = data;
+ struct pt_cmd_queue *cmd_q;
+ struct pt_device *pt;
+ u32 status;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ pt = cmd_q->pt;
+
+ pt->total_interrupts++;
+ atomic64_inc(&ae4cmd_q->intr_cnt);
+
+ status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+ if (status & BIT(0)) {
+ status &= GENMASK(31, 1);
+ writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+ }
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ int i;
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ if (!ae4cmd_q->pws)
+ break;
+
+ cancel_delayed_work_sync(&ae4cmd_q->p_work);
+ destroy_workqueue(ae4cmd_q->pws);
+ }
+}
+
+int ae4_core_init(struct ae4_device *ae4)
+{
+ struct pt_device *pt = &ae4->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q;
+ int i, ret = 0;
+
+ writel(max_hw_q, pt->io_regs);
+
+ for (i = 0; i < max_hw_q; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ ae4cmd_q->id = ae4->cmd_q_count;
+ ae4->cmd_q_count++;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ cmd_q->pt = pt;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+ dev_name(pt->dev), ae4cmd_q);
+ if (ret)
+ return ret;
+
+ cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+ cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ /* Update the device registers with queue information. */
+ writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+ writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+ INIT_LIST_HEAD(&ae4cmd_q->cmd);
+ init_waitqueue_head(&ae4cmd_q->q_w);
+
+ ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+ if (!ae4cmd_q->pws) {
+ ae4_destroy_work(ae4);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+ queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+ init_completion(&ae4cmd_q->cmp);
+ }
+
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ ae4_destroy_work(ae4);
+ else
+ ptdma_debugfs_setup(pt);
+
+ return ret;
+}
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
new file mode 100644
index 000000000000..2c63907db228
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+ int i, v, ret;
+
+ pdev = to_pci_dev(dev);
+
+ for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+ ae4_msix->msix_entry[v].entry = v;
+
+ ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+ if (ret != v) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ret;
+
+ } else {
+ ae4_msix->msix_count = ret;
+ for (i = 0; i < ae4_msix->msix_count; i++)
+ ae4->ae4_irq[i] = pci_irq_vector(pdev, i);
+ }
+
+ return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+ pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+ ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int bar_mask;
+ int ret = 0;
+
+ ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+ if (!ae4)
+ return -ENOMEM;
+
+ ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+ if (!ae4->ae4_msix)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto ae4_error;
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+ if (ret)
+ goto ae4_error;
+
+ pt = &ae4->pt;
+ pt->dev = dev;
+ pt->ver = AE4_DMA_VERSION;
+
+ pt->io_regs = pcim_iomap_table(pdev)[0];
+ if (!pt->io_regs) {
+ ret = -ENOMEM;
+ goto ae4_error;
+ }
+
+ ret = ae4_get_irqs(ae4);
+ if (ret < 0)
+ goto ae4_error;
+
+ pci_set_master(pdev);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+ dev_set_drvdata(dev, ae4);
+
+ ret = ae4_core_init(ae4);
+ if (ret)
+ goto ae4_error;
+
+ return 0;
+
+ae4_error:
+ ae4_deinit(ae4);
+
+ return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+ struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+ ae4_destroy_work(ae4);
+ ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x149B), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+ .name = "ae4dma",
+ .id_table = ae4_pci_table,
+ .probe = ae4_pci_probe,
+ .remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
new file mode 100644
index 000000000000..57f6048726bb
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES 16
+
+#define AE4_DESC_COMPLETED 0x03
+
+#define AE4_MAX_IDX_OFF 0x08
+#define AE4_RD_IDX_OFF 0x0c
+#define AE4_WR_IDX_OFF 0x10
+#define AE4_INTR_STS_OFF 0x14
+#define AE4_Q_BASE_L_OFF 0x18
+#define AE4_Q_BASE_H_OFF 0x1c
+#define AE4_Q_SZ 0x20
+
+#define AE4_DMA_VERSION 4
+#define CMD_AE4_DESC_DW0_VAL 2
+
+#define AE4_TIME_OUT 5000
+
+struct ae4_msix {
+ int msix_count;
+ struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+ struct ae4_device *ae4;
+ struct pt_cmd_queue cmd_q;
+ struct list_head cmd;
+ /* protect command operations */
+ struct mutex cmd_lock;
+ struct delayed_work p_work;
+ struct workqueue_struct *pws;
+ struct completion cmp;
+ wait_queue_head_t q_w;
+ atomic64_t intr_cnt;
+ atomic64_t done_cnt;
+ u64 q_cmd_count;
+ u32 dridx;
+ u32 tail_wi;
+ u32 id;
+};
+
+union dwou {
+ u32 dw0;
+ struct dword0 {
+ u8 byte0;
+ u8 byte1;
+ u16 timestamp;
+ } dws;
+};
+
+struct dword1 {
+ u8 status;
+ u8 err_code;
+ u16 desc_id;
+};
+
+struct ae4dma_desc {
+ union dwou dwouv;
+ struct dword1 dw1;
+ u32 length;
+ u32 rsvd;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dst_hi;
+ u32 dst_lo;
+};
+
+struct ae4_device {
+ struct pt_device pt;
+ struct ae4_msix *ae4_msix;
+ struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+ unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+ unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/amd/ptdma/Makefile
index ce5410268a9a..ce5410268a9a 100644
--- a/drivers/dma/ptdma/Makefile
+++ b/drivers/dma/amd/ptdma/Makefile
diff --git a/drivers/dma/amd/ptdma/ptdma-debugfs.c b/drivers/dma/amd/ptdma/ptdma-debugfs.c
new file mode 100644
index 000000000000..c7c90bbf6fd8
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-debugfs.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
+
+/* DebugFS helpers */
+#define RI_VERSION_NUM 0x0000003F
+
+#define RI_NUM_VQM 0x00078000
+#define RI_NVQM_SHIFT 15
+
+static int pt_debugfs_info_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+ struct ae4_device *ae4;
+ unsigned int regval;
+
+ seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
+ seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
+ } else {
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+ }
+
+ regval = ioread32(pt->io_regs + CMD_PT_VERSION);
+
+ seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
+ seq_puts(s, " Engines:");
+ seq_puts(s, "\n");
+ seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+
+ return 0;
+}
+
+/*
+ * Return a formatted buffer containing the current
+ * statistics of queue for PTDMA
+ */
+static int pt_debugfs_stats_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+
+ seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
+
+ return 0;
+}
+
+static int pt_debugfs_queue_show(struct seq_file *s, void *p)
+{
+ struct pt_cmd_queue *cmd_q = s->private;
+ struct pt_device *pt;
+ unsigned int regval;
+
+ if (!cmd_q)
+ return 0;
+
+ seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
+
+ pt = cmd_q->pt;
+ if (pt->ver == AE4_DMA_VERSION) {
+ regval = readl(cmd_q->reg_control + 0x4);
+ seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+ } else {
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
+
+void ptdma_debugfs_setup(struct pt_device *pt)
+{
+ struct dentry *debugfs_q_instance;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+ char name[30];
+ int i;
+
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_info_fops);
+
+ debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_stats_fops);
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+ debugfs_q_instance =
+ debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+ } else {
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+ cmd_q = &pt->cmd_q;
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+}
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/amd/ptdma/ptdma-dev.c
index a2bf13ff18b6..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/amd/ptdma/ptdma-dev.c
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
new file mode 100644
index 000000000000..628c49ce5de9
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+ "",
+ "ERR 01: INVALID HEADER DW0",
+ "ERR 02: INVALID STATUS",
+ "ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+ "ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+ "ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+ "ERR 06: INVALID ALIGNMENT",
+ "ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+ /* ERR 01 - 07 represent invalid AE4 errors */
+ if (e <= 7)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+ /* ERR 08 - 15 represent invalid descriptor errors */
+ else if (e > 7 && e <= 15)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+ /* ERR 16 - 31 represent firmware errors */
+ else if (e > 15 && e <= 31)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+ /* ERR 32 - 63 represent fatal errors */
+ else if (e > 31 && e <= 63)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+ /* ERR 64 - 255 represent PTE errors */
+ else if (e > 63 && e <= 255)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+ else
+ dev_info(d->dev, "Unknown AE4DMA error (0x%x)\n", e);
+}
+
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct ae4dma_desc desc;
+ u8 status;
+
+ memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+ status = desc.dw1.status;
+ if (status && status != AE4_DESC_COMPLETED) {
+ cmd_q->cmd_error = desc.dw1.err_code;
+ if (cmd_q->cmd_error)
+ ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+ }
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ cmd_q = &ae4cmd_q->cmd_q;
+ } else {
+ cmd_q = &pt->cmd_q;
+ }
+
+ return cmd_q;
+}
+
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+ if (soc) {
+ desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+ desc->dwouv.dw0 &= ~DWORD0_SOC;
+ }
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+ ae4cmd_q->q_cmd_count++;
+ ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+ writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ struct ae4dma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+ desc.dw1.status = 0;
+ desc.dw1.err_code = 0;
+ desc.dw1.desc_id = 0;
+
+ desc.length = pt_engine->src_len;
+
+ desc.src_lo = upper_32_bits(pt_engine->src_dma);
+ desc.src_hi = lower_32_bits(pt_engine->src_dma);
+ desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+ desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+ return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &desc->pt_cmd;
+ pt = pt_cmd->pt;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+ else
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ struct pt_device *pt;
+ unsigned long flags;
+
+ pt = chan->pt;
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (pt->ver != AE4_DMA_VERSION && desc) {
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ } else {
+ /* Don't handle it twice */
+ tx_desc = NULL;
+ }
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (pt->ver != AE4_DMA_VERSION && tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q)
+{
+ u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF);
+ u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+
+ if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1))
+ return true;
+
+ return false;
+}
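The fullness test uses the usual one-slot-reserved ring arithmetic; a worked example with a hypothetical MAX_CMD_QLEN of 32:

/*
 * front_wi = 10, rear_ri = 12:
 *
 *   used = (32 + 10 - 12) % 32 = 30
 *
 * 30 >= 31 is false, so the queue is not yet full. One slot is always
 * left unused so that front_wi == rear_ri unambiguously means "empty".
 */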
+
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+ pt = chan->pt;
+
+ if (err)
+ desc->status = DMA_ERROR;
+
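+ /*
+ * For AE4, throttle submission while the hardware ring is nearly
+ * full: wake the queue worker and wait for a completion before
+ * retrying.
+ */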
+ while (true) {
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+
+ if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) ||
+ ae4_core_queue_full(&ae4cmd_q->cmd_q)) {
+ wake_up(&ae4cmd_q->q_w);
+
+ if (!wait_for_completion_timeout(&ae4cmd_q->cmp,
+ msecs_to_jiffies(AE4_TIME_OUT))) {
+ dev_err(pt->dev, "TIMEOUT %d:\n", ae4cmd_q->id);
+ break;
+ }
+
+ reinit_completion(&ae4cmd_q->cmp);
+ continue;
+ }
+ }
+
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_dma_start_desc(desc, chan);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ desc->issued_to_hw = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static void pt_cmd_callback_work(void *data, int err)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct virt_dma_desc *vd;
+ struct pt_dma_chan *chan;
+ unsigned long flags;
+
+ if (!desc)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err == -EINPROGRESS)
+ return;
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ } else {
+ tx_desc = NULL;
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ list_del(&desc->vd.node);
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_dma_desc *desc;
+ struct ae4_device *ae4;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ pt_cmd = &desc->pt_cmd;
+ pt_cmd->pt = pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ desc->len = len;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ pt_cmd->pt_cmd_callback = pt_cmd_callback_work;
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ struct pt_device *pt;
+ unsigned long flags;
+ bool engine_is_idle = true;
+
+ pt = chan->pt;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
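+ /*
+ * For the original PTDMA, an already-active descriptor means the
+ * completion callback will chain into the newly issued work, so
+ * only kick the engine when it was idle. AE4 always kicks here.
+ */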
+ desc = pt_next_dma_desc(chan);
+ if (desc && pt->ver != AE4_DMA_VERSION)
+ engine_is_idle = false;
+
+ vchan_issue_pending(&chan->vc);
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (engine_is_idle && desc)
+ pt_cmd_callback(desc, 0);
+}
+
+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ int i;
+
+ for (i = 0; i < CMD_Q_LEN; i++)
+ ae4_check_status_error(ae4cmd_q, i);
+}
+
+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pt_dma_chan *chan = to_pt_chan(c);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_check_status_trans_ae4(pt, cmd_q);
+ else
+ pt_check_status_trans(pt, cmd_q);
+
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_stop_queue(cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_start_queue(cmd_q);
+ desc = pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_stop_queue(cmd_q);
+ else
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ vchan_get_all_descriptors(&chan->vc, &head);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+ struct ae4_cmd_queue *ae4cmd_q = NULL;
+ struct ae4_device *ae4 = NULL;
+ struct pt_dma_chan *chan;
+ char *desc_cache_name;
+ int ret, i;
+
+ if (pt->ver == AE4_DMA_VERSION)
+ ae4 = container_of(pt, struct ae4_device, pt);
+
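+ /* AE4 exposes one DMA channel per hardware queue; PTDMA has a single channel */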
+ if (ae4)
+ pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+ sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+ else
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ dev_name(pt->dev));
+ if (!desc_cache_name)
+ return -ENOMEM;
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache)
+ return -ENOMEM;
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /*
+ * PTDMA is intended to be used with the AMD NTB devices, hence
+ * marking it as DMA_PRIVATE.
+ */
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = pt_tx_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ if (ae4) {
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ chan = pt->pt_dma_chan + i;
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ chan->id = ae4cmd_q->id;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+ } else {
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+}
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/amd/ptdma/ptdma-pci.c
index 22739ff0c3c5..22739ff0c3c5 100644
--- a/drivers/dma/ptdma/ptdma-pci.c
+++ b/drivers/dma/amd/ptdma/ptdma-pci.c
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 39bc37268235..ef3f55632107 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -22,7 +22,7 @@
#include <linux/wait.h>
#include <linux/dmapool.h>
-#include "../virt-dma.h"
+#include "../../virt-dma.h"
#define MAX_PT_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -184,6 +184,7 @@ struct pt_dma_desc {
struct pt_dma_chan {
struct virt_dma_chan vc;
struct pt_device *pt;
+ u32 id;
};
struct pt_cmd_queue {
@@ -253,7 +254,6 @@ struct pt_device {
/* Support for the DMA Engine capabilities */
struct dma_device dma_dev;
struct pt_dma_chan *pt_dma_chan;
- struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
wait_queue_head_t lsb_queue;
@@ -262,6 +262,7 @@ struct pt_device {
unsigned long total_interrupts;
struct pt_tasklet_data tdata;
+ int ver;
};
/*
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
index b0a1f3ad851b..8fb2d5e1df20 100644
--- a/drivers/dma/amd/qdma/qdma.c
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -7,9 +7,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>
@@ -283,16 +283,20 @@ static int qdma_check_queue_status(struct qdma_device *qdev,
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
- enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
- QDMA_CTXT_DESC_HW_H2C,
- QDMA_CTXT_DESC_CR_H2C,
- QDMA_CTXT_PFTCH, };
- enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
- QDMA_CTXT_DESC_HW_C2H,
- QDMA_CTXT_DESC_CR_C2H,
- QDMA_CTXT_PFTCH, };
+ static const enum qdma_ctxt_type h2c_types[] = {
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH,
+ };
+ static const enum qdma_ctxt_type c2h_types[] = {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH,
+ };
struct qdma_device *qdev = queue->qdev;
- enum qdma_ctxt_type *type;
+ const enum qdma_ctxt_type *type;
int ret, num, i;
if (queue->dir == DMA_MEM_TO_DEV) {
@@ -492,18 +496,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
static int qdma_device_setup(struct qdma_device *qdev)
{
- struct device *dev = &qdev->pdev->dev;
u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
int ret = 0;
- while (dev && get_dma_ops(dev))
- dev = dev->parent;
- if (!dev) {
- qdma_err(qdev, "dma device not found");
- return -EINVAL;
- }
- set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
ret = qdma_setup_fmap_context(qdev);
if (ret) {
qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +543,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
{
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
- struct device *dev = qdev->dma_dev.dev;
+ struct qdma_platdata *pdata;
qdma_clear_queue_context(queue);
vchan_free_chan_resources(&queue->vchan);
- dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ pdata = dev_get_platdata(&qdev->pdev->dev);
+ dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
queue->desc_base, queue->dma_desc_base);
}
@@ -565,6 +561,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
struct qdma_ctxt_sw_desc desc;
+ struct qdma_platdata *pdata;
size_t size;
int ret;
@@ -572,8 +569,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret)
return ret;
+ pdata = dev_get_platdata(&qdev->pdev->dev);
size = queue->ring_size * QDMA_MM_DESC_SIZE;
- queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
&queue->dma_desc_base,
GFP_KERNEL);
if (!queue->desc_base) {
@@ -588,7 +586,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret) {
qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
chan->name);
- dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
queue->dma_desc_base);
return ret;
}
@@ -948,8 +946,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{
- u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
struct device *dev = &qdev->pdev->dev;
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
struct qdma_intr_ring *ring;
struct qdma_ctxt_intr intr_ctxt;
u32 vector;
@@ -969,7 +968,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
ring->msix_id = qdev->err_irq_idx + i + 1;
ring->ridx = i;
ring->color = 1;
- ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ ring->base = dmam_alloc_coherent(pdata->dma_dev,
+ QDMA_INTR_RING_SIZE,
&ring->dev_base, GFP_KERNEL);
if (!ring->base) {
qdma_err(qdev, "Failed to alloc intr ring %d", i);
@@ -1133,7 +1133,7 @@ static struct platform_driver amd_qdma_driver = {
.name = "amd-qdma",
},
.probe = amd_qdma_probe,
- .remove_new = amd_qdma_remove,
+ .remove = amd_qdma_remove,
};
module_platform_driver(amd_qdma_driver);
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 9588773dd2eb..bd49f0374291 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
{
struct admac_sram *sram;
int i, ret = 0, nblocks;
+ ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+ ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
if (dir == DMA_MEM_TO_DEV)
sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
goto free_irq;
}
- ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
- ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
dev_info(&pdev->dev, "Audio DMA Controller\n");
- dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
- readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
return 0;
@@ -950,7 +947,7 @@ static struct platform_driver apple_admac_driver = {
.of_match_table = admac_of_match,
},
.probe = admac_probe,
- .remove_new = admac_remove,
+ .remove = admac_remove,
};
module_platform_driver(apple_admac_driver);
diff --git a/drivers/dma/arm-dma350.c b/drivers/dma/arm-dma350.c
new file mode 100644
index 000000000000..9efe2ca7d5ec
--- /dev/null
+++ b/drivers/dma/arm-dma350.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024-2025 Arm Limited
+// Arm DMA-350 driver
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DMAINFO 0x0f00
+
+#define DMA_BUILDCFG0 0xb0
+#define DMA_CFG_DATA_WIDTH GENMASK(18, 16)
+#define DMA_CFG_ADDR_WIDTH GENMASK(15, 10)
+#define DMA_CFG_NUM_CHANNELS GENMASK(9, 4)
+
+#define DMA_BUILDCFG1 0xb4
+#define DMA_CFG_NUM_TRIGGER_IN GENMASK(8, 0)
+
+#define IIDR 0xc8
+#define IIDR_PRODUCTID GENMASK(31, 20)
+#define IIDR_VARIANT GENMASK(19, 16)
+#define IIDR_REVISION GENMASK(15, 12)
+#define IIDR_IMPLEMENTER GENMASK(11, 0)
+
+#define PRODUCTID_DMA350 0x3a0
+#define IMPLEMENTER_ARM 0x43b
+
+#define DMACH(n) (0x1000 + 0x0100 * (n))
+
+#define CH_CMD 0x00
+#define CH_CMD_RESUME BIT(5)
+#define CH_CMD_PAUSE BIT(4)
+#define CH_CMD_STOP BIT(3)
+#define CH_CMD_DISABLE BIT(2)
+#define CH_CMD_CLEAR BIT(1)
+#define CH_CMD_ENABLE BIT(0)
+
+#define CH_STATUS 0x04
+#define CH_STAT_RESUMEWAIT BIT(21)
+#define CH_STAT_PAUSED BIT(20)
+#define CH_STAT_STOPPED BIT(19)
+#define CH_STAT_DISABLED BIT(18)
+#define CH_STAT_ERR BIT(17)
+#define CH_STAT_DONE BIT(16)
+#define CH_STAT_INTR_ERR BIT(1)
+#define CH_STAT_INTR_DONE BIT(0)
+
+#define CH_INTREN 0x08
+#define CH_INTREN_ERR BIT(1)
+#define CH_INTREN_DONE BIT(0)
+
+#define CH_CTRL 0x0c
+#define CH_CTRL_USEDESTRIGIN BIT(26)
+#define CH_CTRL_USESRCTRIGIN BIT(25)
+#define CH_CTRL_DONETYPE GENMASK(23, 21)
+#define CH_CTRL_REGRELOADTYPE GENMASK(20, 18)
+#define CH_CTRL_XTYPE GENMASK(11, 9)
+#define CH_CTRL_TRANSIZE GENMASK(2, 0)
+
+#define CH_SRCADDR 0x10
+#define CH_SRCADDRHI 0x14
+#define CH_DESADDR 0x18
+#define CH_DESADDRHI 0x1c
+#define CH_XSIZE 0x20
+#define CH_XSIZEHI 0x24
+#define CH_SRCTRANSCFG 0x28
+#define CH_DESTRANSCFG 0x2c
+#define CH_CFG_MAXBURSTLEN GENMASK(19, 16)
+#define CH_CFG_PRIVATTR BIT(11)
+#define CH_CFG_SHAREATTR GENMASK(9, 8)
+#define CH_CFG_MEMATTR GENMASK(7, 0)
+
+#define TRANSCFG_DEVICE \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
+#define TRANSCFG_NC \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
+#define TRANSCFG_WB \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)
+
+#define CH_XADDRINC 0x30
+#define CH_XY_DES GENMASK(31, 16)
+#define CH_XY_SRC GENMASK(15, 0)
+
+#define CH_FILLVAL 0x38
+#define CH_SRCTRIGINCFG 0x4c
+#define CH_DESTRIGINCFG 0x50
+#define CH_LINKATTR 0x70
+#define CH_LINK_SHAREATTR GENMASK(9, 8)
+#define CH_LINK_MEMATTR GENMASK(7, 0)
+
+#define CH_AUTOCFG 0x74
+#define CH_LINKADDR 0x78
+#define CH_LINKADDR_EN BIT(0)
+
+#define CH_LINKADDRHI 0x7c
+#define CH_ERRINFO 0x90
+#define CH_ERRINFO_AXIRDPOISERR BIT(18)
+#define CH_ERRINFO_AXIWRRESPERR BIT(17)
+#define CH_ERRINFO_AXIRDRESPERR BIT(16)
+
+#define CH_BUILDCFG0 0xf8
+#define CH_CFG_INC_WIDTH GENMASK(29, 26)
+#define CH_CFG_DATA_WIDTH GENMASK(24, 22)
+#define CH_CFG_DATA_BUF_SIZE GENMASK(7, 0)
+
+#define CH_BUILDCFG1 0xfc
+#define CH_CFG_HAS_CMDLINK BIT(8)
+#define CH_CFG_HAS_TRIGSEL BIT(7)
+#define CH_CFG_HAS_TRIGIN BIT(5)
+#define CH_CFG_HAS_WRAP BIT(1)
+
+
+#define LINK_REGCLEAR BIT(0)
+#define LINK_INTREN BIT(2)
+#define LINK_CTRL BIT(3)
+#define LINK_SRCADDR BIT(4)
+#define LINK_SRCADDRHI BIT(5)
+#define LINK_DESADDR BIT(6)
+#define LINK_DESADDRHI BIT(7)
+#define LINK_XSIZE BIT(8)
+#define LINK_XSIZEHI BIT(9)
+#define LINK_SRCTRANSCFG BIT(10)
+#define LINK_DESTRANSCFG BIT(11)
+#define LINK_XADDRINC BIT(12)
+#define LINK_FILLVAL BIT(14)
+#define LINK_SRCTRIGINCFG BIT(19)
+#define LINK_DESTRIGINCFG BIT(20)
+#define LINK_AUTOCFG BIT(29)
+#define LINK_LINKADDR BIT(30)
+#define LINK_LINKADDRHI BIT(31)
+
+
+enum ch_ctrl_donetype {
+ CH_CTRL_DONETYPE_NONE = 0,
+ CH_CTRL_DONETYPE_CMD = 1,
+ CH_CTRL_DONETYPE_CYCLE = 3
+};
+
+enum ch_ctrl_xtype {
+ CH_CTRL_XTYPE_DISABLE = 0,
+ CH_CTRL_XTYPE_CONTINUE = 1,
+ CH_CTRL_XTYPE_WRAP = 2,
+ CH_CTRL_XTYPE_FILL = 3
+};
+
+enum ch_cfg_shareattr {
+ SHAREATTR_NSH = 0,
+ SHAREATTR_OSH = 2,
+ SHAREATTR_ISH = 3
+};
+
+enum ch_cfg_memattr {
+ MEMATTR_DEVICE = 0x00,
+ MEMATTR_NC = 0x44,
+ MEMATTR_WB = 0xff
+};
+
+struct d350_desc {
+ struct virt_dma_desc vd;
+ u32 command[16];
+ u16 xsize;
+ u16 xsizehi;
+ u8 tsz;
+};
+
+struct d350_chan {
+ struct virt_dma_chan vc;
+ struct d350_desc *desc;
+ void __iomem *base;
+ int irq;
+ enum dma_status status;
+ dma_cookie_t cookie;
+ u32 residue;
+ u8 tsz;
+ bool has_trig;
+ bool has_wrap;
+ bool coherent;
+};
+
+struct d350 {
+ struct dma_device dma;
+ int nchan;
+ int nreq;
+ struct d350_chan channels[] __counted_by(nchan);
+};
+
+static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct d350_chan, vc.chan);
+}
+
+static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct d350_desc, vd);
+}
+
+static void d350_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_d350_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
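+ /*
+ * Pick the widest transfer size that divides the length and both
+ * addresses, capped at the channel's native data width (dch->tsz).
+ */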
+ desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
+ LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
+ LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(src);
+ cmd[3] = upper_32_bits(src);
+ cmd[4] = lower_32_bits(dest);
+ cmd[5] = upper_32_bits(dest);
+ cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
+ cmd[11] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
+ dma_addr_t dest, int value, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
+ LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
+ LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(dest);
+ cmd[3] = upper_32_bits(dest);
+ cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[7] = FIELD_PREP(CH_XY_DES, 1);
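+ /* Replicate the fill byte across all four byte lanes of FILLVAL */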
+ cmd[8] = (u8)value * 0x01010101;
+ cmd[9] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static int d350_pause(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_IN_PROGRESS) {
+ writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
+ dch->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static int d350_resume(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_PAUSED) {
+ writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
+ dch->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static u32 d350_get_residue(struct d350_chan *dch)
+{
+ u32 res, xsize, xsizehi, hi_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
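+ /*
+ * Re-read XSIZEHI around XSIZE so a carry between the two halves
+ * mid-read is caught and the sample retried.
+ */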
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ do {
+ xsizehi = hi_new;
+ xsize = readl_relaxed(dch->base + CH_XSIZE);
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ } while (xsizehi != hi_new && --retries);
+
+ res = FIELD_GET(CH_XY_DES, xsize);
+ res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;
+
+ return res << dch->desc->tsz;
+}
+
+static int d350_terminate_all(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
+ if (dch->desc) {
+ if (dch->status != DMA_ERROR)
+ vchan_terminate_vdesc(&dch->desc->vd);
+ dch->desc = NULL;
+ dch->status = DMA_COMPLETE;
+ }
+ vchan_get_all_descriptors(&dch->vc, &list);
+ list_splice_tail(&list, &dch->vc.desc_terminated);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static void d350_synchronize(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ vchan_synchronize(&dch->vc);
+}
+
+static u32 d350_desc_bytes(struct d350_desc *desc)
+{
+ return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
+}
+
+static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(chan, cookie, state);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (cookie == dch->cookie) {
+ status = dch->status;
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
+ dch->residue = d350_get_residue(dch);
+ residue = dch->residue;
+ } else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
+ residue = d350_desc_bytes(to_d350_desc(vd));
+ } else if (status == DMA_IN_PROGRESS) {
+ /* Somebody else terminated it? */
+ status = DMA_ERROR;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ dma_set_residue(state, residue);
+ return status;
+}
+
+static void d350_start_next(struct d350_chan *dch)
+{
+ u32 hdr, *reg;
+
+ dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
+ if (!dch->desc)
+ return;
+
+ list_del(&dch->desc->vd.node);
+ dch->status = DMA_IN_PROGRESS;
+ dch->cookie = dch->desc->vd.tx.cookie;
+ dch->residue = d350_desc_bytes(dch->desc);
+
+ hdr = dch->desc->command[0];
+ reg = &dch->desc->command[1];
+
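+ /*
+ * The command header selects which registers follow; consume them
+ * in ascending LINK_* bit order to match the command layout.
+ */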
+ if (hdr & LINK_INTREN)
+ writel_relaxed(*reg++, dch->base + CH_INTREN);
+ if (hdr & LINK_CTRL)
+ writel_relaxed(*reg++, dch->base + CH_CTRL);
+ if (hdr & LINK_SRCADDR)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDR);
+ if (hdr & LINK_SRCADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
+ if (hdr & LINK_DESADDR)
+ writel_relaxed(*reg++, dch->base + CH_DESADDR);
+ if (hdr & LINK_DESADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
+ if (hdr & LINK_XSIZE)
+ writel_relaxed(*reg++, dch->base + CH_XSIZE);
+ if (hdr & LINK_XSIZEHI)
+ writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
+ if (hdr & LINK_SRCTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
+ if (hdr & LINK_DESTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
+ if (hdr & LINK_XADDRINC)
+ writel_relaxed(*reg++, dch->base + CH_XADDRINC);
+ if (hdr & LINK_FILLVAL)
+ writel_relaxed(*reg++, dch->base + CH_FILLVAL);
+ if (hdr & LINK_SRCTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
+ if (hdr & LINK_DESTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
+ if (hdr & LINK_AUTOCFG)
+ writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
+ if (hdr & LINK_LINKADDR)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDR);
+ if (hdr & LINK_LINKADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);
+
+ writel(CH_CMD_ENABLE, dch->base + CH_CMD);
+}
+
+static void d350_issue_pending(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (vchan_issue_pending(&dch->vc) && !dch->desc)
+ d350_start_next(dch);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+}
+
+static irqreturn_t d350_irq(int irq, void *data)
+{
+ struct d350_chan *dch = data;
+ struct device *dev = dch->vc.chan.device->dev;
+ struct virt_dma_desc *vd = &dch->desc->vd;
+ u32 ch_status;
+
+ ch_status = readl(dch->base + CH_STATUS);
+ if (!ch_status)
+ return IRQ_NONE;
+
+ if (ch_status & CH_STAT_INTR_ERR) {
+ u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);
+
+ if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
+ vd->tx_result.result = DMA_TRANS_READ_FAILED;
+ else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
+ vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
+ else
+ vd->tx_result.result = DMA_TRANS_ABORTED;
+
+ vd->tx_result.residue = d350_get_residue(dch);
+ } else if (!(ch_status & CH_STAT_INTR_DONE)) {
+ dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
+ }
+ writel_relaxed(ch_status, dch->base + CH_STATUS);
+
+ spin_lock(&dch->vc.lock);
+ vchan_cookie_complete(vd);
+ if (ch_status & CH_STAT_INTR_DONE) {
+ dch->status = DMA_COMPLETE;
+ dch->residue = 0;
+ d350_start_next(dch);
+ } else {
+ dch->status = DMA_ERROR;
+ dch->residue = vd->tx_result.residue;
+ }
+ spin_unlock(&dch->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int d350_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
+ dev_name(&dch->vc.chan.dev->device), dch);
+ if (!ret)
+ writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);
+
+ return ret;
+}
+
+static void d350_free_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ writel_relaxed(0, dch->base + CH_INTREN);
+ free_irq(dch->irq, dch);
+ vchan_free_chan_resources(&dch->vc);
+}
+
+static int d350_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct d350 *dmac;
+ void __iomem *base;
+ u32 reg;
+ int ret, nchan, dw, aw, r, p;
+ bool coherent, memset;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ reg = readl_relaxed(base + DMAINFO + IIDR);
+ r = FIELD_GET(IIDR_VARIANT, reg);
+ p = FIELD_GET(IIDR_REVISION, reg);
+ if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
+ FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
+ return dev_err_probe(dev, -ENODEV, "Not a DMA-350!");
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
+ nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
+ dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
+ aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
+ coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+
+ dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->nchan = nchan;
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
+ dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);
+
+ dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);
+
+ dmac->dma.dev = dev;
+ for (int i = min(dw, 16); i > 0; i /= 2) {
+ dmac->dma.src_addr_widths |= BIT(i);
+ dmac->dma.dst_addr_widths |= BIT(i);
+ }
+ dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
+ dmac->dma.descriptor_reuse = true;
+ dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
+ dmac->dma.device_free_chan_resources = d350_free_chan_resources;
+ dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
+ dmac->dma.device_pause = d350_pause;
+ dmac->dma.device_resume = d350_resume;
+ dmac->dma.device_terminate_all = d350_terminate_all;
+ dmac->dma.device_synchronize = d350_synchronize;
+ dmac->dma.device_tx_status = d350_tx_status;
+ dmac->dma.device_issue_pending = d350_issue_pending;
+ INIT_LIST_HEAD(&dmac->dma.channels);
+
+ /* Would be nice to have per-channel caps for this... */
+ memset = true;
+ for (int i = 0; i < nchan; i++) {
+ struct d350_chan *dch = &dmac->channels[i];
+
+ dch->base = base + DMACH(i);
+ writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG1);
+ if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
+ dev_warn(dev, "No command link support on channel %d\n", i);
+ continue;
+ }
+ dch->irq = platform_get_irq(pdev, i);
+ if (dch->irq < 0)
+ return dev_err_probe(dev, dch->irq,
+ "Failed to get IRQ for channel %d\n", i);
+
+ dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
+ dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
+ FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);
+
+ /* Fill is a special case of Wrap */
+ memset &= dch->has_wrap;
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG0);
+ dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);
+
+ reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
+ reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
+ writel_relaxed(reg, dch->base + CH_LINKATTR);
+
+ dch->vc.desc_free = d350_desc_free;
+ vchan_init(&dch->vc, &dmac->dma);
+ }
+
+ if (memset) {
+ dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memset = d350_prep_memset;
+ }
+
+ platform_set_drvdata(pdev, dmac);
+
+ ret = dma_async_device_register(&dmac->dma);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register DMA device\n");
+
+ return 0;
+}
+
+static void d350_remove(struct platform_device *pdev)
+{
+ struct d350 *dmac = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&dmac->dma);
+}
+
+static const struct of_device_id d350_of_match[] __maybe_unused = {
+ { .compatible = "arm,dma-350" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, d350_of_match);
+
+static struct platform_driver d350_driver = {
+ .driver = {
+ .name = "arm-dma350",
+ .of_match_table = of_match_ptr(d350_of_match),
+ },
+ .probe = d350_probe,
+ .remove = d350_remove,
+};
+module_platform_driver(d350_driver);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm DMA-350 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index baebddc740b0..2d147712cbc6 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -2250,7 +2250,7 @@ static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = {
};
static struct platform_driver at_dma_driver = {
- .remove_new = at_dma_remove,
+ .remove = at_dma_remove,
.shutdown = at_dma_shutdown,
.id_table = atdma_devtypes,
.driver = {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 299396121e6d..3fbc74710a13 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+ if (!desc)
+ return NULL;
list_add_tail(&desc->desc_node, &desc->descs_list);
desc->tx_dma_desc.cookie = -EBUSY;
@@ -2031,10 +2033,8 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
- if (desc->active_xfer) {
- pm_runtime_put_autosuspend(atxdmac->dev);
- pm_runtime_mark_last_busy(atxdmac->dev);
- }
+ if (desc->active_xfer)
+ pm_runtime_put_noidle(atxdmac->dev);
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
@@ -2476,7 +2476,7 @@ MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
static struct platform_driver at_xdmac_driver = {
.probe = at_xdmac_probe,
- .remove_new = at_xdmac_remove,
+ .remove = at_xdmac_remove,
.driver = {
.name = "at_xdmac",
.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index cfa6e1167a1f..7f0e76439ce5 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(of, sba_of_match);
static struct platform_driver sba_driver = {
.probe = sba_probe,
- .remove_new = sba_remove,
+ .remove = sba_remove,
.driver = {
.name = "bcm-sba-raid",
.of_match_table = sba_of_match,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e1b92b4d7b05..0117bb2e8591 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -875,6 +875,27 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
return chan;
}
+static int bcm2835_dma_suspend_late(struct device *dev)
+{
+ struct bcm2835_dmadev *od = dev_get_drvdata(dev);
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ void __iomem *chan_base = c->chan_base;
+
+ /* Check if DMA channel is busy */
+ if (readl(chan_base + BCM2835_DMA_ADDR))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops bcm2835_dma_pm_ops = {
+ LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+};
+
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
@@ -1029,10 +1050,11 @@ static void bcm2835_dma_remove(struct platform_device *pdev)
static struct platform_driver bcm2835_dma_driver = {
.probe = bcm2835_dma_probe,
- .remove_new = bcm2835_dma_remove,
+ .remove = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ .pm = pm_ptr(&bcm2835_dma_pm_ops),
},
};
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 0bbaa7620bdd..6c4d655ffe77 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -486,7 +486,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.probe = mpc52xx_bcom_probe,
- .remove_new = mpc52xx_bcom_remove,
+ .remove = mpc52xx_bcom_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mpc52xx_bcom_of_match,
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
new file mode 100644
index 000000000000..e900d6595617
--- /dev/null
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/llist.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define REG_DMA_CHANNEL_REMAP0 0x154
+#define REG_DMA_CHANNEL_REMAP1 0x158
+#define REG_DMA_INT_MUX 0x298
+
+#define DMAMUX_NCELLS 2
+#define MAX_DMA_MAPPING_ID 42
+#define MAX_DMA_CPU_ID 2
+#define MAX_DMA_CH_ID 7
+
+#define DMAMUX_INTMUX_REGISTER_LEN 4
+#define DMAMUX_NR_CH_PER_REGISTER 4
+#define DMAMUX_BIT_PER_CH 8
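+/* Note: the lower-case 'k' distinguishes this value mask from the DMAMUX_CH_MASK(chid) helper below */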
+#define DMAMUX_CH_MASk GENMASK(5, 0)
+#define DMAMUX_INT_BIT_PER_CPU 10
+#define DMAMUX_CH_UPDATE_BIT BIT(31)
+
+#define DMAMUX_CH_REGPOS(chid) \
+ ((chid) / DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REGOFF(chid) \
+ ((chid) % DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REG(chid) \
+ ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
+ REG_DMA_CHANNEL_REMAP0)
+#define DMAMUX_CH_SET(chid, val) \
+ (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
+ DMAMUX_CH_UPDATE_BIT)
+#define DMAMUX_CH_MASK(chid) \
+ DMAMUX_CH_SET(chid, DMAMUX_CH_MASk)
+
+#define DMAMUX_INT_BIT(chid, cpuid) \
+ BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
+#define DMAMUX_INTEN_BIT(cpuid) \
+ DMAMUX_INT_BIT(8, cpuid)
+#define DMAMUX_INT_CH_BIT(chid, cpuid) \
+ (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
+#define DMAMUX_INT_MASK(chid) \
+ (DMAMUX_INT_BIT(chid, 0) | \
+ DMAMUX_INT_BIT(chid, 1) | \
+ DMAMUX_INT_BIT(chid, 2))
+#define DMAMUX_INT_CH_MASK(chid, cpuid) \
+ (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
+
+struct cv1800_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *regmap;
+ spinlock_t lock;
+ struct llist_head free_maps;
+ struct llist_head reserve_maps;
+ DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID);
+};
+
+struct cv1800_dmamux_map {
+ struct llist_node node;
+ unsigned int channel;
+ unsigned int peripheral;
+ unsigned int cpu;
+};
+
+static void cv1800_dmamux_free(struct device *dev, void *route_data)
+{
+ struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct cv1800_dmamux_map *map = route_data;
+
+ guard(spinlock_irqsave)(&dmamux->lock);
+
+ regmap_update_bits(dmamux->regmap,
+ DMAMUX_CH_REG(map->channel),
+ DMAMUX_CH_MASK(map->channel),
+ DMAMUX_CH_UPDATE_BIT);
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(map->channel, map->cpu),
+ DMAMUX_INTEN_BIT(map->cpu));
+
+ dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
+ map->channel, map->peripheral, map->cpu);
+}
+
+static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct cv1800_dmamux_map *map;
+ struct llist_node *node;
+ unsigned long flags;
+ unsigned int chid, devid, cpuid;
+ int ret;
+
+ if (dma_spec->args_count != DMAMUX_NCELLS) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ devid = dma_spec->args[0];
+ cpuid = dma_spec->args[1];
+ dma_spec->args_count = 1;
+
+ if (devid > MAX_DMA_MAPPING_ID) {
+ dev_err(&pdev->dev, "invalid device id: %u\n", devid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cpuid > MAX_DMA_CPU_ID) {
+ dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+
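+ /*
+ * A peripheral that is already mapped must reuse its existing
+ * channel/cpu reservation; otherwise take a channel from the
+ * free list and reserve it.
+ */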
+ if (test_bit(devid, dmamux->mapped_peripherals)) {
+ llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
+ if (map->peripheral == devid && map->cpu == cpuid)
+ goto found;
+ }
+
+ ret = -EINVAL;
+ goto failed;
+ } else {
+ node = llist_del_first(&dmamux->free_maps);
+ if (!node) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ map = llist_entry(node, struct cv1800_dmamux_map, node);
+ llist_add(&map->node, &dmamux->reserve_maps);
+ set_bit(devid, dmamux->mapped_peripherals);
+ }
+
+found:
+ chid = map->channel;
+ map->peripheral = devid;
+ map->cpu = cpuid;
+
+ regmap_set_bits(dmamux->regmap,
+ DMAMUX_CH_REG(chid),
+ DMAMUX_CH_SET(chid, devid));
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(chid, cpuid),
+ DMAMUX_INT_CH_BIT(chid, cpuid));
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[0] = chid;
+
+ dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
+ chid, devid, cpuid);
+
+ return map;
+
+failed:
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ of_node_put(dma_spec->np);
+ dev_err(&pdev->dev, "errno %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static int cv1800_dmamux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mux_node = dev->of_node;
+ struct cv1800_dmamux_data *data;
+ struct cv1800_dmamux_map *tmp;
+ struct device *parent = dev->parent;
+ struct regmap *regmap = NULL;
+ unsigned int i;
+
+ if (!parent)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(parent->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ init_llist_head(&data->free_maps);
+ init_llist_head(&data->reserve_maps);
+
+ for (i = 0; i <= MAX_DMA_CH_ID; i++) {
+ tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ /* It is OK not to allocate every channel */
+ dev_warn(dev, "cannot allocate channel %u\n", i);
+ continue;
+ }
+
+ init_llist_node(&tmp->node);
+ tmp->channel = i;
+ llist_add(&tmp->node, &data->free_maps);
+ }
+
+ /* if no channel is allocated, the probe must fail */
+ if (llist_empty(&data->free_maps))
+ return -ENOMEM;
+
+ data->regmap = regmap;
+ data->dmarouter.dev = dev;
+ data->dmarouter.route_free = cv1800_dmamux_free;
+
+ platform_set_drvdata(pdev, data);
+
+ return of_dma_router_register(mux_node,
+ cv1800_dmamux_route_allocate,
+ &data->dmarouter);
+}
+
+static void cv1800_dmamux_remove(struct platform_device *pdev)
+{
+ of_dma_controller_free(pdev->dev.of_node);
+}
+
+static const struct of_device_id cv1800_dmamux_ids[] = {
+ { .compatible = "sophgo,cv1800b-dmamux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);
+
+static struct platform_driver cv1800_dmamux_driver = {
+ .probe = cv1800_dmamux_probe,
+ .remove = cv1800_dmamux_remove,
+ .driver = {
+ .name = "cv1800-dmamux",
+ .of_match_table = cv1800_dmamux_ids,
+ },
+};
+module_platform_driver(cv1800_dmamux_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 36943b0c6d60..5b06b0dc67ee 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/fpga/adi-axi-common.h>
#include <dt-bindings/dma/axi-dmac.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index c9cfa341db51..100057603fd4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1122,7 +1122,7 @@ MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
static struct platform_driver jz4780_dma_driver = {
.probe = jz4780_dma_probe,
- .remove_new = jz4780_dma_remove,
+ .remove = jz4780_dma_remove,
.driver = {
.name = "jz4780-dma",
.of_match_table = jz4780_dma_dt_match,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c1357d7f3dc6..ca13cd39330b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -40,6 +40,8 @@
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
@@ -812,15 +814,13 @@ static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
*/
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct dma_device *d, *_d;
struct dma_chan *chan = NULL;
- /* If device-tree is present get slave info from here */
- if (dev->of_node)
- chan = of_dma_request_slave_channel(dev->of_node, name);
-
- /* If device was enumerated by ACPI get slave info from here */
- if (has_acpi_companion(dev) && !chan)
+ if (is_of_node(fwnode))
+ chan = of_dma_request_slave_channel(to_of_node(fwnode), name);
+ else if (is_acpi_device_node(fwnode))
chan = acpi_dma_request_slave_chan_by_name(dev, name);
if (PTR_ERR(chan) == -EPROBE_DEFER)
@@ -854,8 +854,8 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
found:
#ifdef CONFIG_DEBUG_FS
- chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
- name);
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name);
+ /* Not a functional issue if this fails; users are expected to check the name before use */
#endif
chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
@@ -926,6 +926,36 @@ void dma_release_channel(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(dma_release_channel);
+static void dmaenginem_release_channel(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_dma_request_chan - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ struct dma_chan *chan = dma_request_chan(dev, name);
+ int ret = 0;
+
+ if (!IS_ERR(chan))
+ ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_dma_request_chan);
+
/**
* dmaengine_get - register interest in dma_channels
*/
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index fffafa86d964..b23536645ff7 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1676,7 +1676,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove_new = dw_remove,
+ .remove = dw_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = dw_dma_of_id_table,
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 68236247059d..8e5f7defa6b6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
@@ -23,18 +24,6 @@
#include "../virt-dma.h"
static inline
-struct device *dchan2dev(struct dma_chan *dchan)
-{
- return &dchan->dev->device;
-}
-
-static inline
-struct device *chan2dev(struct dw_edma_chan *chan)
-{
- return &chan->vc.chan.dev->device;
-}
-
-static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct dw_edma_desc, vd);
@@ -595,6 +584,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
return dw_edma_device_transfer(&xfer);
}
+static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
+ enum dmaengine_tx_result result)
+{
+ u32 residue = 0;
+ struct dw_edma_desc *desc;
+ struct dmaengine_result *res;
+
+ if (!vd->tx.callback_result)
+ return;
+
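+ /* The residue is whatever was allocated but not yet transferred */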
+ desc = vd2dw_edma_desc(vd);
+ if (desc)
+ residue = desc->alloc_sz - desc->xfer_sz;
+
+ res = &vd->tx_result;
+ res->result = result;
+ res->residue = residue;
+}
+
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
@@ -608,6 +616,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
if (!desc->chunks_alloc) {
+ dw_hdma_set_callback_result(vd,
+ DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -644,6 +654,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
+ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -746,7 +757,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ str_write_read(chan->dir == EDMA_DIR_WRITE),
chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
@@ -767,7 +778,8 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
+ str_write_read(chan->dir == EDMA_DIR_WRITE),
+ chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1c6043751dc9..3371e0a76d3c 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -136,7 +136,8 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
- map != EDMA_MF_HDMA_COMPAT)
+ map != EDMA_MF_HDMA_COMPAT &&
+ map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
@@ -160,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
- struct dw_edma_pcie_data vsec_data;
+ struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL);
+ if (!vsec_data)
+ return -ENOMEM;
+
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
@@ -173,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
- memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+ memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
/*
* Try to find a PCIe Vendor-Specific Extended Capability for the DMA;
* if one exists, use it to reconfigure the driver data.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+ dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
/* Mapping PCI BAR regions */
- mask = BIT(vsec_data.rg.bar);
- for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_wr[i].bar);
- mask |= BIT(vsec_data.dt_wr[i].bar);
+ mask = BIT(vsec_data->rg.bar);
+ for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_wr[i].bar);
+ mask |= BIT(vsec_data->dt_wr[i].bar);
}
- for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_rd[i].bar);
- mask |= BIT(vsec_data.dt_rd[i].bar);
+ for (i = 0; i < vsec_data->rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_rd[i].bar);
+ mask |= BIT(vsec_data->dt_rd[i].bar);
}
err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
@@ -212,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -223,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->mf = vsec_data.mf;
+ chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
- chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
- chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
+ chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
- chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar];
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_wr[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -262,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
for (i = 0; i < chip->ll_rd_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_rd[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -291,35 +296,37 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_COMPAT)
pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_NATIVE)
+ pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf);
else
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
- vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
+ vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz,
chip->reg_base);
for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ i, vsec_data->ll_wr[i].bar,
+ vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ i, vsec_data->dt_wr[i].bar,
+ vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ i, vsec_data->ll_rd[i].bar,
+ vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ i, vsec_data->dt_rd[i].bar,
+ vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
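
The dw-edma-pcie hunks above replace a large on-stack struct dw_edma_pcie_data with a kmalloc'd copy, keeping the probe stack frame small. A minimal sketch of the pattern, with hypothetical names (big_cfg, example_probe) and assuming the buffer is released once its contents have been consumed:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-in for a structure too large to keep on the stack. */
struct big_cfg {
	u32 regs[256];
};

static int example_probe(const struct big_cfg *template)
{
	struct big_cfg *cfg;

	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	/* Work on a private heap copy instead of a stack variable. */
	memcpy(cfg, template, sizeof(*cfg));

	/* ... dereference cfg-> where the stack copy used cfg. ... */

	kfree(cfg);
	return 0;
}
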
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
index c510c109d2c3..b6452fffa657 100644
--- a/drivers/dma/dw/acpi.c
+++ b/drivers/dma/dw/acpi.c
@@ -8,13 +8,15 @@
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
- .m_master = 0,
- .p_master = 1,
+ .m_master = data->m_master,
+ .p_master = data->p_master,
};
return dw_dma_filter(chan, &slave);
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 563ce73488db..f1bd06a20cd6 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
+ u8 m_master;
+ u8 p_master;
};
static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
+ .m_master = 0,
+ .p_master = 1,
};
static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
.pdata = &xbar_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
#endif /* _DMA_DW_INTERNAL_H */
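
The acpi.c and internal.h hunks above stop hard-coding the memory/peripheral master numbers in the ACPI filter and instead carry them in each variant's chip pdata. A reduced sketch of that layout, with hypothetical names (chip_pdata, example_fill):

#include <linux/device.h>
#include <linux/types.h>

/* Hypothetical reduced form of the per-variant parameter table. */
struct chip_pdata {
	u8 m_master;	/* master interface for memory access */
	u8 p_master;	/* master interface for peripheral access */
};

static const struct chip_pdata dw_pdata   = { .m_master = 0, .p_master = 1 };
static const struct chip_pdata idma_pdata = { .m_master = 0, .p_master = 0 };

/* A consumer picks the values up via driver data set at probe time. */
static void example_fill(struct device *dev, u8 *m, u8 *p)
{
	const struct chip_pdata *data = dev_get_drvdata(dev);

	*m = data->m_master;
	*p = data->p_master;
}
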
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index ad2d4d012cf7..a3aae3d1c093 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
if (ret)
return ret;
- dw_dma_acpi_controller_register(chip->dw);
-
pci_set_drvdata(pdev, data);
+ dw_dma_acpi_controller_register(chip->dw);
+
return 0;
}
@@ -76,8 +76,6 @@ static void dw_pci_remove(struct pci_dev *pdev)
dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
}
-#ifdef CONFIG_PM_SLEEP
-
static int dw_pci_suspend_late(struct device *dev)
{
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
@@ -94,10 +92,8 @@ static int dw_pci_resume_early(struct device *dev)
return do_dw_dma_enable(chip);
};
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops dw_pci_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
};
static const struct pci_device_id dw_pci_id_table[] = {
@@ -136,7 +132,7 @@ static struct pci_driver dw_pci_driver = {
.probe = dw_pci_probe,
.remove = dw_pci_remove,
.driver = {
- .pm = &dw_pci_dev_pm_ops,
+ .pm = pm_sleep_ptr(&dw_pci_dev_pm_ops),
},
};
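
The pci.c hunks above drop the #ifdef CONFIG_PM_SLEEP guards in favor of LATE_SYSTEM_SLEEP_PM_OPS() plus pm_sleep_ptr(): the callbacks are always compiled (so they stay build-tested), and the compiler discards them when sleep support is configured out. A minimal sketch, with hypothetical names:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int example_suspend_late(struct device *dev)
{
	return 0;
}

static int example_resume_early(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	LATE_SYSTEM_SLEEP_PM_OPS(example_suspend_late, example_resume_early)
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* NULL when CONFIG_PM_SLEEP is off; the ops get GC'd. */
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};
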
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 47c58ad468cb..cee56cd31a61 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -157,8 +157,6 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
-#ifdef CONFIG_PM_SLEEP
-
static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
@@ -183,19 +181,17 @@ static int dw_resume_early(struct device *dev)
return do_dw_dma_enable(chip);
}
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops dw_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove_new = dw_remove,
+ .remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
.name = DRV_NAME,
- .pm = &dw_dev_pm_ops,
+ .pm = pm_sleep_ptr(&dw_dev_pm_ops),
.of_match_table = of_match_ptr(dw_dma_of_id_table),
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
},
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 4fb8508419db..deadf135681b 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
u32 mask;
int ret;
- if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
- return ERR_PTR(-EINVAL);
+ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
+ ret = -EINVAL;
+ goto put_device;
+ }
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return ERR_PTR(-ENOMEM);
+ if (!map) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
chan = dma_spec->args[0];
map->req_idx = dma_spec->args[4];
@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (ret)
goto clear_bitmap;
+ put_device(&pdev->dev);
return map;
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
free_map:
kfree(map);
+put_device:
+ put_device(&pdev->dev);
return ERR_PTR(ret);
}
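
The rzn1-dmamux hunks above fix a device reference leak by routing every exit, including the success path, through put_device(). A sketch of the unwind pattern, with hypothetical names and assuming pdev carries a reference taken earlier in the function:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void *example_alloc(struct platform_device *pdev, int arg)
{
	void *map;
	int ret;

	if (arg < 0) {
		ret = -EINVAL;
		goto put_device;
	}

	map = kzalloc(16, GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto put_device;
	}

	put_device(&pdev->dev);	/* the success path drops it too */
	return map;

put_device:
	put_device(&pdev->dev);
	return ERR_PTR(ret);
}
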
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 6b98a23e3332..e424bb5c40e7 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -929,8 +929,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
/* Sanity check the channel parameters */
if (!edmac->edma->m2m) {
- if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 ||
- edmac->dma_cfg.port > EP93XX_DMA_IRDA)
+ if (edmac->dma_cfg.port > EP93XX_DMA_IRDA)
return -EINVAL;
if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
return -EINVAL;
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index b4323d243d6d..4be81db24a19 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (val & MAKE_UMASK64(width)) << lsoffset;
-}
-
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b7f15ab96855..4976d7dde080 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
}
val = edma_readl_chreg(fsl_chan, ch_csr);
- val |= EDMA_V3_CH_CSR_ERQ;
+ val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
@@ -480,8 +480,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
+ u32 burst = 0;
u16 csr = 0;
- u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -496,16 +496,30 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
- if (fsl_chan->is_multi_fifo) {
- /* set mloff to support multiple fifo */
- burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_maxburst : cfg->dst_maxburst;
- nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
- /* enable DMLOE/SMLOE */
- if (cfg->direction == DMA_MEM_TO_DEV) {
+ /*
+ * If either multi_fifo or a port window size is configured, use a
+ * minor loop offset: bits 29-10 carry the address offset, while
+ * bits 9-0 tell the DMA how much data to read from addr.
+ * If neither is set, the major loop reads nbytes (29 bits) straight
+ * from addr.
+ */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->dst_maxburst * 4;
+ if (cfg->dst_port_window_size)
+ burst = cfg->dst_port_window_size * cfg->dst_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
- } else {
+ }
+ } else {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->src_maxburst * 4;
+ if (cfg->src_port_window_size)
+ burst = cfg->src_port_window_size * cfg->src_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
}
@@ -623,11 +637,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ if (fsl_chan->cfg.dst_port_window_size)
+ doff = fsl_chan->cfg.dst_addr_width;
} else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ if (fsl_chan->cfg.src_port_window_size)
+ soff = fsl_chan->cfg.src_addr_width;
} else {
/* DMA_DEV_TO_DEV */
src_addr = fsl_chan->cfg.src_addr;
@@ -803,7 +821,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- int ret;
+ int ret = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@@ -813,17 +831,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
- if (fsl_chan->txirq) {
+ if (fsl_chan->txirq)
ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
- if (ret) {
- dma_pool_destroy(fsl_chan->tcd_pool);
- return ret;
- }
- }
+ if (ret)
+ goto err_txirq;
+
+ if (fsl_chan->errirq > 0)
+ ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
+ fsl_chan->errirq_name, fsl_chan);
+
+ if (ret)
+ goto err_errirq;
return 0;
+
+err_errirq:
+ if (fsl_chan->txirq)
+ free_irq(fsl_chan->txirq, fsl_chan);
+err_txirq:
+ dma_pool_destroy(fsl_chan->tcd_pool);
+
+ return ret;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
@@ -844,6 +874,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
+ if (fsl_chan->errirq)
+ free_irq(fsl_chan->errirq, fsl_chan);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
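
The fsl-edma-common hunks above compute one burst value for both the multi-FIFO and the port-window case and fold it into the TCD NBYTES field as a negative minor-loop offset. A sketch of that computation, using a hypothetical EX_NBYTES_MLOFF() stand-in for the driver's private EDMA_V3_TCD_NBYTES_MLOFF() macro:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's NBYTES minor-loop-offset field. */
#define EX_NBYTES_MLOFF_MASK	GENMASK(29, 10)
#define EX_NBYTES_MLOFF(x)	FIELD_PREP(EX_NBYTES_MLOFF_MASK, (x))

static u32 example_nbytes(bool multi_fifo, u32 maxburst,
			  u32 window_size, u32 addr_width, u32 nbytes)
{
	u32 burst = 0;

	if (multi_fifo)
		burst = maxburst * 4;	/* 4-byte stride per FIFO */
	if (window_size)
		burst = window_size * addr_width;

	/* A negative offset snaps the address back after each minor loop. */
	if (burst)
		nbytes |= EX_NBYTES_MLOFF(-burst);

	return nbytes;
}
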
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index ce37e1ee9c46..205a96489094 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -68,6 +68,20 @@
#define EDMA_V3_CH_CSR_EEI BIT(2)
#define EDMA_V3_CH_CSR_DONE BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+#define EDMA_V3_CH_ES_ERR BIT(31)
+#define EDMA_V3_MP_ES_VLD BIT(31)
+
+#define EDMA_V3_CH_ERR_DBE BIT(0)
+#define EDMA_V3_CH_ERR_SBE BIT(1)
+#define EDMA_V3_CH_ERR_SGE BIT(2)
+#define EDMA_V3_CH_ERR_NCE BIT(3)
+#define EDMA_V3_CH_ERR_DOE BIT(4)
+#define EDMA_V3_CH_ERR_DAE BIT(5)
+#define EDMA_V3_CH_ERR_SOE BIT(6)
+#define EDMA_V3_CH_ERR_SAE BIT(7)
+#define EDMA_V3_CH_ERR_ECX BIT(8)
+#define EDMA_V3_CH_ERR_UCE BIT(9)
+#define EDMA_V3_CH_ERR BIT(31)
enum fsl_edma_pm_state {
RUNNING = 0,
@@ -160,18 +174,22 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
+ char errirq_name[36];
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
struct work_struct issue_worker;
struct platform_device *pdev;
struct device *pd_dev;
+ struct device_link *pd_dev_link;
u32 srcid;
struct clk *clk;
int priority;
int hw_chanid;
int txirq;
+ int errirq;
irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ irqreturn_t (*errirq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@@ -205,6 +223,9 @@ struct fsl_edma_desc {
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
+/* All channels' ERR IRQs share one IRQ line */
+#define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16)
+
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
@@ -240,6 +261,7 @@ struct fsl_edma_engine {
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
int txirq;
+ int txirq_16_31;
int errirq;
bool big_endian;
struct edma_regs regs;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index f9f1eda79254..97583c7d51a2 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -3,10 +3,11 @@
* drivers/dma/fsl-edma.c
*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2024 NXP
*
* Driver for the Freescale eDMA engine with flexible channel multiplexing
* capability for DMA request sources. The eDMA block can be found on some
- * Vybrid and Layerscape SoCs.
+ * Vybrid, Layerscape and S32G SoCs.
*/
#include <dt-bindings/dma/fsl-edma.h>
@@ -49,6 +50,83 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
+{
+ unsigned int ch_err;
+ u32 val;
+
+ scoped_guard(spinlock, &fsl_chan->vchan.lock) {
+ ch_err = edma_readl_chreg(fsl_chan, ch_es);
+ if (!(ch_err & EDMA_V3_CH_ERR))
+ return;
+
+ edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+ }
+
+ /* Ignore this interrupt since the channel has already been disabled */
+ if (!fsl_chan->edesc)
+ return;
+
+ if (ch_err & EDMA_V3_CH_ERR_DBE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SBE)
+ dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SGE)
+ dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_NCE)
+ dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DOE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DAE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SOE)
+ dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SAE)
+ dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_ECX)
+ dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_UCE)
+ dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
+
+ fsl_chan->status = DMA_ERROR;
+}
+
+static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+
+ fsl_edma3_err_check(fsl_chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int ch;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (fsl_edma->chan_masked & BIT(ch))
+ continue;
+
+ fsl_edma3_err_check(&fsl_edma->chans[ch]);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
@@ -56,7 +134,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
- return IRQ_HANDLED;
+ return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);
@@ -72,6 +150,60 @@ static irqreturn_t fsl_edma2_tx_handler(int irq, void *devi_id)
return fsl_edma_tx_handler(irq, fsl_chan->edma);
}
+static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
+ u8 start, u8 end)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct fsl_edma_chan *chan;
+ int i;
+
+ end = min(end, fsl_edma->n_chans);
+
+ for (i = start; i < end; i++) {
+ chan = &fsl_edma->chans[i];
+
+ fsl_edma3_tx_handler(irq, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
+}
+
+static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
+}
+
+static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct edma_regs *regs = &fsl_edma->regs;
+ unsigned int err, ch, ch_es;
+ struct fsl_edma_chan *chan;
+
+ err = edma_readl(fsl_edma, regs->es);
+ if (!(err & EDMA_V3_MP_ES_VLD))
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ chan = &fsl_edma->chans[ch];
+
+ ch_es = edma_readl_chreg(chan, ch_es);
+ if (!(ch_es & EDMA_V3_CH_ES_ERR))
+ continue;
+
+ edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
+ fsl_edma_disable_request(chan);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -109,7 +241,7 @@ static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
fsl_chan = &fsl_edma->chans[i];
if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
- dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!");
+ dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!\n");
return true;
}
}
@@ -254,7 +386,8 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
- int i;
+ char *errirq_name;
+ int i, ret;
for (i = 0; i < fsl_edma->n_chans; i++) {
@@ -269,8 +402,72 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return -EINVAL;
fsl_chan->irq_handler = fsl_edma3_tx_handler;
+
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
+ fsl_chan->errirq = fsl_chan->txirq;
+ fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
+ }
+ }
+
+ /* All channel error interrupts share one IRQ number */
+ if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
+ /* The last IRQ is the error IRQ */
+ fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
+ if (fsl_edma->errirq < 0)
+ return 0; /* DT omits the error IRQ; treat as the no-error-IRQ case */
+
+ errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
+ dev_name(&pdev->dev));
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
+ 0, errirq_name, fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
+ }
+
+ return 0;
+}
+
+static int fsl_edma3_or_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
+ if (fsl_edma->txirq < 0)
+ return fsl_edma->txirq;
+
+ fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
+ if (fsl_edma->txirq_16_31 < 0)
+ return fsl_edma->txirq_16_31;
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
+ if (fsl_edma->errirq < 0)
+ return fsl_edma->errirq;
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx0_15 IRQ.\n");
+
+ if (fsl_edma->n_chans > 16) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
+ fsl_edma3_tx_16_31_handler, 0,
+ "eDMA tx16_31", fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx16_31 IRQ.\n");
}
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma3_or_err_handler, 0, "eDMA err",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA err IRQ.\n");
+
return 0;
}
@@ -303,6 +500,7 @@ fsl_edma2_irq_init(struct platform_device *pdev,
/* The last IRQ is for eDMA err */
if (i == count - 1) {
+ fsl_edma->errirq = irq;
ret = devm_request_irq(&pdev->dev, irq,
fsl_edma_err_handler,
0, "eDMA2-ERR", fsl_edma);
@@ -322,10 +520,13 @@ static void fsl_edma_irq_exit(
struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
if (fsl_edma->txirq == fsl_edma->errirq) {
- devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ if (fsl_edma->txirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
} else {
- devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
- devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+ if (fsl_edma->txirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ if (fsl_edma->errirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
}
}
@@ -362,7 +563,8 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@@ -379,14 +581,15 @@ static struct fsl_edma_drvdata imx8ulp_data = {
};
static struct fsl_edma_drvdata imx93_data3 = {
- .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
@@ -396,7 +599,7 @@ static struct fsl_edma_drvdata imx93_data4 = {
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
- FSL_EDMA_DRV_TCD64,
+ FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
@@ -404,6 +607,14 @@ static struct fsl_edma_drvdata imx95_data5 = {
.setup_irq = fsl_edma3_irq_init,
};
+static const struct fsl_edma_drvdata s32g2_data = {
+ .dmamuxs = DMAMUX_NR,
+ .chreg_space_sz = EDMA_TCD,
+ .chreg_off = 0x4000,
+ .flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
+ .setup_irq = fsl_edma3_or_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
@@ -413,14 +624,38 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
+ { .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+ fsl_chan = &fsl_edma->chans[i];
+ if (fsl_chan->pd_dev_link)
+ device_link_del(fsl_chan->pd_dev_link);
+ if (fsl_chan->pd_dev) {
+ dev_pm_domain_detach(fsl_chan->pd_dev, false);
+ pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_suspended(fsl_chan->pd_dev);
+ }
+ }
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+ fsl_edma3_detach_pd(data);
+}
+
static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
struct fsl_edma_chan *fsl_chan;
- struct device_link *link;
struct device *pd_chan;
struct device *dev;
int i;
@@ -436,15 +671,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
pd_chan = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR_OR_NULL(pd_chan)) {
dev_err(dev, "Failed attach pd %d\n", i);
- return -EINVAL;
+ goto detach;
}
- link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (!link) {
+ if (!fsl_chan->pd_dev_link) {
dev_err(dev, "Failed to add device_link to %d\n", i);
- return -EINVAL;
+ dev_pm_domain_detach(pd_chan, false);
+ goto detach;
}
fsl_chan->pd_dev = pd_chan;
@@ -455,6 +691,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
}
return 0;
+
+detach:
+ fsl_edma3_detach_pd(fsl_edma);
+ return -EINVAL;
}
static int fsl_edma_probe(struct platform_device *pdev)
@@ -485,6 +725,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->errirq = -EINVAL;
+ fsl_edma->txirq = -EINVAL;
fsl_edma->drvdata = drvdata;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@@ -517,10 +759,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- /* eDMAv3 mux register move to TCD area if ch_mux exist */
- if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
- break;
-
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -544,6 +782,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = fsl_edma3_attach_pd(pdev, fsl_edma);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+ if (ret)
+ return ret;
}
if (drvdata->flags & FSL_EDMA_DRV_TCD64)
@@ -560,6 +801,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
+ snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
+ "%s-CH%02d-err", dev_name(&pdev->dev), i);
+
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;
@@ -646,7 +890,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
ret = of_dma_controller_register(np,
- drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
@@ -668,9 +912,9 @@ static void fsl_edma_remove(struct platform_device *pdev)
struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
fsl_edma_irq_exit(pdev, fsl_edma);
- fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
+ fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
}
@@ -688,7 +932,7 @@ static int fsl_edma_suspend_late(struct device *dev)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
- dev_warn(dev, "WARN: There is non-idle channel.");
+ dev_warn(dev, "WARN: There is non-idle channel.\n");
fsl_edma_disable_request(fsl_chan);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
@@ -740,7 +984,7 @@ static struct platform_driver fsl_edma_driver = {
.pm = &fsl_edma_pm_ops,
},
.probe = fsl_edma_probe,
- .remove_new = fsl_edma_remove,
+ .remove = fsl_edma_remove,
};
static int __init fsl_edma_init(void)
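
Among the fsl-edma-main changes above, fsl_edma3_tx_handler now returns IRQ_NONE when its channel has nothing pending, which is the required contract once several devices share an IRQ line. A minimal sketch of that convention, with hypothetical names:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/types.h>

struct example_dev {
	u32 status;
};

static bool example_irq_pending(struct example_dev *ed)
{
	return ed->status & BIT(31);	/* hypothetical "valid" bit */
}

static irqreturn_t example_shared_handler(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	if (!example_irq_pending(ed))
		return IRQ_NONE;	/* not ours; let other handlers run */

	ed->status = 0;			/* hypothetical ack */
	return IRQ_HANDLED;
}
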
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 5005e138fc23..21e13f1207cb 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
+ * @__reserved2: Reserved field.
+ * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
+ * others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/
@@ -1288,7 +1291,7 @@ static struct platform_driver fsl_qdma_driver = {
.of_match_table = fsl_qdma_dt_ids,
},
.probe = fsl_qdma_probe,
- .remove_new = fsl_qdma_remove,
+ .remove = fsl_qdma_remove,
};
module_platform_driver(fsl_qdma_driver);
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 014ff523d5ec..6aa97e258a55 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -886,7 +886,7 @@ static struct platform_driver fsl_re_driver = {
.of_match_table = fsl_re_ids,
},
.probe = fsl_re_probe,
- .remove_new = fsl_re_remove,
+ .remove = fsl_re_remove,
};
module_platform_driver(fsl_re_driver);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 18a6c4bf6275..9b126a260267 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1226,6 +1226,8 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
+ /* The number of DMA address bits supported by this device. */
+ fdev->addr_bits = (long)device_get_match_data(fdev->dev);
/* ioremap the registers for use */
fdev->regs = of_iomap(op->dev.of_node, 0);
@@ -1254,7 +1256,7 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits));
platform_set_drvdata(op, fdev);
@@ -1387,10 +1389,20 @@ static const struct dev_pm_ops fsldma_pm_ops = {
};
#endif
+/* The .data field carries the supported DMA address width in bits. */
static const struct of_device_id fsldma_of_ids[] = {
- { .compatible = "fsl,elo3-dma", },
- { .compatible = "fsl,eloplus-dma", },
- { .compatible = "fsl,elo-dma", },
+ {
+ .compatible = "fsl,elo3-dma",
+ .data = (void *)40,
+ },
+ {
+ .compatible = "fsl,eloplus-dma",
+ .data = (void *)36,
+ },
+ {
+ .compatible = "fsl,elo-dma",
+ .data = (void *)32,
+ },
{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);
@@ -1404,7 +1416,7 @@ static struct platform_driver fsldma_of_driver = {
#endif
},
.probe = fsldma_of_probe,
- .remove_new = fsldma_of_remove,
+ .remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 308bed0a560a..d7b7a3138b85 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -124,6 +124,7 @@ struct fsldma_device {
struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
+ int addr_bits; /* DMA addressing bits supported */
};
/* Define macros for fsldma_chan->feature property */
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 3c648308a54a..d147353d47ab 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -693,7 +693,7 @@ static const struct dev_pm_ops idma64_dev_pm_ops = {
static struct platform_driver idma64_platform_driver = {
.probe = idma64_platform_probe,
- .remove_new = idma64_platform_remove,
+ .remove = idma64_platform_remove,
.driver = {
.name = LPSS_IDMA64_DRIVER_NAME,
.pm = &idma64_dev_pm_ops,
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 2b4a0d406e1e..9ff9d7b87b64 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,4 @@
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
idxd_bus-y := bus.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 57f1bf2ab20b..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
* global to avoid conflict file names.
*/
static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);
/*
* ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
struct idxd_device *idxd = wq->idxd;
int rc;
- mutex_lock(&ida_lock);
ida_free(&file_ida, ctx->id);
- mutex_unlock(&ida_lock);
/* Wait for in-flight operations to complete. */
if (wq_shared(wq)) {
@@ -225,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev, *fdev;
int rc = 0;
- struct iommu_sva *sva;
+ struct iommu_sva *sva = NULL;
unsigned int pasid;
struct idxd_cdev *idxd_cdev;
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
}
idxd_cdev = wq->idxd_cdev;
- mutex_lock(&ida_lock);
ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
- mutex_unlock(&ida_lock);
if (ctx->id < 0) {
dev_warn(dev, "ida alloc failure\n");
goto failed_ida;
@@ -322,7 +317,7 @@ failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
- if (device_user_pasid_enabled(idxd))
+ if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
@@ -354,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- drain_workqueue(wq->wq);
+ if (wq->wq)
+ drain_workqueue(wq->wq);
+
mutex_unlock(&evl->lock);
}
@@ -412,6 +409,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -444,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
* DSA devices are capable of indirect ("batch") command submission.
* On devices where direct user submissions are not safe, we cannot
* allow this since there is no good way for us to verify these
- * indirect commands.
+ * indirect commands. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
- !wq->idxd->user_submission_safe)
+ wq->idxd->hw.version == DEVICE_VERSION_1 &&
+ !wq->idxd->user_submission_safe)
return -EINVAL;
/*
* As per the programming specification, the completion address must be
@@ -478,6 +480,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@@ -498,6 +503,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
+ if (current->mm != ctx->mm)
+ return POLLNVAL;
+
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
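
The cdev.c hunks above reject mmap, write, and poll from any task whose mm differs from the opener's, closing the hole where a descriptor passed to another process could drive the device. A sketch of the check, with a hypothetical context struct assumed to record the opener's mm at open() time:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>

struct example_ctx {
	struct mm_struct *mm;	/* mm of the opener, saved at open() */
};

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct example_ctx *ctx = filp->private_data;

	/* Reject callers from a different process image. */
	if (current->mm != ctx->mm)
		return -EPERM;

	/* ... normal mapping path ... */
	return 0;
}
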
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index a4adb0c17995..eff9943f1a42 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -103,4 +103,4 @@ struct idxd_device_driver dsa_drv = {
};
module_idxd_driver(dsa_drv);
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
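
The idxd hunks above follow the kernel-wide switch of symbol namespaces from bare identifiers to string literals, which is also why the Makefile define gains the extra quoting. A minimal sketch of the matched exporter/importer pair:

#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
/* Exporter: the namespace is now a C string literal. */
EXPORT_SYMBOL_NS_GPL(example_helper, "IDXD");

/* Importer: must spell the identical string. */
MODULE_IMPORT_NS("IDXD");
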
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
index c607ae8dd12c..2bbbcd02a0da 100644
--- a/drivers/dma/idxd/defaults.c
+++ b/drivers/dma/idxd/defaults.c
@@ -36,12 +36,10 @@ int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
group->num_wqs++;
/* set name to "iaa_crypto" */
- memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+ strscpy_pad(wq->name, "iaa_crypto");
/* set driver_name to "crypto" */
- memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
- strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+ strscpy_pad(wq->driver_name, "crypto");
engine = idxd->engines[0];
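
The defaults.c hunks above collapse each memset() + strscpy() pair into strscpy_pad(), which copies, NUL-terminates, and zero-fills the rest of the destination; with an array destination, recent kernels deduce the size automatically. A minimal sketch:

#include <linux/string.h>

static void example_set_name(void)
{
	char name[32];

	/*
	 * Copies "iaa_crypto", NUL-terminates it, and zero-fills the
	 * remaining bytes; the size comes from the array type.
	 */
	strscpy_pad(name, "iaa_crypto");
}
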
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index c41ef195eeb9..5cf419fe6b46 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,7 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
free_hw_descs(wq);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, "IDXD");
void idxd_wq_free_resources(struct idxd_wq *wq)
{
@@ -175,7 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
sbitmap_queue_free(&wq->sbq);
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
int idxd_wq_enable(struct idxd_wq *wq)
{
@@ -407,7 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
reinit_completion(&wq->wq_resurrect);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, "IDXD");
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -417,7 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
-EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, "IDXD");
void idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -425,7 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
__idxd_wq_quiesce(wq);
mutex_unlock(&wq->wq_lock);
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, "IDXD");
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
@@ -1494,7 +1494,7 @@ err_map_portal:
err:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, "IDXD");
void idxd_drv_disable_wq(struct idxd_wq *wq)
{
@@ -1516,7 +1516,7 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d84e21daa991..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@
#define IDXD_DRIVER_VERSION "1.00"
-extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_wq;
@@ -171,7 +170,6 @@ struct idxd_cdev {
#define DRIVER_NAME_SIZE 128
-#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
@@ -374,6 +372,17 @@ struct idxd_device {
struct dentry *dbgfs_evl_file;
bool user_submission_safe;
+
+ struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+ struct idxd_device saved_idxd;
+ struct idxd_evl saved_evl;
+ struct idxd_engine **saved_engines;
+ struct idxd_wq **saved_wqs;
+ struct idxd_group **saved_groups;
+ unsigned long *saved_wq_enable_map;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +734,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
&desc->txd, &status);
}
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +749,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 234c1c658ec7..2acc34b3daff 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -25,7 +25,7 @@ MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
static bool sva = true;
module_param(sva, bool, 0644);
@@ -78,6 +78,10 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA on DMR platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA PTL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA WCL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -153,6 +157,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+ }
+ bitmap_free(idxd->wq_enable_map);
+ kfree(idxd->wqs);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -167,29 +190,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
- kfree(idxd->wqs);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_wqs;
}
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(wq_confdev(wq));
+ device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
put_device(conf_dev);
- goto err;
+ kfree(wq);
+ goto err_unwind;
}
mutex_init(&wq->wq_lock);
@@ -202,16 +226,19 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
+ kfree(wq->wqcfg);
put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -222,15 +249,39 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
- err:
+err_unwind:
while (--i >= 0) {
wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
+ kfree(wq);
}
+ bitmap_free(idxd->wq_enable_map);
+
+err_free_wqs:
+ kfree(idxd->wqs);
+
return rc;
}
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ kfree(engine);
+ }
+ kfree(idxd->engines);
+}
+
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@@ -261,6 +312,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(engine);
goto err;
}
@@ -274,10 +326,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
+ kfree(engine);
}
+ kfree(idxd->engines);
+
return rc;
}
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ kfree(group);
+ }
+ kfree(idxd->groups);
+}
+
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -308,6 +376,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(group);
goto err;
}
@@ -332,20 +401,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
+ kfree(group);
}
+ kfree(idxd->groups);
+
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
- int i;
-
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_groups(idxd);
+ idxd_clean_engines(idxd);
+ idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}
@@ -388,7 +455,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc, i;
+ int rc;
init_waitqueue_head(&idxd->cmd_waitq);
@@ -419,14 +486,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
+ idxd_clean_groups(idxd);
err_group:
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
+ idxd_clean_engines(idxd);
err_engine:
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@@ -526,6 +590,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
+static void idxd_free(struct idxd_device *idxd)
+{
+ if (!idxd)
+ return;
+
+ put_device(idxd_confdev(idxd));
+ bitmap_free(idxd->opcap_bmap);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@@ -543,28 +618,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
- return NULL;
+ goto err_ida;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
- if (!idxd->opcap_bmap) {
- ida_free(&idxd_ida, idxd->id);
- return NULL;
- }
+ if (!idxd->opcap_bmap)
+ goto err_opcap;
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
- if (rc < 0) {
- put_device(conf_dev);
- return NULL;
- }
+ if (rc < 0)
+ goto err_name;
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
+
+err_name:
+ put_device(conf_dev);
+ bitmap_free(idxd->opcap_bmap);
+err_opcap:
+ ida_free(&idxd_ida, idxd->id);
+err_ida:
+ kfree(idxd);
+
+ return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -624,27 +705,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -659,17 +719,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -707,8 +763,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -719,71 +773,464 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
{
- struct device *dev = &pdev->dev;
- struct idxd_device *idxd;
- struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev)
+ err = device_driver_attach(drv, dev);
+
+ put_device(dev);
+
+ return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver == drv)
+ device_release_driver(dev);
+
+ put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count) \
+ do { \
+ int i; \
+ \
+ for (i = 0; i < (count); i++) \
+ kfree(saved_configs[i]); \
+ } while (0)
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+ struct idxd_engine **saved_engines,
+ struct idxd_wq **saved_wqs,
+ struct idxd_device *idxd)
+{
+ if (saved_groups)
+ idxd_free_saved_configs(saved_groups, idxd->max_groups);
+ if (saved_engines)
+ idxd_free_saved_configs(saved_engines, idxd->max_engines);
+ if (saved_wqs)
+ idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+ if (idxd->evl) {
+ memcpy(&idxd_saved->saved_evl, idxd->evl,
+ sizeof(struct idxd_evl));
+ }
+
+ struct idxd_group **saved_groups __free(kfree) =
+ kcalloc_node(idxd->max_groups,
+ sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group __free(kfree) =
+ kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+ dev_to_node(dev));
+
+ if (!saved_group) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+ saved_groups[i] = no_free_ptr(saved_group);
+ }
+
+ struct idxd_engine **saved_engines =
+ kcalloc_node(idxd->max_engines,
+ sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_engines) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine __free(kfree) =
+ kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_engine) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+ saved_engines[i] = no_free_ptr(saved_engine);
+ }
+
+ unsigned long *saved_wq_enable_map __free(bitmap) =
+ bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_wq_enable_map) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+ struct idxd_wq **saved_wqs __free(kfree) =
+ kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_wqs) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq __free(kfree) =
+ kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+ dev_to_node(dev));
+ struct idxd_wq *wq;
+
+ if (!saved_wq) {
+ /* Free saved groups, engines, and wqs */
+ idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ if (!test_bit(i, saved_wq_enable_map))
+ continue;
+
+ wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
+ memcpy(saved_wq, wq, sizeof(*saved_wq));
+ saved_wqs[i] = no_free_ptr(saved_wq);
+ mutex_unlock(&wq->wq_lock);
+ }
+
+ /* Save configurations */
+ idxd_saved->saved_groups = no_free_ptr(saved_groups);
+ idxd_saved->saved_engines = no_free_ptr(saved_engines);
+ idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+ idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+ return 0;
+}
+
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+ int i;
+
+ idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+ idxd->evl->size = saved_evl->size;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group, *group;
+
+ saved_group = idxd_saved->saved_groups[i];
+ group = idxd->groups[i];
+
+ group->rdbufs_allowed = saved_group->rdbufs_allowed;
+ group->rdbufs_reserved = saved_group->rdbufs_reserved;
+ group->tc_a = saved_group->tc_a;
+ group->tc_b = saved_group->tc_b;
+ group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+ kfree(saved_group);
+ }
+ kfree(idxd_saved->saved_groups);
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine, *engine;
+
+ saved_engine = idxd_saved->saved_engines[i];
+ engine = idxd->engines[i];
+
+ engine->group = saved_engine->group;
+
+ kfree(saved_engine);
+ }
+ kfree(idxd_saved->saved_engines);
+
+ bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+ idxd->max_wqs);
+ bitmap_free(idxd_saved->saved_wq_enable_map);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq, *wq;
+ size_t len;
+
+ if (!test_bit(i, idxd->wq_enable_map))
+ continue;
+
+ saved_wq = idxd_saved->saved_wqs[i];
+ wq = idxd->wqs[i];
+
+ mutex_lock(&wq->wq_lock);
+
+ wq->group = saved_wq->group;
+ wq->flags = saved_wq->flags;
+ wq->threshold = saved_wq->threshold;
+ wq->size = saved_wq->size;
+ wq->priority = saved_wq->priority;
+ wq->type = saved_wq->type;
+ len = strlen(saved_wq->name) + 1;
+ strscpy(wq->name, saved_wq->name, len);
+ wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+ wq->max_batch_size = saved_wq->max_batch_size;
+ wq->enqcmds_retries = saved_wq->enqcmds_retries;
+ wq->descs = saved_wq->descs;
+ wq->idxd_chan = saved_wq->idxd_chan;
+ len = strlen(saved_wq->driver_name) + 1;
+ strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+ mutex_unlock(&wq->wq_lock);
+
+ kfree(saved_wq);
+ }
+
+ kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct device *dev = &idxd->pdev->dev;
+ const char *idxd_name;
int rc;
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
+ idxd_name = dev_name(idxd_confdev(idxd));
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, data);
- if (!idxd) {
- rc = -ENOMEM;
- goto err_idxd_alloc;
+ struct idxd_saved_states *idxd_saved __free(kfree) =
+ kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!idxd_saved) {
+ dev_err(dev, "HALT: no memory\n");
+
+ return;
}
- dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base) {
- rc = -ENOMEM;
- goto err_iomap;
+ /* Save IDXD configurations. */
+ rc = idxd_device_config_save(idxd, idxd_saved);
+ if (rc < 0) {
+ dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+ return;
+ }
+
+ idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+ /* Save PCI device state. */
+ pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ const char *idxd_name;
+ struct device *dev;
+ int rc, i;
+
+ if (!idxd->idxd_saved)
+ return;
+
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
+
+ /* Restore PCI device state. */
+ pci_restore_state(idxd->pdev);
+
+ /* Unbind idxd device from driver. */
+ idxd_unbind(&idxd_drv.drv, idxd_name);
+
+ /*
+ * Probe the PCI device without allocating or changing the idxd
+ * software data, which is kept the same as before the FLR.
+ */
+ idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+ /* Restore IDXD configurations. */
+ idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+ /* Re-configure IDXD device if allowed. */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ dev_err(dev, "HALT: %s config fails\n", idxd_name);
+ goto out;
+ }
+ }
+
+ /* Bind IDXD device to driver. */
+ rc = idxd_bind(&idxd_drv.drv, idxd_name);
+ if (rc < 0) {
+ dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+ goto out;
+ }
+
+ /* Bind enabled wq in the IDXD device to driver. */
+ for (i = 0; i < idxd->max_wqs; i++) {
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ char wq_name[32];
+
+ wq->state = IDXD_WQ_DISABLED;
+ sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+ /*
+ * Bind to user driver depending on wq type.
+ *
+ * Currently only user type WQs are supported; kernel type
+ * WQs will be supported in the future.
+ */
+ if (wq->type == IDXD_WQT_USER)
+ rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+ else
+ rc = -EINVAL;
+ if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
+ dev_err(dev,
+ "HALT: unable to re-enable wq %s\n",
+ dev_name(wq_confdev(wq)));
+ }
+ }
}
+out:
+ kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+ .reset_prepare = idxd_reset_prepare,
+ .reset_done = idxd_reset_done,
+};
+
+/*
+ * Probe idxd PCI device.
+ * If idxd is not given, allocate idxd and set up its data.
+ *
+ * If idxd is given, idxd was already allocated and set up. Just
+ * configure the device without re-allocating or re-configuring the
+ * idxd data. This is useful for recovering from an FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ bool alloc_idxd = !idxd;
+ struct idxd_driver_data *data;
+ struct device *dev;
+ int rc;
- dev_dbg(dev, "Set DMA masks\n");
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ pdev = idxd ? idxd->pdev : pdev;
+ dev = &pdev->dev;
+ data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+ rc = pci_enable_device(pdev);
if (rc)
- goto err;
+ return rc;
+
+ if (alloc_idxd) {
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
+
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ goto err;
+ }
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
- idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
- rc = idxd_probe(idxd);
- if (rc) {
- dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- goto err;
- }
+ if (alloc_idxd) {
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ goto err;
+ }
- if (data->load_device_defaults) {
- rc = data->load_device_defaults(idxd);
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
+ rc = idxd_register_devices(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ goto err_dev_register;
+ }
+
+ rc = idxd_device_init_debugfs(idxd);
if (rc)
- dev_warn(dev, "IDXD loading device defaults failed\n");
+ dev_warn(dev, "IDXD debugfs failed to setup\n");
}
- rc = idxd_register_devices(idxd);
- if (rc) {
- dev_err(dev, "IDXD sysfs setup failed\n");
- goto err_dev_register;
- }
+ if (!alloc_idxd) {
+ /* Release interrupts in the IDXD device. */
+ idxd_cleanup_interrupts(idxd);
- rc = idxd_device_init_debugfs(idxd);
- if (rc)
- dev_warn(dev, "IDXD debugfs failed to setup\n");
+ /* Re-enable interrupts in the IDXD device. */
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD interrupts failed to setup\n");
+ }
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
- idxd->user_submission_safe = data->user_submission_safe;
+ if (data)
+ idxd->user_submission_safe = data->user_submission_safe;
return 0;
@@ -792,12 +1239,17 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
}
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return idxd_pci_probe_alloc(NULL, pdev, id);
+}
+
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -829,7 +1281,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
@@ -842,20 +1293,14 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
+ idxd_device_remove_debugfs(idxd);
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- idxd_device_remove_debugfs(idxd);
-
- irq_entry = idxd_get_ie(idxd, 0);
- free_irq(irq_entry->vector, irq_entry);
- pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
- perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
+ pci_disable_device(pdev);
}
static struct pci_driver idxd_pci_driver = {
@@ -864,6 +1309,7 @@ static struct pci_driver idxd_pci_driver = {
.probe = idxd_pci_probe,
.remove = idxd_remove,
.shutdown = idxd_shutdown,
+ .err_handler = &idxd_error_handler,
};
static int __init idxd_init_module(void)
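The save path in idxd_device_config_save() above leans on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer declared with __free(kfree) is kfree()d automatically whenever it goes out of scope, and no_free_ptr() disarms that cleanup once ownership has been handed off to the caller. A minimal standalone sketch of the pattern, with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct demo_ctx {
	void *buf;
};

static int demo_save(struct demo_ctx *ctx, size_t len)
{
	void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (len < 16)
		return -EINVAL;	/* buf is kfree()d automatically here */

	ctx->buf = no_free_ptr(buf);	/* transfer ownership, disarm cleanup */
	return 0;
}

This is why the error paths in the hunk above can simply return after idxd_free_saved(): every still-armed __free pointer is released on the way out.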
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index fc049c9c9892..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -383,15 +383,65 @@ static void process_evl_entries(struct idxd_device *idxd)
mutex_unlock(&evl->lock);
}
+static void idxd_device_flr(struct work_struct *work)
+{
+ struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+ int rc;
+
+ /*
+ * IDXD device requires a Function Level Reset (FLR).
+ * pci_reset_function() will reset the device with FLR.
+ */
+ rc = pci_reset_function(idxd->pdev);
+ if (rc)
+ dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ idxd->state = IDXD_DEV_HALTED;
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ /*
+ * If we need a software reset, queue the work on the
+ * device workqueue so that interrupts remain available
+ * for the device command completions.
+ */
+ INIT_WORK(&idxd->work, idxd_device_reinit);
+ queue_work(idxd->wq, &idxd->work);
+ } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_mask_error_interrupts(idxd);
+ dev_dbg(&idxd->pdev->dev,
+ "idxd halted, doing FLR. After FLR, configs are restored\n");
+ INIT_WORK(&idxd->work, idxd_device_flr);
+ queue_work(idxd->wq, &idxd->work);
+
+ } else {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
+ idxd_device_clear_state(idxd);
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need system reset");
+
+ return IRQ_HANDLED;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = ie_to_idxd(irq_entry);
struct device *dev = &idxd->pdev->dev;
- union gensts_reg gensts;
u32 val = 0;
int i;
- bool err = false;
u32 cause;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_HALT_STATE)
- goto halt;
+ return idxd_halt(idxd);
if (cause & IDXD_INTC_ERR) {
spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
for (i = 0; i < 4; i++)
dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
i, idxd->sw_err.bits[i]);
- err = true;
}
if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- if (!err)
- goto out;
-
-halt:
- gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
- if (gensts.state == IDXD_DEVICE_STATE_HALT) {
- idxd->state = IDXD_DEV_HALTED;
- if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
- /*
- * If we need a software reset, we will throw the work
- * on a system workqueue in order to allow interrupts
- * for the device command completions.
- */
- INIT_WORK(&idxd->work, idxd_device_reinit);
- queue_work(idxd->wq, &idxd->work);
- } else {
- idxd->state = IDXD_DEV_HALTED;
- idxd_wqs_quiesce(idxd);
- idxd_wqs_unmap_portal(idxd);
- idxd_device_clear_state(idxd);
- dev_err(&idxd->pdev->dev,
- "idxd halted, need %s.\n",
- gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
- "FLR" : "system reset");
- }
- }
-
-out:
return IRQ_HANDLED;
}
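idxd_halt() runs from the threaded interrupt handler, where a Function Level Reset cannot be issued directly because pci_reset_function() may sleep for the duration of the reset, so the hunk above defers the FLR to the device workqueue. A minimal sketch of the same deferral pattern, with hypothetical demo_* names:

#include <linux/pci.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct pci_dev *pdev;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void demo_flr_work(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, work);

	/* Safe to sleep here, unlike in interrupt context. */
	if (pci_reset_function(dd->pdev))
		dev_err(&dd->pdev->dev, "FLR failed\n");
}

static void demo_handle_halt(struct demo_dev *dd)
{
	INIT_WORK(&dd->work, demo_flr_work);
	queue_work(dd->wq, &dd->work);
}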
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index e16dbf9ab324..8dc2e8bca779 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -3,9 +3,19 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_
+#ifdef __KERNEL__
#include <uapi/linux/idxd.h>
+#else
+#include <linux/idxd.h>
+#endif
/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+#define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d
+
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
@@ -40,7 +50,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
-} __packed;
+};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@@ -60,7 +70,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@@ -74,7 +84,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
-} __packed;
+};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@@ -83,7 +93,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
-} __packed;
+};
#define IDXD_ENGCAP_OFFSET 0x38
@@ -109,7 +119,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_TABLE_MULT 0x100
@@ -123,7 +133,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@@ -134,7 +144,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@@ -144,7 +154,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
-} __packed;
+};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@@ -178,7 +188,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
-} __packed;
+};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@@ -208,7 +218,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
-} __packed;
+};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@@ -279,7 +289,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
-} __packed;
+};
union iaa_cap_reg {
struct {
@@ -298,7 +308,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
-} __packed;
+};
#define IDXD_IAACAP_OFFSET 0x180
@@ -315,7 +325,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@@ -329,7 +339,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
-} __packed;
+};
union group_flags {
struct {
@@ -347,13 +357,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
-} __packed;
+};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
-} __packed;
+};
union wqcfg {
struct {
@@ -405,7 +415,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
-} __packed;
+};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@@ -469,7 +479,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@@ -478,7 +488,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
-} __packed;
+};
struct idxd_event {
union {
@@ -488,7 +498,7 @@ struct idxd_event {
};
u32 val;
};
-} __packed;
+};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@@ -501,7 +511,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
-} __packed;
+};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@@ -511,7 +521,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
-} __packed;
+};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@@ -528,7 +538,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
-} __packed;
+};
#define IDXD_FLTCFG_OFFSET 0x300
@@ -538,7 +548,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
-} __packed;
+};
union event_cfg {
struct {
@@ -546,7 +556,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
-} __packed;
+};
union filter_cfg {
struct {
@@ -557,7 +567,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
-} __packed;
+};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@@ -575,7 +585,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
-} __packed;
+};
#define IDXD_MAX_BATCH_IDENT 256
@@ -615,17 +625,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
-} __packed;
+};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
-} __packed;
+};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
-} __packed;
+};
#endif
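Dropping __packed from these register overlays is safe because each union pairs its bitfields with a naturally aligned u32/u64 `bits` member, so the layout is already fixed; without the attribute the compiler is free to emit ordinary aligned loads and stores. If one wanted to pin that assumption down at build time, compile-time asserts along these lines would do (a sketch, not part of the patch):

#include <linux/build_bug.h>
#include <linux/types.h>

static_assert(sizeof(union gensts_reg) == sizeof(u32));
static_assert(sizeof(union gen_cap_reg) == sizeof(u64));
static_assert(sizeof(union sw_err_reg) == 4 * sizeof(u64));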
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 94eca25ae9b9..6db1c5fcedc5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -61,7 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
return __get_desc(wq, idx, cpu);
}
-EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, "IDXD");
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
@@ -70,7 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
desc->cpu = -1;
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
-EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_free_desc, "IDXD");
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
struct idxd_desc *desc)
@@ -219,4 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
percpu_ref_put(&wq->wq_active);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, "IDXD");
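The EXPORT_SYMBOL_NS_GPL() updates follow the kernel-wide conversion of symbol namespaces from bare identifiers to string literals. A module consuming these exports imports the namespace in the same quoted form (sketch):

#include <linux/module.h>

/*
 * Required in any module that calls idxd_alloc_desc(), idxd_free_desc()
 * or idxd_submit_desc(); the tag is now a string literal, matching the
 * exports above.
 */
MODULE_IMPORT_NS("IDXD");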
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index f706eae0e76b..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
/* On systems where direct user submissions are not safe, we need to clear out
* the BATCH capability from the capability mask in sysfs since we cannot support
- * that command on such systems.
+ * that command on such systems. The restriction on operations with the
+ * BATCH opcode applies only to DSA version 1 devices.
*/
- if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+ confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
clear_bit(DSA_OPCODE_BATCH % 64, &val);
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
@@ -1979,13 +1981,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(group_confdev(group));
}
}
-
-int idxd_register_bus_type(void)
-{
- return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
- bus_unregister(&dsa_bus_type);
-}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0532dd2640dc..fd55bcd060ab 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -1073,10 +1073,10 @@ static struct platform_driver mdc_dma_driver = {
.driver = {
.name = "img-mdc-dma",
.pm = &img_mdc_pm_ops,
- .of_match_table = of_match_ptr(mdc_dma_of_match),
+ .of_match_table = mdc_dma_of_match,
},
.probe = mdc_dma_probe,
- .remove_new = mdc_dma_remove,
+ .remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);
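The .remove_new to .remove renames here and in the files below complete the platform-driver conversion to a void-returning remove callback: .remove_new was the transitional name, and .remove now takes the void prototype directly. A minimal sketch with hypothetical demo_* names:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* Release resources; errors can no longer be returned. */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* void-returning callback */
	.driver	= {
		.name = "demo",
	},
};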
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e913f0db99da..ba434657059a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
@@ -323,7 +324,7 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
if (imxdma_hw_chain(imxdmac))
- del_timer(&imxdmac->watchdog);
+ timer_delete(&imxdmac->watchdog);
local_irq_save(flags);
imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
@@ -336,7 +337,8 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
static void imxdma_watchdog(struct timer_list *t)
{
- struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
+ struct imxdma_channel *imxdmac = timer_container_of(imxdmac, t,
+ watchdog);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int channel = imxdmac->channel;
@@ -453,7 +455,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
}
if (imxdma_hw_chain(imxdmac)) {
- del_timer(&imxdmac->watchdog);
+ timer_delete(&imxdmac->watchdog);
return;
}
}
@@ -942,7 +944,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
imxdmac->channel, (unsigned long long)xt->src_start,
(unsigned long long) xt->dst_start,
- xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
+ str_true_false(xt->src_sgl), str_true_false(xt->dst_sgl),
xt->numf, xt->frame_size);
if (list_empty(&imxdmac->ld_free) ||
@@ -1233,7 +1235,7 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
.of_match_table = imx_dma_of_dev_id,
},
- .remove_new = imxdma_remove,
+ .remove = imxdma_remove,
};
static int __init imxdma_module_init(void)
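The timer changes in this file track two tree-wide renames: del_timer()/del_timer_sync() became timer_delete()/timer_delete_sync(), and from_timer() became timer_container_of(). The updated idiom in isolation, with hypothetical demo_* names:

#include <linux/timer.h>

struct demo_chan {
	struct timer_list watchdog;
};

static void demo_watchdog(struct timer_list *t)
{
	/* timer_container_of() replaces the old from_timer() helper. */
	struct demo_chan *chan = timer_container_of(chan, t, watchdog);

	/* timer_delete() replaces del_timer(). */
	timer_delete(&chan->watchdog);
}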
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 72299a08af44..ed9e56de5a9b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -256,7 +256,7 @@ struct sdma_script_start_addrs {
/* End of v3 array */
union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
- s32 v4_end[0];
+ s32 v4_end[];
};
/*
@@ -1459,9 +1459,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
* dmatest, thus create 'struct imx_dma_data mem_data' for this case.
* Please note in any other slave case, you have to setup chan->private
* with 'struct imx_dma_data' in your own filter function if you want to
- * request dma channel by dma_request_channel() rather than
- * dma_request_slave_channel(). Othwise, 'MEMCPY in case?' will appear
- * to warn you to correct your filter function.
+ * request a DMA channel via dma_request_channel(); otherwise, 'MEMCPY in
+ * case?' will appear to warn you to correct your filter function.
*/
if (!data) {
dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
@@ -2440,7 +2439,7 @@ static struct platform_driver sdma_driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
},
- .remove_new = sdma_remove,
+ .remove = sdma_remove,
.probe = sdma_probe,
};
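The sdma_script_start_addrs change replaces a GNU zero-length array with a C99 flexible array member, the form the kernel has standardized on (zero-length arrays defeat the compiler's array-bounds checking). The pattern in isolation, with a hypothetical struct name:

#include <linux/types.h>

struct demo_addrs {
	s32 v3_end;
	s32 v4_end[];	/* flexible array member: must be last, never [0] */
};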
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 17f6b6367113..5d3c0ae6b342 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,6 +10,8 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
+#include <asm/cpuid/api.h>
+
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
@@ -58,11 +60,11 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
{
/* CPUID level 9 returns DCA configuration */
/* Bit 0 indicates DCA enabled by the BIOS */
- unsigned long cpuid_level_9;
+ u32 eax;
int res;
- cpuid_level_9 = cpuid_eax(9);
- res = test_bit(0, &cpuid_level_9);
+ eax = cpuid_eax(CPUID_LEAF_DCA);
+ res = eax & BIT(0);
if (!res)
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 79d8957f9e60..b8fff8333aef 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -159,7 +159,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
}
/* flush inflight timers */
- del_timer_sync(&ioat_chan->timer);
+ timer_delete_sync(&ioat_chan->timer);
/* flush inflight tasklet runs */
tasklet_kill(&ioat_chan->cleanup_task);
@@ -901,7 +901,8 @@ static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
void ioat_timer_event(struct timer_list *t)
{
- struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
+ struct ioatdma_chan *ioat_chan = timer_container_of(ioat_chan, t,
+ timer);
dma_addr_t phys_complete;
u64 status;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a180171087a8..12a4a4860a74 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -19,6 +19,8 @@
#define IOAT_DMA_DCA_ANY_CPU ~0
+int system_has_dca_enabled(struct pci_dev *pdev);
+
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 79e4e4c09c18..0373c48520c9 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -63,9 +63,6 @@
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
#define IOAT_VER_3_4 0x34 /* Version 3.4 */
-
-int system_has_dca_enabled(struct pci_dev *pdev);
-
#define IOAT_DESC_SZ 64
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index cc9ddd6c325b..02f68b328511 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1224,12 +1224,12 @@ static void ioat_shutdown(struct pci_dev *pdev)
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
/*
- * Synchronization rule for del_timer_sync():
+ * Synchronization rule for timer_delete_sync():
* - The caller must not hold locks which would prevent
* completion of the timer's handler.
* So prep_lock cannot be held before calling it.
*/
- del_timer_sync(&ioat_chan->timer);
+ timer_delete_sync(&ioat_chan->timer);
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
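The ioat_shutdown() hunk keeps the rule its comment spells out: timer_delete_sync() waits for a running handler to finish, so it must not be called while holding a lock the handler itself takes. In sketch form, with hypothetical demo_* names:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo_chan {
	spinlock_t prep_lock;
	unsigned long state;
	struct timer_list timer;
};

static void demo_shutdown(struct demo_chan *chan)
{
	spin_lock_bh(&chan->prep_lock);
	set_bit(0, &chan->state);	/* mark the channel down */
	spin_unlock_bh(&chan->prep_lock);

	/* Only after dropping prep_lock is it safe to wait here. */
	timer_delete_sync(&chan->timer);
}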
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 5de8c21d41e7..acc2983e28e0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1028,7 +1028,7 @@ static struct platform_driver k3_pdma_driver = {
.of_match_table = k3_pdma_dt_ids,
},
.probe = k3_dma_probe,
- .remove_new = k3_dma_remove,
+ .remove = k3_dma_remove,
};
module_platform_driver(k3_pdma_driver);
diff --git a/drivers/dma/loongson2-apb-dma.c b/drivers/dma/loongson2-apb-dma.c
new file mode 100644
index 000000000000..c528f02b9f84
--- /dev/null
+++ b/drivers/dma/loongson2-apb-dma.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Loongson-2 APB DMA Controller
+ *
+ * Copyright (C) 2017-2023 Loongson Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Global Configuration Register */
+#define LDMA_ORDER_ERG 0x0
+
+/* Bitfield definitions */
+
+/* Bitfields in Global Configuration Register */
+#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
+#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
+#define LDMA_ASK_VALID BIT(2)
+#define LDMA_START BIT(3) /* DMA start operation */
+#define LDMA_STOP BIT(4) /* DMA stop operation */
+#define LDMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields in ndesc_addr field of HW descriptor */
+#define LDMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
+#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
+
+/* Bitfields in cmd field of HW descriptor */
+#define LDMA_INT BIT(1) /* Enable DMA interrupts */
+#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
+
+#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define LDMA_MAX_TRANS_LEN U32_MAX
+
+/*-- descriptors -----------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_hw_desc - DMA HW descriptor
+ * @ndesc_addr: the next descriptor low address.
+ * @mem_addr: memory low address.
+ * @apb_addr: device buffer address.
+ * @len: length of a piece of carried content, in words.
+ * @step_len: length between two moved memory data blocks.
+ * @step_times: number of blocks to be carried in a single DMA operation.
+ * @cmd: descriptor command or state.
+ * @stats: DMA status.
+ * @high_ndesc_addr: the next descriptor high address.
+ * @high_mem_addr: memory high address.
+ * @reserved: reserved
+ */
+struct ls2x_dma_hw_desc {
+ u32 ndesc_addr;
+ u32 mem_addr;
+ u32 apb_addr;
+ u32 len;
+ u32 step_len;
+ u32 step_times;
+ u32 cmd;
+ u32 stats;
+ u32 high_ndesc_addr;
+ u32 high_mem_addr;
+ u32 reserved[2];
+} __packed;
+
+/*
+ * struct ls2x_dma_sg - ls2x dma scatter gather entry
+ * @hw: the pointer to DMA HW descriptor.
+ * @llp: physical address of the DMA HW descriptor.
+ * @phys: destination or source address (mem).
+ * @len: number of bytes to read.
+ */
+struct ls2x_dma_sg {
+ struct ls2x_dma_hw_desc *hw;
+ dma_addr_t llp;
+ dma_addr_t phys;
+ u32 len;
+};
+
+/*
+ * struct ls2x_dma_desc - software descriptor
+ * @vdesc: pointer to the virtual dma descriptor.
+ * @cyclic: flag indicating a cyclic DMA transfer
+ * @burst_size: burst size of transaction, in words.
+ * @desc_num: number of sg entries.
+ * @direction: transfer direction, to or from device.
+ * @status: dma controller status.
+ * @sg: array of sgs.
+ */
+struct ls2x_dma_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+ size_t burst_size;
+ u32 desc_num;
+ enum dma_transfer_direction direction;
+ enum dma_status status;
+ struct ls2x_dma_sg sg[] __counted_by(desc_num);
+};
+
+/*-- Channels --------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
+ * @vchan: virtual dma channel entry.
+ * @desc: pointer to the ls2x sw dma descriptor.
+ * @pool: hw desc table
+ * @irq: irq line
+ * @sconfig: configuration for slave transfers, passed via .device_config
+ */
+struct ls2x_dma_chan {
+ struct virt_dma_chan vchan;
+ struct ls2x_dma_desc *desc;
+ void *pool;
+ int irq;
+ struct dma_slave_config sconfig;
+};
+
+/*-- Controller ------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_priv - LS2X APB DMAC specific information
+ * @ddev: dmaengine dma_device object members
+ * @dma_clk: DMAC clock source
+ * @regs: memory mapped register base
+ * @lchan: channel to store ls2x_dma_chan structures
+ */
+struct ls2x_dma_priv {
+ struct dma_device ddev;
+ struct clk *dma_clk;
+ void __iomem *regs;
+ struct ls2x_dma_chan lchan;
+};
+
+/*-- Helper functions ------------------------------------------------*/
+
+static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct ls2x_dma_desc, vdesc);
+}
+
+static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ls2x_dma_chan, vchan.chan);
+}
+
+static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
+{
+ return container_of(ddev, struct ls2x_dma_priv, ddev);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
+ struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
+ int i;
+
+ for (i = 0; i < desc->desc_num; i++) {
+ if (desc->sg[i].hw)
+ dma_pool_free(lchan->pool, desc->sg[i].hw,
+ desc->sg[i].llp);
+ }
+
+ kfree(desc);
+}
+
+static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ u64 val;
+
+ val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
+ val |= LDMA_64BIT_EN | cmd;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ struct ls2x_dma_sg *ldma_sg;
+ struct virt_dma_desc *vdesc;
+ u64 val;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&lchan->vchan);
+ if (!vdesc) {
+ lchan->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ lchan->desc = to_ldma_desc(vdesc);
+ ldma_sg = &lchan->desc->sg[0];
+
+ /* Start DMA */
+ lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
+ val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
+{
+ u32 maxburst, buswidth;
+
+ /* Reject definitely invalid configurations */
+ if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
+ (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
+ return 0;
+
+ if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
+ maxburst = lchan->sconfig.dst_maxburst;
+ buswidth = lchan->sconfig.dst_addr_width;
+ } else {
+ maxburst = lchan->sconfig.src_maxburst;
+ buswidth = lchan->sconfig.src_addr_width;
+ }
+
+ /* If maxburst is zero, fall back to LDMA_MAX_TRANS_LEN */
+ return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
+}
+
+static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
+ struct ls2x_dma_desc *desc)
+{
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
+ u32 num_segments, segment_size;
+
+ if (desc->direction == DMA_MEM_TO_DEV) {
+ ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
+ ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
+ } else {
+ ldma_sg->hw->cmd = LDMA_INT;
+ ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
+ }
+
+ ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
+ ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
+ segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
+
+ /* Word count register takes input in words */
+ ldma_sg->hw->len = segment_size;
+ ldma_sg->hw->step_times = num_segments;
+ ldma_sg->hw->step_len = 0;
+
+ /* Let's make a linked list */
+ if (sg_index) {
+ desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
+ desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
+ }
+}
+
+/*-- DMA Engine API --------------------------------------------------*/
+
+/*
+ * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * return - the number of allocated descriptors
+ */
+static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->device->dev, PAGE_SIZE,
+ __alignof__(struct ls2x_dma_hw_desc), 0);
+ if (!lchan->pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
+
+ return 1;
+}
+
+/*
+ * ls2x_dma_free_chan_resources - free all channel resources
+ * @chan: DMA channel
+ */
+static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+ dma_pool_destroy(lchan->pool);
+ lchan->pool = NULL;
+}
+
+/*
+ * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ struct scatterlist *sg;
+ size_t burst_size;
+ int i;
+
+ if (unlikely(!sg_len || !is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = sg_len;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = sg_dma_address(sg);
+ ldma_sg->len = sg_dma_len(sg);
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Clear the enable bit in the last descriptor */
+ desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ size_t burst_size;
+ u32 num_periods;
+ int i;
+
+ if (unlikely(!buf_len || !period_len))
+ return NULL;
+
+ if (unlikely(!is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = num_periods;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ /* Build cyclic linked list */
+ for (i = 0; i < num_periods; i++) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = buf_addr + period_len * i;
+ ldma_sg->len = period_len;
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Let's make a cyclic list */
+ desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
+ desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
+ desc->cyclic = true;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_slave_config - set slave configuration for channel
+ * @chan: dma channel
+ * @config: slave configuration
+ *
+ * Sets slave configuration for channel
+ */
+static int ls2x_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ memcpy(&lchan->sconfig, config, sizeof(*config));
+ return 0;
+}
+
+/*
+ * ls2x_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ls2x_dma_issue_pending(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
+ ls2x_dma_start_transfer(lchan);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+}
+
+/*
+ * ls2x_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+ * Stops all DMA transactions.
+ */
+static int ls2x_dma_terminate_all(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ /* Set the stop command */
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ if (lchan->desc) {
+ vchan_terminate_vdesc(&lchan->desc->vdesc);
+ lchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&lchan->vchan, &head);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&lchan->vchan, &head);
+ return 0;
+}
+
+/*
+ * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ */
+static void ls2x_dma_synchronize(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_synchronize(&lchan->vchan);
+}
+
+static int ls2x_dma_pause(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ lchan->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int ls2x_dma_resume(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
+ lchan->desc->status = DMA_IN_PROGRESS;
+ ls2x_dma_write_cmd(lchan, LDMA_START);
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_isr - LS2X DMA Interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to ls2x_dma_chan
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
+{
+ struct ls2x_dma_chan *lchan = dev_id;
+ struct ls2x_dma_desc *desc;
+
+ spin_lock(&lchan->vchan.lock);
+ desc = lchan->desc;
+ if (desc) {
+ if (desc->cyclic) {
+ vchan_cyclic_callback(&desc->vdesc);
+ } else {
+ desc->status = DMA_COMPLETE;
+ vchan_cookie_complete(&desc->vdesc);
+ ls2x_dma_start_transfer(lchan);
+ }
+
+ /* ls2x_dma_start_transfer() updates lchan->desc */
+ if (!lchan->desc)
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ }
+ spin_unlock(&lchan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ls2x_dma_chan_init(struct platform_device *pdev,
+ struct ls2x_dma_priv *priv)
+{
+ struct ls2x_dma_chan *lchan = &priv->lchan;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ lchan->irq = platform_get_irq(pdev, 0);
+ if (lchan->irq < 0)
+ return lchan->irq;
+
+ ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), lchan);
+ if (ret)
+ return ret;
+
+ /* Initialize channel-related values */
+ INIT_LIST_HEAD(&priv->ddev.channels);
+ lchan->vchan.desc_free = ls2x_dma_desc_free;
+ vchan_init(&lchan->vchan, &priv->ddev);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ls2x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ls2x_dma_priv *priv;
+ struct dma_device *ddev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return dev_err_probe(dev, PTR_ERR(priv->regs),
+ "devm_platform_ioremap_resource failed.\n");
+
+ priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->dma_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
+
+ ret = clk_prepare_enable(priv->dma_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
+
+ ret = ls2x_dma_chan_init(pdev, priv);
+ if (ret)
+ goto disable_clk;
+
+ ddev = &priv->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+
+ ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = ls2x_dma_issue_pending;
+ ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
+ ddev->device_config = ls2x_dma_slave_config;
+ ddev->device_terminate_all = ls2x_dma_terminate_all;
+ ddev->device_synchronize = ls2x_dma_synchronize;
+ ddev->device_pause = ls2x_dma_pause;
+ ddev->device_resume = ls2x_dma_resume;
+
+ ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&priv->ddev);
+ if (ret < 0)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
+ if (ret < 0)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(&priv->ddev);
+disable_clk:
+ clk_disable_unprepare(priv->dma_clk);
+
+ return ret;
+}
+
+/*
+ * ls2x_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static void ls2x_dma_remove(struct platform_device *pdev)
+{
+ struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->ddev);
+ clk_disable_unprepare(priv->dma_clk);
+}
+
+static const struct of_device_id ls2x_dma_of_match_table[] = {
+ { .compatible = "loongson,ls2k1000-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
+
+static struct platform_driver ls2x_dmac_driver = {
+ .probe = ls2x_dma_probe,
+ .remove = ls2x_dma_remove,
+ .driver = {
+ .name = "ls2x-apbdma",
+ .of_match_table = ls2x_dma_of_match_table,
+ },
+};
+module_platform_driver(ls2x_dmac_driver);
+
+MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
deleted file mode 100644
index 9652e8666722..000000000000
--- a/drivers/dma/ls2x-apb-dma.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for the Loongson LS2X APB DMA Controller
- *
- * Copyright (C) 2017-2023 Loongson Corporation
- */
-
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "dmaengine.h"
-#include "virt-dma.h"
-
-/* Global Configuration Register */
-#define LDMA_ORDER_ERG 0x0
-
-/* Bitfield definitions */
-
-/* Bitfields in Global Configuration Register */
-#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
-#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
-#define LDMA_ASK_VALID BIT(2)
-#define LDMA_START BIT(3) /* DMA start operation */
-#define LDMA_STOP BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
-
-/* Bitfields in ndesc_addr field of HW descriptor */
-#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
-#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
-
-/* Bitfields in cmd field of HW descriptor */
-#define LDMA_INT BIT(1) /* Enable DMA interrupts */
-#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
-
-#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-#define LDMA_MAX_TRANS_LEN U32_MAX
-
-/*-- descriptors -----------------------------------------------------*/
-
-/*
- * struct ls2x_dma_hw_desc - DMA HW descriptor
- * @ndesc_addr: the next descriptor low address.
- * @mem_addr: memory low address.
- * @apb_addr: device buffer address.
- * @len: length of a piece of carried content, in words.
- * @step_len: length between two moved memory data blocks.
- * @step_times: number of blocks to be carried in a single DMA operation.
- * @cmd: descriptor command or state.
- * @stats: DMA status.
- * @high_ndesc_addr: the next descriptor high address.
- * @high_mem_addr: memory high address.
- * @reserved: reserved
- */
-struct ls2x_dma_hw_desc {
- u32 ndesc_addr;
- u32 mem_addr;
- u32 apb_addr;
- u32 len;
- u32 step_len;
- u32 step_times;
- u32 cmd;
- u32 stats;
- u32 high_ndesc_addr;
- u32 high_mem_addr;
- u32 reserved[2];
-} __packed;
-
-/*
- * struct ls2x_dma_sg - ls2x dma scatter gather entry
- * @hw: the pointer to DMA HW descriptor.
- * @llp: physical address of the DMA HW descriptor.
- * @phys: destination or source address(mem).
- * @len: number of Bytes to read.
- */
-struct ls2x_dma_sg {
- struct ls2x_dma_hw_desc *hw;
- dma_addr_t llp;
- dma_addr_t phys;
- u32 len;
-};
-
-/*
- * struct ls2x_dma_desc - software descriptor
- * @vdesc: pointer to the virtual dma descriptor.
- * @cyclic: flag to dma cyclic
- * @burst_size: burst size of transaction, in words.
- * @desc_num: number of sg entries.
- * @direction: transfer direction, to or from device.
- * @status: dma controller status.
- * @sg: array of sgs.
- */
-struct ls2x_dma_desc {
- struct virt_dma_desc vdesc;
- bool cyclic;
- size_t burst_size;
- u32 desc_num;
- enum dma_transfer_direction direction;
- enum dma_status status;
- struct ls2x_dma_sg sg[] __counted_by(desc_num);
-};
-
-/*-- Channels --------------------------------------------------------*/
-
-/*
- * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
- * @vchan: virtual dma channel entry.
- * @desc: pointer to the ls2x sw dma descriptor.
- * @pool: hw desc table
- * @irq: irq line
- * @sconfig: configuration for slave transfers, passed via .device_config
- */
-struct ls2x_dma_chan {
- struct virt_dma_chan vchan;
- struct ls2x_dma_desc *desc;
- void *pool;
- int irq;
- struct dma_slave_config sconfig;
-};
-
-/*-- Controller ------------------------------------------------------*/
-
-/*
- * struct ls2x_dma_priv - LS2X APB DMAC specific information
- * @ddev: dmaengine dma_device object members
- * @dma_clk: DMAC clock source
- * @regs: memory mapped register base
- * @lchan: channel to store ls2x_dma_chan structures
- */
-struct ls2x_dma_priv {
- struct dma_device ddev;
- struct clk *dma_clk;
- void __iomem *regs;
- struct ls2x_dma_chan lchan;
-};
-
-/*-- Helper functions ------------------------------------------------*/
-
-static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct ls2x_dma_desc, vdesc);
-}
-
-static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct ls2x_dma_chan, vchan.chan);
-}
-
-static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
-{
- return container_of(ddev, struct ls2x_dma_priv, ddev);
-}
-
-static struct device *chan2dev(struct dma_chan *chan)
-{
- return &chan->dev->device;
-}
-
-static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
- struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
- int i;
-
- for (i = 0; i < desc->desc_num; i++) {
- if (desc->sg[i].hw)
- dma_pool_free(lchan->pool, desc->sg[i].hw,
- desc->sg[i].llp);
- }
-
- kfree(desc);
-}
-
-static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
-{
- struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
- u64 val;
-
- val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
- val |= LDMA_64BIT_EN | cmd;
- lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
-}
-
-static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
-{
- struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
- struct ls2x_dma_sg *ldma_sg;
- struct virt_dma_desc *vdesc;
- u64 val;
-
- /* Get the next descriptor */
- vdesc = vchan_next_desc(&lchan->vchan);
- if (!vdesc) {
- lchan->desc = NULL;
- return;
- }
-
- list_del(&vdesc->node);
- lchan->desc = to_ldma_desc(vdesc);
- ldma_sg = &lchan->desc->sg[0];
-
- /* Start DMA */
- lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
- val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
- lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
-}
-
-static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
-{
- u32 maxburst, buswidth;
-
- /* Reject definitely invalid configurations */
- if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
- (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
- return 0;
-
- if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
- maxburst = lchan->sconfig.dst_maxburst;
- buswidth = lchan->sconfig.dst_addr_width;
- } else {
- maxburst = lchan->sconfig.src_maxburst;
- buswidth = lchan->sconfig.src_addr_width;
- }
-
- /* If maxburst is zero, fallback to LDMA_MAX_TRANS_LEN */
- return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
-}
-
-static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
- struct ls2x_dma_desc *desc)
-{
- struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
- u32 num_segments, segment_size;
-
- if (desc->direction == DMA_MEM_TO_DEV) {
- ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
- ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
- } else {
- ldma_sg->hw->cmd = LDMA_INT;
- ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
- }
-
- ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
- ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
-
- /* Split into multiple equally sized segments if necessary */
- num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
- segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
-
- /* Word count register takes input in words */
- ldma_sg->hw->len = segment_size;
- ldma_sg->hw->step_times = num_segments;
- ldma_sg->hw->step_len = 0;
-
- /* lets make a link list */
- if (sg_index) {
- desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
- desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
- }
-}
-
-/*-- DMA Engine API --------------------------------------------------*/
-
-/*
- * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
- * @chan: allocate descriptor resources for this channel
- *
- * return - the number of allocated descriptors
- */
-static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-
- /* Create a pool of consistent memory blocks for hardware descriptors */
- lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
- chan->device->dev, PAGE_SIZE,
- __alignof__(struct ls2x_dma_hw_desc), 0);
- if (!lchan->pool) {
- dev_err(chan2dev(chan), "No memory for descriptors\n");
- return -ENOMEM;
- }
-
- return 1;
-}
-
-/*
- * ls2x_dma_free_chan_resources - free all channel resources
- * @chan: DMA channel
- */
-static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-
- vchan_free_chan_resources(to_virt_chan(chan));
- dma_pool_destroy(lchan->pool);
- lchan->pool = NULL;
-}
-
-/*
- * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: tx descriptor status flags
- * @context: transaction context (ignored)
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- u32 sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- struct ls2x_dma_desc *desc;
- struct scatterlist *sg;
- size_t burst_size;
- int i;
-
- if (unlikely(!sg_len || !is_slave_direction(direction)))
- return NULL;
-
- burst_size = ls2x_dmac_detect_burst(lchan);
- if (!burst_size)
- return NULL;
-
- desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
- if (!desc)
- return NULL;
-
- desc->desc_num = sg_len;
- desc->direction = direction;
- desc->burst_size = burst_size;
-
- for_each_sg(sgl, sg, sg_len, i) {
- struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
-
- /* Allocate DMA capable memory for hardware descriptor */
- ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
- if (!ldma_sg->hw) {
- desc->desc_num = i;
- ls2x_dma_desc_free(&desc->vdesc);
- return NULL;
- }
-
- ldma_sg->phys = sg_dma_address(sg);
- ldma_sg->len = sg_dma_len(sg);
-
- ls2x_dma_fill_desc(lchan, i, desc);
- }
-
- /* Setting the last descriptor enable bit */
- desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
- desc->status = DMA_IN_PROGRESS;
-
- return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
-}
-
-/*
- * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- * @flags: tx descriptor status flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- struct ls2x_dma_desc *desc;
- size_t burst_size;
- u32 num_periods;
- int i;
-
- if (unlikely(!buf_len || !period_len))
- return NULL;
-
- if (unlikely(!is_slave_direction(direction)))
- return NULL;
-
- burst_size = ls2x_dmac_detect_burst(lchan);
- if (!burst_size)
- return NULL;
-
- num_periods = buf_len / period_len;
- desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
- if (!desc)
- return NULL;
-
- desc->desc_num = num_periods;
- desc->direction = direction;
- desc->burst_size = burst_size;
-
- /* Build cyclic linked list */
- for (i = 0; i < num_periods; i++) {
- struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
-
- /* Allocate DMA capable memory for hardware descriptor */
- ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
- if (!ldma_sg->hw) {
- desc->desc_num = i;
- ls2x_dma_desc_free(&desc->vdesc);
- return NULL;
- }
-
- ldma_sg->phys = buf_addr + period_len * i;
- ldma_sg->len = period_len;
-
- ls2x_dma_fill_desc(lchan, i, desc);
- }
-
-	/* Let's make the list cyclic */
- desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
- desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
- desc->cyclic = true;
- desc->status = DMA_IN_PROGRESS;
-
- return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
-}
-
-/*
- * ls2x_dma_slave_config - set slave configuration for channel
- * @chan: DMA channel
- * @config: slave configuration
- *
- * Sets slave configuration for channel
- */
-static int ls2x_dma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-
- memcpy(&lchan->sconfig, config, sizeof(*config));
- return 0;
-}
-
-/*
- * ls2x_dma_issue_pending - push pending transactions to the hardware
- * @chan: channel
- *
- * When this function is called, all pending transactions are pushed to the
- * hardware and executed.
- */
-static void ls2x_dma_issue_pending(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&lchan->vchan.lock, flags);
- if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
- ls2x_dma_start_transfer(lchan);
- spin_unlock_irqrestore(&lchan->vchan.lock, flags);
-}
-
-/*
- * ls2x_dma_terminate_all - terminate all transactions
- * @chan: channel
- *
- * Stops all DMA transactions.
- */
-static int ls2x_dma_terminate_all(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&lchan->vchan.lock, flags);
- /* Setting stop cmd */
- ls2x_dma_write_cmd(lchan, LDMA_STOP);
- if (lchan->desc) {
- vchan_terminate_vdesc(&lchan->desc->vdesc);
- lchan->desc = NULL;
- }
-
- vchan_get_all_descriptors(&lchan->vchan, &head);
- spin_unlock_irqrestore(&lchan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&lchan->vchan, &head);
- return 0;
-}
-
-/*
- * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
- * current context.
- * @chan: channel
- */
-static void ls2x_dma_synchronize(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-
- vchan_synchronize(&lchan->vchan);
-}
-
-static int ls2x_dma_pause(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&lchan->vchan.lock, flags);
- if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
- ls2x_dma_write_cmd(lchan, LDMA_STOP);
- lchan->desc->status = DMA_PAUSED;
- }
- spin_unlock_irqrestore(&lchan->vchan.lock, flags);
-
- return 0;
-}
-
-static int ls2x_dma_resume(struct dma_chan *chan)
-{
- struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&lchan->vchan.lock, flags);
- if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
- lchan->desc->status = DMA_IN_PROGRESS;
- ls2x_dma_write_cmd(lchan, LDMA_START);
- }
- spin_unlock_irqrestore(&lchan->vchan.lock, flags);
-
- return 0;
-}
-
-/*
- * ls2x_dma_isr - LS2X DMA Interrupt handler
- * @irq: IRQ number
- * @dev_id: Pointer to ls2x_dma_chan
- *
- * Return: IRQ_HANDLED
- */
-static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
-{
- struct ls2x_dma_chan *lchan = dev_id;
- struct ls2x_dma_desc *desc;
-
- spin_lock(&lchan->vchan.lock);
- desc = lchan->desc;
- if (desc) {
- if (desc->cyclic) {
- vchan_cyclic_callback(&desc->vdesc);
- } else {
- desc->status = DMA_COMPLETE;
- vchan_cookie_complete(&desc->vdesc);
- ls2x_dma_start_transfer(lchan);
- }
-
- /* ls2x_dma_start_transfer() updates lchan->desc */
- if (!lchan->desc)
- ls2x_dma_write_cmd(lchan, LDMA_STOP);
- }
- spin_unlock(&lchan->vchan.lock);
-
- return IRQ_HANDLED;
-}
-
-static int ls2x_dma_chan_init(struct platform_device *pdev,
- struct ls2x_dma_priv *priv)
-{
- struct ls2x_dma_chan *lchan = &priv->lchan;
- struct device *dev = &pdev->dev;
- int ret;
-
- lchan->irq = platform_get_irq(pdev, 0);
- if (lchan->irq < 0)
- return lchan->irq;
-
- ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
- dev_name(&pdev->dev), lchan);
- if (ret)
- return ret;
-
-	/* Initialize channel-related values */
- INIT_LIST_HEAD(&priv->ddev.channels);
- lchan->vchan.desc_free = ls2x_dma_desc_free;
- vchan_init(&lchan->vchan, &priv->ddev);
-
- return 0;
-}
-
-/*
- * ls2x_dma_probe - Driver probe function
- * @pdev: Pointer to the platform_device structure
- *
- * Return: 0 on success, negative error code on failure
- */
-static int ls2x_dma_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ls2x_dma_priv *priv;
- struct dma_device *ddev;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->regs))
- return dev_err_probe(dev, PTR_ERR(priv->regs),
- "devm_platform_ioremap_resource failed.\n");
-
- priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->dma_clk))
- return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
-
- ret = clk_prepare_enable(priv->dma_clk);
- if (ret)
- return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
-
- ret = ls2x_dma_chan_init(pdev, priv);
- if (ret)
- goto disable_clk;
-
- ddev = &priv->ddev;
- ddev->dev = dev;
- dma_cap_zero(ddev->cap_mask);
- dma_cap_set(DMA_SLAVE, ddev->cap_mask);
- dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
-
- ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
- ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
- ddev->device_tx_status = dma_cookie_status;
- ddev->device_issue_pending = ls2x_dma_issue_pending;
- ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
- ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
- ddev->device_config = ls2x_dma_slave_config;
- ddev->device_terminate_all = ls2x_dma_terminate_all;
- ddev->device_synchronize = ls2x_dma_synchronize;
- ddev->device_pause = ls2x_dma_pause;
- ddev->device_resume = ls2x_dma_resume;
-
- ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
- ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
- ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-
- ret = dma_async_device_register(&priv->ddev);
- if (ret < 0)
- goto disable_clk;
-
- ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
- if (ret < 0)
- goto unregister_dmac;
-
- platform_set_drvdata(pdev, priv);
-
- dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
- return 0;
-
-unregister_dmac:
- dma_async_device_unregister(&priv->ddev);
-disable_clk:
- clk_disable_unprepare(priv->dma_clk);
-
- return ret;
-}
-
-/*
- * ls2x_dma_remove - Driver remove function
- * @pdev: Pointer to the platform_device structure
- */
-static void ls2x_dma_remove(struct platform_device *pdev)
-{
- struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
-
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->ddev);
- clk_disable_unprepare(priv->dma_clk);
-}
-
-static const struct of_device_id ls2x_dma_of_match_table[] = {
- { .compatible = "loongson,ls2k1000-apbdma" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
-
-static struct platform_driver ls2x_dmac_driver = {
- .probe = ls2x_dma_probe,
- .remove_new = ls2x_dma_remove,
- .driver = {
- .name = "ls2x-apbdma",
- .of_match_table = ls2x_dma_of_match_table,
- },
-};
-module_platform_driver(ls2x_dmac_driver);
-
-MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
-MODULE_AUTHOR("Loongson Technology Corporation Limited");
-MODULE_LICENSE("GPL");
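The cyclic prep above closes its descriptor ring by rewriting the tail entry's next-descriptor fields to point back at the head (the two desc->sg[num_periods - 1] stores). A minimal sketch of that linking step, assuming hypothetical hw_desc/llp names in place of the driver's real structures:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the hardware descriptor's link fields. */
struct hw_desc {
	u32 ndesc_addr;		/* next descriptor, low 32 bits + enable bit */
	u32 high_ndesc_addr;	/* next descriptor, high 32 bits */
};

#define DESC_EN	BIT(0)		/* assumed position of the enable bit */

/* Link entry i to entry i + 1 and wrap the tail back to entry 0. */
static void link_ring(struct hw_desc **descs, dma_addr_t *llps, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dma_addr_t next = llps[(i + 1) % n];

		descs[i]->ndesc_addr = lower_32_bits(next) | DESC_EN;
		descs[i]->high_ndesc_addr = upper_32_bits(next);
	}
}

For a terminated (non-cyclic) chain, the same field of the last entry instead has the enable bit cleared, which is exactly what the slave_sg path above does.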
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index 0c5862bf26f8..9e1c6400c77b 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -267,7 +267,7 @@ static struct platform_driver mcf_edma_driver = {
.name = "mcf-edma",
},
.probe = mcf_edma_probe,
- .remove_new = mcf_edma_remove,
+ .remove = mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
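The recurring .remove_new to .remove renames throughout this series finish the platform-driver conversion to a void-returning remove callback: .remove_new was a temporary alias while .remove still meant the old int-returning prototype. A minimal skeleton of the resulting shape (hypothetical foo_* names):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/*
 * Since the conversion, remove callbacks return void: there is nothing
 * useful the driver core can do with an error at remove time anyway.
 */
static void foo_remove(struct platform_device *pdev)
{
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove = foo_remove,	/* was .remove_new during the transition */
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");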
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index b69eabf12a24..9f0c41ca7770 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
{
struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
struct virt_dma_desc *vd;
- unsigned long flags;
- spin_lock_irqsave(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->pc->queue, node)
if (vd->tx.cookie == cookie) {
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
return vd;
}
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->vc.desc_issued, node)
if (vd->tx.cookie == cookie)
@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
if (ret == DMA_COMPLETE || !txstate)
return ret;
- spin_lock_irqsave(&cvc->vc.lock, flags);
+ spin_lock_irqsave(&cvc->pc->lock, flags);
+ spin_lock(&cvc->vc.lock);
vd = mtk_cqdma_find_active_desc(c, cookie);
- spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock(&cvc->vc.lock);
+ spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
cvd = to_cqdma_vdesc(vd);
@@ -922,7 +920,7 @@ static void mtk_cqdma_remove(struct platform_device *pdev)
static struct platform_driver mtk_cqdma_driver = {
.probe = mtk_cqdma_probe,
- .remove_new = mtk_cqdma_remove,
+ .remove = mtk_cqdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_cqdma_match,
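The mtk-cqdma change moves the pc->lock acquisition out of mtk_cqdma_find_active_desc() and into the tx_status caller, so the two locks are always nested in one fixed order: pc->lock (irqsave) outside, vc.lock inside. A generic sketch of that pattern, with hypothetical names:

#include <linux/spinlock.h>

struct chan_ctx {
	spinlock_t pc_lock;	/* outer: protects the physical-channel queue */
	spinlock_t vc_lock;	/* inner: protects the virtual-channel lists */
};

/* Caller holds both locks; the lookup itself takes none. */
static void *find_active_desc_locked(struct chan_ctx *ctx)
{
	/* ... walk the queues and return the matching descriptor ... */
	return NULL;
}

static void query_status(struct chan_ctx *ctx)
{
	unsigned long flags;
	void *vd;

	spin_lock_irqsave(&ctx->pc_lock, flags);	/* outer lock first */
	spin_lock(&ctx->vc_lock);			/* then inner lock */
	vd = find_active_desc_locked(ctx);
	spin_unlock(&ctx->vc_lock);
	spin_unlock_irqrestore(&ctx->pc_lock, flags);
	(void)vd;
}

Taking the locks in the same order at every site is what removes the deadlock window the original per-function locking left open.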
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 58c7961ab9ad..fa77bb24a430 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1038,7 +1038,7 @@ static void mtk_hsdma_remove(struct platform_device *pdev)
static struct platform_driver mtk_hsdma_driver = {
.probe = mtk_hsdma_probe,
- .remove_new = mtk_hsdma_remove,
+ .remove = mtk_hsdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_hsdma_match,
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 1bdc1500be40..08e15177427b 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -637,7 +637,7 @@ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
static struct platform_driver mtk_uart_apdma_driver = {
.probe = mtk_uart_apdma_probe,
- .remove_new = mtk_uart_apdma_remove,
+ .remove = mtk_uart_apdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.pm = &mtk_uart_apdma_pm_ops,
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
index 7b41c670970a..9a5ec247ed6d 100644
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -571,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
- .remove_new = milbeaut_hdmac_remove,
+ .remove = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 2cce529b448e..58d4fd6df0bf 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
- .remove_new = milbeaut_xdmac_remove,
+ .remove = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 136fcaeff8dd..d07229a74886 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -23,9 +25,12 @@
#define DCSR 0x0000
#define DALGN 0x00a0
#define DINT 0x00f0
-#define DDADR 0x0200
+#define DDADR(n) (0x0200 + ((n) << 4))
#define DSADR(n) (0x0204 + ((n) << 4))
#define DTADR(n) (0x0208 + ((n) << 4))
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -42,6 +47,7 @@
#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
#define DCSR_EORINTR BIT(9) /* The end of Receive */
@@ -74,6 +80,16 @@ struct mmp_pdma_desc_hw {
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
+ /*
+	 * The following 32-bit words are only used in the 64-bit, i.e.
+ * LPAE (Long Physical Address Extension) mode.
+ * They are used to specify the high 32 bits of the descriptor's
+ * addresses.
+ */
+ u32 ddadrh; /* High 32-bit of DDADR */
+ u32 dsadrh; /* High 32-bit of DSADR */
+ u32 dtadrh; /* High 32-bit of DTADR */
+ u32 rsvd; /* reserved */
} __aligned(32);
struct mmp_pdma_desc_sw {
@@ -118,12 +134,55 @@ struct mmp_pdma_phy {
struct mmp_pdma_chan *vchan;
};
+/**
+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
+ *
+ * Hardware Register Operations (read/write hardware registers):
+ * @write_next_addr: Function to program address of next descriptor into
+ * DDADR/DDADRH
+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
+ *
+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
+ * @set_desc_src_addr: Function to set the source address in descriptor
+ * @set_desc_dst_addr: Function to set the destination address in descriptor
+ * @get_desc_src_addr: Function to get the source address from descriptor
+ * @get_desc_dst_addr: Function to get the destination address from descriptor
+ *
+ * Controller Configuration:
+ * @run_bits: Control bits in DCSR register for channel start/stop
+ * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
+ * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ */
+struct mmp_pdma_ops {
+ /* Hardware Register Operations */
+ void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
+ u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
+ u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
+
+ /* Descriptor Memory Operations */
+ void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
+ u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
+
+ /* Controller Configuration */
+ u32 run_bits;
+ u64 dma_mask;
+};
+
struct mmp_pdma_device {
int dma_channels;
void __iomem *base;
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ const struct mmp_pdma_ops *ops;
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
@@ -136,24 +195,112 @@ struct mmp_pdma_device {
#define to_mmp_pdma_dev(dmadev) \
container_of(dmadev, struct mmp_pdma_device, device)
-static int mmp_pdma_config_write(struct dma_chan *dchan,
- struct dma_slave_config *cfg,
- enum dma_transfer_direction direction);
+/* For 32-bit PDMA */
+static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(addr, phy->base + DDADR(phy->idx));
+}
+
+static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DSADR(phy->idx));
+}
+
+static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DTADR(phy->idx));
+}
+
+static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = addr;
+}
+
+static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = addr;
+}
+
+static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = addr;
+}
+
+static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dtadr;
+}
+
+/* For 64-bit PDMA */
+static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
+ writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
+}
+
+static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
+{
+ u32 low = readl(phy->base + DSADR(phy->idx));
+ u32 high = readl(phy->base + DSADRH(phy->idx));
+
+ return ((u64)high << 32) | low;
+}
-static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
- u32 reg = (phy->idx << 4) + DDADR;
+ u32 low = readl(phy->base + DTADR(phy->idx));
+ u32 high = readl(phy->base + DTADRH(phy->idx));
- writel(addr, phy->base + reg);
+ return ((u64)high << 32) | low;
}
+static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = lower_32_bits(addr);
+ desc->ddadrh = upper_32_bits(addr);
+}
+
+static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = lower_32_bits(addr);
+ desc->dsadrh = upper_32_bits(addr);
+}
+
+static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = lower_32_bits(addr);
+ desc->dtadrh = upper_32_bits(addr);
+}
+
+static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dsadrh << 32) | desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dtadrh << 32) | desc->dtadr;
+}
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
static void enable_chan(struct mmp_pdma_phy *phy)
{
u32 reg, dalgn;
+ struct mmp_pdma_device *pdev;
if (!phy->vchan)
return;
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
@@ -165,18 +312,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
+ writel(readl(phy->base + reg) | pdev->ops->run_bits,
+ phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dcsr;
if (!phy)
return;
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
+ dcsr = readl(phy->base + reg);
+
+ if (phy->vchan) {
+ struct mmp_pdma_device *pdev;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+ writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
+ } else {
+ /* If no vchan, just clear the RUN bit */
+ writel(dcsr & ~DCSR_RUN, phy->base + reg);
+ }
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -295,6 +453,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
	/* still running; the irq will start the pending list */
if (!chan->idle) {
@@ -329,7 +488,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
- set_desc(chan->phy, desc->async_tx.phys);
+ pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
enable_chan(chan->phy);
chan->idle = false;
}
@@ -445,15 +604,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
size_t copy = 0;
- if (!dchan)
- return NULL;
-
- if (!len)
+ if (!dchan || !len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
@@ -476,13 +634,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -526,6 +685,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
size_t len, avail;
struct scatterlist *sg;
@@ -557,17 +717,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
if (dir == DMA_MEM_TO_DEV) {
- new->desc.dsadr = addr;
+ pdev->ops->set_desc_src_addr(&new->desc, addr);
new->desc.dtadr = chan->dev_addr;
} else {
new->desc.dsadr = chan->dev_addr;
- new->desc.dtadr = addr;
+ pdev->ops->set_desc_dst_addr(&new->desc, addr);
}
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -607,12 +768,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
dma_addr_t dma_src, dma_dst;
if (!dchan || !len || !period_len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
+
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0)
return NULL;
@@ -649,13 +813,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
(DCMD_LENGTH & period_len));
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -676,7 +841,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
first->async_tx.cookie = -EBUSY;
/* make the cyclic link */
- new->desc.ddadr = first->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
chan->cyclic_first = first;
return &first->async_tx;
@@ -762,7 +927,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
dma_cookie_t cookie)
{
struct mmp_pdma_desc_sw *sw;
- u32 curr, residue = 0;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ u64 curr;
+ u32 residue = 0;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -774,17 +941,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
return 0;
if (chan->dir == DMA_DEV_TO_MEM)
- curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ curr = pdev->ops->read_dst_addr(chan->phy);
else
- curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ curr = pdev->ops->read_src_addr(chan->phy);
list_for_each_entry(sw, &chan->chain_running, node) {
- u32 start, end, len;
+ u64 start, end;
+ u32 len;
if (chan->dir == DMA_DEV_TO_MEM)
- start = sw->desc.dtadr;
+ start = pdev->ops->get_desc_dst_addr(&sw->desc);
else
- start = sw->desc.dsadr;
+ start = pdev->ops->get_desc_src_addr(&sw->desc);
len = sw->desc.dcmd & DCMD_LENGTH;
end = start + len;
@@ -800,7 +968,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
if (passed) {
residue += len;
} else if (curr >= start && curr <= end) {
- residue += end - curr;
+ residue += (u32)(end - curr);
passed = true;
}
@@ -994,9 +1162,42 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
+static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
+ .write_next_addr = write_next_addr_32,
+ .read_src_addr = read_src_addr_32,
+ .read_dst_addr = read_dst_addr_32,
+ .set_desc_next_addr = set_desc_next_addr_32,
+ .set_desc_src_addr = set_desc_src_addr_32,
+ .set_desc_dst_addr = set_desc_dst_addr_32,
+ .get_desc_src_addr = get_desc_src_addr_32,
+ .get_desc_dst_addr = get_desc_dst_addr_32,
+ .run_bits = (DCSR_RUN),
+ .dma_mask = 0, /* let OF/platform set DMA mask */
+};
+
+static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
+ .write_next_addr = write_next_addr_64,
+ .read_src_addr = read_src_addr_64,
+ .read_dst_addr = read_dst_addr_64,
+ .set_desc_next_addr = set_desc_next_addr_64,
+ .set_desc_src_addr = set_desc_src_addr_64,
+ .set_desc_dst_addr = set_desc_dst_addr_64,
+ .get_desc_src_addr = get_desc_src_addr_64,
+ .get_desc_dst_addr = get_desc_dst_addr_64,
+ .run_bits = (DCSR_RUN | DCSR_LPAEEN),
+ .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+};
+
static const struct of_device_id mmp_pdma_dt_ids[] = {
- { .compatible = "marvell,pdma-1.0", },
- {}
+ {
+ .compatible = "marvell,pdma-1.0",
+ .data = &marvell_pdma_v1_ops
+ }, {
+ .compatible = "spacemit,k1-pdma",
+ .data = &spacemit_k1_pdma_ops
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
@@ -1019,6 +1220,8 @@ static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct clk *clk;
+ struct reset_control *rst;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,6 +1240,19 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
+ clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
+ NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ pdev->ops = of_device_get_match_data(&op->dev);
+ if (!pdev->ops)
+ return -ENODEV;
+
if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
@@ -1098,7 +1314,10 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- if (pdev->dev->coherent_dma_mask)
+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
+ if (pdev->ops->dma_mask)
+ dma_set_mask(pdev->dev, pdev->ops->dma_mask);
+ else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
@@ -1137,7 +1356,7 @@ static struct platform_driver mmp_pdma_driver = {
},
.id_table = mmp_pdma_id_table,
.probe = mmp_pdma_probe,
- .remove_new = mmp_pdma_remove,
+ .remove = mmp_pdma_remove,
};
module_platform_driver(mmp_pdma_driver);
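The mmp_pdma rework above funnels every descriptor-address access through an ops table so one driver core can serve both the 32-bit Marvell PDMA and the 64-bit LPAE-capable SpacemiT K1 variant. A condensed sketch of the dispatch idea, with simplified field names standing in for the dsadr/dsadrh pairs:

#include <linux/kernel.h>
#include <linux/types.h>

struct desc {
	u32 addr_lo;
	u32 addr_hi;	/* only meaningful on LPAE-capable hardware */
};

struct addr_ops {
	void (*set_addr)(struct desc *d, dma_addr_t a);
	u64 (*get_addr)(const struct desc *d);
};

static void set_addr_32(struct desc *d, dma_addr_t a)
{
	d->addr_lo = a;	/* high word unused on 32-bit controllers */
}

static u64 get_addr_32(const struct desc *d)
{
	return d->addr_lo;
}

static void set_addr_64(struct desc *d, dma_addr_t a)
{
	d->addr_lo = lower_32_bits(a);
	d->addr_hi = upper_32_bits(a);
}

static u64 get_addr_64(const struct desc *d)
{
	return ((u64)d->addr_hi << 32) | d->addr_lo;
}

static const struct addr_ops ops_32 = { set_addr_32, get_addr_32 };
static const struct addr_ops ops_64 = { set_addr_64, get_addr_64 };

The variant is chosen once at probe time, as the diff does with of_device_get_match_data() and the per-compatible .data entries in mmp_pdma_dt_ids, so the hot paths pay only an indirect call rather than a per-access branch.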
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b76fe99e1151..b7fb843c67a6 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
+ type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
	/* always have a couple of channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -736,7 +736,7 @@ static struct platform_driver mmp_tdma_driver = {
.of_match_table = mmp_tdma_dt_ids,
},
.probe = mmp_tdma_probe,
- .remove_new = mmp_tdma_remove,
+ .remove = mmp_tdma_remove,
};
module_platform_driver(mmp_tdma_driver);
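The mmp_tdma hunk above swaps a cast to the enum for a cast through kernel_ulong_t. Match data frequently carries a small integer disguised as a pointer, and casting the pointer through a pointer-sized integer type avoids pointer-to-enum cast warnings. A sketch with a hypothetical enum:

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>

enum my_type { TYPE_A, TYPE_B };

/*
 * device_get_match_data() returns const void *; going through
 * kernel_ulong_t (pointer-sized) keeps the compiler quiet when the
 * "pointer" is really just an enumerator value.
 */
static enum my_type get_type(struct device *dev)
{
	return (kernel_ulong_t)device_get_match_data(dev);
}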
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 66dc6d31b603..de09e1ab7767 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -644,7 +644,7 @@ MODULE_DEVICE_TABLE(of, moxart_dma_match);
static struct platform_driver moxart_driver = {
.probe = moxart_probe,
- .remove_new = moxart_remove,
+ .remove = moxart_remove,
.driver = {
.name = "moxart-dma-engine",
.of_match_table = moxart_dma_match,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 68c247a46321..bf131cb5db66 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1110,7 +1110,7 @@ MODULE_DEVICE_TABLE(of, mpc_dma_match);
static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
- .remove_new = mpc_dma_remove,
+ .remove = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_dma_match,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 43efce77bb57..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1153,8 +1163,15 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}
@@ -1369,10 +1386,9 @@ static int mv_xor_probe(struct platform_device *pdev)
return 0;
if (pdev->dev.of_node) {
- struct device_node *np;
int i = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
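The mv_xor fix adds the dma_mapping_error() checks that dma_map_single() has always required, and unwinds earlier mappings on failure via the extended goto ladder. The canonical shape of that pattern, reduced to two buffers:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_two(struct device *dev, void *a, void *b, size_t len,
		   dma_addr_t *da, dma_addr_t *db)
{
	*da = dma_map_single(dev, a, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *da))
		return -ENOMEM;

	*db = dma_map_single(dev, b, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *db)) {
		/* unwind the first mapping before bailing out */
		dma_unmap_single(dev, *da, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}

A mapping can legitimately fail (IOMMU exhaustion, SWIOTLB pressure), so using the returned handle unchecked, as the old code did, risks programming the engine with an invalid bus address.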
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c8c67f4d982c..cad4d4fb51ac 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -635,7 +635,7 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
writel(MV_XOR_V2_DESC_NUM,
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
- /* write the DESQ address to the DMA enngine*/
+	/* write the DESQ address to the DMA engine */
writel(lower_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
writel(upper_32_bits(xor_dev->hw_desq),
@@ -884,7 +884,7 @@ static struct platform_driver mv_xor_v2_driver = {
.probe = mv_xor_v2_probe,
.suspend = mv_xor_v2_suspend,
.resume = mv_xor_v2_resume,
- .remove_new = mv_xor_v2_remove,
+ .remove = mv_xor_v2_remove,
.driver = {
.name = "mv_xor_v2",
.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 3b011a91d48e..765462303de0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--; hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+				 sizeof(*hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)
@@ -1351,7 +1364,7 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == 1) {
eirq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irqbuf[0];
} else {
eirq = platform_get_irq_byname(pdev, "error");
@@ -1361,16 +1374,15 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == num_channels + 1) {
struct nbpf_channel *chan;
- for (i = 0, chan = nbpf->chan; i <= num_channels;
+ for (i = 0, chan = nbpf->chan; i < num_channels;
i++, chan++) {
/* Skip the error IRQ */
if (irqbuf[i] == eirq)
i++;
+ if (i >= ARRAY_SIZE(irqbuf))
+ return -EINVAL;
chan->irq = irqbuf[i];
}
-
- if (chan != nbpf->chan + num_channels)
- return -EINVAL;
} else {
/* 2 IRQs and more than one channel */
if (irqbuf[0] == eirq)
@@ -1378,7 +1390,7 @@ static int nbpf_probe(struct platform_device *pdev)
else
irq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irq;
}
}
@@ -1515,7 +1527,7 @@ static struct platform_driver nbpf_driver = {
},
.id_table = nbpf_ids,
.probe = nbpf_probe,
- .remove_new = nbpf_remove,
+ .remove = nbpf_remove,
};
module_platform_driver(nbpf_driver);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index aa436f9e3571..57cec757d8f5 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1252,7 +1252,7 @@ static void owl_dma_remove(struct platform_device *pdev)
static struct platform_driver owl_dma_driver = {
.probe = owl_dma_probe,
- .remove_new = owl_dma_remove,
+ .remove = owl_dma_remove,
.driver = {
.name = "dma-owl",
.of_match_table = of_match_ptr(owl_dma_match),
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 7b78759ac734..61500ad7c850 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -874,7 +874,7 @@ static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
__func__, src_cnt, state, addr_count, order);
for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
BUG();
}
@@ -3636,7 +3636,7 @@ static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
__func__, ppc440spe_chan->pending);
if (ppc440spe_chan->pending) {
@@ -4549,7 +4549,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct platform_driver ppc440spe_adma_driver = {
.probe = ppc440spe_adma_probe,
- .remove_new = ppc440spe_adma_remove,
+ .remove = ppc440spe_adma_remove,
.driver = {
.name = "PPC440SP(E)-ADMA",
.of_match_table = ppc440spe_adma_of_match,
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
deleted file mode 100644
index b430edd709f9..000000000000
--- a/drivers/dma/ptdma/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config AMD_PTDMA
- tristate "AMD PassThru DMA Engine"
- depends on X86_64 && PCI
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the AMD PTDMA controller. This controller
- provides DMA capabilities to perform high bandwidth memory to
- memory and IO copy operations. It performs DMA transfer through
- queue-based descriptor management. This DMA controller is intended
- to be used with AMD Non-Transparent Bridge devices and not for
- general purpose peripheral DMA.
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/ptdma/ptdma-debugfs.c
deleted file mode 100644
index c8307d3044a3..000000000000
--- a/drivers/dma/ptdma/ptdma-debugfs.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AMD Passthrough DMA device driver
- * -- Based on the CCP driver
- *
- * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
- *
- * Author: Sanjay R Mehta <sanju.mehta@amd.com>
- * Author: Gary R Hook <gary.hook@amd.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "ptdma.h"
-
-/* DebugFS helpers */
-#define RI_VERSION_NUM 0x0000003F
-
-#define RI_NUM_VQM 0x00078000
-#define RI_NVQM_SHIFT 15
-
-static int pt_debugfs_info_show(struct seq_file *s, void *p)
-{
- struct pt_device *pt = s->private;
- unsigned int regval;
-
- seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
- seq_printf(s, " # Queues: %d\n", 1);
- seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
-
- regval = ioread32(pt->io_regs + CMD_PT_VERSION);
-
- seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
- seq_puts(s, " Engines:");
- seq_puts(s, "\n");
- seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
-
- return 0;
-}
-
-/*
- * Return a formatted buffer containing the current
- * statistics of queue for PTDMA
- */
-static int pt_debugfs_stats_show(struct seq_file *s, void *p)
-{
- struct pt_device *pt = s->private;
-
- seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
-
- return 0;
-}
-
-static int pt_debugfs_queue_show(struct seq_file *s, void *p)
-{
- struct pt_cmd_queue *cmd_q = s->private;
- unsigned int regval;
-
- if (!cmd_q)
- return 0;
-
- seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
-
- regval = ioread32(cmd_q->reg_control + 0x000C);
-
- seq_puts(s, " Enabled Interrupts:");
- if (regval & INT_EMPTY_QUEUE)
- seq_puts(s, " EMPTY");
- if (regval & INT_QUEUE_STOPPED)
- seq_puts(s, " STOPPED");
- if (regval & INT_ERROR)
- seq_puts(s, " ERROR");
- if (regval & INT_COMPLETION)
- seq_puts(s, " COMPLETION");
- seq_puts(s, "\n");
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
-DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
-DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
-
-void ptdma_debugfs_setup(struct pt_device *pt)
-{
- struct pt_cmd_queue *cmd_q;
- struct dentry *debugfs_q_instance;
-
- if (!debugfs_initialized())
- return;
-
- debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
- &pt_debugfs_info_fops);
-
- debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
- &pt_debugfs_stats_fops);
-
- cmd_q = &pt->cmd_q;
-
- debugfs_q_instance =
- debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
-
- debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
- &pt_debugfs_queue_fops);
-}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
deleted file mode 100644
index f79240734807..000000000000
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ /dev/null
@@ -1,411 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AMD Passthrough DMA device driver
- * -- Based on the CCP driver
- *
- * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
- *
- * Author: Sanjay R Mehta <sanju.mehta@amd.com>
- * Author: Gary R Hook <gary.hook@amd.com>
- */
-
-#include "ptdma.h"
-#include "../dmaengine.h"
-#include "../virt-dma.h"
-
-static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
-{
- return container_of(dma_chan, struct pt_dma_chan, vc.chan);
-}
-
-static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
-{
- return container_of(vd, struct pt_dma_desc, vd);
-}
-
-static void pt_free_chan_resources(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
-
- vchan_free_chan_resources(&chan->vc);
-}
-
-static void pt_synchronize(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
-
- vchan_synchronize(&chan->vc);
-}
-
-static void pt_do_cleanup(struct virt_dma_desc *vd)
-{
- struct pt_dma_desc *desc = to_pt_desc(vd);
- struct pt_device *pt = desc->pt;
-
- kmem_cache_free(pt->dma_desc_cache, desc);
-}
-
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
-{
- struct pt_passthru_engine *pt_engine;
- struct pt_device *pt;
- struct pt_cmd *pt_cmd;
- struct pt_cmd_queue *cmd_q;
-
- desc->issued_to_hw = 1;
-
- pt_cmd = &desc->pt_cmd;
- pt = pt_cmd->pt;
- cmd_q = &pt->cmd_q;
- pt_engine = &pt_cmd->passthru;
-
- pt->tdata.cmd = pt_cmd;
-
- /* Execute the command */
- pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
-
- return 0;
-}
-
-static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
-{
- /* Get the next DMA descriptor on the active list */
- struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
-
- return vd ? to_pt_desc(vd) : NULL;
-}
-
-static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
- struct pt_dma_desc *desc)
-{
- struct dma_async_tx_descriptor *tx_desc;
- struct virt_dma_desc *vd;
- unsigned long flags;
-
- /* Loop over descriptors until one is found with commands */
- do {
- if (desc) {
- if (!desc->issued_to_hw) {
- /* No errors, keep going */
- if (desc->status != DMA_ERROR)
- return desc;
- }
-
- tx_desc = &desc->vd.tx;
- vd = &desc->vd;
- } else {
- tx_desc = NULL;
- }
-
- spin_lock_irqsave(&chan->vc.lock, flags);
-
- if (desc) {
- if (desc->status != DMA_COMPLETE) {
- if (desc->status != DMA_ERROR)
- desc->status = DMA_COMPLETE;
-
- dma_cookie_complete(tx_desc);
- dma_descriptor_unmap(tx_desc);
- list_del(&desc->vd.node);
- } else {
- /* Don't handle it twice */
- tx_desc = NULL;
- }
- }
-
- desc = pt_next_dma_desc(chan);
-
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-
- if (tx_desc) {
- dmaengine_desc_get_callback_invoke(tx_desc, NULL);
- dma_run_dependencies(tx_desc);
- vchan_vdesc_fini(vd);
- }
- } while (desc);
-
- return NULL;
-}
-
-static void pt_cmd_callback(void *data, int err)
-{
- struct pt_dma_desc *desc = data;
- struct dma_chan *dma_chan;
- struct pt_dma_chan *chan;
- int ret;
-
- if (err == -EINPROGRESS)
- return;
-
- dma_chan = desc->vd.tx.chan;
- chan = to_pt_chan(dma_chan);
-
- if (err)
- desc->status = DMA_ERROR;
-
- while (true) {
- /* Check for DMA descriptor completion */
- desc = pt_handle_active_desc(chan, desc);
-
- /* Don't submit cmd if no descriptor or DMA is paused */
- if (!desc)
- break;
-
- ret = pt_dma_start_desc(desc);
- if (!ret)
- break;
-
- desc->status = DMA_ERROR;
- }
-}
-
-static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
- unsigned long flags)
-{
- struct pt_dma_desc *desc;
-
- desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
- if (!desc)
- return NULL;
-
- vchan_tx_prep(&chan->vc, &desc->vd, flags);
-
- desc->pt = chan->pt;
- desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
- desc->issued_to_hw = 0;
- desc->status = DMA_IN_PROGRESS;
-
- return desc;
-}
-
-static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
- dma_addr_t dst,
- dma_addr_t src,
- unsigned int len,
- unsigned long flags)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- struct pt_passthru_engine *pt_engine;
- struct pt_dma_desc *desc;
- struct pt_cmd *pt_cmd;
-
- desc = pt_alloc_dma_desc(chan, flags);
- if (!desc)
- return NULL;
-
- pt_cmd = &desc->pt_cmd;
- pt_cmd->pt = chan->pt;
- pt_engine = &pt_cmd->passthru;
- pt_cmd->engine = PT_ENGINE_PASSTHRU;
- pt_engine->src_dma = src;
- pt_engine->dst_dma = dst;
- pt_engine->src_len = len;
- pt_cmd->pt_cmd_callback = pt_cmd_callback;
- pt_cmd->data = desc;
-
- desc->len = len;
-
- return desc;
-}
-
-static struct dma_async_tx_descriptor *
-pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
- dma_addr_t src, size_t len, unsigned long flags)
-{
- struct pt_dma_desc *desc;
-
- desc = pt_create_desc(dma_chan, dst, src, len, flags);
- if (!desc)
- return NULL;
-
- return &desc->vd.tx;
-}
-
-static struct dma_async_tx_descriptor *
-pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- struct pt_dma_desc *desc;
-
- desc = pt_alloc_dma_desc(chan, flags);
- if (!desc)
- return NULL;
-
- return &desc->vd.tx;
-}
-
-static void pt_issue_pending(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- struct pt_dma_desc *desc;
- unsigned long flags;
- bool engine_is_idle = true;
-
- spin_lock_irqsave(&chan->vc.lock, flags);
-
- desc = pt_next_dma_desc(chan);
- if (desc)
- engine_is_idle = false;
-
- vchan_issue_pending(&chan->vc);
-
- desc = pt_next_dma_desc(chan);
-
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-
- /* If there was nothing active, start processing */
- if (engine_is_idle && desc)
- pt_cmd_callback(desc, 0);
-}
-
-static enum dma_status
-pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct pt_device *pt = to_pt_chan(c)->pt;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
-
- pt_check_status_trans(pt, cmd_q);
- return dma_cookie_status(c, cookie, txstate);
-}
-
-static int pt_pause(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vc.lock, flags);
- pt_stop_queue(&chan->pt->cmd_q);
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-
- return 0;
-}
-
-static int pt_resume(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- struct pt_dma_desc *desc = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vc.lock, flags);
- pt_start_queue(&chan->pt->cmd_q);
- desc = pt_next_dma_desc(chan);
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-
- /* If there was something active, re-start */
- if (desc)
- pt_cmd_callback(desc, 0);
-
- return 0;
-}
-
-static int pt_terminate_all(struct dma_chan *dma_chan)
-{
- struct pt_dma_chan *chan = to_pt_chan(dma_chan);
- unsigned long flags;
- struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
- LIST_HEAD(head);
-
- iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
- spin_lock_irqsave(&chan->vc.lock, flags);
- vchan_get_all_descriptors(&chan->vc, &head);
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-
- vchan_dma_desc_free_list(&chan->vc, &head);
- vchan_free_chan_resources(&chan->vc);
-
- return 0;
-}
-
-int pt_dmaengine_register(struct pt_device *pt)
-{
- struct pt_dma_chan *chan;
- struct dma_device *dma_dev = &pt->dma_dev;
- char *cmd_cache_name;
- char *desc_cache_name;
- int ret;
-
- pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
- GFP_KERNEL);
- if (!pt->pt_dma_chan)
- return -ENOMEM;
-
- cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
- "%s-dmaengine-cmd-cache",
- dev_name(pt->dev));
- if (!cmd_cache_name)
- return -ENOMEM;
-
- desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
- "%s-dmaengine-desc-cache",
- dev_name(pt->dev));
- if (!desc_cache_name) {
- ret = -ENOMEM;
- goto err_cache;
- }
-
- pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
- sizeof(struct pt_dma_desc), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!pt->dma_desc_cache) {
- ret = -ENOMEM;
- goto err_cache;
- }
-
- dma_dev->dev = pt->dev;
- dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
- dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
- dma_dev->directions = DMA_MEM_TO_MEM;
- dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
-
- /*
- * PTDMA is intended to be used with the AMD NTB devices, hence
- * marking it as DMA_PRIVATE.
- */
- dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
-
- INIT_LIST_HEAD(&dma_dev->channels);
-
- chan = pt->pt_dma_chan;
- chan->pt = pt;
-
- /* Set base and prep routines */
- dma_dev->device_free_chan_resources = pt_free_chan_resources;
- dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
- dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
- dma_dev->device_issue_pending = pt_issue_pending;
- dma_dev->device_tx_status = pt_tx_status;
- dma_dev->device_pause = pt_pause;
- dma_dev->device_resume = pt_resume;
- dma_dev->device_terminate_all = pt_terminate_all;
- dma_dev->device_synchronize = pt_synchronize;
-
- chan->vc.desc_free = pt_do_cleanup;
- vchan_init(&chan->vc, dma_dev);
-
- ret = dma_async_device_register(dma_dev);
- if (ret)
- goto err_reg;
-
- return 0;
-
-err_reg:
- kmem_cache_destroy(pt->dma_desc_cache);
-
-err_cache:
- kmem_cache_destroy(pt->dma_cmd_cache);
-
- return ret;
-}
-
-void pt_dmaengine_unregister(struct pt_device *pt)
-{
- struct dma_device *dma_dev = &pt->dma_dev;
-
- dma_async_device_unregister(dma_dev);
-
- kmem_cache_destroy(pt->dma_desc_cache);
- kmem_cache_destroy(pt->dma_cmd_cache);
-}
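The deleted ptdma engine used the common virt-dma idiom in pt_issue_pending(): under the channel lock, note whether anything was already active and move submitted descriptors to the issued list; then kick the hardware outside the lock only if the engine was idle. A stripped-down sketch against the virt-dma API (kernel-internal drivers/dma/virt-dma.h; my_* names are hypothetical):

#include "virt-dma.h"

struct my_chan {
	struct virt_dma_chan vc;
};

static void my_start(struct virt_dma_desc *vd)
{
	/* program the hardware with vd's first transfer */
}

static void my_issue_pending(struct dma_chan *c)
{
	struct my_chan *mc = container_of(c, struct my_chan, vc.chan);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	bool idle;

	spin_lock_irqsave(&mc->vc.lock, flags);
	idle = !vchan_next_desc(&mc->vc);	/* anything already running? */
	vchan_issue_pending(&mc->vc);		/* submitted -> issued */
	if (idle)
		vd = vchan_next_desc(&mc->vc);
	spin_unlock_irqrestore(&mc->vc.lock, flags);

	if (vd)
		my_start(vd);			/* kick outside the lock */
}

Kicking outside the lock matters because starting a descriptor may complete it synchronously and re-enter the completion path, which takes vc.lock itself.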
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 31f8da810c05..249296389771 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
@@ -277,8 +278,7 @@ static int chan_state_show(struct seq_file *s, void *p)
seq_printf(s, "\tPriority : %s\n",
str_prio[(phy->idx & 0xf) / 4]);
seq_printf(s, "\tUnaligned transfer bit: %s\n",
- _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
- "yes" : "no");
+ str_yes_no(_phy_readl_relaxed(phy, DALGN) & BIT(phy->idx)));
seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
@@ -1442,7 +1442,7 @@ static struct platform_driver pxad_driver = {
},
.id_table = pxad_id_table,
.probe = pxad_probe,
- .remove_new = pxad_remove,
+ .remove = pxad_remove,
};
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
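The pxa_dma cleanup swaps an open-coded ternary for str_yes_no() from <linux/string_choices.h>, which also provides str_enabled_disabled(), str_on_off() and similar helpers. Usage is direct:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void report_alignment(bool unaligned)
{
	/* str_yes_no(x) returns "yes" or "no"; no ternary needed */
	pr_info("Unaligned transfer bit: %s\n", str_yes_no(unaligned));
}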
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d43a881e43b9..2cf060174795 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev)
if (!bdev->bamclk) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-channels unspecified in dt\n");
+ return ret;
+ }
ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
&bdev->num_ees);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-ees unspecified in dt\n");
+ return ret;
+ }
}
ret = clk_prepare_enable(bdev->bamclk);
@@ -1469,7 +1473,7 @@ static const struct dev_pm_ops bam_dma_pm_ops = {
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
- .remove_new = bam_dma_remove,
+ .remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
.pm = &bam_dma_pm_ops,
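The bam_dma fix turns two log-and-continue sites into log-and-return: without the early return, probe carried on with num_channels/num_ees left at zero and misconfigured the controller later. The intended shape, as a small helper sketch:

#include <linux/device.h>
#include <linux/of.h>

static int read_required_u32(struct device *dev, const char *prop, u32 *out)
{
	int ret = of_property_read_u32(dev->of_node, prop, out);

	if (ret) {
		dev_err(dev, "%s unspecified in dt\n", prop);
		return ret;	/* don't probe on with *out uninitialized */
	}
	return 0;
}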
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 52a7c8f2498f..8e87738086b2 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -18,6 +18,7 @@
#include "../virt-dma.h"
#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_IMMEDIATE_DMA 0x11
#define TRE_TYPE_GO 0x20
#define TRE_TYPE_CONFIG0 0x22
@@ -64,6 +65,7 @@
/* DMA TRE */
#define TRE_DMA_LEN GENMASK(23, 0)
+#define TRE_DMA_IMMEDIATE_LEN GENMASK(3, 0)
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
@@ -567,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
-/* gpi_write_reg_field - write to specific bit field */
-static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
- u32 mask, u32 shift, u32 val)
-{
- u32 tmp = gpi_read_reg(gpii, addr);
-
- tmp &= ~mask;
- val = tmp | ((val << shift) & mask);
- gpi_write_reg(gpii, addr, val);
-}
-
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
@@ -1711,6 +1702,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
dma_addr_t address;
struct gpi_tre *tre;
unsigned int i;
+ int len;
/* first create config tre if applicable */
if (direction == DMA_MEM_TO_DEV && spi->set_config) {
@@ -1763,14 +1755,30 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre_idx++;
address = sg_dma_address(sgl);
- tre->dword[0] = lower_32_bits(address);
- tre->dword[1] = upper_32_bits(address);
+ len = sg_dma_len(sgl);
- tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+	/* Use immediate DMA for write transfers with data length up to 8 bytes */
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ /*
+		 * For immediate DMA the data length may be less than 8 bytes,
+		 * so initialize both dwords to 0 before copying the payload in.
+ */
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
- tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
- if (direction == DMA_MEM_TO_DEV)
- tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+
+ tre->dword[3] |= u32_encode_bits(direction == DMA_MEM_TO_DEV,
+ TRE_FLAGS_IEOT);
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 4d2cd8d9ec74..c2b3e4452e71 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -948,7 +948,7 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
- .remove_new = hidma_remove,
+ .remove = hidma_remove,
.shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index c1db398adc84..6be54fddcee1 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -937,7 +937,7 @@ MODULE_DEVICE_TABLE(of, adm_of_match);
static struct platform_driver adm_dma_driver = {
.probe = adm_dma_probe,
- .remove_new = adm_dma_remove,
+ .remove = adm_dma_remove,
.driver = {
.name = "adm-dma-engine",
.of_match_table = adm_of_match,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 01e656c69e6c..dc1a9a05252e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -1079,7 +1079,7 @@ static struct platform_driver sa11x0_dma_driver = {
.pm = &sa11x0_dma_pm_ops,
},
.probe = sa11x0_dma_probe,
- .remove_new = sa11x0_dma_remove,
+ .remove = sa11x0_dma_remove,
};
static int __init sa11x0_dma_init(void)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 428473611115..7ad3c29be146 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -354,7 +354,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
if (!residue) {
tasklet_hi_schedule(&chan->done_tasklet);
} else {
- /* submit next trascatioin if possible */
+ /* submit next transaction if possible */
struct sf_pdma_desc *desc = chan->desc;
desc->src_addr += desc->xfer_size - residue;
@@ -633,7 +633,7 @@ MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
.probe = sf_pdma_probe,
- .remove_new = sf_pdma_remove,
+ .remove = sf_pdma_remove,
.driver = {
.name = "sf-pdma",
.of_match_table = sf_pdma_dt_ids,
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index c0b2997ab7fd..8184d475a49a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
- default y
+ default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.
@@ -49,10 +49,10 @@ config RENESAS_USB_DMAC
SoCs.
config RZ_DMAC
- tristate "Renesas RZ/{G2L,V2L} DMA Controller"
- depends on ARCH_RZG2L || COMPILE_TEST
+ tristate "Renesas RZ DMA Controller"
+ depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
- This driver supports the general purpose DMA controller found in the
- Renesas RZ/{G2L,V2L} SoC variants.
+ This driver supports the general purpose DMA controller typically
+ found in the Renesas RZ SoC variants.
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 1094a2f82164..0c45ce8c74aa 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -2023,6 +2023,10 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-gen4-dmac",
.data = &rcar_gen4_dmac_data,
}, {
+ /*
+ * Backward compatibility for kernels v5.12 - v5.19,
+ * which didn't combine this with "renesas,rcar-gen4-dmac"
+ */
.compatible = "renesas,dmac-r8a779a0",
.data = &rcar_gen4_dmac_data,
},
@@ -2037,7 +2041,7 @@ static struct platform_driver rcar_dmac_driver = {
.of_match_table = rcar_dmac_of_ids,
},
.probe = rcar_dmac_probe,
- .remove_new = rcar_dmac_remove,
+ .remove = rcar_dmac_remove,
.shutdown = rcar_dmac_shutdown,
};
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 65a27c5a7bce..1f687b08d6b8 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -14,6 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -89,8 +90,14 @@ struct rz_dmac_chan {
#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+struct rz_dmac_icu {
+ struct platform_device *pdev;
+ u8 dmac_index;
+};
+
struct rz_dmac {
struct dma_device engine;
+ struct rz_dmac_icu icu;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@@ -99,6 +106,8 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
+ bool has_icu;
+
DECLARE_BITMAP(modules, 1024);
};
@@ -167,6 +176,9 @@ struct rz_dmac {
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64
+/* RZ/V2H ICU related */
+#define RZV2H_MAX_DMAC_INDEX 4
+
/*
* -----------------------------------------------------------------------------
* Device access
@@ -324,7 +336,13 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@@ -375,7 +393,13 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
- rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index, channel->mid_rid);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ }
+
channel->chctrl = CHCTRL_SETEN;
}
@@ -601,22 +625,25 @@ static int rz_dmac_config(struct dma_chan *chan,
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
- channel->src_per_address = config->src_addr;
channel->dst_per_address = config->dst_addr;
-
- val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
-
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ if (channel->dst_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
- val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ }
+ channel->src_per_address = config->src_addr;
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ if (channel->src_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ }
return 0;
}
@@ -644,7 +671,13 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
}
/*
@@ -745,7 +778,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+ return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
}
/*
@@ -820,6 +854,38 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
+static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ uint32_t dmac_index;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret)
+ return ret;
+
+ dmac->has_icu = true;
+
+ dmac->icu.pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!dmac->icu.pdev) {
+ dev_err(dev, "ICU device not found.\n");
+ return -ENODEV;
+ }
+
+ dmac_index = args.args[0];
+ if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
+ dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
+ return -EINVAL;
+ }
+ dmac->icu.dmac_index = dmac_index;
+
+ return 0;
+}
+
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -836,7 +902,7 @@ static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
return -EINVAL;
}
- return 0;
+ return rz_dmac_parse_of_icu(dev, dmac);
}
static int rz_dmac_probe(struct platform_device *pdev)
@@ -870,9 +936,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dmac->ext_base))
- return PTR_ERR(dmac->ext_base);
+ if (!dmac->has_icu) {
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+ }
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
@@ -890,7 +958,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
/* Initialize the channels. */
INIT_LIST_HEAD(&dmac->engine.channels);
- dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
+ dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
if (IS_ERR(dmac->rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
"failed to get resets\n");
@@ -987,9 +1055,12 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ /* Sentinel */ }
};
@@ -1001,7 +1072,7 @@ static struct platform_driver rz_dmac_driver = {
.of_match_table = of_rz_dmac_match,
},
.probe = rz_dmac_probe,
- .remove_new = rz_dmac_remove,
+ .remove = rz_dmac_remove,
};
module_platform_driver(rz_dmac_driver);
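
The optional-phandle pattern used by rz_dmac_parse_of_icu() above is worth calling out: -ENOENT from of_parse_phandle_with_fixed_args() means the property is simply absent (legacy DMARS path), while any other error is fatal. A condensed sketch under that reading, reusing the patch's names and types; the DMAC index range check is omitted for brevity:

static int parse_optional_icu(struct device *dev, struct rz_dmac *dmac)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_fixed_args(dev->of_node, "renesas,icu",
					       1, 0, &args);
	if (ret == -ENOENT)
		return 0;	/* no ICU: fall back to the DMARS register */
	if (ret)
		return ret;

	dmac->has_icu = true;
	dmac->icu.pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);	/* drop the reference taken by the parser */

	return dmac->icu.pdev ? 0 : -ENODEV;
}
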
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 588c5f409a80..834741adadaa 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
- /*
- * TODO: .xfer_setup() might fail on some platforms.
- * Make it int then, on error remove chunks from the
- * queue again
- */
- ops->setup_xfer(schan, schan->slave_id);
+
+ ret = ops->setup_xfer(schan, schan->slave_id);
+ if (ret < 0) {
+ dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
+
+ /* Remove chunks from the queue and mark them as idle */
+ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
+ if (chunk->cookie == cookie) {
+ chunk->mark = DESC_IDLE;
+ list_move(&chunk->node, &schan->ld_free);
+ }
+ }
+
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ ret = pm_runtime_put(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ return ret;
+ }
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
@@ -725,7 +738,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
slave_addr = ops->slave_addr(schan);
/*
- * Allocate the sg list dynamically as it would consumer too much stack
+ * Allocate the sg list dynamically as it would consume too much stack
* space.
*/
sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
@@ -961,7 +974,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
spin_lock_init(&schan->chan_lock);
- /* Init descripter manage list */
+ /* Init descriptor manage list */
INIT_LIST_HEAD(&schan->ld_queue);
INIT_LIST_HEAD(&schan->ld_free);
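
A freestanding model of the rollback added to shdma_tx_submit() above: when setup fails, every chunk submitted under the failing cookie goes back to the free pool. A plain array stands in for the kernel's ld_queue and list_for_each_entry_safe()/list_move(); chunk and rollback are invented names.

#include <stdio.h>

#define NCHUNKS 4

struct chunk { int cookie; int idle; };

static void rollback(struct chunk *q, int n, int bad_cookie)
{
	for (int i = 0; i < n; i++) {
		if (q[i].cookie == bad_cookie)
			q[i].idle = 1;	/* back to the free pool */
	}
}

int main(void)
{
	struct chunk q[NCHUNKS] = {
		{ .cookie = 7 }, { .cookie = 8 }, { .cookie = 8 }, { .cookie = 9 },
	};

	rollback(q, NCHUNKS, 8);	/* pretend cookie 8 failed setup */
	for (int i = 0; i < NCHUNKS; i++)
		printf("chunk %d cookie %d idle %d\n", i, q[i].cookie, q[i].idle);
	return 0;
}
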
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 8ead0a1fd237..603e15102e45 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
return dmae_is_busy(sh_chan);
}
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- int slave_id)
+static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
+ int ret = 0;
if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
+ ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
+ if (ret < 0)
+ goto end;
+
+ ret = dmae_set_chcr(sh_chan, cfg->chcr);
+ if (ret < 0)
+ goto end;
+
} else {
dmae_init(sh_chan);
}
+
+end:
+ return ret;
}
/*
@@ -906,7 +915,7 @@ static struct platform_driver sh_dmae_driver = {
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
},
- .remove_new = sh_dmae_remove,
+ .remove = sh_dmae_remove,
};
static int __init sh_dmae_init(void)
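
The same control flow as the new sh_dmae_setup_xfer(), written with early returns instead of a label; a sketch only, using the driver's own types and helpers, to show the error propagation now expected from .setup_xfer:

static int setup_xfer_sketch(struct sh_dmae_chan *sh_chan,
			     const struct sh_dmae_slave_config *cfg,
			     int slave_id)
{
	int ret;

	if (slave_id < 0) {
		dmae_init(sh_chan);
		return 0;
	}

	ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
	if (ret < 0)
		return ret;

	return dmae_set_chcr(sh_chan, cfg->chcr);
}
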
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f7cd0cad056c..7e2b6c97fa2f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -301,7 +301,7 @@ static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc = NULL;
unsigned long flags;
- /* Get a freed descritpor */
+ /* Get a freed descriptor */
spin_lock_irqsave(&chan->vc.lock, flags);
list_for_each_entry(desc, &chan->desc_freed, node) {
if (sg_len <= desc->sg_allocated_len) {
@@ -899,7 +899,7 @@ static struct platform_driver usb_dmac_driver = {
.of_match_table = usb_dmac_of_ids,
},
.probe = usb_dmac_probe,
- .remove_new = usb_dmac_remove,
+ .remove = usb_dmac_remove,
.shutdown = usb_dmac_shutdown,
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 3f54ff37c5e0..187a090463ce 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1298,7 +1298,7 @@ static const struct dev_pm_ops sprd_dma_pm_ops = {
static struct platform_driver sprd_dma_driver = {
.probe = sprd_dma_probe,
- .remove_new = sprd_dma_remove,
+ .remove = sprd_dma_remove,
.driver = {
.name = "sprd-dma",
.of_match_table = sprd_dma_match,
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 8880b5e336f8..c65ee0c7bfbd 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -858,7 +858,7 @@ static struct platform_driver st_fdma_platform_driver = {
.of_match_table = st_fdma_match,
},
.probe = st_fdma_probe,
- .remove_new = st_fdma_remove,
+ .remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 917f8e922373..04389936c8a6 100644
--- a/drivers/dma/stm32/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
- else if (scr & STM32_DMA_SCR_DBM)
+ else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
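
The num_sgs > 2 guard added in stm32_dma_handle_chan_done() reads as: with exactly two periods in double-buffer mode the two buffer registers already ping-pong on their own, and only a third or later period forces software to reprogram the inactive buffer. A tiny model of that condition, under that assumed semantics:

#include <stdbool.h>
#include <stdio.h>

static bool needs_next_sg(bool dbm_enabled, unsigned int num_sgs)
{
	return dbm_enabled && num_sgs > 2;
}

int main(void)
{
	printf("2 periods: %d\n", needs_next_sg(true, 2));	/* 0: HW ping-pongs */
	printf("4 periods: %d\n", needs_next_sg(true, 4));	/* 1: SW reprograms */
	return 0;
}
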
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0be6e944df6f..50e7106c5cb7 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -221,6 +221,8 @@ enum stm32_dma3_port_data_width {
#define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */
#define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */
#define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */
+#define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */
+#define STM32_DMA3_DT_NOREFACT BIT(17)
/* struct stm32_dma3_chan .config_set bitfield */
#define STM32_DMA3_CFG_SET_DT BIT(0)
@@ -228,6 +230,8 @@ enum stm32_dma3_port_data_width {
#define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA)
#define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64)
+#define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \
+ FIELD_MAX(CTR1_DBL_1)))
#define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); })
#define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
@@ -293,6 +297,10 @@ struct stm32_dma3_chan {
u32 dma_status;
};
+struct stm32_dma3_pdata {
+ u32 axi_max_burst_len;
+};
+
struct stm32_dma3_ddata {
struct dma_device dma_dev;
void __iomem *base;
@@ -301,6 +309,7 @@ struct stm32_dma3_ddata {
u32 dma_channels;
u32 dma_requests;
enum stm32_dma3_port_data_width ports_max_dw[2];
+ u32 axi_max_burst_len;
};
static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan)
@@ -533,7 +542,8 @@ static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst,
return 1 << __ffs(len | addr | max_dw);
}
-static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 chan_max_burst)
+static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw,
+ u32 chan_max_burst, u32 bus_max_burst)
{
u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1;
@@ -544,8 +554,9 @@ static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 cha
/*
* HW doesn't modify the burst if burst size <= half of the fifo size.
* If len is not a multiple of burst size, last burst is shortened by HW.
+ * Also cap the burst at the maximum supported by the interconnect bus.
*/
- return max_burst;
+ return min_t(u32, max_burst, bus_max_burst);
}
static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir,
@@ -554,6 +565,7 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
{
struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
struct dma_device dma_device = ddata->dma_dev;
+ u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN;
u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max;
u32 _ctr1 = 0, _ctr2 = 0;
u32 ch_conf = chan->dt_config.ch_conf;
@@ -594,10 +606,14 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
_ctr1 |= CTR1_SINC;
if (sap)
_ctr1 |= CTR1_SAP;
+ if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */
+ src_max_burst = ddata->axi_max_burst_len;
if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf))
_ctr1 |= CTR1_DINC;
if (dap)
_ctr1 |= CTR1_DAP;
+ if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */
+ dst_max_burst = ddata->axi_max_burst_len;
_ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ;
if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf))
@@ -617,11 +633,16 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
/* Set destination (device) data width and burst */
ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw,
len, dst_addr));
- dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+ dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst,
+ dst_max_burst));
/* Set source (memory) data width and burst */
sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
- sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
+ if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) {
+ sdw = ddw;
+ sbl_max = dbl_max;
+ }
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
@@ -647,11 +668,17 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
/* Set source (device) data width and burst */
sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw,
len, src_addr));
- sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+ sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst,
+ src_max_burst));
/* Set destination (memory) data width and burst */
ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
- dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
+ if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) ||
+ ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */
+ ddw = sdw;
+ dbl_max = sbl_max;
+ }
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
@@ -678,22 +705,24 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
init_dw = sdw;
init_bl_max = sbl_max;
sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
- sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
sdw = min_t(u32, init_dw, sdw);
- sbl_max = min_t(u32, init_bl_max,
- stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+ sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw,
+ chan->max_burst,
+ src_max_burst));
}
/* Set destination (memory) data width and burst */
init_dw = ddw;
init_bl_max = dbl_max;
ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
- dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
ddw = min_t(u32, init_dw, ddw);
- dbl_max = min_t(u32, init_bl_max,
- stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+ dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw,
+ chan->max_burst,
+ dst_max_burst));
}
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
@@ -772,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@@ -1116,6 +1145,28 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c)
chan->config_set = 0;
}
+static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor)
+{
+ u32 count;
+
+ if (prevent_refactor)
+ return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
+
+ count = len / STM32_DMA3_MAX_BLOCK_SIZE;
+ len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE;
+
+ if (len >= chan->max_burst) {
+ count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */
+ len -= (len / chan->max_burst) * chan->max_burst;
+ }
+
+ /* Unaligned remainder fits in one extra item */
+ if (len > 0)
+ count += 1;
+
+ return count;
+}
+
static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan,
dma_addr_t dst, dma_addr_t src)
{
@@ -1150,8 +1201,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha
struct stm32_dma3_swdesc *swdesc;
size_t next_size, offset;
u32 count, i, ctr1, ctr2;
+ bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
+ !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);
- count = DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
+ count = stm32_dma3_get_ll_count(chan, len, prevent_refactor);
swdesc = stm32_dma3_chan_desc_alloc(chan, count);
if (!swdesc)
@@ -1167,6 +1220,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha
remaining = len - offset;
next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE);
+ if (!prevent_refactor &&
+ (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst))
+ next_size = chan->max_burst * (remaining / chan->max_burst);
+
ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
src + offset, dst + offset, next_size);
if (ret)
@@ -1203,14 +1260,13 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
size_t len;
dma_addr_t sg_addr, dev_addr, src, dst;
u32 i, j, count, ctr1, ctr2;
+ bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
+ !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);
int ret;
- count = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- len = sg_dma_len(sg);
- if (len > STM32_DMA3_MAX_BLOCK_SIZE)
- count += DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE) - 1;
- }
+ count = 0;
+ for_each_sg(sgl, sg, sg_len, i)
+ count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor);
swdesc = stm32_dma3_chan_desc_alloc(chan, count);
if (!swdesc)
@@ -1227,6 +1283,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
do {
size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE);
+ if (!prevent_refactor &&
+ (chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst))
+ chunk = chan->max_burst * (len / chan->max_burst);
+
if (dir == DMA_MEM_TO_DEV) {
src = sg_addr;
dst = dev_addr;
@@ -1259,6 +1319,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
} while (len);
}
+ if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL)
+ dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n",
+ count, sg_len);
+
/* Enable Error interrupts */
swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
/* Enable Transfer state interrupts */
@@ -1388,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@@ -1401,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@@ -1426,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
- dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@@ -1479,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}
@@ -1601,8 +1665,12 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata)
return chan_reserved;
}
+static struct stm32_dma3_pdata stm32mp25_pdata = {
+ .axi_max_burst_len = 16,
+};
+
static const struct of_device_id stm32_dma3_of_match[] = {
- { .compatible = "st,stm32mp25-dma3", },
+ { .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
@@ -1610,6 +1678,7 @@ MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
static int stm32_dma3_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct stm32_dma3_pdata *pdata;
struct stm32_dma3_ddata *ddata;
struct reset_control *reset;
struct stm32_dma3_chan *chan;
@@ -1704,6 +1773,16 @@ static int stm32_dma3_probe(struct platform_device *pdev)
else /* Dual master ports */
ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr);
+ /* axi_max_burst_len is optional; if not defined, use STM32_DMA3_MAX_BURST_LEN */
+ ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN;
+ pdata = device_get_match_data(&pdev->dev);
+ if (pdata && pdata->axi_max_burst_len) {
+ ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len,
+ STM32_DMA3_MAX_BURST_LEN);
+ dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n",
+ ddata->axi_max_burst_len);
+ }
+
ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans),
GFP_KERNEL);
if (!ddata->chans) {
@@ -1827,7 +1906,7 @@ static const struct dev_pm_ops stm32_dma3_pm_ops = {
static struct platform_driver stm32_dma3_driver = {
.probe = stm32_dma3_probe,
- .remove_new = stm32_dma3_remove,
+ .remove = stm32_dma3_remove,
.driver = {
.name = "stm32-dma3",
.of_match_table = stm32_dma3_of_match,
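
A userspace transcription of stm32_dma3_get_ll_count() above, so the three counting cases (no-refactor ceiling, burst-aligned middle, unaligned tail) are easy to poke at. MAX_BLOCK and the burst value are made up; only the arithmetic mirrors the patch.

#include <stdio.h>

#define MAX_BLOCK 65536u	/* stand-in for STM32_DMA3_MAX_BLOCK_SIZE */

static unsigned int ll_count(size_t len, unsigned int max_burst,
			     int prevent_refactor)
{
	unsigned int count;

	if (prevent_refactor)
		return (len + MAX_BLOCK - 1) / MAX_BLOCK;

	count = len / MAX_BLOCK;
	len -= (len / MAX_BLOCK) * MAX_BLOCK;

	if (len >= max_burst) {
		count += 1;	/* burst-aligned part of the remainder */
		len -= (len / max_burst) * max_burst;
	}

	if (len > 0)
		count += 1;	/* unaligned tail */

	return count;
}

int main(void)
{
	printf("%u\n", ll_count(196708, 64, 0));	/* 5: 3 blocks + burst part + tail */
	printf("%u\n", ll_count(196708, 64, 1));	/* 4: plain ceiling */
	return 0;
}
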
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index e6d525901de7..080c1c725216 100644
--- a/drivers/dma/stm32/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 2e7f9b07fdd2..00d2fd38d17f 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -13,7 +13,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -31,12 +33,21 @@
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)
+#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 24)
+#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 8)
+
+#define SUN4I_MAX_BURST 8
+#define SUNIV_MAX_BURST 4
+
/** Normal DMA register values **/
/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_NDMA_DRQ_TYPE_SDRAM 0x11
+#define SUNIV_NDMA_DRQ_TYPE_LIMIT (0x17 + 1)
+
/** Normal DMA register layout **/
/* Dedicated DMA source/destination address mode values */
@@ -50,6 +61,9 @@
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)
+#define SUNIV_NDMA_CFG_CONT_MODE BIT(29)
+#define SUNIV_NDMA_CFG_WAIT_STATE(n) ((n) << 26)
+
/** Dedicated DMA register values **/
/* Dedicated DMA source/destination address mode values */
@@ -62,6 +76,9 @@
#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_DDMA_DRQ_TYPE_SDRAM 0x1
+#define SUNIV_DDMA_DRQ_TYPE_LIMIT (0x9 + 1)
+
/** Dedicated DMA register layout **/
/* Dedicated DMA configuration register layout */
@@ -115,6 +132,11 @@
#define SUN4I_DMA_NR_MAX_VCHANS \
(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+#define SUNIV_NDMA_NR_MAX_CHANNELS 4
+#define SUNIV_DDMA_NR_MAX_CHANNELS 4
+#define SUNIV_NDMA_NR_MAX_VCHANS (24 * 2 - 1)
+#define SUNIV_DDMA_NR_MAX_VCHANS 10
+
/* This set of SUN4I_DDMA timing parameters were found experimentally while
* working with the SPI driver and seem to make it behave correctly */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
@@ -132,6 +154,33 @@
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun4i_dma_config {
+ u32 ndma_nr_max_channels;
+ u32 ndma_nr_max_vchans;
+
+ u32 ddma_nr_max_channels;
+ u32 ddma_nr_max_vchans;
+
+ u32 dma_nr_max_channels;
+
+ void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
+ void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
+ int (*convert_burst)(u32 maxburst);
+
+ u8 ndma_drq_sdram;
+ u8 ddma_drq_sdram;
+
+ u8 max_burst;
+ bool has_reset;
+};
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -170,7 +219,7 @@ struct sun4i_dma_contract {
};
struct sun4i_dma_dev {
- DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+ unsigned long *pchans_used;
struct dma_device slave;
struct sun4i_dma_pchan *pchans;
struct sun4i_dma_vchan *vchans;
@@ -178,6 +227,8 @@ struct sun4i_dma_dev {
struct clk *clk;
int irq;
spinlock_t lock;
+ const struct sun4i_dma_config *cfg;
+ struct reset_control *rst;
};
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
@@ -200,7 +251,27 @@ static struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static int convert_burst(u32 maxburst)
+static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static int convert_burst_a10(u32 maxburst)
{
if (maxburst > 8)
return -EINVAL;
@@ -209,6 +280,15 @@ static int convert_burst(u32 maxburst)
return (maxburst >> 2);
}
+static int convert_burst_f1c100s(u32 maxburst)
+{
+ if (maxburst > 4)
+ return -EINVAL;
+
+ /* 1 -> 0, 4 -> 1 */
+ return (maxburst >> 2);
+}
+
static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
@@ -233,15 +313,15 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
int i, max;
/*
- * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
- * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+ * pchans 0-priv->cfg->ndma_nr_max_channels are normal, and
+ * priv->cfg->ndma_nr_max_channels+ are dedicated ones
*/
if (vchan->is_dedicated) {
- i = SUN4I_NDMA_NR_MAX_CHANNELS;
- max = SUN4I_DMA_NR_MAX_CHANNELS;
+ i = priv->cfg->ndma_nr_max_channels;
+ max = priv->cfg->dma_nr_max_channels;
} else {
i = 0;
- max = SUN4I_NDMA_NR_MAX_CHANNELS;
+ max = priv->cfg->ndma_nr_max_channels;
}
spin_lock_irqsave(&priv->lock, flags);
@@ -444,6 +524,7 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig,
enum dma_transfer_direction direction)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -467,13 +548,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
sconfig->src_addr_width, sconfig->dst_addr_width);
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -482,13 +563,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -510,6 +591,7 @@ static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -524,13 +606,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -539,13 +621,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -622,6 +704,7 @@ static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -638,8 +721,8 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
*/
sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- sconfig->src_maxburst = 8;
- sconfig->dst_maxburst = 8;
+ sconfig->src_maxburst = priv->cfg->max_burst;
+ sconfig->dst_maxburst = priv->cfg->max_burst;
if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, src, dest, len, sconfig);
@@ -654,11 +737,13 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
/* Configure memcpy mode */
if (vchan->is_dedicated) {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
} else {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
}
/* Fill the contract with our only promise */
@@ -673,6 +758,7 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -696,11 +782,11 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -793,6 +879,7 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -818,11 +905,11 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV)
@@ -1150,6 +1237,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->cfg = of_device_get_match_data(&pdev->dev);
+ if (!priv->cfg)
+ return -ENODEV;
+
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1158,10 +1249,16 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No clock specified\n");
- return PTR_ERR(priv->clk);
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Couldn't start the clock\n");
+
+ if (priv->cfg->has_reset) {
+ priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
+ "Failed to get reset control\n");
}
platform_set_drvdata(pdev, priv);
@@ -1197,23 +1294,26 @@ static int sun4i_dma_probe(struct platform_device *pdev)
priv->slave.dev = &pdev->dev;
- priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+ priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
- if (!priv->vchans || !priv->pchans)
+ priv->pchans_used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!priv->vchans || !priv->pchans || !priv->pchans_used)
return -ENOMEM;
/*
- * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
- * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+ * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
+ * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
* dedicated ones
*/
- for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+ for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
priv->pchans[i].base = priv->base +
SUN4I_NDMA_CHANNEL_REG_BASE(i);
- for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+ for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
priv->pchans[i].base = priv->base +
SUN4I_DDMA_CHANNEL_REG_BASE(j);
priv->pchans[i].is_dedicated = 1;
@@ -1227,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the clock\n");
- return ret;
- }
-
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@@ -1242,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_clk_disable;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
- ret = dma_async_device_register(&priv->slave);
- if (ret) {
- dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
- goto err_clk_disable;
- }
+ ret = dmaenginem_async_device_register(&priv->slave);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
- if (ret) {
- dev_err(&pdev->dev, "of_dma_controller_register failed\n");
- goto err_dma_unregister;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
-
-err_dma_unregister:
- dma_async_device_unregister(&priv->slave);
-err_clk_disable:
- clk_disable_unprepare(priv->clk);
- return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@@ -1279,20 +1363,60 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->slave);
-
- clk_disable_unprepare(priv->clk);
}
+static struct sun4i_dma_config sun4i_a10_dma_cfg = {
+ .ndma_nr_max_channels = SUN4I_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUN4I_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUN4I_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUN4I_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUN4I_DMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_a10,
+ .set_src_data_width = set_src_data_width_a10,
+ .convert_burst = convert_burst_a10,
+
+ .ndma_drq_sdram = SUN4I_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUN4I_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUN4I_MAX_BURST,
+ .has_reset = false,
+};
+
+static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
+ .ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUNIV_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUNIV_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS +
+ SUNIV_DDMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_f1c100s,
+ .set_src_data_width = set_src_data_width_f1c100s,
+ .convert_burst = convert_burst_f1c100s,
+
+ .ndma_drq_sdram = SUNIV_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUNIV_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUNIV_MAX_BURST,
+ .has_reset = true,
+};
+
static const struct of_device_id sun4i_dma_match[] = {
- { .compatible = "allwinner,sun4i-a10-dma" },
+ { .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
+ { .compatible = "allwinner,suniv-f1c100s-dma",
+ .data = &suniv_f1c100s_dma_cfg },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
- .remove_new = sun4i_dma_remove,
+ .remove = sun4i_dma_remove,
.driver = {
.name = "sun4i-dma",
.of_match_table = sun4i_dma_match,
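
A compact userspace model of the per-SoC config object this patch introduces: the burst encoder becomes a function pointer selected by compatible string. The encodings and limits follow the patch; the lookup itself is simplified.

#include <stdio.h>

struct dma_cfg {
	const char *compatible;
	int (*convert_burst)(unsigned int maxburst);
	unsigned int max_burst;
};

static int convert_burst_a10(unsigned int maxburst)
{
	return maxburst > 8 ? -1 : (int)(maxburst >> 2);	/* 1->0, 4->1, 8->2 */
}

static int convert_burst_f1c100s(unsigned int maxburst)
{
	return maxburst > 4 ? -1 : (int)(maxburst >> 2);	/* 1->0, 4->1 */
}

static const struct dma_cfg cfgs[] = {
	{ "allwinner,sun4i-a10-dma",     convert_burst_a10,     8 },
	{ "allwinner,suniv-f1c100s-dma", convert_burst_f1c100s, 4 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
		printf("%s: burst(%u) -> %d\n", cfgs[i].compatible,
		       cfgs[i].max_burst,
		       cfgs[i].convert_burst(cfgs[i].max_burst));
	return 0;
}
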
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 583bf49031cf..2215ff877bf7 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include "virt-dma.h"
@@ -553,7 +554,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
continue;
dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
- i ? "high" : "low", status);
+ str_high_low(i), status);
writel(status, sdev->base + DMA_IRQ_STAT(i));
@@ -1488,7 +1489,7 @@ static void sun6i_dma_remove(struct platform_device *pdev)
static struct platform_driver sun6i_dma_driver = {
.probe = sun6i_dma_probe,
- .remove_new = sun6i_dma_remove,
+ .remove = sun6i_dma_remove,
.driver = {
.name = "sun6i-dma",
.of_match_table = sun6i_dma_match,
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 3642508e88bb..4d6fe0efa76e 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
bool config_init;
char name[30];
enum dma_transfer_direction sid_dir;
+ enum dma_status status;
int id;
int irq;
int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
tegra_dma_dump_chan_regs(tdc);
}
+ tdc->status = DMA_PAUSED;
+
return ret;
}
@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+ tdc->status = DMA_IN_PROGRESS;
}
static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
tegra_dma_sid_free(tdc);
tdc->dma_desc = NULL;
+ tdc->status = DMA_COMPLETE;
}
static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
tdc->dma_desc = NULL;
}
+ tdc->status = DMA_COMPLETE;
tegra_dma_sid_free(tdc);
vchan_get_all_descriptors(&tdc->vc, &head);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
if (ret == DMA_COMPLETE)
return ret;
+ if (tdc->status == DMA_PAUSED)
+ ret = DMA_PAUSED;
+
spin_lock_irqsave(&tdc->vc.lock, flags);
vd = vchan_find_desc(&tdc->vc, cookie);
if (vd) {
@@ -1532,7 +1542,7 @@ static struct platform_driver tegra_dma_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove_new = tegra_dma_remove,
+ .remove = tegra_dma_remove,
};
module_platform_driver(tegra_dma_driver);
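
The new tdc->status field lets tegra_dma_tx_status() report DMA_PAUSED even when the cookie itself has not completed. A sketch of that override, with the dmaengine status enum reduced to what is needed here:

#include <stdio.h>

enum dma_status { DMA_COMPLETE, DMA_IN_PROGRESS, DMA_PAUSED };

static enum dma_status tx_status(enum dma_status cookie_status,
				 enum dma_status chan_status)
{
	if (cookie_status == DMA_COMPLETE)
		return DMA_COMPLETE;	/* completion always wins */
	if (chan_status == DMA_PAUSED)
		return DMA_PAUSED;	/* otherwise surface the pause */
	return cookie_status;
}

int main(void)
{
	printf("%d\n", tx_status(DMA_IN_PROGRESS, DMA_PAUSED));	/* 2 = paused */
	return 0;
}
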
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 7d1acda2d72b..14a61e53a41b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1675,7 +1675,7 @@ static struct platform_driver tegra_dmac_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove_new = tegra_dma_remove,
+ .remove = tegra_dma_remove,
};
module_platform_driver(tegra_dmac_driver);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 24ad7077c53b..fad896ff29a2 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -27,10 +27,10 @@
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
-#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
+#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
@@ -41,11 +41,27 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
-#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
+
+#define ADMA_GLOBAL_CH_CONFIG 0x400
+#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7)
+#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8)
+
+#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
+#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
+#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184
+#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
+#define ADMA_GLOBAL_CH_FIFO_CTRL 0x300
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
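
The direction and mode macros now take the mask and shift as parameters so one header serves both the Tegra186 and Tegra264 register layouts. A quick check of the encoding, using the Tegra186-style positions from the removed defines as an example:

#include <stdint.h>
#include <stdio.h>

#define CH_CTRL_DIR(val, mask, shift)	(((val) & (mask)) << (shift))

int main(void)
{
	/* e.g. MEM2AHUB (4) at the Tegra186 position: mask 0xf, shift 12 */
	uint32_t ctrl = CH_CTRL_DIR(4, 0xf, 12);

	printf("ctrl = 0x%08x\n", ctrl);	/* 0x00004000 */
	return 0;
}
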
@@ -69,33 +85,49 @@ struct tegra_adma;
* @adma_get_burst_config: Function callback used to set DMA burst size.
* @global_reg_offset: Register offset of DMA global register.
* @global_int_clear: Register offset of DMA global interrupt clear.
+ * @global_ch_fifo_base: Global channel FIFO ctrl base offset.
+ * @global_ch_config_base: Global channel config base offset.
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
+ * @ch_dir_shift: Channel direction bit position.
+ * @ch_mode_shift: Channel mode bit position.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_tc_offset_diff: Offset difference, from the TC register onwards, on Tegra264.
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
+ * @ch_config: Outstanding and WRR config values.
* @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_dir_mask: Mask for channel direction.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
- * @has_outstanding_reqs: If DMA channel can have outstanding requests.
+ * @max_page: Maximum ADMA Channel Page.
+ * @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
unsigned int global_reg_offset;
unsigned int global_int_clear;
+ unsigned int global_ch_fifo_base;
+ unsigned int global_ch_config_base;
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
+ unsigned int ch_dir_shift;
+ unsigned int ch_mode_shift;
unsigned int ch_base_offset;
+ unsigned int ch_tc_offset_diff;
unsigned int ch_fifo_ctrl;
+ unsigned int ch_config;
unsigned int ch_req_mask;
+ unsigned int ch_dir_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
- bool has_outstanding_reqs;
+ unsigned int max_page;
+ void (*set_global_pg_config)(struct tegra_adma *tdma);
};
/*
@@ -104,6 +136,7 @@ struct tegra_adma_chip_data {
struct tegra_adma_chan_regs {
unsigned int ctrl;
unsigned int config;
+ unsigned int global_config;
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
@@ -142,6 +175,9 @@ struct tegra_adma_chan {
/* Transfer count and position info */
unsigned int tx_buf_count;
unsigned int tx_buf_pos;
+
+ unsigned int global_ch_fifo_offset;
+ unsigned int global_ch_config_offset;
};
/*
@@ -151,6 +187,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ void __iomem *ch_base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long *dma_chan_mask;
@@ -159,6 +196,7 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ unsigned int ch_page_no;
const struct tegra_adma_chip_data *cdata;
@@ -176,6 +214,11 @@ static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
+static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
+}
+
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
@@ -217,13 +260,53 @@ static int tegra_adma_slave_config(struct dma_chan *dc,
return 0;
}
+static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
+{
+ /*
+ * Clear the default page1 channel group configs and program
+ * the global registers based on the actual page usage
+ */
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
+}
+
+static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
+{
+ u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;
+
+ /* If the default page (page1) is not used, then clear page1 registers */
+ if (tdma->ch_page_no) {
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
+ }
+
+ /* Program global registers for selected page */
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
int ret;
- /* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
+ /* Clear any channel-group global interrupts */
+ tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);
+
+ if (!tdma->base_addr)
+ return 0;
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
@@ -237,6 +320,9 @@ static int tegra_adma_init(struct tegra_adma *tdma)
if (ret)
return ret;
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+
/* Enable global ADMA registers */
tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
@@ -369,11 +455,21 @@ static void tegra_adma_start(struct tegra_adma_chan *tdc)
tdc->tx_buf_pos = 0;
tdc->tx_buf_count = 0;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->trg_addr);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
/* Start ADMA */
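
The `- tdc->tdma->cdata->ch_tc_offset_diff` adjustments account for Tegra264 dropping a register from the channel block, which shifts ADMA_CH_TC and the address registers down by 4 bytes relative to earlier SoCs. The idea, sketched with a hypothetical helper:

/* Illustrative only: per-SoC channel register offset */
static inline u32 adma_ch_reg(const struct tegra_adma_chip_data *cdata, u32 reg)
{
	/* ch_tc_offset_diff is 0 on Tegra210/186 and 4 on Tegra264 */
	return reg - cdata->ch_tc_offset_diff;
}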
@@ -386,7 +482,8 @@ static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
struct tegra_adma_desc *desc = tdc->desc;
unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
- unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+ unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
+ tdc->tdma->cdata->ch_tc_offset_diff);
unsigned int periods_remaining;
/*
@@ -592,13 +689,16 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
- ADMA_CH_CTRL_MODE_CONTINUOUS |
+ ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
+ cdata->ch_dir_shift) |
+ ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
- ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- if (cdata->has_outstanding_reqs)
- ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
+
+ if (cdata->global_ch_config_base)
+ ch_regs->global_config |= cdata->ch_config;
+ else
+ ch_regs->config |= cdata->ch_config;
/*
* 'sreq_index' represents the current ADMAIF channel number and as per
@@ -736,7 +836,9 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
struct tegra_adma_chan *tdc;
int i;
- tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (tdma->base_addr)
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
if (!tdma->global_cmd)
goto clk_disable;
@@ -751,12 +853,23 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
- ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
- ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
- ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
- ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+
+ if (tdc->global_ch_config_offset)
+ ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ else if (tdc->global_ch_fifo_offset)
+ ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);
+
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
}
clk_disable:
@@ -777,7 +890,11 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
}
- tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->base_addr) {
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+ }
if (!tdma->global_cmd)
return 0;
@@ -791,12 +908,23 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
@@ -807,37 +935,80 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
.ch_req_mask = 0xf,
+ .ch_dir_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
- .has_outstanding_reqs = false,
+ .max_page = 0,
+ .set_global_pg_config = NULL,
};
static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x1f,
+ .ch_dir_mask = 0xf,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
- .has_outstanding_reqs = true,
+ .max_page = 4,
+ .set_global_pg_config = tegra186_adma_global_page_config,
+};
+
+static const struct tegra_adma_chip_data tegra264_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x800c,
+ .global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL,
+ .global_ch_config_base = ADMA_GLOBAL_CH_CONFIG,
+ .ch_req_tx_shift = 26,
+ .ch_req_rx_shift = 20,
+ .ch_dir_shift = 10,
+ .ch_mode_shift = 7,
+ .ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 4,
+ .ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
+ .ch_req_mask = 0x3f,
+ .ch_dir_mask = 7,
+ .ch_req_max = 32,
+ .ch_reg_size = 0x100,
+ .nr_channels = 64,
+ .ch_fifo_size_mask = 0x7f,
+ .sreq_index_offset = 0,
+ .max_page = 10,
+ .set_global_pg_config = tegra264_adma_global_page_config,
};
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
+ { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -846,6 +1017,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
+ struct resource *res_page, *res_base;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -865,9 +1037,46 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tdma->base_addr))
- return PTR_ERR(tdma->base_addr);
+ res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
+ if (res_page) {
+ tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
+ if (IS_ERR(tdma->ch_base_addr))
+ return PTR_ERR(tdma->ch_base_addr);
+
+ res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
+ if (res_base) {
+ resource_size_t page_offset, page_no;
+ unsigned int ch_base_offset;
+
+ if (res_page->start < res_base->start)
+ return -EINVAL;
+ page_offset = res_page->start - res_base->start;
+ ch_base_offset = cdata->ch_base_offset;
+ if (!ch_base_offset)
+ return -EINVAL;
+
+ page_no = div_u64(page_offset, ch_base_offset);
+ if (!page_no || page_no > INT_MAX)
+ return -EINVAL;
+
+ tdma->ch_page_no = page_no - 1;
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ }
+ } else {
+		/* If no 'page' region is found, fall back to the legacy 'reg' DT binding */
+ res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_base) {
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ } else {
+ return -ENODEV;
+ }
+
+ tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
+ }
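
To see how ch_page_no falls out of the two memory regions, a worked example with hypothetical addresses (the real values come from the device tree):

/* "global" at 0x2900000, "page" at 0x2930000, ch_base_offset = 0x10000 */
u64 page_offset = 0x2930000 - 0x2900000;	/* 0x30000 */
u64 page_no = div_u64(page_offset, 0x10000);	/* 3 */
u32 ch_page_no = page_no - 1;			/* 2, programmed into the page registers */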
tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
if (IS_ERR(tdma->ahub_clk)) {
@@ -900,8 +1109,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (!test_bit(i, tdma->dma_chan_mask))
continue;
- tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
- + (cdata->ch_reg_size * i);
+ tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
+
+ if (tdma->base_addr) {
+ if (cdata->global_ch_fifo_base)
+ tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);
+
+ if (cdata->global_ch_config_base)
+ tdc->global_ch_config_offset =
+ cdata->global_ch_config_base + (4 * i);
+ }
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
@@ -1008,7 +1225,7 @@ static struct platform_driver tegra_admac_driver = {
.of_match_table = tegra_adma_of_match,
},
.probe = tegra_adma_probe,
- .remove_new = tegra_adma_remove,
+ .remove = tegra_adma_remove,
};
module_platform_driver(tegra_admac_driver);
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 2adc2cca10e9..dbf168146d35 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default y
+ default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
- default y
+ default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index a8bb70c2d109..8d8c3d6038fc 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = {
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
- .remove_new = cppi41_dma_remove,
+ .remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.pm = &cppi41_pm_ops,
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 5f8d2e93ff3f..552be71db6c4 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
@@ -208,7 +209,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2048,7 +2048,7 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
- dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+ dev_dbg(dev, "chmap_exist: %s\n", str_yes_no(ecc->chmap_exist));
/* Nothing need to be done if queue priority is provided */
if (pdata->queue_priority_mapping)
@@ -2064,8 +2064,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
* priority. So Q0 is the highest priority queue and the last queue has
* the lowest priority.
*/
- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
- GFP_KERNEL);
+ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+ sizeof(*queue_priority_map), GFP_KERNEL);
if (!queue_priority_map)
return -ENOMEM;
@@ -2259,8 +2259,12 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
out:
- /* The channel is going to be used as HW synchronized */
- echan->hw_triggered = true;
+ /*
+ * The channel is going to be HW synchronized, unless it was
+ * reserved as a memcpy channel
+ */
+ echan->hw_triggered =
+ !edma_is_memcpy_channel(i, ecc->info->memcpy_channels);
return dma_get_slave_channel(chan);
}
#else
@@ -2460,19 +2464,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
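
Two fixes land in this loop: the open-ended `for (i = 0;;)` gains a proper bound, and the node reference taken by of_parse_phandle_with_fixed_args() is now dropped once its argument has been consumed (tc_list no longer stores the node). The general refcounting pattern, sketched with a hypothetical use() consumer:

ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", 1, i, &args);
if (!ret) {
	use(args.args[0]);	/* consume the phandle argument... */
	of_node_put(args.np);	/* ...then drop the reference we own */
}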
/* See if we have optional dma-channel-mask array */
@@ -2636,7 +2640,7 @@ static const struct dev_pm_ops edma_pm_ops = {
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove_new = edma_remove,
+ .remove = edma_remove,
.driver = {
.name = "edma",
.pm = &edma_pm_ops,
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 7c224c3ab7a0..f87d244cc2d6 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
struct k3_udma_glue_rx_flow *flows;
u32 flow_num;
u32 flows_ready;
+ bool single_fdq; /* one FDQ for all flows */
};
static void k3_udma_chan_dev_release(struct device *dev)
@@ -970,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ep_cfg = rx_chn->common.ep_config;
- if (xudma_is_pktdma(rx_chn->common.udmax))
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
- else
+ rx_chn->single_fdq = false;
+ } else {
rx_chn->udma_rchan_id = -1;
+ rx_chn->single_fdq = true;
+ }
/* request and cfg UDMAP RX channel */
rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1103,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
+ rx_chn->single_fdq = false;
+ } else {
+ rx_chn->single_fdq = true;
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
@@ -1453,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
{
struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
struct device *dev = rx_chn->common.dev;
@@ -1465,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
- if (skip_fdq)
+ if (rx_chn->single_fdq && flow_num)
goto do_reset;
/*
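
The skip_fdq decision moves from the callers into the glue layer: PKTDMA gives each flow its own free-descriptor queue, while plain UDMA shares one FDQ across all flows, so only flow 0 should drain it. Caller-side, the change looks roughly like this (client names hypothetical):

/* before: every caller had to know the FDQ layout */
k3_udma_glue_reset_rx_chn(rx_chn, flow_id, priv, my_cleanup, flow_id != 0);
/* after: the glue layer skips the shared FDQ for flows other than 0 */
k3_udma_glue_reset_rx_chn(rx_chn, flow_id, priv, my_cleanup);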
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 406ee199c2ac..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -3185,27 +3192,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
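
For the new BCDMA cyclic case the burst count is derived from the TR geometry rather than from the residue, and the statictr_z_mask bounds check now applies to every branch. A worked example with hypothetical numbers:

/* type-1 TR with icnt0 = 256, icnt1 = 4 on a 4-byte wide peripheral */
u32 bstcnt = (256 * 4) / 4;	/* = 256: PDMA closes the packet after each period */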
@@ -3450,8 +3470,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3497,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3520,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3561,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
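
Each period's TR flags are now composed once and applied in a single cppi5_tr_csf_set() call; in the worst case both bits end up set. Schematically (bcdma_to_pdma is a hypothetical predicate for the condition tested above):

u32 csf = 0;
if (bcdma_to_pdma)			/* BCDMA <-> PDMA endpoint */
	csf |= CPPI5_TR_CSF_EOP;	/* close the packet every period */
if (!(flags & DMA_PREP_INTERRUPT))
	csf |= CPPI5_TR_CSF_SUPR_EVT;	/* suppress per-TR completion events */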
@@ -3655,8 +3693,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -4214,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4246,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
@@ -4372,6 +4410,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4403,6 +4453,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, udma_of_match);
@@ -4838,6 +4892,12 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
} else {
@@ -4861,6 +4921,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4881,6 +4950,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -5015,6 +5093,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -5026,6 +5110,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -5534,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 6ab9bfbdc480..8c023c6e623a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
- .remove_new = omap_dma_remove,
+ .remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.of_match_table = omap_dma_match,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 7410025605e0..ecaf002558af 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -761,7 +761,7 @@ static struct platform_driver td_driver = {
.name = DRIVER_NAME,
},
.probe = td_probe,
- .remove_new = td_remove,
+ .remove = td_remove,
};
module_platform_driver(td_driver);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 44ba377b4b5a..35d5221683b2 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1260,14 +1260,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
};
static struct platform_driver txx9dmac_chan_driver = {
- .remove_new = txx9dmac_chan_remove,
+ .remove = txx9dmac_chan_remove,
.driver = {
.name = "txx9dmac-chan",
},
};
static struct platform_driver txx9dmac_driver = {
- .remove_new = txx9dmac_remove,
+ .remove = txx9dmac_remove,
.shutdown = txx9dmac_shutdown,
.driver = {
.name = "txx9dmac",
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index ad7125f6e2ca..7a99f86ecb5a 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -493,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
- .remove_new = uniphier_mdmac_remove,
+ .remove = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 3ce2dc2ad9de..ceeb6171c9d1 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -603,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
- .remove_new = uniphier_xdmac_remove,
+ .remove = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 275848a9c450..f64624ea44ad 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1815,7 +1815,7 @@ MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
static struct platform_driver xgene_dma_driver = {
.probe = xgene_dma_probe,
- .remove_new = xgene_dma_remove,
+ .remove = xgene_dma_remove,
.driver = {
.name = "X-Gene-DMA",
.of_match_table = xgene_dma_of_match_ptr,
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 718842fdaf98..0d88b1a670e1 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -390,15 +390,11 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
*/
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
- int ret;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
- ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
- CHAN_CTRL_RUN_STOP);
- if (ret)
- return ret;
- return ret;
+ return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
}
/**
@@ -1315,7 +1311,7 @@ static struct platform_driver xdma_driver = {
},
.id_table = xdma_id_table,
.probe = xdma_probe,
- .remove_new = xdma_remove,
+ .remove = xdma_remove,
};
module_platform_driver(xdma_driver);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eb51ae93e89..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -46,6 +46,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -1404,16 +1405,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- j = chan->desc_submitcount;
- reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
- if (chan->direction == DMA_MEM_TO_DEV) {
- reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
- } else {
- reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ if (config->park) {
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ }
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
}
- dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
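
With this change the park pointer registers are only touched when the client actually asked for parking; in circular mode they keep their reset values. Parking is requested through the existing config interface, roughly:

/* Sketch of a client enabling parking on frame buffer 2 */
struct xilinx_vdma_config cfg = {};

cfg.park = 1;
cfg.park_frm = 2;
xilinx_vdma_channel_set_config(chan, &cfg);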
@@ -2170,6 +2173,99 @@ error:
}
/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ * transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+ struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction) || direction != chan->direction)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information from DMA vectors */
+ for (i = 0; i < nb; i++) {
+ sg_used = 0;
+
+ /* Loop until the entire dma_vec entry is used */
+ while (sg_used < vecs[i].len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head->phys;
+
+	/* Set SOP on the first segment and EOP on the last for MEM_TO_DEV */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		head->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
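
A dmaengine client reaches this callback through the generic dmaengine_prep_peripheral_dma_vec() wrapper; a hypothetical two-vector MEM_TO_DEV submission might look like:

struct dma_vec vecs[2] = {
	{ .addr = buf_dma,        .len = 4096 },
	{ .addr = buf_dma + 8192, .len = 4096 },
};
struct dma_async_tx_descriptor *d;

d = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (d)
	dmaengine_submit(d);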
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -2906,6 +3002,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
+ xdev->common.directions |= chan->direction;
+
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)
@@ -2938,7 +3036,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
XILINX_DMA_DMASR_SG_MASK)
chan->has_sg = true;
dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
- chan->has_sg ? "enabled" : "disabled");
+ str_enabled_disabled(chan->has_sg));
}
/* Initialize the tasklet */
@@ -3112,6 +3210,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
+
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->has_axistream_connected =
of_property_read_bool(node, "xlnx,axistream-connected");
@@ -3173,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
@@ -3271,7 +3372,7 @@ static struct platform_driver xilinx_vdma_driver = {
.of_match_table = xilinx_dma_of_ids,
},
.probe = xilinx_dma_probe,
- .remove_new = xilinx_dma_remove,
+ .remove = xilinx_dma_remove,
};
module_platform_driver(xilinx_vdma_driver);
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index be87764af9e8..ee5d9fdbfd7f 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1863,7 +1863,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
static struct platform_driver xilinx_dpdma_driver = {
.probe = xilinx_dpdma_probe,
- .remove_new = xilinx_dpdma_remove,
+ .remove = xilinx_dpdma_remove,
.driver = {
.name = "xilinx-zynqmp-dpdma",
.of_match_table = xilinx_dpdma_of_match,
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9ae46f1198fe..f7e584de4335 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -366,7 +366,7 @@ static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
}
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
- /* Clearing the interrupt account rgisters */
+ /* Clearing the interrupt account registers */
val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
@@ -1173,9 +1173,9 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
- pm_runtime_disable(zdev->dev);
- if (!pm_runtime_enabled(zdev->dev))
+ if (pm_runtime_active(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1192,7 +1192,8 @@ static struct platform_driver zynqmp_dma_driver = {
.pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
- .remove_new = zynqmp_dma_remove,
+ .remove = zynqmp_dma_remove,
+ .shutdown = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index 20607ed54243..ade872c915ac 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -3,5 +3,11 @@
# Generic DPLL drivers configuration
#
+menu "DPLL device support"
+
config DPLL
bool
+
+source "drivers/dpll/zl3073x/Kconfig"
+
+endmenu
diff --git a/drivers/dpll/Makefile b/drivers/dpll/Makefile
index 2e5b27850110..9e7a3a3e592e 100644
--- a/drivers/dpll/Makefile
+++ b/drivers/dpll/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_DPLL) += dpll.o
dpll-y += dpll_core.o
dpll-y += dpll_netlink.o
dpll-y += dpll_nl.o
+
+obj-$(CONFIG_ZL3073X) += zl3073x/
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 32019dc33cca..a461095efd8a 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
+ if (WARN_ON(src->freq_supported && !src->freq_supported_num))
+ return -EINVAL;
+
memcpy(dst, src, sizeof(*dst));
- if (src->freq_supported && src->freq_supported_num) {
+ if (src->freq_supported) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
@@ -503,14 +506,16 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ xa_init_flags(&pin->ref_sync_pins, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err_xa_alloc;
return pin;
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
@@ -592,6 +597,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
kfree_rcu(pin, rcu);
}
@@ -656,11 +662,26 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_register);
+static void dpll_pin_ref_sync_pair_del(u32 ref_sync_pin_id)
+{
+ struct dpll_pin *pin, *ref_sync_pin;
+ unsigned long i;
+
+ xa_for_each(&dpll_pin_xa, i, pin) {
+ ref_sync_pin = xa_load(&pin->ref_sync_pins, ref_sync_pin_id);
+ if (ref_sync_pin) {
+ xa_erase(&pin->ref_sync_pins, ref_sync_pin_id);
+ __dpll_pin_change_ntf(pin);
+ }
+ }
+}
+
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
+ dpll_pin_ref_sync_pair_del(pin->id);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
@@ -780,6 +801,33 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
+/**
+ * dpll_pin_ref_sync_pair_add - create a reference sync signal pin pair
+ * @pin: pin which produces the base frequency
+ * @ref_sync_pin: pin which produces the sync signal
+ *
+ * Once the pins are paired, user space can configure the reference sync
+ * pair.
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
+ */
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = xa_insert(&pin->ref_sync_pins, ref_sync_pin->id,
+ ref_sync_pin, GFP_KERNEL);
+ __dpll_pin_change_ntf(pin);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_ref_sync_pair_add);
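
A provider driver would typically pair the pins right after registering them, along the lines of (pin names hypothetical):

err = dpll_pin_ref_sync_pair_add(base_freq_pin, one_pps_pin);
if (err)
	dev_err(dev, "failed to pair reference sync pins: %d\n", err);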
+
static struct dpll_device_registration *
dpll_device_registration_first(struct dpll_device *dpll)
{
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 2b6d8ef1cdf3..8ce969bbeb64 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -44,8 +44,8 @@ struct dpll_device {
* @module: module of creator
 * @dpll_refs: hold references to dplls the pin was registered with
 * @parent_refs: hold references to parent pins the pin was registered with
+ * @ref_sync_pins: hold references to pins for Reference SYNC feature
* @prop: pin properties copied from the registerer
- * @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
* @rcu: rcu_head for kfree_rcu()
**/
@@ -56,6 +56,7 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
+ struct xarray ref_sync_pins;
struct dpll_pin_properties prop;
refcount_t refcount;
struct rcu_head rcu;
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index fc0280dcddd1..74c1f0ca95f2 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -48,6 +48,24 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
return 0;
}
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+ struct dpll_pin_ref *par_ref;
+ unsigned long i;
+
+ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+ return false;
+ xa_for_each(&pin->parent_refs, i, par_ref)
+ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+ DPLL_REGISTERED))
+ return true;
+ xa_for_each(&pin->dpll_refs, i, par_ref)
+ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+ DPLL_REGISTERED))
+ return true;
+ return false;
+}
+
/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
@@ -127,6 +145,47 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state;
+ int ret;
+
+ if (ops->phase_offset_monitor_set && ops->phase_offset_monitor_get) {
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll),
+ &state, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_MONITOR, state))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
+dpll_msg_add_phase_offset_avg_factor(struct sk_buff *msg,
+ struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor;
+ int ret;
+
+ if (ops->phase_offset_avg_factor_get) {
+ ret = ops->phase_offset_avg_factor_get(dpll, dpll_priv(dpll),
+ &factor, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_AVG_FACTOR, factor))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -170,6 +229,27 @@ dpll_msg_add_temp(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_clock_quality_level ql;
+ int ret;
+
+ if (!ops->clock_quality_level_get)
+ return 0;
+ ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
+ if (ret)
+ return ret;
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
+ if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
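
Each bit set by the driver callback becomes its own DPLL_A_CLOCK_QUALITY_LEVEL attribute in the message, so userspace receives the full set of supported levels. A hypothetical driver callback reporting two ITU option-1 levels:

static int my_clock_quality_level_get(const struct dpll_device *dpll,
				      void *priv, unsigned long *qls,
				      struct netlink_ext_ack *extack)
{
	set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC, qls);
	set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A, qls);
	return 0;
}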
+static int
dpll_msg_add_pin_prio(struct sk_buff *msg, struct dpll_pin *pin,
struct dpll_pin_ref *ref,
struct netlink_ext_ack *extack)
@@ -387,6 +467,47 @@ nest_cancel:
return -EMSGSIZE;
}
+static int
+dpll_msg_add_pin_ref_sync(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ void *pin_priv, *ref_sync_pin_priv;
+ struct dpll_pin *ref_sync_pin;
+ enum dpll_pin_state state;
+ struct nlattr *nest;
+ unsigned long index;
+ int ret;
+
+ pin_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ xa_for_each(&pin->ref_sync_pins, index, ref_sync_pin) {
+ if (!dpll_pin_available(ref_sync_pin))
+ continue;
+ ref_sync_pin_priv = dpll_pin_on_dpll_priv(dpll, ref_sync_pin);
+ if (WARN_ON(!ops->ref_sync_get))
+ return -EOPNOTSUPP;
+ ret = ops->ref_sync_get(pin, pin_priv, ref_sync_pin,
+ ref_sync_pin_priv, &state, extack);
+ if (ret)
+ return ret;
+ nest = nla_nest_start(msg, DPLL_A_PIN_REFERENCE_SYNC);
+ if (!nest)
+ return -EMSGSIZE;
+		if (nla_put_u32(msg, DPLL_A_PIN_ID, ref_sync_pin->id))
+			goto nest_cancel;
+		if (nla_put_u32(msg, DPLL_A_PIN_STATE, state))
+			goto nest_cancel;
+ nla_nest_end(msg, nest);
+ }
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+}
+
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
@@ -531,6 +652,10 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
if (ret)
return ret;
+	if (!xa_empty(&pin->ref_sync_pins)) {
+		ret = dpll_msg_add_pin_ref_sync(msg, pin, ref, extack);
+		if (ret)
+			return ret;
+	}
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
@@ -559,6 +684,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
ret = dpll_msg_add_lock_status(msg, dpll, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_clock_quality_level(msg, dpll, extack);
+ if (ret)
+ return ret;
ret = dpll_msg_add_mode(msg, dpll, extack);
if (ret)
return ret;
@@ -567,6 +695,12 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
return ret;
if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
return -EMSGSIZE;
+ ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
+ if (ret)
+ return ret;
+ ret = dpll_msg_add_phase_offset_avg_factor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -618,24 +752,6 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
-static bool dpll_pin_available(struct dpll_pin *pin)
-{
- struct dpll_pin_ref *par_ref;
- unsigned long i;
-
- if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
- return false;
- xa_for_each(&pin->parent_refs, i, par_ref)
- if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
- DPLL_REGISTERED))
- return true;
- xa_for_each(&pin->dpll_refs, i, par_ref)
- if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
- DPLL_REGISTERED))
- return true;
- return false;
-}
-
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@@ -698,7 +814,7 @@ int dpll_pin_delete_ntf(struct dpll_pin *pin)
return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
}
-static int __dpll_pin_change_ntf(struct dpll_pin *pin)
+int __dpll_pin_change_ntf(struct dpll_pin *pin)
{
return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
}
@@ -723,6 +839,48 @@ int dpll_pin_change_ntf(struct dpll_pin *pin)
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
static int
+dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state = nla_get_u32(a), old_state;
+ int ret;
+
+ if (!(ops->phase_offset_monitor_set && ops->phase_offset_monitor_get)) {
+ NL_SET_ERR_MSG_ATTR(extack, a, "dpll device not capable of phase offset monitor");
+ return -EOPNOTSUPP;
+ }
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll), &old_state,
+ extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current state of phase offset monitor");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+
+ return ops->phase_offset_monitor_set(dpll, dpll_priv(dpll), state,
+ extack);
+}
+
+static int
+dpll_phase_offset_avg_factor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor = nla_get_u32(a);
+
+ if (!ops->phase_offset_avg_factor_set) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "device not capable of changing phase offset average factor");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->phase_offset_avg_factor_set(dpll, dpll_priv(dpll), factor,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -864,6 +1022,108 @@ rollback:
}
static int
+dpll_pin_ref_sync_state_set(struct dpll_pin *pin,
+ unsigned long ref_sync_pin_idx,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+
+{
+ struct dpll_pin_ref *ref, *failed;
+ const struct dpll_pin_ops *ops;
+ enum dpll_pin_state old_state;
+ struct dpll_pin *ref_sync_pin;
+ struct dpll_device *dpll;
+ unsigned long i;
+ int ret;
+
+ ref_sync_pin = xa_find(&pin->ref_sync_pins, &ref_sync_pin_idx,
+ ULONG_MAX, XA_PRESENT);
+ if (!ref_sync_pin) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not found");
+ return -EINVAL;
+ }
+ if (!dpll_pin_available(ref_sync_pin)) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not available");
+ return -EINVAL;
+ }
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->ref_sync_set || !ops->ref_sync_get) {
+ NL_SET_ERR_MSG(extack, "reference sync not supported by this pin");
+ return -EOPNOTSUPP;
+ }
+ dpll = ref->dpll;
+ ret = ops->ref_sync_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ &old_state, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get old reference sync state");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ ret = ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll,
+ ref_sync_pin),
+ state, extack);
+ if (ret) {
+ failed = ref;
+ NL_SET_ERR_MSG_FMT(extack, "reference sync set failed for dpll_id:%u",
+ dpll->id);
+ goto rollback;
+ }
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+
+rollback:
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ if (ref == failed)
+ break;
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ if (ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ old_state, extack))
+ NL_SET_ERR_MSG(extack, "set reference sync rollback failed");
+ }
+ return ret;
+}
+
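The state update above follows an all-or-nothing idiom over every dpll the pin is registered with: apply the new state to each reference, and on the first failure walk the same list again restoring the old state. Stripped to its shape (names hypothetical):

xa_for_each(&refs, i, ref) {
	ret = apply(ref, new_state);
	if (ret) {
		failed = ref;
		goto rollback;
	}
}
return 0;

rollback:
xa_for_each(&refs, i, ref) {
	if (ref == failed)
		break;
	apply(ref, old_state);	/* best effort; failures only reported */
}
return ret;
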
+static int
+dpll_pin_ref_sync_set(struct dpll_pin *pin, struct nlattr *nest,
+ struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[DPLL_A_PIN_MAX + 1];
+	enum dpll_pin_state state;
+	u32 sync_pin_id;
+	int ret;
+
+	ret = nla_parse_nested(tb, DPLL_A_PIN_MAX, nest,
+			       dpll_reference_sync_nl_policy, extack);
+	if (ret)
+		return ret;
+ if (!tb[DPLL_A_PIN_ID]) {
+ NL_SET_ERR_MSG(extack, "sync pin id expected");
+ return -EINVAL;
+ }
+ sync_pin_id = nla_get_u32(tb[DPLL_A_PIN_ID]);
+
+ if (!tb[DPLL_A_PIN_STATE]) {
+ NL_SET_ERR_MSG(extack, "sync pin state expected");
+ return -EINVAL;
+ }
+ state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+ return dpll_pin_ref_sync_state_set(pin, sync_pin_id, state, extack);
+}
+
+static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
@@ -1169,6 +1429,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
if (ret)
return ret;
break;
+ case DPLL_A_PIN_REFERENCE_SYNC:
+ ret = dpll_pin_ref_sync_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
}
}
@@ -1509,12 +1774,40 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+static int
+dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- /* placeholder for set command */
+ struct nlattr *a;
+ int rem, ret;
+
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PHASE_OFFSET_MONITOR:
+ ret = dpll_phase_offset_monitor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PHASE_OFFSET_AVG_FACTOR:
+ ret = dpll_phase_offset_avg_factor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+
return 0;
}
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll = info->user_ptr[0];
+
+ return dpll_set_from_nlattr(dpll, info);
+}
+
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
diff --git a/drivers/dpll/dpll_netlink.h b/drivers/dpll/dpll_netlink.h
index a9cfd55f57fc..dd28b56d27c5 100644
--- a/drivers/dpll/dpll_netlink.h
+++ b/drivers/dpll/dpll_netlink.h
@@ -11,3 +11,5 @@ int dpll_device_delete_ntf(struct dpll_device *dpll);
int dpll_pin_create_ntf(struct dpll_pin *pin);
int dpll_pin_delete_ntf(struct dpll_pin *pin);
+
+int __dpll_pin_change_ntf(struct dpll_pin *pin);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index fe9b6893d261..3c6d570babf8 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -24,6 +24,11 @@ const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1] = {
[DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
};
+const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+};
+
/* DPLL_CMD_DEVICE_ID_GET - do */
static const struct nla_policy dpll_device_id_get_nl_policy[DPLL_A_TYPE + 1] = {
[DPLL_A_MODULE_NAME] = { .type = NLA_NUL_STRING, },
@@ -37,8 +42,10 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_ID + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
+ [DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
+ [DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -62,7 +69,7 @@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] =
};
/* DPLL_CMD_PIN_SET - do */
-static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = {
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_REFERENCE_SYNC + 1] = {
[DPLL_A_PIN_ID] = { .type = NLA_U32, },
[DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
[DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
@@ -72,6 +79,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY
[DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
[DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, },
[DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, },
+ [DPLL_A_PIN_REFERENCE_SYNC] = NLA_POLICY_NESTED(dpll_reference_sync_nl_policy),
};
/* Ops table for dpll */
@@ -105,7 +113,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_ID,
+ .maxattr = DPLL_A_PHASE_OFFSET_AVG_FACTOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
@@ -139,7 +147,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_pin_set_doit,
.post_doit = dpll_pin_post_doit,
.policy = dpll_pin_set_nl_policy,
- .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY,
+ .maxattr = DPLL_A_PIN_REFERENCE_SYNC,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
};
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index f491262bee4f..3da10cfe9a6e 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -14,6 +14,7 @@
/* Common nested types */
extern const struct nla_policy dpll_pin_parent_device_nl_policy[DPLL_A_PIN_PHASE_OFFSET + 1];
extern const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1];
+extern const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1];
int dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig
new file mode 100644
index 000000000000..5bbca1400581
--- /dev/null
+++ b/drivers/dpll/zl3073x/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config ZL3073X
+ tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
+ depends on NET
+ select DPLL
+ select NET_DEVLINK
+ select REGMAP
+ help
+ This driver supports the Microchip Azurite family of DPLL/PTP/SyncE
+ devices providing up to 5 independent DPLL channels, up to 10 input
+ pins and up to 20 output pins.
+
+ To compile this driver as a module, choose M here. The module
+ will be called zl3073x.
+
+config ZL3073X_I2C
+ tristate "I2C bus implementation for Microchip Azurite devices"
+ depends on I2C && NET
+ select REGMAP_I2C
+ select ZL3073X
+ help
+ This is the I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_i2c.
+
+config ZL3073X_SPI
+ tristate "SPI bus implementation for Microchip Azurite devices"
+ depends on NET && SPI
+ select REGMAP_SPI
+ select ZL3073X
+ help
+ This is the SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_spi.
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
new file mode 100644
index 000000000000..84e22aae57e5
--- /dev/null
+++ b/drivers/dpll/zl3073x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_ZL3073X) += zl3073x.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o prop.o
+
+obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
+zl3073x_i2c-objs := i2c.o
+
+obj-$(CONFIG_ZL3073X_SPI) += zl3073x_spi.o
+zl3073x_spi-objs := spi.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
new file mode 100644
index 000000000000..092e7027948a
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.c
@@ -0,0 +1,1266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string_choices.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "regs.h"
+
+/* Chip IDs for zl30731 */
+static const u16 zl30731_ids[] = {
+ 0x0E93,
+ 0x1E93,
+ 0x2E93,
+};
+
+const struct zl3073x_chip_info zl30731_chip_info = {
+ .ids = zl30731_ids,
+ .num_ids = ARRAY_SIZE(zl30731_ids),
+ .num_channels = 1,
+};
+EXPORT_SYMBOL_NS_GPL(zl30731_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30732 */
+static const u16 zl30732_ids[] = {
+ 0x0E30,
+ 0x0E94,
+ 0x1E94,
+ 0x1F60,
+ 0x2E94,
+ 0x3FC4,
+};
+
+const struct zl3073x_chip_info zl30732_chip_info = {
+ .ids = zl30732_ids,
+ .num_ids = ARRAY_SIZE(zl30732_ids),
+ .num_channels = 2,
+};
+EXPORT_SYMBOL_NS_GPL(zl30732_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30733 */
+static const u16 zl30733_ids[] = {
+ 0x0E95,
+ 0x1E95,
+ 0x2E95,
+};
+
+const struct zl3073x_chip_info zl30733_chip_info = {
+ .ids = zl30733_ids,
+ .num_ids = ARRAY_SIZE(zl30733_ids),
+ .num_channels = 3,
+};
+EXPORT_SYMBOL_NS_GPL(zl30733_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30734 */
+static const u16 zl30734_ids[] = {
+ 0x0E96,
+ 0x1E96,
+ 0x2E96,
+};
+
+const struct zl3073x_chip_info zl30734_chip_info = {
+ .ids = zl30734_ids,
+ .num_ids = ARRAY_SIZE(zl30734_ids),
+ .num_channels = 4,
+};
+EXPORT_SYMBOL_NS_GPL(zl30734_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30735 */
+static const u16 zl30735_ids[] = {
+ 0x0E97,
+ 0x1E97,
+ 0x2E97,
+};
+
+const struct zl3073x_chip_info zl30735_chip_info = {
+ .ids = zl30735_ids,
+ .num_ids = ARRAY_SIZE(zl30735_ids),
+ .num_channels = 5,
+};
+EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
+
+#define ZL_RANGE_OFFSET 0x80
+#define ZL_PAGE_SIZE 0x80
+#define ZL_NUM_PAGES 256
+#define ZL_PAGE_SEL 0x7F
+#define ZL_PAGE_SEL_MASK GENMASK(7, 0)
+#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
+
+/* Regmap range configuration */
+static const struct regmap_range_cfg zl3073x_regmap_range = {
+ .range_min = ZL_RANGE_OFFSET,
+ .range_max = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .selector_reg = ZL_PAGE_SEL,
+ .selector_mask = ZL_PAGE_SEL_MASK,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = ZL_PAGE_SIZE,
+};
+
+static bool
+zl3073x_is_volatile_reg(struct device *dev __maybe_unused, unsigned int reg)
+{
+ /* Only page selector is non-volatile */
+ return reg != ZL_PAGE_SEL;
+}
+
+const struct regmap_config zl3073x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .ranges = &zl3073x_regmap_range,
+ .num_ranges = 1,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = zl3073x_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS_GPL(zl3073x_regmap_config, "ZL3073X");
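+/* A worked example of the paged addressing above (illustrative, not used
+ * by the driver): device register page 3, offset 0x12 maps to virtual
+ * address ZL_RANGE_OFFSET + 3 * ZL_PAGE_SIZE + 0x12 = 0x80 + 0x180 + 0x12
+ * = 0x212. Accessing that address makes regmap first write 3 to the page
+ * selector register ZL_PAGE_SEL (0x7F) and then access offset 0x12 within
+ * the 0x80-byte window.
+ */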
+
+/**
+ * zl3073x_ref_freq_factorize - factorize given frequency
+ * @freq: input frequency
+ * @base: base frequency
+ * @mult: multiplier
+ *
+ * Checks if the given frequency can be factorized using one of the
+ * supported base frequencies. If so, the base frequency and multiplier
+ * are stored into the corresponding output parameters if they are not NULL.
+ *
+ * Return: 0 on success, -EINVAL if the frequency cannot be factorized
+ */
+int
+zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
+{
+ static const u16 base_freqs[] = {
+ 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
+ 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
+ 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
+ 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
+ 32000, 40000, 50000, 62500,
+ };
+ u32 div;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
+ div = freq / base_freqs[i];
+
+ if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
+ if (base)
+ *base = base_freqs[i];
+ if (mult)
+ *mult = div;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
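+/* Worked example for the factorization above (illustrative values): for
+ * freq = 10000000 Hz the loop skips all bases below 160 because the
+ * quotient exceeds U16_MAX, then matches base = 160 with
+ * 10000000 / 160 = 62500 and zero remainder, so *base = 160 and
+ * *mult = 62500. A frequency like 10000001 Hz matches no table entry and
+ * yields -EINVAL.
+ */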
+
+static bool
+zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
+{
+ /* Check that multiop lock is held when accessing registers
+ * from page 10 and above, except page 255 which does not
+ * need this protection.
+ */
+ if (ZL_REG_PAGE(reg) >= 10 && ZL_REG_PAGE(reg) < 255)
+ lockdep_assert_held(&zldev->multiop_lock);
+
+ /* Check the index is in valid range for indexed register */
+ if (ZL_REG_OFFSET(reg) > ZL_REG_MAX_OFFSET(reg)) {
+ dev_err(zldev->dev, "Index out of range for reg 0x%04lx\n",
+ ZL_REG_ADDR(reg));
+ return false;
+ }
+ /* Check the requested size corresponds to register size */
+ if (ZL_REG_SIZE(reg) != size) {
+ dev_err(zldev->dev, "Invalid size %zu for reg 0x%04lx\n",
+ size, ZL_REG_ADDR(reg));
+ return false;
+ }
+
+ return true;
+}
+
+static int
+zl3073x_read_reg(struct zl3073x_dev *zldev, unsigned int reg, void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_read(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_write_reg(struct zl3073x_dev *zldev, unsigned int reg, const void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_write(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to write reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_read_u8 - read value from 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val)
+{
+ return zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+}
+
+/**
+ * zl3073x_write_u8 - write value to 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val)
+{
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u16 - read value from 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be16_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u16 - write value to 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val)
+{
+ cpu_to_be16s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u32 - read value from 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be32_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u32 - write value to 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val)
+{
+ cpu_to_be32s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u48 - read value from 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 48bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val)
+{
+ u8 buf[6];
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, buf, sizeof(buf));
+ if (!rc)
+ *val = get_unaligned_be48(buf);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u48 - write value to 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 48bit register.
+ * The value must be within the interval <S48_MIN, U48_MAX>.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val)
+{
+ u8 buf[6];
+
+ /* Check the value belongs to <S48_MIN, U48_MAX>
+ * Any value >= S48_MIN (in u64 representation) has bits 63..47 set.
+ */
+ if (val > GENMASK_ULL(47, 0) && val < GENMASK_ULL(63, 47)) {
+ dev_err(zldev->dev, "Value 0x%0llx out of range\n", val);
+ return -EINVAL;
+ }
+
+ put_unaligned_be48(val, buf);
+
+ return zl3073x_write_reg(zldev, reg, buf, sizeof(buf));
+}
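+/* Range-check example for the helper above (illustrative): val = -2 passed
+ * as u64 is 0xFFFFFFFFFFFFFFFE, which is not below GENMASK_ULL(63, 47)
+ * (0xFFFF800000000000, the sign-extended S48_MIN), so it is accepted and
+ * encoded as the 48-bit value 0xFFFFFFFFFFFE. A value such as
+ * 0x0001000000000000 is neither a valid u48 nor a sign-extended s48 and
+ * is rejected with -EINVAL.
+ */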
+
+/**
+ * zl3073x_poll_zero_u8 - wait for register to be cleared by device
+ * @zldev: zl3073x device pointer
+ * @reg: register to poll (has to be an 8bit register)
+ * @mask: bit mask for polling
+ *
+ * Waits for bits specified by @mask in register @reg value to be cleared
+ * by the device.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask)
+{
+ /* Register polling sleep & timeout */
+#define ZL_POLL_SLEEP_US 10
+#define ZL_POLL_TIMEOUT_US 2000000
+ unsigned int val;
+
+ /* Check the register is 8bit */
+ if (ZL_REG_SIZE(reg) != 1) {
+ dev_err(zldev->dev, "Invalid reg 0x%04lx size for polling\n",
+ ZL_REG_ADDR(reg));
+ return -EINVAL;
+ }
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ return regmap_read_poll_timeout(zldev->regmap, reg, val, !(val & mask),
+ ZL_POLL_SLEEP_US, ZL_POLL_TIMEOUT_US);
+}
+
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val)
+{
+ int rc;
+
+ /* Set mask for the operation */
+ rc = zl3073x_write_u16(zldev, mask_reg, mask_val);
+ if (rc)
+ return rc;
+
+ /* Trigger the operation */
+ rc = zl3073x_write_u8(zldev, op_reg, op_val);
+ if (rc)
+ return rc;
+
+ /* Wait for the operation to actually finish */
+ return zl3073x_poll_zero_u8(zldev, op_reg, op_val);
+}
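+/* Usage sketch for the mailbox handshake above (illustrative; it mirrors
+ * the pattern used by the state-fetch helpers later in this file): to read
+ * the configuration of input reference 3, latch it via the read semaphore
+ * and then read the mailbox registers:
+ *
+ *	rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ *			   ZL_REG_REF_MB_MASK, BIT(3));
+ *	if (!rc)
+ *		rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref_config);
+ */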
+
+/**
+ * zl3073x_do_hwreg_op - Perform HW register read/write operation
+ * @zldev: zl3073x device pointer
+ * @op: operation to perform
+ *
+ * Performs requested operation and waits for its completion.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_do_hwreg_op(struct zl3073x_dev *zldev, u8 op)
+{
+ int rc;
+
+ /* Set requested operation and set pending bit */
+ rc = zl3073x_write_u8(zldev, ZL_REG_HWREG_OP, op | ZL_HWREG_OP_PENDING);
+ if (rc)
+ return rc;
+
+ /* Poll for completion - pending bit cleared */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_HWREG_OP,
+ ZL_HWREG_OP_PENDING);
+}
+
+/**
+ * zl3073x_read_hwreg - Read HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value of the HW register
+ *
+ * Reads HW register value and stores it into @value.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value)
+{
+ int rc;
+
+ /* Set address to read data from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Perform the read operation */
+ rc = zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_READ);
+ if (rc)
+ return rc;
+
+ /* Read the received data */
+ return zl3073x_read_u32(zldev, ZL_REG_HWREG_READ_DATA, value);
+}
+
+/**
+ * zl3073x_write_hwreg - Write value to HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW registers address
+ * @value: Value to be written to HW register
+ *
+ * Stores the requested value into HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value)
+{
+ int rc;
+
+ /* Set address to write data to */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set data to be written */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_WRITE_DATA, value);
+ if (rc)
+ return rc;
+
+ /* Perform the write operation */
+ return zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_WRITE);
+}
+
+/**
+ * zl3073x_update_hwreg - Update certain bits in HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written into HW register
+ * @mask: Bitmask indicating bits to be updated
+ *
+ * Reads given HW register, updates requested bits specified by value and
+ * mask and writes result back to HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask)
+{
+ u32 tmp;
+ int rc;
+
+ rc = zl3073x_read_hwreg(zldev, addr, &tmp);
+ if (rc)
+ return rc;
+
+ tmp &= ~mask;
+ tmp |= value & mask;
+
+ return zl3073x_write_hwreg(zldev, addr, tmp);
+}
+
+/**
+ * zl3073x_write_hwreg_seq - Write HW registers sequence
+ * @zldev: pointer to device structure
+ * @seq: pointer to first sequence item
+ * @num_items: number of items in sequence
+ *
+ * Writes given HW registers sequence.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_items; i++) {
+ dev_dbg(zldev->dev, "Write 0x%0x [0x%0x] to 0x%0x",
+ seq[i].value, seq[i].mask, seq[i].addr);
+
+ if (seq[i].mask == U32_MAX)
+ /* Write value directly */
+ rc = zl3073x_write_hwreg(zldev, seq[i].addr,
+ seq[i].value);
+ else
+ /* Update only bits specified by the mask */
+ rc = zl3073x_update_hwreg(zldev, seq[i].addr,
+ seq[i].value, seq[i].mask);
+ if (rc)
+ return rc;
+
+ if (seq[i].wait)
+ msleep(seq[i].wait);
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_state_fetch - get input reference state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to fetch state for
+ *
+ * Fetches the invariant information for the given input reference and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_ref *input = &zldev->ref[index];
+ u8 ref_config;
+ int rc;
+
+ /* If the input is differential then the configuration for N-pin
+ * reference is ignored and P-pin config is used for both.
+ */
+ if (zl3073x_is_n_pin(index) &&
+ zl3073x_ref_is_diff(zldev, index - 1)) {
+ input->enabled = zl3073x_ref_is_enabled(zldev, index - 1);
+ input->diff = true;
+
+ return 0;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read ref_config register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref_config);
+ if (rc)
+ return rc;
+
+ input->enabled = FIELD_GET(ZL_REF_CONFIG_ENABLE, ref_config);
+ input->diff = FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref_config);
+
+ dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
+ str_enabled_disabled(input->enabled),
+ input->diff ? "differential" : "single-ended");
+
+ return rc;
+}
+
+/**
+ * zl3073x_out_state_fetch - get output state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to fetch state for
+ *
+ * Fetches the invariant information for the given output (not output pin)
+ * and stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_out *out = &zldev->out[index];
+ u8 output_ctrl, output_mode;
+ int rc;
+
+ /* Read output configuration */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &output_ctrl);
+ if (rc)
+ return rc;
+
+ /* Store info about output enablement and synthesizer the output
+ * is connected to.
+ */
+ out->enabled = FIELD_GET(ZL_OUTPUT_CTRL_EN, output_ctrl);
+ out->synth = FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, output_ctrl);
+
+ dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
+ str_enabled_disabled(out->enabled), out->synth);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read output_mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Extract and store output signal format */
+ out->signal_format = FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT,
+ output_mode);
+
+ dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
+ out->signal_format);
+
+ return rc;
+}
+
+/**
+ * zl3073x_synth_state_fetch - get synth state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to fetch state for
+ *
+ * Fetches the invariant information for the given synthesizer and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_synth *synth = &zldev->synth[index];
+ u16 base, m, n;
+ u8 synth_ctrl;
+ u32 mult;
+ int rc;
+
+ /* Read synth control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth_ctrl);
+ if (rc)
+ return rc;
+
+ /* Store info about synth enablement and DPLL channel the synth is
+ * driven by.
+ */
+ synth->enabled = FIELD_GET(ZL_SYNTH_CTRL_EN, synth_ctrl);
+ synth->dpll = FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth_ctrl);
+
+ dev_dbg(zldev->dev, "SYNTH%u is %s and driven by DPLL%u\n", index,
+ str_enabled_disabled(synth->enabled), synth->dpll);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read synth configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
+ ZL_REG_SYNTH_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* The output frequency is determined by the following formula:
+ * base * multiplier * numerator / denominator
+ *
+ * Read registers with these values
+ */
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &base);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &mult);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &m);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &n);
+ if (rc)
+ return rc;
+
+ /* Check denominator for zero to avoid div by 0 */
+ if (!n) {
+ dev_err(zldev->dev,
+ "Zero divisor for SYNTH%u retrieved from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ /* Compute and store synth frequency */
+ zldev->synth[index].freq = div_u64(mul_u32_u32(base * m, mult), n);
+
+ dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
+ zldev->synth[index].freq);
+
+ return rc;
+}
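+/* Frequency formula example (illustrative register values): base = 31250,
+ * mult = 4, m = 8 and n = 1 give a synthesizer frequency of
+ * 31250 * 8 * 4 / 1 = 1000000 Hz.
+ */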
+
+static int
+zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_ref_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch input state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_SYNTHS; i++) {
+ rc = zl3073x_synth_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch synth state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_OUTS; i++) {
+ rc = zl3073x_out_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch output state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_phase_offsets_update - update reference phase offsets
+ * @zldev: pointer to zl3073x_dev structure
+ * @channel: DPLL channel number or -1
+ *
+ * The function asks the device to update the phase offset latch registers
+ * with the latest measured values. There are 2 sets of latch registers:
+ *
+ * 1) Up to 5 DPLL-to-connected-ref registers that contain phase offset
+ * values between a particular DPLL channel and its *connected* input
+ * reference.
+ *
+ * 2) 10 selected-DPLL-to-all-ref registers that contain phase offset values
+ * between the selected DPLL channel and all input references.
+ *
+ * If the caller is interested in 2) then it has to pass the DPLL channel
+ * number in the @channel parameter. If it is interested only in 1) then it
+ * should pass -1 in the @channel parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel)
+{
+ int rc;
+
+ /* Per datasheet we have to wait for 'dpll_ref_phase_err_rqst_rd'
+ * to be zero to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Select DPLL channel if it is specified */
+ if (channel != -1) {
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_IDX, channel);
+ if (rc)
+ return rc;
+ }
+
+ /* Request to update phase offsets measurement values */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+}
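+/* Call pattern (illustrative): the periodic worker below passes -1 to
+ * refresh only the DPLL-to-connected-ref latch set; a caller interested in
+ * offsets against all references for, e.g., DPLL channel 2 would instead
+ * call zl3073x_ref_phase_offsets_update(zldev, 2) before reading the
+ * selected-DPLL-to-all-ref latch registers.
+ */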
+
+/**
+ * zl3073x_ref_ffo_update - update reference fractional frequency offsets
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * The function asks the device to update the fractional frequency offset
+ * latch registers with the latest measured values, then reads them and
+ * stores the results into the per-reference state (zldev->ref[].ffo).
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_ref_ffo_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ /* Per datasheet we have to wait for 'ref_freq_meas_ctrl' to be zero
+ * to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Select all references for measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_3_0,
+ GENMASK(7, 0)); /* REF0P..REF3N */
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_4,
+ GENMASK(1, 0)); /* REF4P..REF4N */
+ if (rc)
+ return rc;
+
+ /* Request frequency offset measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Read DPLL-to-REFx frequency offset measurements */
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ u32 value;
+
+ /* Read value stored in units of 2^-32 signed */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_FREQ(i), &value);
+ if (rc)
+ return rc;
+
+ /* Reinterpret as signed and convert to ppm ->
+ * ffo = (10^6 * (s32)value) / 2^32
+ */
+ zldev->ref[i].ffo = mul_s64_u64_shr((s32)value, 1000000, 32);
+ }
+
+ return 0;
+}
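+/* Conversion example (illustrative): a register value of 42949673, which
+ * is roughly 0.01 in units of 2^-32, gives
+ * ffo = (1000000 * 42949673) >> 32, i.e. approximately 10000 ppm.
+ */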
+
+static void
+zl3073x_dev_periodic_work(struct kthread_work *work)
+{
+ struct zl3073x_dev *zldev = container_of(work, struct zl3073x_dev,
+ work.work);
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ /* Update DPLL-to-connected-ref phase offsets registers */
+ rc = zl3073x_ref_phase_offsets_update(zldev, -1);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+
+ /* Update references' fractional frequency offsets */
+ rc = zl3073x_ref_ffo_update(zldev);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to update fractional frequency offsets: %pe\n",
+ ERR_PTR(rc));
+
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ zl3073x_dpll_changes_check(zldpll);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work,
+ msecs_to_jiffies(500));
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor)
+{
+ u8 dpll_meas_ctrl, value;
+ int rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Convert requested factor to register value */
+ value = (factor + 1) & 0x0f;
+
+ /* Update phase measurement control register */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, value);
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Save the new factor */
+ zldev->phase_avg_factor = factor;
+
+ return 0;
+}
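+/* Encoding example (illustrative): the default factor 2 is written to the
+ * register field as (2 + 1) & 0x0f = 3, while a factor of 15 would wrap
+ * around to (15 + 1) & 0x0f = 0.
+ */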
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 dpll_meas_ctrl, mask = 0;
+ int rc;
+
+ /* Setup phase measurement averaging factor */
+ rc = zl3073x_dev_phase_avg_factor_set(zldev, zldev->phase_avg_factor);
+ if (rc)
+ return rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ mask |= BIT(zldpll->id);
+
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_start - Start normal operation
+ * @zldev: zl3073x device pointer
+ * @full: perform full initialization
+ *
+ * The function starts normal operation, which means registering all DPLLs and
+ * their pins, and starting monitoring. If full initialization is requested,
+ * the function additionally initializes the phase offset measurement block and
+ * fetches hardware-invariant parameters.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full)
+{
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ if (full) {
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to setup phase measurement\n");
+ return rc;
+ }
+ }
+
+ /* Register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n",
+ zldpll->id);
+ return rc;
+ }
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ return rc;
+ }
+
+ /* Start monitoring */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_stop - Stop normal operation
+ * @zldev: zl3073x device pointer
+ *
+ * The function stops normal operation, which means unregistering all
+ * DPLLs and their pins and stopping the monitoring.
+ */
+void zl3073x_dev_stop(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+
+ /* Stop monitoring */
+ kthread_cancel_delayed_work_sync(&zldev->work);
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ if (zldpll->dpll_dev)
+ zl3073x_dpll_unregister(zldpll);
+ }
+}
+
+static void zl3073x_dev_dpll_fini(void *ptr)
+{
+ struct zl3073x_dpll *zldpll, *next;
+ struct zl3073x_dev *zldev = ptr;
+
+ /* Stop monitoring and unregister DPLLs */
+ zl3073x_dev_stop(zldev);
+
+ /* Destroy monitoring thread */
+ if (zldev->kworker) {
+ kthread_destroy_worker(zldev->kworker);
+ zldev->kworker = NULL;
+ }
+
+ /* Free all DPLLs */
+ list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
+ list_del(&zldpll->list);
+ zl3073x_dpll_free(zldpll);
+ }
+}
+
+static int
+zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
+{
+ struct kthread_worker *kworker;
+ struct zl3073x_dpll *zldpll;
+ unsigned int i;
+ int rc;
+
+ INIT_LIST_HEAD(&zldev->dplls);
+
+ /* Allocate all DPLLs */
+ for (i = 0; i < num_dplls; i++) {
+ zldpll = zl3073x_dpll_alloc(zldev, i);
+ if (IS_ERR(zldpll)) {
+ dev_err_probe(zldev->dev, PTR_ERR(zldpll),
+ "Failed to alloc DPLL%u\n", i);
+ rc = PTR_ERR(zldpll);
+ goto error;
+ }
+
+ list_add_tail(&zldpll->list, &zldev->dplls);
+ }
+
+ /* Initialize monitoring thread */
+ kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
+ kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
+ if (IS_ERR(kworker)) {
+ rc = PTR_ERR(kworker);
+ goto error;
+ }
+ zldev->kworker = kworker;
+
+ /* Start normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc, "Failed to start device\n");
+ goto error;
+ }
+
+ /* Add devres action to release DPLL related resources */
+ rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ zl3073x_dev_dpll_fini(zldev);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dev_probe - initialize zl3073x device
+ * @zldev: pointer to zl3073x device
+ * @chip_info: chip info based on compatible
+ *
+ * Common initialization of zl3073x device structure.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info)
+{
+ u16 id, revision, fw_ver;
+ unsigned int i;
+ u32 cfg_ver;
+ int rc;
+
+ /* Read chip ID */
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ /* Check it matches */
+ for (i = 0; i < chip_info->num_ids; i++) {
+ if (id == chip_info->ids[i])
+ break;
+ }
+
+ if (i == chip_info->num_ids) {
+ return dev_err_probe(zldev->dev, -ENODEV,
+ "Unknown or non-match chip ID: 0x%0x\n",
+ id);
+ }
+
+ /* Read revision, firmware version and custom config version */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "ChipID(%X), ChipRev(%X), FwVer(%u)\n", id,
+ revision, fw_ver);
+ dev_dbg(zldev->dev, "Custom config version: %lu.%lu.%lu.%lu\n",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ /* Generate a random clock ID as the device has no such property that
+ * could be used for this purpose. A user can later change this value
+ * using devlink.
+ */
+ zldev->clock_id = get_random_u64();
+
+ /* Default phase offset averaging factor */
+ zldev->phase_avg_factor = 2;
+
+ /* Initialize mutex for operations where multiple reads, writes
+ * and/or polls are required to be done atomically.
+ */
+ rc = devm_mutex_init(zldev->dev, &zldev->multiop_lock);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to initialize mutex\n");
+
+ /* Register DPLL channels */
+ rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
+ if (rc)
+ return rc;
+
+ /* Register the devlink instance and parameters */
+ rc = zl3073x_devlink_register(zldev);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to register devlink instance\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_dev_probe, "ZL3073X");
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
new file mode 100644
index 000000000000..1dca4ddcf235
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_CORE_H
+#define _ZL3073X_CORE_H
+
+#include <linux/bitfield.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct device;
+struct regmap;
+struct zl3073x_dpll;
+
+/*
+ * Hardware limits for ZL3073x chip family
+ */
+#define ZL3073X_MAX_CHANNELS 5
+#define ZL3073X_NUM_REFS 10
+#define ZL3073X_NUM_OUTS 10
+#define ZL3073X_NUM_SYNTHS 5
+#define ZL3073X_NUM_INPUT_PINS ZL3073X_NUM_REFS
+#define ZL3073X_NUM_OUTPUT_PINS (ZL3073X_NUM_OUTS * 2)
+#define ZL3073X_NUM_PINS (ZL3073X_NUM_INPUT_PINS + \
+ ZL3073X_NUM_OUTPUT_PINS)
+
+/**
+ * struct zl3073x_ref - input reference invariant info
+ * @enabled: input reference is enabled or disabled
+ * @diff: true if input reference is differential
+ * @ffo: current fractional frequency offset
+ */
+struct zl3073x_ref {
+ bool enabled;
+ bool diff;
+ s64 ffo;
+};
+
+/**
+ * struct zl3073x_out - output invariant info
+ * @enabled: out is enabled or disabled
+ * @synth: synthesizer the out is connected to
+ * @signal_format: out signal format
+ */
+struct zl3073x_out {
+ bool enabled;
+ u8 synth;
+ u8 signal_format;
+};
+
+/**
+ * struct zl3073x_synth - synthesizer invariant info
+ * @freq: synthesizer frequency
+ * @dpll: ID of DPLL the synthesizer is driven by
+ * @enabled: synth is enabled or disabled
+ */
+struct zl3073x_synth {
+ u32 freq;
+ u8 dpll;
+ bool enabled;
+};
+
+/**
+ * struct zl3073x_dev - zl3073x device
+ * @dev: pointer to device
+ * @regmap: regmap to access device registers
+ * @multiop_lock: to serialize multiple register operations
+ * @ref: array of input references' invariants
+ * @out: array of outs' invariants
+ * @synth: array of synths' invariants
+ * @dplls: list of DPLLs
+ * @kworker: thread for periodic work
+ * @work: periodic work
+ * @clock_id: clock id of the device
+ * @phase_avg_factor: phase offset measurement averaging factor
+ */
+struct zl3073x_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex multiop_lock;
+
+ /* Invariants */
+ struct zl3073x_ref ref[ZL3073X_NUM_REFS];
+ struct zl3073x_out out[ZL3073X_NUM_OUTS];
+ struct zl3073x_synth synth[ZL3073X_NUM_SYNTHS];
+
+ /* DPLL channels */
+ struct list_head dplls;
+
+ /* Monitor */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+
+ /* Devlink parameters */
+ u64 clock_id;
+ u8 phase_avg_factor;
+};
+
+struct zl3073x_chip_info {
+ const u16 *ids;
+ size_t num_ids;
+ int num_channels;
+};
+
+extern const struct zl3073x_chip_info zl30731_chip_info;
+extern const struct zl3073x_chip_info zl30732_chip_info;
+extern const struct zl3073x_chip_info zl30733_chip_info;
+extern const struct zl3073x_chip_info zl30734_chip_info;
+extern const struct zl3073x_chip_info zl30735_chip_info;
+extern const struct regmap_config zl3073x_regmap_config;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info);
+
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full);
+void zl3073x_dev_stop(struct zl3073x_dev *zldev);
+
+static inline u8 zl3073x_dev_phase_avg_factor_get(struct zl3073x_dev *zldev)
+{
+ return zldev->phase_avg_factor;
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor);
+
+/**********************
+ * Registers operations
+ **********************/
+
+/**
+ * struct zl3073x_hwreg_seq_item - HW register write sequence item
+ * @addr: HW register to be written
+ * @value: value to be written to HW register
+ * @mask: bitmask indicating bits to be updated
+ * @wait: number of ms to wait after register write
+ */
+struct zl3073x_hwreg_seq_item {
+ u32 addr;
+ u32 value;
+ u32 mask;
+ u32 wait;
+};
+
+#define HWREG_SEQ_ITEM(_addr, _value, _mask, _wait) \
+{ \
+ .addr = _addr, \
+ .value = FIELD_PREP_CONST(_mask, _value), \
+ .mask = _mask, \
+ .wait = _wait, \
+}
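+/* Usage sketch for the macro above (illustrative; the HW register address
+ * 0x1000 and the values are made up): declare a sequence and write it in
+ * one call:
+ *
+ *	static const struct zl3073x_hwreg_seq_item seq[] = {
+ *		HWREG_SEQ_ITEM(0x1000, 1, BIT(0), 0),
+ *		HWREG_SEQ_ITEM(0x1000, 0, BIT(0), 10),
+ *	};
+ *
+ *	rc = zl3073x_write_hwreg_seq(zldev, seq, ARRAY_SIZE(seq));
+ */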
+
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val);
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val);
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val);
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val);
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val);
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value);
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value);
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask);
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items);
+
+/*****************
+ * Misc operations
+ *****************/
+
+int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel);
+
+static inline bool
+zl3073x_is_n_pin(u8 id)
+{
+ /* P-pin ids are even while N-pin ids are odd */
+ return id & 1;
+}
+
+static inline bool
+zl3073x_is_p_pin(u8 id)
+{
+ return !zl3073x_is_n_pin(id);
+}
+
+/**
+ * zl3073x_input_pin_ref_get - get reference for given input pin
+ * @id: input pin id
+ *
+ * Return: reference id for the given input pin
+ */
+static inline u8
+zl3073x_input_pin_ref_get(u8 id)
+{
+ return id;
+}
+
+/**
+ * zl3073x_output_pin_out_get - get output for the given output pin
+ * @id: output pin id
+ *
+ * Return: output id for the given output pin
+ */
+static inline u8
+zl3073x_output_pin_out_get(u8 id)
+{
+ /* Output pin pair shares the single output */
+ return id / 2;
+}
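+/* Mapping example (illustrative): output pin id 7 is an N-pin (odd id) and
+ * belongs to output 7 / 2 = 3; pin id 6 is the P-pin of the same output
+ * pair.
+ */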
+
+/**
+ * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: the latest measured fractional frequency offset
+ */
+static inline s64
+zl3073x_ref_ffo_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].ffo;
+}
+
+/**
+ * zl3073x_ref_is_diff - check if the given input reference is differential
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].diff;
+}
+
+/**
+ * zl3073x_ref_is_enabled - check if the given input reference is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if input reference is enabled, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].enabled;
+}
+
+/**
+ * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: ID of DPLL the given synthesizer is driven by
+ */
+static inline u8
+zl3073x_synth_dpll_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].dpll;
+}
+
+/**
+ * zl3073x_synth_freq_get - get synth current freq
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: frequency of the given synthesizer
+ */
+static inline u32
+zl3073x_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].freq;
+}
+
+/**
+ * zl3073x_synth_is_enabled - check if the given synth is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: true if synth is enabled, false otherwise
+ */
+static inline bool
+zl3073x_synth_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].enabled;
+}
+
+/**
+ * zl3073x_out_synth_get - get synth connected to given output
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8
+zl3073x_out_synth_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->out[index].synth;
+}
+
+/**
+ * zl3073x_out_is_enabled - check if the given output is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if the output is enabled, false otherwise
+ */
+static inline bool
+zl3073x_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ u8 synth;
+
+ /* Output is enabled only if associated synth is enabled */
+ synth = zl3073x_out_synth_get(zldev, index);
+ if (zl3073x_synth_is_enabled(zldev, synth))
+ return zldev->out[index].enabled;
+
+ return false;
+}
+
+/**
+ * zl3073x_out_signal_format_get - get output signal format
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: signal format of given output
+ */
+static inline u8
+zl3073x_out_signal_format_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->out[index].signal_format;
+}
+
+/**
+ * zl3073x_out_dpll_get - get DPLL ID the output is driven by
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: ID of DPLL the given output is driven by
+ */
+static inline
+u8 zl3073x_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
+{
+ u8 synth;
+
+ /* Get synthesizer connected to given output */
+ synth = zl3073x_out_synth_get(zldev, index);
+
+ /* Return DPLL that drives the synth */
+ return zl3073x_synth_dpll_get(zldev, synth);
+}
+
+/**
+ * zl3073x_out_is_diff - check if the given output is differential
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool
+zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ switch (zl3073x_out_signal_format_get(zldev, index)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_output_pin_is_enabled - check if the given output pin is enabled
+ * @zldev: pointer to zl3073x device
+ * @id: output pin id
+ *
+ * Checks that the output associated with the given output pin is enabled
+ * and that its signal format enables the given pin.
+ *
+ * Return: true if output pin is enabled, false if output pin is disabled
+ */
+static inline bool
+zl3073x_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
+{
+ u8 output = zl3073x_output_pin_out_get(id);
+
+ /* Check if the whole output is enabled */
+ if (!zl3073x_out_is_enabled(zldev, output))
+ return false;
+
+ /* Check signal format */
+ switch (zl3073x_out_signal_format_get(zldev, output)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED:
+ /* Both output pins are disabled by signal format */
+ return false;
+
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P:
+ /* Output is one single ended P-pin output */
+ if (zl3073x_is_n_pin(id))
+ return false;
+ break;
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N:
+ /* Output is one single ended N-pin output */
+ if (zl3073x_is_p_pin(id))
+ return false;
+ break;
+ default:
+ /* For other format both pins are enabled */
+ break;
+ }
+
+ return true;
+}
+
+#endif /* _ZL3073X_CORE_H */
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
new file mode 100644
index 000000000000..ccc22332b346
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/device/devres.h>
+#include <linux/netlink.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "flash.h"
+#include "fw.h"
+#include "regs.h"
+
+/**
+ * zl3073x_devlink_info_get - Devlink device info callback
+ * @devlink: devlink structure pointer
+ * @req: devlink request pointer to store information
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ u16 id, revision, fw_ver;
+ char buf[16];
+ u32 cfg_ver;
+ int rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", id);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", revision);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%u", fw_ver);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ /* No custom config version */
+ if (cfg_ver == U32_MAX)
+ return 0;
+
+ snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ return devlink_info_version_running_put(req, "custom_cfg", buf);
+}
+
+static int
+zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ /* Stop normal operation */
+ zl3073x_dev_stop(zldev);
+
+ return 0;
+}
+
+static int
+zl3073x_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ union devlink_param_value val;
+ int rc;
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ rc = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ &val);
+ if (rc)
+ return rc;
+
+ if (zldev->clock_id != val.vu64) {
+ dev_dbg(zldev->dev,
+ "'clock_id' changed to %016llx\n", val.vu64);
+ zldev->clock_id = val.vu64;
+ }
+
+ /* Restart normal operation */
+ rc = zl3073x_dev_start(zldev, false);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to re-start normal operation\n");
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return 0;
+}
+
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+
+ devlink_flash_update_status_notify(devlink, msg, component, done,
+ total);
+}
+
+/**
+ * zl3073x_devlink_flash_prepare - Prepare and enter flash mode
+ * @zldev: zl3073x device pointer
+ * @zlfw: pointer to loaded firmware
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function stops normal operation and switches the device to flash mode.
+ * If an error occurs, normal operation is resumed.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_prepare(struct zl3073x_dev *zldev,
+ struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *util;
+ int rc;
+
+ util = zlfw->component[ZL_FW_COMPONENT_UTIL];
+ if (!util) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Utility is missing in firmware",
+ NULL, 0, 0);
+ return -ENOEXEC;
+ }
+
+ /* Stop normal operation prior entering flash mode */
+ zl3073x_dev_stop(zldev);
+
+ rc = zl3073x_flash_mode_enter(zldev, util->data, util->size, extack);
+ if (rc) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to enter flash mode",
+ NULL, 0, 0);
+
+ /* Resume normal operation */
+ zl3073x_dev_start(zldev, true);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_devlink_flash_finish - Leave flash mode and resume normal operation
+ * @zldev: zl3073x device pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function switches the device back to standard mode and resumes normal
+ * operation.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_finish(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Reset device CPU to normal mode */
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ /* Resume normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc)
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to start normal operation",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_devlink_flash_update - Devlink flash update callback
+ * @devlink: devlink structure pointer
+ * @params: flashing parameters pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_fw *zlfw;
+ int rc = 0;
+
+ zlfw = zl3073x_fw_load(zldev, params->fw->data, params->fw->size,
+ extack);
+ if (IS_ERR(zlfw)) {
+ zl3073x_devlink_flash_notify(zldev, "Failed to load firmware",
+ NULL, 0, 0);
+ rc = PTR_ERR(zlfw);
+ goto finish;
+ }
+
+ /* Stop normal operation and enter flash mode */
+ rc = zl3073x_devlink_flash_prepare(zldev, zlfw, extack);
+ if (rc)
+ goto finish;
+
+ rc = zl3073x_fw_flash(zldev, zlfw, extack);
+ if (rc) {
+ zl3073x_devlink_flash_finish(zldev, extack);
+ goto finish;
+ }
+
+ /* Resume normal mode */
+ rc = zl3073x_devlink_flash_finish(zldev, extack);
+
+finish:
+ if (!IS_ERR(zlfw))
+ zl3073x_fw_free(zlfw);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+static const struct devlink_ops zl3073x_devlink_ops = {
+ .info_get = zl3073x_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = zl3073x_devlink_reload_down,
+ .reload_up = zl3073x_devlink_reload_up,
+ .flash_update = zl3073x_devlink_flash_update,
+};
+
+static void
+zl3073x_devlink_free(void *ptr)
+{
+ devlink_free(ptr);
+}
+
+/**
+ * zl3073x_devm_alloc - allocates zl3073x device structure
+ * @dev: pointer to device structure
+ *
+ * Allocates the zl3073x device structure as a device-managed resource.
+ *
+ * Return: pointer to zl3073x device on success, error pointer on error
+ */
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev)
+{
+ struct zl3073x_dev *zldev;
+ struct devlink *devlink;
+ int rc;
+
+ devlink = devlink_alloc(&zl3073x_devlink_ops, sizeof(*zldev), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ /* Add devres action to free devlink device */
+ rc = devm_add_action_or_reset(dev, zl3073x_devlink_free, devlink);
+ if (rc)
+ return ERR_PTR(rc);
+
+ zldev = devlink_priv(devlink);
+ zldev->dev = dev;
+ dev_set_drvdata(zldev->dev, zldev);
+
+ return zldev;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_devm_alloc, "ZL3073X");
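+/* Usage sketch (illustrative; loosely follows what a bus front end such as
+ * the I2C one is expected to do in its probe callback -- the actual bus
+ * drivers live in separate files):
+ *
+ *	zldev = zl3073x_devm_alloc(&client->dev);
+ *	if (IS_ERR(zldev))
+ *		return PTR_ERR(zldev);
+ *
+ *	zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
+ *	if (IS_ERR(zldev->regmap))
+ *		return PTR_ERR(zldev->regmap);
+ *
+ *	return zl3073x_dev_probe(zldev, chip_info);
+ */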
+
+static int
+zl3073x_devlink_param_clock_id_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (!val.vu64) {
+ NL_SET_ERR_MSG_MOD(extack, "'clock_id' must be non-zero");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param zl3073x_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(CLOCK_ID, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ zl3073x_devlink_param_clock_id_validate),
+};
+
+static void
+zl3073x_devlink_unregister(void *ptr)
+{
+ struct devlink *devlink = priv_to_devlink(ptr);
+
+ devl_lock(devlink);
+
+ /* Unregister devlink params */
+ devl_params_unregister(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+
+ /* Unregister devlink instance */
+ devl_unregister(devlink);
+
+ devl_unlock(devlink);
+}
+
+/**
+ * zl3073x_devlink_register - register devlink instance and params
+ * @zldev: zl3073x device to register the devlink for
+ *
+ * Register the devlink instance and parameters associated with the device.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_devlink_register(struct zl3073x_dev *zldev)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+ union devlink_param_value value;
+ int rc;
+
+ devl_lock(devlink);
+
+ /* Register devlink params */
+ rc = devl_params_register(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+ if (rc) {
+ devl_unlock(devlink);
+
+ return rc;
+ }
+
+ value.vu64 = zldev->clock_id;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ value);
+
+ /* Register devlink instance */
+ devl_register(devlink);
+
+ devl_unlock(devlink);
+
+ /* Add devres action to unregister devlink device */
+ return devm_add_action_or_reset(zldev->dev, zl3073x_devlink_unregister,
+ zldev);
+}
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
new file mode 100644
index 000000000000..63dfd6fa1cd6
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DEVLINK_H
+#define _ZL3073X_DEVLINK_H
+
+struct zl3073x_dev;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+
+int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total);
+
+#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
new file mode 100644
index 000000000000..93dc93eec79e
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -0,0 +1,2376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/dpll.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+
+#include "core.h"
+#include "dpll.h"
+#include "prop.h"
+#include "regs.h"
+
+#define ZL3073X_DPLL_REF_NONE ZL3073X_NUM_REFS
+#define ZL3073X_DPLL_REF_IS_VALID(_ref) ((_ref) != ZL3073X_DPLL_REF_NONE)
+
+/**
+ * struct zl3073x_dpll_pin - DPLL pin
+ * @list: this DPLL pin list entry
+ * @dpll: DPLL the pin is registered to
+ * @dpll_pin: pointer to registered dpll_pin
+ * @label: package label
+ * @dir: pin direction
+ * @id: pin id
+ * @prio: pin priority <0, 14>
+ * @selectable: pin is selectable in automatic mode
+ * @esync_control: embedded sync is controllable
+ * @pin_state: last saved pin state
+ * @phase_offset: last saved pin phase offset
+ * @freq_offset: last saved fractional frequency offset
+ */
+struct zl3073x_dpll_pin {
+ struct list_head list;
+ struct zl3073x_dpll *dpll;
+ struct dpll_pin *dpll_pin;
+ char label[8];
+ enum dpll_pin_direction dir;
+ u8 id;
+ u8 prio;
+ bool selectable;
+ bool esync_control;
+ enum dpll_pin_state pin_state;
+ s64 phase_offset;
+ s64 freq_offset;
+};
+
+/*
+ * Supported esync ranges for input and for output per output pair type
+ */
+static const struct dpll_pin_frequency esync_freq_ranges[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, 1),
+};
+
+/**
+ * zl3073x_dpll_is_input_pin - check if the pin is input one
+ * @pin: pin to check
+ *
+ * Return: true if pin is input, false if pin is output.
+ */
+static bool
+zl3073x_dpll_is_input_pin(struct zl3073x_dpll_pin *pin)
+{
+ return pin->dir == DPLL_PIN_DIRECTION_INPUT;
+}
+
+/**
+ * zl3073x_dpll_is_p_pin - check if the pin is P-pin
+ * @pin: pin to check
+ *
+ * Return: true if the pin is P-pin, false if it is N-pin
+ */
+static bool
+zl3073x_dpll_is_p_pin(struct zl3073x_dpll_pin *pin)
+{
+ return zl3073x_is_p_pin(pin->id);
+}
+
+static int
+zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *direction = pin->dir;
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_input_ref_frequency_get - get input reference frequency
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref_id: reference id
+ * @frequency: pointer to variable to store frequency
+ *
+ * Reads frequency of given input reference.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_input_ref_frequency_get(struct zl3073x_dpll *zldpll, u8 ref_id,
+ u32 *frequency)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u16 base, mult, num, denom;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref_id));
+ if (rc)
+ return rc;
+
+ /* Read registers to compute resulting frequency */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &base);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &num);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &denom);
+ if (rc)
+ return rc;
+
+ /* Sanity check that HW has not returned zero denominator */
+ if (!denom) {
+ dev_err(zldev->dev,
+ "Zero divisor for ref %u frequency got from device\n",
+ ref_id);
+ return -EINVAL;
+ }
+
+ /* Compute the frequency */
+ *frequency = mul_u64_u32_div(base * mult, num, denom);
+
+ return rc;
+}
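A minimal standalone sketch (not part of the patch) of the computation above: the four mailbox registers combine as base * mult scaled by the M/N ratio; the values used are purely illustrative.

	#include <stdint.h>
	#include <stdio.h>

	/* freq = base * mult * M / N, with a zero-divisor guard as above */
	static uint64_t ref_freq_hz(uint16_t base, uint16_t mult,
				    uint16_t num, uint16_t denom)
	{
		if (!denom)
			return 0;
		return (uint64_t)base * mult * num / denom;
	}

	int main(void)
	{
		/* e.g. base=10000, mult=1000, M=1, N=1 -> 10 MHz */
		printf("%llu Hz\n",
		       (unsigned long long)ref_freq_hz(10000, 1000, 1, 1));
		return 0;
	}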
+
+static int
+zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 ref, ref_sync_ctrl, sync_mode;
+ u32 esync_div, ref_freq;
+ int rc;
+
+ /* Get reference frequency */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Get ref sync mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Get esync divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &esync_div);
+ if (rc)
+ return rc;
+
+ sync_mode = FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref_sync_ctrl);
+
+ switch (sync_mode) {
+ case ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75:
+ esync->freq = (esync_div == ZL_REF_ESYNC_DIV_1HZ) ? 1 : 0;
+ esync->pulse = 25;
+ break;
+ default:
+ esync->freq = 0;
+ esync->pulse = 0;
+ break;
+ }
+
+ /* If the pin supports esync control expose its range but only
+ * if the current reference frequency is > 1 Hz.
+ */
+ if (pin->esync_control && ref_freq > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 ref, ref_sync_ctrl, sync_mode;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Get ref sync mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Use freq == 0 to disable esync */
+ if (!freq)
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF;
+ else
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75;
+
+ ref_sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
+ ref_sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
+
+ /* Update ref sync control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL, ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ if (freq) {
+ /* 1 Hz is the only supported frequency currently */
+ rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
+ ZL_REF_ESYNC_DIV_1HZ);
+ if (rc)
+ return rc;
+ }
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+static int
+zl3073x_dpll_input_pin_ffo_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *ffo = pin->freq_offset;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u32 ref_freq;
+ u8 ref;
+ int rc;
+
+ /* Read and return ref frequency */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
+ if (!rc)
+ *frequency = ref_freq;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u16 base, mult;
+ u8 ref;
+ int rc;
+
+ /* Get base frequency and multiplier for the requested frequency */
+ rc = zl3073x_ref_freq_factorize(frequency, &base, &mult);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Load reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Update base frequency, multiplier, numerator & denominator */
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE, base);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT, mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M, 1);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N, 1);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+/**
+ * zl3073x_dpll_selected_ref_get - get currently selected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Check for currently selected reference the DPLL should be locked to
+ * and stores its index to given @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 state, value;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* For automatic mode read refsel_status register */
+ rc = zl3073x_read_u8(zldev,
+ ZL_REG_DPLL_REFSEL_STATUS(zldpll->id),
+ &value);
+ if (rc)
+ return rc;
+
+ /* Extract reference state */
+ state = FIELD_GET(ZL_DPLL_REFSEL_STATUS_STATE, value);
+
+ /* Return the reference only if the DPLL is locked to it */
+ if (state == ZL_DPLL_REFSEL_STATUS_STATE_LOCK)
+ *ref = FIELD_GET(ZL_DPLL_REFSEL_STATUS_REFSEL, value);
+ else
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* For manual mode return stored value */
+ *ref = zldpll->forced_ref;
+ break;
+ default:
+ /* For other modes like NCO, freerun... there is no input ref */
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_selected_ref_set - select reference in manual mode
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: input reference to be selected
+ *
+ * Selects the given reference for the DPLL channel it should be
+ * locked to.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_set(struct zl3073x_dpll *zldpll, u8 ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mode, mode_refsel;
+ int rc;
+
+ mode = zldpll->refsel_mode;
+
+ switch (mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Manual mode with ref selected */
+ if (ref == ZL3073X_DPLL_REF_NONE) {
+ switch (zldpll->lock_status) {
+ case DPLL_LOCK_STATUS_LOCKED_HO_ACQ:
+ case DPLL_LOCK_STATUS_HOLDOVER:
+ /* Switch to forced holdover */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER;
+ break;
+ default:
+ /* Switch to freerun */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_FREERUN;
+ break;
+ }
+ /* Keep selected reference */
+ ref = zldpll->forced_ref;
+ } else if (ref == zldpll->forced_ref) {
+ /* No register update - same mode and same ref */
+ return 0;
+ }
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ /* Manual mode with no ref selected */
+ if (ref == ZL3073X_DPLL_REF_NONE)
+ /* No register update - keep current mode */
+ return 0;
+
+ /* Switch to reflock mode and update ref selection */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_REFLOCK;
+ break;
+ default:
+ /* For other modes like automatic or NCO ref cannot be selected
+ * manually
+ */
+ return -EOPNOTSUPP;
+ }
+
+ /* Build mode_refsel value */
+ mode_refsel = FIELD_PREP(ZL_DPLL_MODE_REFSEL_MODE, mode) |
+ FIELD_PREP(ZL_DPLL_MODE_REFSEL_REF, ref);
+
+ /* Update dpll_mode_refsel register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Store new mode and forced reference */
+ zldpll->refsel_mode = mode;
+ zldpll->forced_ref = ref;
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_connected_ref_get - get currently connected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Looks for the reference the DPLL is currently connected (locked) to
+ * and stores its index to the given @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_connected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ int rc;
+
+ /* Get currently selected input reference */
+ rc = zl3073x_dpll_selected_ref_get(zldpll, ref);
+ if (rc)
+ return rc;
+
+ if (ZL3073X_DPLL_REF_IS_VALID(*ref)) {
+ u8 ref_status;
+
+ /* Read the reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(*ref),
+ &ref_status);
+ if (rc)
+ return rc;
+
+ /* If the monitor indicates an error nothing is connected */
+ if (ref_status != ZL_REF_MON_STATUS_OK)
+ *ref = ZL3073X_DPLL_REF_NONE;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, s64 *phase_offset,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 conn_ref, ref, ref_status;
+ s64 ref_phase;
+ int rc;
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_ref);
+ if (rc)
+ return rc;
+
+ /* Report phase offset only for currently connected pin if the phase
+ * monitor feature is disabled.
+ */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ if (!zldpll->phase_monitor && ref != conn_ref) {
+ *phase_offset = 0;
+
+ return 0;
+ }
+
+ /* Get this pin monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &ref_status);
+ if (rc)
+ return rc;
+
+ /* Report phase offset only if the input pin signal is present */
+ if (ref_status != ZL_REF_MON_STATUS_OK) {
+ *phase_offset = 0;
+
+ return 0;
+ }
+
+ ref_phase = pin->phase_offset;
+
+ /* If the DPLL is locked to a reference with a higher frequency than
+ * the given one, the phase offset is reduced modulo the period of
+ * the signal the DPLL is locked to.
+ */
+ if (ZL3073X_DPLL_REF_IS_VALID(conn_ref) && conn_ref != ref) {
+ u32 conn_freq, ref_freq;
+
+ /* Get frequency of connected ref */
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, conn_ref,
+ &conn_freq);
+ if (rc)
+ return rc;
+
+ /* Get frequency of given ref */
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref,
+ &ref_freq);
+ if (rc)
+ return rc;
+
+ if (conn_freq > ref_freq) {
+ s64 conn_period, div_factor;
+
+ conn_period = div_s64(PSEC_PER_SEC, conn_freq);
+ div_factor = div64_s64(ref_phase, conn_period);
+ ref_phase -= conn_period * div_factor;
+ }
+ }
+
+ *phase_offset = ref_phase * DPLL_PHASE_OFFSET_DIVIDER;
+
+ return rc;
+}
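A standalone sketch of the folding step above, assuming picosecond units and the same truncating integer division as div_s64/div64_s64; the frequencies are illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define PSEC_PER_SEC 1000000000000LL

	/* Fold the measured offset into the connected signal's period */
	static int64_t fold_phase(int64_t phase_ps, uint32_t conn_freq,
				  uint32_t ref_freq)
	{
		if (conn_freq > ref_freq) {
			int64_t conn_period = PSEC_PER_SEC / conn_freq;

			phase_ps -= conn_period * (phase_ps / conn_period);
		}
		return phase_ps;
	}

	int main(void)
	{
		/* 10 MHz connected ref -> 100000 ps period;
		 * 250500 ps folds to 50500 ps
		 */
		printf("%lld ps\n", (long long)fold_phase(250500, 10000000, 1));
		return 0;
	}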
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ s64 phase_comp;
+ u8 ref;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Read current phase offset compensation */
+ rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, &phase_comp);
+ if (rc)
+ return rc;
+
+ /* Perform sign extension for 48bit signed value */
+ phase_comp = sign_extend64(phase_comp, 47);
+
+ /* Reverse two's complement negation applied during set and convert
+ * to 32bit signed int
+ */
+ *phase_adjust = (s32)-phase_comp;
+
+ return rc;
+}
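A standalone sketch of the 48-bit handling above: sign-extend the raw register value from bit 47, then undo the negation applied on 'set'; the raw value is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	/* Replicate bit 47 into bits 48..63 (what sign_extend64(v, 47) does) */
	static int64_t sign_extend48(uint64_t raw)
	{
		return (int64_t)(raw << 16) >> 16;
	}

	int main(void)
	{
		uint64_t raw = 0xFFFFFFFFFFF6ULL;	/* 48-bit two's complement -10 */
		int32_t phase_adjust = (int32_t)-sign_extend48(raw);

		printf("%d\n", phase_adjust);		/* prints 10 */
		return 0;
	}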
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ s64 phase_comp;
+ u8 ref;
+ int rc;
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value.
+ */
+ phase_comp = -phase_adjust;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Write the requested value into the compensation register */
+ rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+/**
+ * zl3073x_dpll_ref_prio_get - get priority for given input pin
+ * @pin: pointer to pin
+ * @prio: place to store priority
+ *
+ * Reads current priority for the given input pin and stores the value
+ * to @prio.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_get(struct zl3073x_dpll_pin *pin, u8 *prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value for P&N pins (4 bits/pin) */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2),
+ &ref_prio);
+ if (rc)
+ return rc;
+
+ /* Select nibble according to pin type */
+ if (zl3073x_dpll_is_p_pin(pin))
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_P, ref_prio);
+ else
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_N, ref_prio);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_ref_prio_set - set priority for given input pin
+ * @pin: pointer to pin
+ * @prio: priority to be set
+ *
+ * Sets priority for the given input pin.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_set(struct zl3073x_dpll_pin *pin, u8 prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value shared between P&N pins */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), &ref_prio);
+ if (rc)
+ return rc;
+
+ /* Update nibble according to pin type */
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_P;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_P, prio);
+ } else {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_N;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_N, prio);
+ }
+
+ /* Update reference priority */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), ref_prio);
+ if (rc)
+ return rc;
+
+ /* Commit configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_WR,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+}
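A standalone sketch of the shared priority byte used above: one 4-bit priority per pin, with the P-pin and N-pin of a pair packed into a single register. The concrete nibble assignment (P low, N high) is an assumption for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	#define PRIO_P_MASK 0x0fu	/* assumed: P-pin in the low nibble */
	#define PRIO_N_MASK 0xf0u	/* assumed: N-pin in the high nibble */

	static uint8_t prio_set_p(uint8_t reg, uint8_t prio)
	{
		return (reg & ~PRIO_P_MASK) | (prio & 0x0f);
	}

	static uint8_t prio_set_n(uint8_t reg, uint8_t prio)
	{
		return (reg & ~PRIO_N_MASK) | ((prio & 0x0f) << 4);
	}

	int main(void)
	{
		uint8_t reg = 0;

		reg = prio_set_p(reg, 3);	/* P-pin priority 3 */
		reg = prio_set_n(reg, 7);	/* N-pin priority 7 */
		printf("0x%02x\n", reg);	/* prints 0x73 */
		return 0;
	}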
+
+/**
+ * zl3073x_dpll_ref_state_get - get status for given input pin
+ * @pin: pointer to pin
+ * @state: place to store status
+ *
+ * Checks current status for the given input pin and stores the value
+ * to @state.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
+ enum dpll_pin_state *state)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_conn, status;
+ int rc;
+
+ ref = zl3073x_input_pin_ref_get(pin->id);
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &ref_conn);
+ if (rc)
+ return rc;
+
+ if (ref == ref_conn) {
+ *state = DPLL_PIN_STATE_CONNECTED;
+ return 0;
+ }
+
+ /* If the DPLL is running in automatic mode and the reference is
+ * selectable and its monitor does not report any error then report
+ * pin as selectable.
+ */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ pin->selectable) {
+ /* Read reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
+ &status);
+ if (rc)
+ return rc;
+
+ /* If the monitor indicates errors report the reference
+ * as disconnected
+ */
+ if (status == ZL_REF_MON_STATUS_OK) {
+ *state = DPLL_PIN_STATE_SELECTABLE;
+ return 0;
+ }
+ }
+
+ /* Otherwise report the pin as disconnected */
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ return zl3073x_dpll_ref_state_get(pin, state);
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 new_ref;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ /* Choose the pin as new selected reference */
+ new_ref = zl3073x_input_pin_ref_get(pin->id);
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ /* No reference */
+ new_ref = ZL3073X_DPLL_REF_NONE;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pin state for manual mode");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dpll_selected_ref_set(zldpll, new_ref);
+ break;
+
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ if (pin->selectable)
+ return 0; /* Pin is already selectable */
+
+ /* Restore pin priority in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin, pin->prio);
+ if (rc)
+ return rc;
+
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ if (!pin->selectable)
+ return 0; /* Pin is already disconnected */
+
+ /* Set pin priority to none in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin,
+ ZL_DPLL_REF_PRIO_NONE);
+ if (rc)
+ return rc;
+
+ /* Mark pin as non-selectable */
+ pin->selectable = false;
+ } else {
+ NL_SET_ERR_MSG(extack,
+ "Invalid pin state for automatic mode");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ /* In other modes we cannot change input reference */
+ NL_SET_ERR_MSG(extack,
+ "Pin state cannot be changed in current mode");
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *prio = pin->prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_set(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ int rc;
+
+ if (prio > ZL_DPLL_REF_PRIO_MAX)
+ return -EINVAL;
+
+ /* If the pin is selectable then update HW registers */
+ if (pin->selectable) {
+ rc = zl3073x_dpll_ref_prio_set(pin, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Save priority */
+ pin->prio = prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u32 esync_period, esync_width;
+ u8 clock_type, synth;
+ u8 out, output_mode;
+ u32 output_div;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(zldev, out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Read output divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(dev, "Zero divisor for OUTPUT%u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get synth attached to output pin */
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, output_mode);
+ if (clock_type != ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC) {
+ /* No need to read esync data if it is not enabled */
+ esync->freq = 0;
+ esync->pulse = 0;
+
+ goto finish;
+ }
+
+ /* Read esync period */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &esync_period);
+ if (rc)
+ return rc;
+
+ /* Check esync divisor for zero */
+ if (!esync_period) {
+ dev_err(dev, "Zero esync divisor for OUTPUT%u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get esync pulse width in units of half synth cycles */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, &esync_width);
+ if (rc)
+ return rc;
+
+ /* Compute esync frequency */
+ esync->freq = synth_freq / output_div / esync_period;
+
+ /* By comparing the esync pulse width to half of the output clock
+ * period the esync pulse percentage can be determined.
+ * Note that the half period is in units of half synth cycles, which
+ * is why it reduces down to output_div.
+ */
+ esync->pulse = (50 * esync_width) / output_div;
+
+finish:
+ /* Set supported esync ranges if the pin supports esync control and
+ * if the output frequency is > 1 Hz.
+ */
+ if (pin->esync_control && (synth_freq / output_div) > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return 0;
+}
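A standalone sketch with illustrative numbers showing how the divisors above combine: the esync frequency follows from the synth, output and esync dividers, and the pulse comes out as a percentage of the output clock period.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t synth_freq = 1000000;		/* 1 MHz synth (example) */
		uint32_t output_div = 100;		/* -> 10 kHz output clock */
		uint32_t esync_period = 10000;		/* -> 1 Hz esync */
		uint32_t esync_width = output_div / 2;	/* in half synth cycles */

		uint32_t freq = synth_freq / output_div / esync_period;
		uint32_t pulse = 50 * esync_width / output_div;

		printf("esync %u Hz, %u%% pulse\n", freq, pulse); /* 1 Hz, 25% */
		return 0;
	}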
+
+static int
+zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ u32 esync_period, esync_width, output_div;
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 clock_type, out, output_mode, synth;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(zldev, out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Select clock type */
+ if (freq)
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC;
+ else
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL;
+
+ /* Update clock type in output mode */
+ output_mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
+ output_mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
+ rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, output_mode);
+ if (rc)
+ return rc;
+
+ /* If esync is being disabled just write mailbox and finish */
+ if (!freq)
+ goto write_mailbox;
+
+ /* Get synth attached to output pin */
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(zldev->dev,
+ "Zero divisor for OUTPUT%u got from device\n", out);
+ return -EINVAL;
+ }
+
+ /* Compute and update esync period */
+ esync_period = synth_freq / (u32)freq / output_div;
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, esync_period);
+ if (rc)
+ return rc;
+
+ /* Half of the period in units of 1/2 synth cycle can be represented by
+ * the output_div. To get the supported esync pulse width of 25% of the
+ * period the output_div can just be divided by 2. Note that this
+ * assumes that output_div is even, otherwise some resolution will be
+ * lost.
+ */
+ esync_width = output_div / 2;
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, esync_width);
+ if (rc)
+ return rc;
+
+write_mailbox:
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
+
+static int
+zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u8 out, signal_format, synth;
+ u32 output_div, synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(dev, "Zero divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Read used signal format for the given output */
+ signal_format = zl3073x_out_signal_format_get(zldev, out);
+
+ switch (signal_format) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ /* In case of divided format we have to distinguish between
+ * given output pin type.
+ */
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ /* For P-pin the resulting frequency is computed as
+ * simple division of synth frequency and output
+ * divisor.
+ */
+ *frequency = synth_freq / output_div;
+ } else {
+ /* For N-pin we have to divide additionally by
+ * divisor stored in esync_period output mailbox
+ * register that is used as N-pin divisor for these
+ * modes.
+ */
+ u32 ndiv;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ &ndiv);
+ if (rc)
+ return rc;
+
+ /* Check N-pin divisor for zero */
+ if (!ndiv) {
+ dev_err(dev,
+ "Zero N-pin divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Compute final divisor for N-pin */
+ *frequency = synth_freq / output_div / ndiv;
+ }
+ break;
+ default:
+ /* In other modes the resulting frequency is computed as
+ * division of synth frequency and output divisor.
+ */
+ *frequency = synth_freq / output_div;
+ break;
+ }
+
+ return rc;
+}
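A standalone sketch of the N-divided pair described above: the P-pin runs at synth/output_div and the N-pin divides that further by the value kept in the esync_period register; all numbers are illustrative.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t synth_freq = 1000000;	/* example synth frequency */
		uint32_t output_div = 10;
		uint32_t ndiv = 4;		/* N-divisor from esync_period */

		uint32_t p_freq = synth_freq / output_div;		/* 100000 Hz */
		uint32_t n_freq = synth_freq / output_div / ndiv;	/* 25000 Hz */

		printf("P: %u Hz, N: %u Hz\n", p_freq, n_freq);
		return 0;
	}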
+
+static int
+zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u32 output_n_freq, output_p_freq;
+ u8 out, signal_format, synth;
+ u32 cur_div, new_div, ndiv;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ new_div = synth_freq / (u32)frequency;
+
+ /* Get used signal format for the given output */
+ signal_format = zl3073x_out_signal_format_get(zldev, out);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Load output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Check signal format */
+ if (signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV &&
+ signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV) {
+ /* For non N-divided signal formats the frequency is computed
+ * as division of synth frequency and output divisor.
+ */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev,
+ ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ }
+
+ /* For N-divided signal format get current divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &cur_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!cur_div) {
+ dev_err(dev, "Zero divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get N-pin divisor (shares the same register with esync) */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &ndiv);
+ if (rc)
+ return rc;
+
+ /* Check N-pin divisor for zero */
+ if (!ndiv) {
+ dev_err(dev,
+ "Zero N-pin divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Compute current output frequency for P-pin */
+ output_p_freq = synth_freq / cur_div;
+
+ /* Compute current N-pin frequency */
+ output_n_freq = output_p_freq / ndiv;
+
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ /* We are going to change output frequency for P-pin but
+ * if the requested frequency is less than current N-pin
+ * frequency then indicate a failure as we are not able
+ * to compute N-pin divisor to keep its frequency unchanged.
+ */
+ if (frequency <= output_n_freq)
+ return -EINVAL;
+
+ /* Update the output divisor */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
+ if (rc)
+ return rc;
+
+ /* Compute new divisor for N-pin */
+ ndiv = (u32)frequency / output_n_freq;
+ } else {
+ /* We are going to change frequency of the N-pin but if
+ * the requested freq is greater than or equal to the P-pin freq
+ * in the output pair we cannot compute a divisor for the N-pin.
+ * In this case indicate a failure.
+ */
+ if (output_p_freq <= frequency)
+ return -EINVAL;
+
+ /* Compute new divisor for N-pin */
+ ndiv = output_p_freq / (u32)frequency;
+ }
+
+ /* Update divisor for the N-pin */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, ndiv);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, ndiv);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u32 synth_freq;
+ s32 phase_comp;
+ u8 out, synth;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* Check synth freq for zero */
+ if (!synth_freq) {
+ dev_err(zldev->dev, "Got zero synth frequency for output %u\n",
+ out);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read current output phase compensation */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, &phase_comp);
+ if (rc)
+ return rc;
+
+ /* Value in register is expressed in half synth clock cycles */
+ phase_comp *= (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+
+ /* Reverse two's complement negation applied during 'set' */
+ *phase_adjust = -phase_comp;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ int half_synth_cycle;
+ u32 synth_freq;
+ u8 out, synth;
+ int rc;
+
+ /* Get attached synth */
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth's frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* The value in the register is expressed in half synth clock cycles
+ * so the given phase adjustment has to be a multiple of a half synth
+ * clock cycle.
+ */
+ half_synth_cycle = (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+
+ if ((phase_adjust % half_synth_cycle) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase adjustment value has to be multiple of %d",
+ half_synth_cycle);
+ return -EINVAL;
+ }
+ phase_adjust /= half_synth_cycle;
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value.
+ */
+ phase_adjust = -phase_adjust;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Write the requested value into the compensation register */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, phase_adjust);
+ if (rc)
+ return rc;
+
+ /* Update output configuration from mailbox */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
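A standalone sketch of the granularity rule above: the compensation register counts half synth cycles, so a requested adjustment must be a multiple of that unit; the synth frequency is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define PSEC_PER_SEC 1000000000000ULL

	int main(void)
	{
		uint32_t synth_freq = 500000000;	/* 500 MHz synth */
		int half_cycle = (int)(PSEC_PER_SEC / (2ULL * synth_freq));
		int phase_adjust = 3000;		/* requested, in ps */

		printf("half cycle: %d ps\n", half_cycle);	/* 1000 ps */
		if (phase_adjust % half_cycle)
			printf("rejected: not a multiple\n");
		else
			printf("register value: %d\n",
			       -(phase_adjust / half_cycle));	/* -3 */
		return 0;
	}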
+
+static int
+zl3073x_dpll_output_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ /* If the output pin is registered then it is always connected */
+ *state = DPLL_PIN_STATE_CONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mon_status, state;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ /* In FREERUN and NCO modes the DPLL is always unlocked */
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+
+ return 0;
+ default:
+ break;
+ }
+
+ /* Read DPLL monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MON_STATUS(zldpll->id),
+ &mon_status);
+ if (rc)
+ return rc;
+ state = FIELD_GET(ZL_DPLL_MON_STATUS_STATE, mon_status);
+
+ switch (state) {
+ case ZL_DPLL_MON_STATUS_STATE_LOCK:
+ if (FIELD_GET(ZL_DPLL_MON_STATUS_HO_READY, mon_status))
+ *status = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *status = DPLL_LOCK_STATUS_LOCKED;
+ break;
+ case ZL_DPLL_MON_STATUS_STATE_HOLDOVER:
+ case ZL_DPLL_MON_STATUS_STATE_ACQUIRING:
+ *status = DPLL_LOCK_STATUS_HOLDOVER;
+ break;
+ default:
+ dev_warn(zldev->dev, "Unknown DPLL monitor status: 0x%02x\n",
+ mon_status);
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Use MANUAL for device FREERUN, HOLDOVER, NCO and
+ * REFLOCK modes
+ */
+ *mode = DPLL_MODE_MANUAL;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* Use AUTO for device AUTO mode */
+ *mode = DPLL_MODE_AUTOMATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_get(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ *factor = zl3073x_dev_phase_avg_factor_get(zldpll->dev);
+
+ return 0;
+}
+
+static void
+zl3073x_dpll_change_work(struct work_struct *work)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = container_of(work, struct zl3073x_dpll, change_work);
+ dpll_device_change_ntf(zldpll->dpll_dev);
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *item, *zldpll = dpll_priv;
+ int rc;
+
+ if (factor > 15) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase offset average factor has to be from range <0,15>");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dev_phase_avg_factor_set(zldpll->dev, factor);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Failed to set phase offset averaging factor");
+ return rc;
+ }
+
+ /* The averaging factor is common for all DPLL channels so after a
+ * change we have to send a notification for the other DPLL devices.
+ */
+ list_for_each_entry(item, &zldpll->dev->dplls, list) {
+ if (item != zldpll)
+ schedule_work(&item->change_work);
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ if (zldpll->phase_monitor)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ zldpll->phase_monitor = (state == DPLL_FEATURE_STATE_ENABLE);
+
+ return 0;
+}
+
+static const struct dpll_pin_ops zl3073x_dpll_input_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_input_pin_esync_get,
+ .esync_set = zl3073x_dpll_input_pin_esync_set,
+ .ffo_get = zl3073x_dpll_input_pin_ffo_get,
+ .frequency_get = zl3073x_dpll_input_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_input_pin_frequency_set,
+ .phase_offset_get = zl3073x_dpll_input_pin_phase_offset_get,
+ .phase_adjust_get = zl3073x_dpll_input_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_input_pin_phase_adjust_set,
+ .prio_get = zl3073x_dpll_input_pin_prio_get,
+ .prio_set = zl3073x_dpll_input_pin_prio_set,
+ .state_on_dpll_get = zl3073x_dpll_input_pin_state_on_dpll_get,
+ .state_on_dpll_set = zl3073x_dpll_input_pin_state_on_dpll_set,
+};
+
+static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_output_pin_esync_get,
+ .esync_set = zl3073x_dpll_output_pin_esync_set,
+ .frequency_get = zl3073x_dpll_output_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_output_pin_frequency_set,
+ .phase_adjust_get = zl3073x_dpll_output_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_output_pin_phase_adjust_set,
+ .state_on_dpll_get = zl3073x_dpll_output_pin_state_on_dpll_get,
+};
+
+static const struct dpll_device_ops zl3073x_dpll_device_ops = {
+ .lock_status_get = zl3073x_dpll_lock_status_get,
+ .mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
+ .phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
+ .phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
+ .phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
+};
+
+/**
+ * zl3073x_dpll_pin_alloc - allocate DPLL pin
+ * @zldpll: pointer to zl3073x_dpll
+ * @dir: pin direction
+ * @id: pin id
+ *
+ * Allocates and initializes zl3073x_dpll_pin structure for given
+ * pin id and direction.
+ *
+ * Return: pointer to allocated structure on success, error pointer on error
+ */
+static struct zl3073x_dpll_pin *
+zl3073x_dpll_pin_alloc(struct zl3073x_dpll *zldpll, enum dpll_pin_direction dir,
+ u8 id)
+{
+ struct zl3073x_dpll_pin *pin;
+
+ pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+ if (!pin)
+ return ERR_PTR(-ENOMEM);
+
+ pin->dpll = zldpll;
+ pin->dir = dir;
+ pin->id = id;
+
+ return pin;
+}
+
+/**
+ * zl3073x_dpll_pin_free - deallocate DPLL pin
+ * @pin: pin to free
+ *
+ * Deallocates DPLL pin previously allocated by @zl3073x_dpll_pin_alloc.
+ */
+static void
+zl3073x_dpll_pin_free(struct zl3073x_dpll_pin *pin)
+{
+ WARN(pin->dpll_pin, "DPLL pin is still registered\n");
+
+ kfree(pin);
+}
+
+/**
+ * zl3073x_dpll_pin_register - register DPLL pin
+ * @pin: pointer to DPLL pin
+ * @index: absolute pin index for registration
+ *
+ * Registers given DPLL pin into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_pin_props *props;
+ const struct dpll_pin_ops *ops;
+ int rc;
+
+ /* Get pin properties */
+ props = zl3073x_pin_props_get(zldpll->dev, pin->dir, pin->id);
+ if (IS_ERR(props))
+ return PTR_ERR(props);
+
+ /* Save package label & esync capability */
+ strscpy(pin->label, props->package_label);
+ pin->esync_control = props->esync_control;
+
+ if (zl3073x_dpll_is_input_pin(pin)) {
+ rc = zl3073x_dpll_ref_prio_get(pin, &pin->prio);
+ if (rc)
+ goto err_prio_get;
+
+ if (pin->prio == ZL_DPLL_REF_PRIO_NONE) {
+ /* Clamp prio to max value & mark pin non-selectable */
+ pin->prio = ZL_DPLL_REF_PRIO_MAX;
+ pin->selectable = false;
+ } else {
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ }
+ }
+
+ /* Create or get existing DPLL pin */
+ pin->dpll_pin = dpll_pin_get(zldpll->dev->clock_id, index, THIS_MODULE,
+ &props->dpll_props);
+ if (IS_ERR(pin->dpll_pin)) {
+ rc = PTR_ERR(pin->dpll_pin);
+ goto err_pin_get;
+ }
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Register the pin */
+ rc = dpll_pin_register(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+ if (rc)
+ goto err_register;
+
+ /* Free pin properties */
+ zl3073x_pin_props_put(props);
+
+ return 0;
+
+err_register:
+ dpll_pin_put(pin->dpll_pin);
+err_pin_get:
+ pin->dpll_pin = NULL;
+err_prio_get:
+ zl3073x_pin_props_put(props);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_pin_unregister - unregister DPLL pin
+ * @pin: pointer to DPLL pin
+ *
+ * Unregisters pin previously registered by @zl3073x_dpll_pin_register.
+ */
+static void
+zl3073x_dpll_pin_unregister(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ const struct dpll_pin_ops *ops;
+
+ WARN(!pin->dpll_pin, "DPLL pin is not registered\n");
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Unregister the pin */
+ dpll_pin_unregister(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+
+ dpll_pin_put(pin->dpll_pin);
+ pin->dpll_pin = NULL;
+}
+
+/**
+ * zl3073x_dpll_pins_unregister - unregister all registered DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all DPLL pins registered to given DPLL device and
+ * unregisters them.
+ */
+static void
+zl3073x_dpll_pins_unregister(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin, *next;
+
+ list_for_each_entry_safe(pin, next, &zldpll->pins, list) {
+ zl3073x_dpll_pin_unregister(pin);
+ list_del(&pin->list);
+ zl3073x_dpll_pin_free(pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_pin_is_registrable - check if the pin is registrable
+ * @zldpll: pointer to zl3073x_dpll structure
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * Checks if the given pin can be registered to the given DPLL. For both
+ * directions the pin can be registered only if it is enabled. In case of
+ * a differential signal type only the P-pin is reported as registrable.
+ * Additionally an output pin can be registered only if it is connected
+ * to a synthesizer that is driven by the given DPLL.
+ *
+ * Return: true if the pin is registrable, false if not
+ */
+static bool
+zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
+ enum dpll_pin_direction dir, u8 index)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ bool is_diff, is_enabled;
+ const char *name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref = zl3073x_input_pin_ref_get(index);
+
+ name = "REF";
+
+ /* Skip the pin if the DPLL is running in NCO mode */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_NCO)
+ return false;
+
+ is_diff = zl3073x_ref_is_diff(zldev, ref);
+ is_enabled = zl3073x_ref_is_enabled(zldev, ref);
+ } else {
+ /* Output P&N pair shares single HW output */
+ u8 out = zl3073x_output_pin_out_get(index);
+
+ name = "OUT";
+
+ /* Skip the pin if it is connected to different DPLL channel */
+ if (zl3073x_out_dpll_get(zldev, out) != zldpll->id) {
+ dev_dbg(zldev->dev,
+ "%s%u is driven by different DPLL\n", name,
+ out);
+
+ return false;
+ }
+
+ is_diff = zl3073x_out_is_diff(zldev, out);
+ is_enabled = zl3073x_out_is_enabled(zldev, out);
+ }
+
+ /* Skip N-pin if the corresponding input/output is differential */
+ if (is_diff && zl3073x_is_n_pin(index)) {
+ dev_dbg(zldev->dev, "%s%u is differential, skipping N-pin\n",
+ name, index / 2);
+
+ return false;
+ }
+
+ /* Skip the pin if it is disabled */
+ if (!is_enabled) {
+ dev_dbg(zldev->dev, "%s%u%c is disabled\n", name, index / 2,
+ zl3073x_is_p_pin(index) ? 'P' : 'N');
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * zl3073x_dpll_pins_register - register all registrable DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all possible input/output pins and registers all of them
+ * that are registrable.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pins_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin;
+ enum dpll_pin_direction dir;
+ u8 id, index;
+ int rc;
+
+ /* Process all input and output pins */
+ for (index = 0; index < ZL3073X_NUM_PINS; index++) {
+ /* First input pins and then output pins */
+ if (index < ZL3073X_NUM_INPUT_PINS) {
+ id = index;
+ dir = DPLL_PIN_DIRECTION_INPUT;
+ } else {
+ id = index - ZL3073X_NUM_INPUT_PINS;
+ dir = DPLL_PIN_DIRECTION_OUTPUT;
+ }
+
+ /* Check if the pin is registrable to this DPLL */
+ if (!zl3073x_dpll_pin_is_registrable(zldpll, dir, id))
+ continue;
+
+ pin = zl3073x_dpll_pin_alloc(zldpll, dir, id);
+ if (IS_ERR(pin)) {
+ rc = PTR_ERR(pin);
+ goto error;
+ }
+
+ rc = zl3073x_dpll_pin_register(pin, index);
+ if (rc)
+ goto error;
+
+ list_add(&pin->list, &zldpll->pins);
+ }
+
+ return 0;
+
+error:
+ zl3073x_dpll_pins_unregister(zldpll);
+
+ return rc;
+}
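A standalone sketch of the flat index space used above: inputs occupy the low indexes, outputs follow; the pin counts here are assumptions for illustration only.

	#include <stdio.h>

	#define NUM_INPUT_PINS	10	/* assumed count */
	#define NUM_PINS	30	/* assumed total (inputs + outputs) */

	int main(void)
	{
		for (int index = 0; index < NUM_PINS; index++) {
			int input = index < NUM_INPUT_PINS;
			int id = input ? index : index - NUM_INPUT_PINS;

			printf("index %2d -> %s id %d\n",
			       index, input ? "input" : "output", id);
		}
		return 0;
	}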
+
+/**
+ * zl3073x_dpll_device_register - register DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers given DPLL device into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_device_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 dpll_mode_refsel;
+ int rc;
+
+ /* Read DPLL mode and forcibly selected reference */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ &dpll_mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Extract mode and selected input reference */
+ zldpll->refsel_mode = FIELD_GET(ZL_DPLL_MODE_REFSEL_MODE,
+ dpll_mode_refsel);
+ zldpll->forced_ref = FIELD_GET(ZL_DPLL_MODE_REFSEL_REF,
+ dpll_mode_refsel);
+
+ zldpll->dpll_dev = dpll_device_get(zldev->clock_id, zldpll->id,
+ THIS_MODULE);
+ if (IS_ERR(zldpll->dpll_dev)) {
+ rc = PTR_ERR(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+
+ return rc;
+ }
+
+ rc = dpll_device_register(zldpll->dpll_dev,
+ zl3073x_prop_dpll_type_get(zldev, zldpll->id),
+ &zl3073x_dpll_device_ops, zldpll);
+ if (rc) {
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_device_unregister - unregister DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters the given DPLL device, previously registered by
+ * @zl3073x_dpll_device_register, from the DPLL sub-system.
+ */
+static void
+zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
+{
+ WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+
+ cancel_work_sync(&zldpll->change_work);
+
+ dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
+ zldpll);
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+}
+
+/**
+ * zl3073x_dpll_pin_phase_offset_check - check for pin phase offset change
+ * @pin: pin to check
+ *
+ * Checks whether the phase offset between the DPLL and the given pin
+ * has changed.
+ *
+ * Return: true on phase offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_phase_offset_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ unsigned int reg;
+ s64 phase_offset;
+ u8 ref;
+ int rc;
+
+ ref = zl3073x_input_pin_ref_get(pin->id);
+
+ /* Select register to read phase offset value depending on pin and
+ * phase monitor state:
+ * 1) For connected pin use dpll_phase_err_data register
+ * 2) For other pins use appropriate ref_phase register if the phase
+ * monitor feature is enabled and reference monitor does not
+ * report signal errors for given input pin
+ */
+ if (pin->pin_state == DPLL_PIN_STATE_CONNECTED) {
+ reg = ZL_REG_DPLL_PHASE_ERR_DATA(zldpll->id);
+ } else if (zldpll->phase_monitor) {
+ u8 status;
+
+ /* Get reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
+ &status);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to read %s refmon status: %pe\n",
+ pin->label, ERR_PTR(rc));
+
+ return false;
+ }
+
+ if (status != ZL_REF_MON_STATUS_OK)
+ return false;
+
+ reg = ZL_REG_REF_PHASE(ref);
+ } else {
+ /* The pin is not connected or phase monitor disabled */
+ return false;
+ }
+
+ /* Read measured phase offset value */
+ rc = zl3073x_read_u48(zldev, reg, &phase_offset);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read ref phase offset: %pe\n",
+ ERR_PTR(rc));
+
+ return false;
+ }
+
+ /* Convert to ps */
+ phase_offset = div_s64(sign_extend64(phase_offset, 47), 100);
+
+ /* Compare with previous value */
+ if (phase_offset != pin->phase_offset) {
+ dev_dbg(zldev->dev, "%s phase offset changed: %lld -> %lld\n",
+ pin->label, pin->phase_offset, phase_offset);
+ pin->phase_offset = phase_offset;
+
+ return true;
+ }
+
+ return false;
+}
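A standalone sketch of the conversion above: sign-extend the 48-bit latch value and divide by 100, i.e. the register unit is assumed to be 0.01 ps; the raw value is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t raw = 0xFFFFFFFFD8F0ULL;	/* 48-bit raw latch value */
		int64_t v = (int64_t)(raw << 16) >> 16;	/* sign-extend bit 47 */

		printf("%lld ps\n", (long long)(v / 100));	/* prints -100 ps */
		return 0;
	}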
+
+/**
+ * zl3073x_dpll_pin_ffo_check - check for pin fractional frequency offset change
+ * @pin: pin to check
+ *
+ * Checks for a change of the given pin's fractional frequency offset.
+ *
+ * Return: true on fractional frequency offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_ffo_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, status;
+ s64 ffo;
+ int rc;
+
+ /* Get reference monitor status */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &status);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read %s refmon status: %pe\n",
+ pin->label, ERR_PTR(rc));
+
+ return false;
+ }
+
+ /* Do not report ffo changes if the reference monitor reports errors */
+ if (status != ZL_REF_MON_STATUS_OK)
+ return false;
+
+ /* Get the latest measured ref's ffo */
+ ffo = zl3073x_ref_ffo_get(zldev, ref);
+
+ /* Compare with previous value */
+ if (pin->freq_offset != ffo) {
+ dev_dbg(zldev->dev, "%s freq offset changed: %lld -> %lld\n",
+ pin->label, pin->freq_offset, ffo);
+ pin->freq_offset = ffo;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_dpll_changes_check - check for changes and send notifications
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Checks for changes on the given DPLL device and its registered DPLL pins
+ * and sends notifications about them.
+ *
+ * This function is called periodically from zl3073x_dev_periodic_work().
+ */
+void
+zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ enum dpll_lock_status lock_status;
+ struct device *dev = zldev->dev;
+ struct zl3073x_dpll_pin *pin;
+ int rc;
+
+ zldpll->check_count++;
+
+ /* Get current lock status for the DPLL */
+ rc = zl3073x_dpll_lock_status_get(zldpll->dpll_dev, zldpll,
+ &lock_status, NULL, NULL);
+ if (rc) {
+ dev_err(dev, "Failed to get DPLL%u lock status: %pe\n",
+ zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ /* If lock status was changed then notify DPLL core */
+ if (zldpll->lock_status != lock_status) {
+ zldpll->lock_status = lock_status;
+ dpll_device_change_ntf(zldpll->dpll_dev);
+ }
+
+ /* Input pin monitoring makes sense only in automatic or forced
+ * reference modes.
+ */
+ if (zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_REFLOCK)
+ return;
+
+ /* Update phase offset latch registers for this DPLL if the phase
+ * offset monitor feature is enabled.
+ */
+ if (zldpll->phase_monitor) {
+ rc = zl3073x_ref_phase_offsets_update(zldev, zldpll->id);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+ return;
+ }
+ }
+
+ list_for_each_entry(pin, &zldpll->pins, list) {
+ enum dpll_pin_state state;
+ bool pin_changed = false;
+
+ /* Change checks are not necessary for output pins because output
+ * states are constant.
+ */
+ if (!zl3073x_dpll_is_input_pin(pin))
+ continue;
+
+ rc = zl3073x_dpll_ref_state_get(pin, &state);
+ if (rc) {
+ dev_err(dev,
+ "Failed to get %s on DPLL%u state: %pe\n",
+ pin->label, zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ if (state != pin->pin_state) {
+ dev_dbg(dev, "%s state changed: %u->%u\n", pin->label,
+ pin->pin_state, state);
+ pin->pin_state = state;
+ pin_changed = true;
+ }
+
+ /* Check for phase offset and ffo change once per second */
+ if (zldpll->check_count % 2 == 0) {
+ if (zl3073x_dpll_pin_phase_offset_check(pin))
+ pin_changed = true;
+
+ if (zl3073x_dpll_pin_ffo_check(pin))
+ pin_changed = true;
+ }
+
+ if (pin_changed)
+ dpll_pin_change_ntf(pin->dpll_pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_init_fine_phase_adjust - do initial fine phase adjustments
+ * @zldev: pointer to zl3073x device
+ *
+ * Performs the initial fine phase adjustments required by the datasheet.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev)
+{
+ int rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_MASK, 0x1f);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_INTVL, 0x01);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u16(zldev, ZL_REG_SYNTH_PHASE_SHIFT_DATA, 0xffff);
+ if (rc)
+ return rc;
+
+ return zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_CTRL, 0x01);
+}
+
+/**
+ * zl3073x_dpll_alloc - allocate DPLL device
+ * @zldev: pointer to zl3073x device
+ * @ch: DPLL channel number
+ *
+ * Allocates DPLL device structure for given DPLL channel.
+ *
+ * Return: pointer to DPLL device on success, error pointer on error
+ */
+struct zl3073x_dpll *
+zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = kzalloc(sizeof(*zldpll), GFP_KERNEL);
+ if (!zldpll)
+ return ERR_PTR(-ENOMEM);
+
+ zldpll->dev = zldev;
+ zldpll->id = ch;
+ INIT_LIST_HEAD(&zldpll->pins);
+ INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);
+
+ return zldpll;
+}
+
+/**
+ * zl3073x_dpll_free - free DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Deallocates the given DPLL device previously allocated by
+ * zl3073x_dpll_alloc().
+ */
+void
+zl3073x_dpll_free(struct zl3073x_dpll *zldpll)
+{
+ WARN(zldpll->dpll_dev, "DPLL device is still registered\n");
+
+ kfree(zldpll);
+}
+
+/**
+ * zl3073x_dpll_register - register DPLL device and all its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers the given DPLL device and all its pins with the DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_register(struct zl3073x_dpll *zldpll)
+{
+ int rc;
+
+ rc = zl3073x_dpll_device_register(zldpll);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_dpll_pins_register(zldpll);
+ if (rc) {
+ zl3073x_dpll_device_unregister(zldpll);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_unregister - unregister DPLL device and its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters the given DPLL device and all its pins, previously
+ * registered by zl3073x_dpll_register(), from the DPLL sub-system.
+ */
+void
+zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll)
+{
+ /* Unregister all pins and the DPLL device */
+ zl3073x_dpll_pins_unregister(zldpll);
+ zl3073x_dpll_device_unregister(zldpll);
+}
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
new file mode 100644
index 000000000000..e8c39b44b356
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DPLL_H
+#define _ZL3073X_DPLL_H
+
+#include <linux/dpll.h>
+#include <linux/list.h>
+
+#include "core.h"
+
+/**
+ * struct zl3073x_dpll - ZL3073x DPLL sub-device structure
+ * @list: this DPLL list entry
+ * @dev: pointer to multi-function parent device
+ * @id: DPLL index
+ * @refsel_mode: reference selection mode
+ * @forced_ref: selected reference in forced reference lock mode
+ * @check_count: periodic check counter
+ * @phase_monitor: is phase offset monitor enabled
+ * @dpll_dev: pointer to registered DPLL device
+ * @lock_status: last saved DPLL lock status
+ * @pins: list of pins
+ * @change_work: device change notification work
+ */
+struct zl3073x_dpll {
+ struct list_head list;
+ struct zl3073x_dev *dev;
+ u8 id;
+ u8 refsel_mode;
+ u8 forced_ref;
+ u8 check_count;
+ bool phase_monitor;
+ struct dpll_device *dpll_dev;
+ enum dpll_lock_status lock_status;
+ struct list_head pins;
+ struct work_struct change_work;
+};
+
+struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
+void zl3073x_dpll_free(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_register(struct zl3073x_dpll *zldpll);
+void zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev);
+void zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll);
+
+#endif /* _ZL3073X_DPLL_H */
diff --git a/drivers/dpll/zl3073x/flash.c b/drivers/dpll/zl3073x/flash.c
new file mode 100644
index 000000000000..83452a77e3e9
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/netlink.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "flash.h"
+
+#define ZL_FLASH_ERR_PFX "FW update failed: "
+#define ZL_FLASH_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL_FLASH_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+/**
+ * zl3073x_flash_download - Download image block to device memory
+ * @zldev: zl3073x device structure
+ * @component: name of the component to be downloaded
+ * @addr: device memory target address
+ * @data: pointer to data to download
+ * @size: size of data to download
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_download(struct zl3073x_dev *zldev, const char *component,
+ u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_CHECK_DELAY 5000 /* Check for interrupt every 5 seconds */
+ unsigned long check_time;
+ const void *ptr, *end;
+ int rc = 0;
+
+ dev_dbg(zldev->dev, "Downloading %zu bytes to device memory at 0x%0x\n",
+ size, addr);
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += 4, addr += 4) {
+ /* Write current word to HW memory */
+ rc = zl3073x_write_hwreg(zldev, addr,
+ get_unaligned((u32 *)ptr));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack,
+ "failed to write to memory at 0x%x",
+ addr);
+ return rc;
+ }
+
+ if (time_is_before_jiffies(check_time)) {
+ if (signal_pending(current)) {
+ ZL_FLASH_ERR_MSG(extack,
+ "Flashing interrupted");
+ return -EINTR;
+ }
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+ }
+
+ /* Report status after every 1 kB block */
+ if ((ptr - data) % 1024 == 0)
+ zl3073x_devlink_flash_notify(zldev, "Downloading image",
+ component, ptr - data,
+ size);
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Downloading image", component,
+ ptr - data, size);
+
+ dev_dbg(zldev->dev, "%zu bytes downloaded to device memory\n", size);
+
+ return rc;
+}
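+
+/*
+ * Note: the download loop above transfers whole 32-bit words, so @size is
+ * assumed to be a multiple of 4; component sizes built by fw.c are always
+ * whole words (count * sizeof(u32)).
+ */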
+
+/**
+ * zl3073x_flash_error_check - Check for flash utility errors
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function checks for errors detected by the flash utility and
+ * reports them if any were found.
+ *
+ * Return: 0 on success, -EIO when errors are detected
+ */
+static int
+zl3073x_flash_error_check(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ u32 count, cause;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_COUNT, &count);
+ if (rc)
+ return rc;
+ else if (!count)
+ return 0; /* No error */
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_CAUSE, &cause);
+ if (rc)
+ return rc;
+
+ /* Report errors */
+ ZL_FLASH_ERR_MSG(extack,
+ "utility error occurred: count=%u cause=0x%x", count,
+ cause);
+
+ return -EIO;
+}
+
+/**
+ * zl3073x_flash_wait_ready - Check or wait for utility to be ready to flash
+ * @zldev: zl3073x device structure
+ * @timeout_ms: timeout for the waiting
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_wait_ready(struct zl3073x_dev *zldev, unsigned int timeout_ms)
+{
+#define ZL_FLASH_POLL_DELAY_MS 100
+ unsigned long timeout;
+ int rc, i;
+
+ dev_dbg(zldev->dev, "Waiting for flashing to be ready\n");
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ for (i = 0; time_is_after_jiffies(timeout); i++) {
+ u8 value;
+
+ /* Check for interruption every second */
+ if (i > 9) {
+ if (signal_pending(current))
+ return -EINTR;
+ i = 0;
+ }
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value = FIELD_GET(ZL_WRITE_FLASH_OP, value);
+
+ if (value == ZL_WRITE_FLASH_OP_DONE)
+ return 0; /* Successfully done */
+
+ msleep(ZL_FLASH_POLL_DELAY_MS);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * zl3073x_flash_cmd_wait - Perform flash operation and wait for finish
+ * @zldev: zl3073x device structure
+ * @operation: operation to perform
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_cmd_wait(struct zl3073x_dev *zldev, u32 operation,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_PHASE1_TIMEOUT_MS 60000 /* up to 1 minute */
+#define ZL_FLASH_PHASE2_TIMEOUT_MS 120000 /* up to 2 minutes */
+ u8 value;
+ int rc;
+
+ dev_dbg(zldev->dev, "Sending flash command: 0x%x\n", operation);
+
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE1_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ /* Issue the requested operation */
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value &= ~ZL_WRITE_FLASH_OP;
+ value |= FIELD_PREP(ZL_WRITE_FLASH_OP, operation);
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_WRITE_FLASH, value);
+ if (rc)
+ return rc;
+
+ /* Wait for command completion */
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE2_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ return zl3073x_flash_error_check(zldev, extack);
+}
+
+/**
+ * zl3073x_flash_get_sector_size - Get flash sector size
+ * @zldev: zl3073x device structure
+ * @sector_size: sector size returned by the function
+ *
+ * The function reads the flash sector size detected by the flash utility
+ * and stores it in @sector_size.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_get_sector_size(struct zl3073x_dev *zldev, size_t *sector_size)
+{
+ u8 flash_info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_INFO, &flash_info);
+ if (rc)
+ return rc;
+
+ switch (FIELD_GET(ZL_FLASH_INFO_SECTOR_SIZE, flash_info)) {
+ case ZL_FLASH_INFO_SECTOR_4K:
+ *sector_size = SZ_4K;
+ break;
+ case ZL_FLASH_INFO_SECTOR_64K:
+ *sector_size = SZ_64K;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_block - Download and flash memory block
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @operation: flash operation to perform
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function downloads the memory block given by the @data pointer and
+ * the size @size and flashes it into the internal flash memory at flash
+ * page @page. The flash operation performed by the firmware is specified
+ * by the @operation parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_block(struct zl3073x_dev *zldev, const char *component,
+ u32 operation, u32 page, u32 addr, const void *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Download block to device memory */
+ rc = zl3073x_flash_download(zldev, component, addr, data, size, extack);
+ if (rc)
+ return rc;
+
+ /* Set address to flash from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_START_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set size of block to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_SIZE, size);
+ if (rc)
+ return rc;
+
+ /* Set destination page to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, page);
+ if (rc)
+ return rc;
+
+ /* Set filling pattern */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FILL_PATTERN, U32_MAX);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, 0,
+ size);
+
+ dev_dbg(zldev->dev, "Flashing %zu bytes to page %u\n", size, page);
+
+ /* Execute sectors flash operation */
+ rc = zl3073x_flash_cmd_wait(zldev, operation, extack);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, size,
+ size);
+
+ return 0;
+}
+
+/**
+ * zl3073x_flash_sectors - Flash sectors
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes the given @data of size @size to the internal
+ * flash memory block starting at page @page. It uses the sector flash
+ * method and has to take into account the flash sector size reported by
+ * the flash utility. The input data are split into blocks according to
+ * this sector size and each block is flashed separately.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_MAX_BLOCK_SIZE 0x0001E000
+#define ZL_FLASH_PAGE_SIZE 256
+ size_t max_block_size, block_size, sector_size;
+ const void *ptr, *end;
+ int rc;
+
+ /* Get flash sector size */
+ rc = zl3073x_flash_get_sector_size(zldev, &sector_size);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "Failed to get flash sector size");
+ return rc;
+ }
+
+ /* Determine max block size depending on sector size */
+ max_block_size = ALIGN_DOWN(ZL_FLASH_MAX_BLOCK_SIZE, sector_size);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += block_size) {
+ char comp_str[32];
+
+ block_size = min_t(size_t, max_block_size, end - ptr);
+
+ /* Add suffix '-partN' if the requested component size is
+ * greater than max_block_size.
+ */
+ if (max_block_size < size)
+ snprintf(comp_str, sizeof(comp_str), "%s-part%zu",
+ component, (ptr - data) / max_block_size + 1);
+ else
+ strscpy(comp_str, component);
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, comp_str,
+ ZL_WRITE_FLASH_OP_SECTORS, page, addr,
+ ptr, block_size, extack);
+ if (rc)
+ goto finish;
+
+ /* Move to next page */
+ page += block_size / ZL_FLASH_PAGE_SIZE;
+ }
+
+finish:
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
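+
+/*
+ * Worked example (illustrative): with a 4 KiB flash sector the maximum
+ * block size is ALIGN_DOWN(0x1E000, SZ_4K) = 0x1E000 bytes, so a
+ * 0x40000-byte component is flashed as "<component>-part1" ..
+ * "<component>-part3" and the destination page advances by
+ * block_size / ZL_FLASH_PAGE_SIZE after each block.
+ */
+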
+
+/**
+ * zl3073x_flash_page - Flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes the given @data of size @size to the internal
+ * flash memory block starting at page @page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, component, ZL_WRITE_FLASH_OP_PAGE, page,
+ addr, data, size, extack);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page_copy - Copy flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @src_page: source page to copy
+ * @dst_page: destination page
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function copies one flash page specified by @src_page into the flash
+ * page specified by @dst_page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Set source page to be copied */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_READ, src_page);
+ if (rc)
+ return rc;
+
+ /* Set destination page for the copy */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, dst_page);
+ if (rc)
+ return rc;
+
+ /* Perform copy operation */
+ rc = zl3073x_flash_cmd_wait(zldev, ZL_WRITE_FLASH_OP_COPY_PAGE, extack);
+ if (rc)
+ ZL_FLASH_ERR_MSG(extack, "Failed to copy page %u to page %u",
+ src_page, dst_page);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_verify - Check that the flash utility is running
+ * @zldev: zl3073x device structure
+ *
+ * Return: 0 if the flash utility is ready, <0 on error
+ */
+static int
+zl3073x_flash_mode_verify(struct zl3073x_dev *zldev)
+{
+ u8 family, release;
+ u32 hash;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_FLASH_HASH, &hash);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_FAMILY, &family);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_RELEASE, &release);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev,
+ "Flash utility check: hash 0x%08x, fam 0x%02x, rel 0x%02x\n",
+ hash, family, release);
+
+ /* Return success for correct family */
+ return (family == 0x21) ? 0 : -ENODEV;
+}
+
+static int
+zl3073x_flash_host_ctrl_enable(struct zl3073x_dev *zldev)
+{
+ u8 host_ctrl;
+ int rc;
+
+ /* Enable host control */
+ rc = zl3073x_read_u8(zldev, ZL_REG_HOST_CONTROL, &host_ctrl);
+ if (rc)
+ return rc;
+
+ host_ctrl |= ZL_HOST_CONTROL_ENABLE;
+
+ return zl3073x_write_u8(zldev, ZL_REG_HOST_CONTROL, host_ctrl);
+}
+
+/**
+ * zl3073x_flash_mode_enter - Switch the device to flash mode
+ * @zldev: zl3073x device structure
+ * @util_ptr: buffer with flash utility
+ * @util_size: size of buffer with flash utility
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function prepares and switches the device into flash mode.
+ *
+ * The procedure:
+ * 1) Stop device CPU by specific HW register sequence
+ * 2) Download flash utility to device memory
+ * 3) Resume device CPU by specific HW register sequence
+ * 4) Check communication with flash utility
+ * 5) Enable host control necessary to access flash API
+ * 6) Check for potential error detected by the utility
+ *
+ * The API provided by the normal firmware is not available in flash mode
+ * so the caller has to ensure that this API is not used in this mode.
+ *
+ * After performing the flash operation the caller should call
+ * zl3073x_flash_mode_leave() to return to normal operation.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written prior to utility download */
+ static const struct zl3073x_hwreg_seq_item pre_seq[] = {
+ HWREG_SEQ_ITEM(0x80000400, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80206340, 1, BIT(4), 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),
+ HWREG_SEQ_ITEM(0x10000024, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(10), 1000),
+ };
+ /* Sequence to be written after utility download */
+ static const struct zl3073x_hwreg_seq_item post_seq[] = {
+ HWREG_SEQ_ITEM(0x10400004, 0x000000C0, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400008, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400010, 0x20000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400014, 0x20000004, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, GENMASK(10, 9), 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 0, BIT(0), 1000),
+ };
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Prepare flash mode", "utility",
+ 0, 0);
+
+ /* Execute pre-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, pre_seq, ARRAY_SIZE(pre_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute pre-load sequence");
+ goto error;
+ }
+
+ /* Download utility image to device memory */
+ rc = zl3073x_flash_download(zldev, "utility", 0x20000000, util_ptr,
+ util_size, extack);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot download flash utility");
+ goto error;
+ }
+
+ /* Execute post-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, post_seq, ARRAY_SIZE(post_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute post-load sequence");
+ goto error;
+ }
+
+ /* Check that utility identifies itself correctly */
+ rc = zl3073x_flash_mode_verify(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "flash utility check failed");
+ goto error;
+ }
+
+ /* Enable host control */
+ rc = zl3073x_flash_host_ctrl_enable(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot enable host control");
+ goto error;
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Flash mode enabled", "utility",
+ 0, 0);
+
+ return 0;
+
+error:
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_leave - Leave flash mode
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function instructs the device to leave the flash mode and
+ * return to normal operation.
+ *
+ * The procedure:
+ * 1) Set reset flag
+ * 2) Reset the device CPU by specific HW register sequence
+ * 3) Wait for the device to be ready
+ * 4) Check the reset flag was cleared
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written after flash */
+ static const struct zl3073x_hwreg_seq_item fw_reset_seq[] = {
+ HWREG_SEQ_ITEM(0x80000404, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80000410, 1, BIT(0), 0),
+ };
+ u8 reset_status;
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Leaving flash mode", "utility",
+ 0, 0);
+
+ /* Read reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Set reset bit */
+ reset_status |= ZL_REG_RESET_STATUS_RESET;
+
+ /* Update reset status register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_RESET_STATUS, reset_status);
+ if (rc)
+ return rc;
+
+ /* We do not check the return value here as the sequence resets
+ * the device CPU and the last write always returns an error.
+ */
+ zl3073x_write_hwreg_seq(zldev, fw_reset_seq, ARRAY_SIZE(fw_reset_seq));
+
+ /* Wait for the device to be ready */
+ msleep(500);
+
+ /* Read the reset status register again */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Check that the reset bit was cleared */
+ if (reset_status & ZL_REG_RESET_STATUS_RESET) {
+ dev_err(zldev->dev,
+ "Reset not confirmed after switch to normal mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
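+
+/*
+ * Typical flashing flow (illustrative sketch, not mandated by this file):
+ * zl3073x_flash_mode_enter() to switch the device into flash mode, then
+ * zl3073x_flash_sectors()/zl3073x_flash_page() for each component (see
+ * fw.c), and finally zl3073x_flash_mode_leave() to reset the device back
+ * to normal operation.
+ */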
diff --git a/drivers/dpll/zl3073x/flash.h b/drivers/dpll/zl3073x/flash.h
new file mode 100644
index 000000000000..effe1b16b359
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ZL3073X_FLASH_H
+#define __ZL3073X_FLASH_H
+
+#include <linux/types.h>
+
+struct netlink_ext_ack;
+struct zl3073x_dev;
+
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack);
+
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+#endif /* __ZL3073X_FLASH_H */
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
new file mode 100644
index 000000000000..d5418ff74886
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "flash.h"
+#include "fw.h"
+
+#define ZL3073X_FW_ERR_PFX "FW load failed: "
+#define ZL3073X_FW_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL3073X_FW_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+enum zl3073x_flash_type {
+ ZL3073X_FLASH_TYPE_NONE = 0,
+ ZL3073X_FLASH_TYPE_SECTORS,
+ ZL3073X_FLASH_TYPE_PAGE,
+ ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+};
+
+struct zl3073x_fw_component_info {
+ const char *name;
+ size_t max_size;
+ enum zl3073x_flash_type flash_type;
+ u32 load_addr;
+ u32 dest_page;
+ u32 copy_page;
+};
+
+static const struct zl3073x_fw_component_info component_info[] = {
+ [ZL_FW_COMPONENT_UTIL] = {
+ .name = "utility",
+ .max_size = 0x2300,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_NONE,
+ },
+ [ZL_FW_COMPONENT_FW1] = {
+ .name = "firmware1",
+ .max_size = 0x35000,
+ .load_addr = 0x20002000,
+ .flash_type = ZL3073X_FLASH_TYPE_SECTORS,
+ .dest_page = 0x020,
+ },
+ [ZL_FW_COMPONENT_FW2] = {
+ .name = "firmware2",
+ .max_size = 0x0040,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e0,
+ .copy_page = 0x000,
+ },
+ [ZL_FW_COMPONENT_FW3] = {
+ .name = "firmware3",
+ .max_size = 0x0248,
+ .load_addr = 0x20000400,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e4,
+ .copy_page = 0x004,
+ },
+ [ZL_FW_COMPONENT_CFG0] = {
+ .name = "config0",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3d0,
+ },
+ [ZL_FW_COMPONENT_CFG1] = {
+ .name = "config1",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3c0,
+ },
+ [ZL_FW_COMPONENT_CFG2] = {
+ .name = "config2",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3b0,
+ },
+ [ZL_FW_COMPONENT_CFG3] = {
+ .name = "config3",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3a0,
+ },
+ [ZL_FW_COMPONENT_CFG4] = {
+ .name = "config4",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x390,
+ },
+ [ZL_FW_COMPONENT_CFG5] = {
+ .name = "config5",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x380,
+ },
+ [ZL_FW_COMPONENT_CFG6] = {
+ .name = "config6",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x370,
+ },
+};
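+
+/* Note: dest_page and copy_page appear to be flash page indices in units
+ * of ZL_FLASH_PAGE_SIZE (256 bytes, see flash.c), so e.g. firmware1 with
+ * dest_page 0x020 starts at byte offset 0x020 * 256 = 0x2000 in flash.
+ */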
+
+/* Sanity check */
+static_assert(ARRAY_SIZE(component_info) == ZL_FW_NUM_COMPONENTS);
+
+/**
+ * zl3073x_fw_component_alloc - Allocate structure to hold firmware component
+ * @size: size of buffer to store data
+ *
+ * Return: pointer to allocated component structure or NULL on error.
+ */
+static struct zl3073x_fw_component *
+zl3073x_fw_component_alloc(size_t size)
+{
+ struct zl3073x_fw_component *comp;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ comp->size = size;
+ comp->data = kzalloc(size, GFP_KERNEL);
+ if (!comp->data) {
+ kfree(comp);
+ return NULL;
+ }
+
+ return comp;
+}
+
+/**
+ * zl3073x_fw_component_free - Free allocated component structure
+ * @comp: pointer to allocated component
+ */
+static void
+zl3073x_fw_component_free(struct zl3073x_fw_component *comp)
+{
+ if (comp)
+ kfree(comp->data);
+
+ kfree(comp);
+}
+
+/**
+ * zl3073x_fw_component_id_get - Get ID for firmware component name
+ * @name: input firmware component name
+ *
+ * Return:
+ * - ZL_FW_COMPONENT_* ID for a known component name
+ * - ZL_FW_COMPONENT_INVALID if the given name is unknown
+ */
+static enum zl3073x_fw_component_id
+zl3073x_fw_component_id_get(const char *name)
+{
+ enum zl3073x_fw_component_id id;
+
+ for (id = 0; id < ZL_FW_NUM_COMPONENTS; id++)
+ if (!strcasecmp(name, component_info[id].name))
+ return id;
+
+ return ZL_FW_COMPONENT_INVALID;
+}
+
+/**
+ * zl3073x_fw_component_load - Load component from firmware source
+ * @zldev: zl3073x device structure
+ * @pcomp: pointer to loaded component
+ * @psrc: data pointer to load component from
+ * @psize: remaining bytes in buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a single firmware component and loads the data from
+ * the buffer specified by @psrc and @psize. A pointer to the allocated
+ * component is stored in the output @pcomp. The source data pointer @psrc
+ * and the remaining byte count @psize are updated accordingly.
+ *
+ * Return:
+ * * 1 when component was allocated and loaded
+ * * 0 when there is no component to load
+ * * <0 on error
+ */
+static ssize_t
+zl3073x_fw_component_load(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component **pcomp,
+ const char **psrc, size_t *psize,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ struct zl3073x_fw_component *comp = NULL;
+ struct device *dev = zldev->dev;
+ enum zl3073x_fw_component_id id;
+ char buf[32], name[16];
+ u32 count, size, *dest;
+ int pos, rc;
+
+ /* Fetch image name and size from input */
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%15s %u %n", name, &count, &pos);
+ if (!rc) {
+ /* No more data */
+ return 0;
+ } else if (rc == 1 || count > U32_MAX / sizeof(u32)) {
+ ZL3073X_FW_ERR_MSG(extack, "invalid component size");
+ return -EINVAL;
+ }
+ *psrc += pos;
+ *psize -= pos;
+
+ dev_dbg(dev, "Firmware component '%s' found\n", name);
+
+ id = zl3073x_fw_component_id_get(name);
+ if (id == ZL_FW_COMPONENT_INVALID) {
+ ZL3073X_FW_ERR_MSG(extack, "unknown component type '%s'", name);
+ return -EINVAL;
+ }
+
+ info = &component_info[id];
+ size = count * sizeof(u32); /* get size in bytes */
+
+ /* Check image size validity */
+ if (size > component_info[id].max_size) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "[%s] component is too big (%u bytes)",
+ info->name, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Indicated component image size: %u bytes\n", size);
+
+ /* Alloc component */
+ comp = zl3073x_fw_component_alloc(size);
+ if (!comp) {
+ ZL3073X_FW_ERR_MSG(extack, "failed to alloc memory");
+ return -ENOMEM;
+ }
+ comp->id = id;
+
+ /* Load component data from firmware source */
+ for (dest = comp->data; count; count--, dest++) {
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%x %n", dest, &pos);
+ if (!rc)
+ goto err_data;
+
+ *psrc += pos;
+ *psize -= pos;
+ }
+
+ *pcomp = comp;
+
+ return 1;
+
+err_data:
+ ZL3073X_FW_ERR_MSG(extack, "[%s] invalid or missing data", info->name);
+
+ zl3073x_fw_component_free(comp);
+
+ return -ENODATA;
+}
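+
+/*
+ * The loader above expects a plain-text firmware source: for each
+ * component a header "<name> <word-count>" followed by that many 32-bit
+ * hexadecimal words. An illustrative (not authoritative) fragment:
+ *
+ *   config0 4
+ *   00000001 0000ffff 20000000 00000000
+ */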
+
+/**
+ * zl3073x_fw_free - Free allocated firmware
+ * @fw: firmware pointer
+ *
+ * The function frees existing firmware allocated by @zl3073x_fw_load.
+ */
+void zl3073x_fw_free(struct zl3073x_fw *fw)
+{
+ size_t i;
+
+ if (!fw)
+ return;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++)
+ zl3073x_fw_component_free(fw->component[i]);
+
+ kfree(fw);
+}
+
+/**
+ * zl3073x_fw_load - Load all components from source
+ * @zldev: zl3073x device structure
+ * @data: source buffer pointer
+ * @size: size of source buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a firmware structure and loads all components from
+ * the given buffer specified by @data and @size.
+ *
+ * Return: pointer to firmware on success, error pointer on error
+ */
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *comp;
+ enum zl3073x_fw_component_id id;
+ struct zl3073x_fw *fw;
+ ssize_t rc;
+
+ /* Allocate firmware structure */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ /* Load single component */
+ rc = zl3073x_fw_component_load(zldev, &comp, &data, &size,
+ extack);
+ if (rc <= 0)
+ /* Everything was read or error occurred */
+ break;
+
+ id = comp->id;
+
+ /* Report error if the given component is present twice
+ * or more.
+ */
+ if (fw->component[id]) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "duplicate component '%s' detected",
+ component_info[id].name);
+ zl3073x_fw_component_free(comp);
+ rc = -EINVAL;
+ break;
+ }
+
+ fw->component[id] = comp;
+ } while (true);
+
+ if (rc) {
+ /* Free allocated firmware in case of error */
+ zl3073x_fw_free(fw);
+ return ERR_PTR(rc);
+ }
+
+ return fw;
+}
+
+/**
+ * zl3073x_fw_component_flash - Flash a single firmware component
+ * @zldev: zl3073x device structure
+ * @comp: pointer to the firmware component to flash
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component *comp,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ int rc;
+
+ info = &component_info[comp->id];
+
+ switch (info->flash_type) {
+ case ZL3073X_FLASH_TYPE_NONE:
+ /* Non-flashable component - used for utility */
+ return 0;
+ case ZL3073X_FLASH_TYPE_SECTORS:
+ rc = zl3073x_flash_sectors(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data,
+ comp->size, extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE_AND_COPY:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ if (!rc)
+ rc = zl3073x_flash_page_copy(zldev, info->name,
+ info->dest_page,
+ info->copy_page, extack);
+ break;
+ }
+ if (rc)
+ ZL3073X_FW_ERR_MSG(extack, "Failed to flash component '%s'",
+ info->name);
+
+ return rc;
+}
+
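+/**
+ * zl3073x_fw_flash - Flash all components of the firmware bundle
+ * @zldev: zl3073x device structure
+ * @zlfw: firmware bundle to flash
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */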
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++) {
+ if (!zlfw->component[i])
+ continue; /* Component is not present */
+
+ rc = zl3073x_fw_component_flash(zldev, zlfw->component[i],
+ extack);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/dpll/zl3073x/fw.h b/drivers/dpll/zl3073x/fw.h
new file mode 100644
index 000000000000..fcaa89ab075e
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_FW_H
+#define _ZL3073X_FW_H
+
+/*
+ * enum zl3073x_fw_component_id - Identifiers for possible flash components
+ */
+enum zl3073x_fw_component_id {
+ ZL_FW_COMPONENT_INVALID = -1,
+ ZL_FW_COMPONENT_UTIL = 0,
+ ZL_FW_COMPONENT_FW1,
+ ZL_FW_COMPONENT_FW2,
+ ZL_FW_COMPONENT_FW3,
+ ZL_FW_COMPONENT_CFG0,
+ ZL_FW_COMPONENT_CFG1,
+ ZL_FW_COMPONENT_CFG2,
+ ZL_FW_COMPONENT_CFG3,
+ ZL_FW_COMPONENT_CFG4,
+ ZL_FW_COMPONENT_CFG5,
+ ZL_FW_COMPONENT_CFG6,
+ ZL_FW_NUM_COMPONENTS
+};
+
+/**
+ * struct zl3073x_fw_component - Firmware component
+ * @id: Flash component ID
+ * @size: Size of the buffer
+ * @data: Pointer to buffer with component data
+ */
+struct zl3073x_fw_component {
+ enum zl3073x_fw_component_id id;
+ size_t size;
+ void *data;
+};
+
+/**
+ * struct zl3073x_fw - Firmware bundle
+ * @component: firmware components array
+ */
+struct zl3073x_fw {
+ struct zl3073x_fw_component *component[ZL_FW_NUM_COMPONENTS];
+};
+
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack);
+void zl3073x_fw_free(struct zl3073x_fw *fw);
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _ZL3073X_FW_H */
diff --git a/drivers/dpll/zl3073x/i2c.c b/drivers/dpll/zl3073x/i2c.c
new file mode 100644
index 000000000000..7bbfdd4ed867
--- /dev/null
+++ b/drivers/dpll/zl3073x/i2c.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "core.h"
+
+static int zl3073x_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, i2c_get_match_data(client));
+}
+
+static const struct i2c_device_id zl3073x_i2c_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info,
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, zl3073x_i2c_id);
+
+static const struct of_device_id zl3073x_i2c_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_i2c_of_match);
+
+static struct i2c_driver zl3073x_i2c_driver = {
+ .driver = {
+ .name = "zl3073x-i2c",
+ .of_match_table = zl3073x_i2c_of_match,
+ },
+ .probe = zl3073x_i2c_probe,
+ .id_table = zl3073x_i2c_id,
+};
+module_i2c_driver(zl3073x_i2c_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x I2C driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
new file mode 100644
index 000000000000..4cf7e8aefcb3
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fwnode.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "prop.h"
+
+/**
+ * zl3073x_pin_check_freq - verify frequency for given pin
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @id: pin index
+ * @freq: frequency to check
+ *
+ * The function checks that the given frequency is valid for the device. For
+ * input pins it checks that the frequency can be factorized using supported
+ * base frequencies. For output pins it checks that the frequency divides the
+ * connected synth frequency without remainder.
+ *
+ * Return: true if the frequency is valid, false if not.
+ */
+static bool
+zl3073x_pin_check_freq(struct zl3073x_dev *zldev, enum dpll_pin_direction dir,
+ u8 id, u64 freq)
+{
+ if (freq > U32_MAX)
+ goto err_inv_freq;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ int rc;
+
+ /* Check if the frequency can be factorized */
+ rc = zl3073x_ref_freq_factorize(freq, NULL, NULL);
+ if (rc)
+ goto err_inv_freq;
+ } else {
+ u32 synth_freq;
+ u8 out, synth;
+
+ /* Get output pin synthesizer */
+ out = zl3073x_output_pin_out_get(id);
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* Check the frequency divides synth frequency */
+ if (synth_freq % (u32)freq)
+ goto err_inv_freq;
+ }
+
+ return true;
+
+err_inv_freq:
+ dev_warn(zldev->dev,
+ "Unsupported frequency %llu Hz in firmware node\n", freq);
+
+ return false;
+}
+
+/**
+ * zl3073x_prop_pin_package_label_set - set package label for the pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Generates the package label string and stores it in the pin properties
+ * structure.
+ *
+ * Possible formats:
+ * REF<n> - differential input reference
+ * REF<n>P & REF<n>N - single-ended input reference (P or N pin)
+ * OUT<n> - differential output
+ * OUT<n>P & OUT<n>N - single-ended output (P or N pin)
+ */
+static void
+zl3073x_prop_pin_package_label_set(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ const char *prefix, *suffix;
+ bool is_diff;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref;
+
+ prefix = "REF";
+ ref = zl3073x_input_pin_ref_get(id);
+ is_diff = zl3073x_ref_is_diff(zldev, ref);
+ } else {
+ u8 out;
+
+ prefix = "OUT";
+ out = zl3073x_output_pin_out_get(id);
+ is_diff = zl3073x_out_is_diff(zldev, out);
+ }
+
+ if (!is_diff)
+ suffix = zl3073x_is_p_pin(id) ? "P" : "N";
+ else
+ suffix = ""; /* No suffix for differential one */
+
+ snprintf(props->package_label, sizeof(props->package_label), "%s%u%s",
+ prefix, id / 2, suffix);
+
+ /* Set package_label pointer in DPLL core properties to generated
+ * string.
+ */
+ props->dpll_props.package_label = props->package_label;
+}
+
+/**
+ * zl3073x_prop_pin_fwnode_get - get fwnode for given pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Return: 0 on success, -ENOENT if the firmware node does not exist
+ */
+static int
+zl3073x_prop_pin_fwnode_get(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ struct fwnode_handle *pins_node, *pin_node;
+ const char *node_name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT)
+ node_name = "input-pins";
+ else
+ node_name = "output-pins";
+
+ /* Get node containing input or output pins */
+ pins_node = device_get_named_child_node(zldev->dev, node_name);
+ if (!pins_node) {
+ dev_dbg(zldev->dev, "'%s' sub-node is missing\n", node_name);
+ return -ENOENT;
+ }
+
+ /* Enumerate child pin nodes and find the requested one */
+ fwnode_for_each_child_node(pins_node, pin_node) {
+ u32 reg;
+
+ if (fwnode_property_read_u32(pin_node, "reg", &reg))
+ continue;
+
+ if (id == reg)
+ break;
+ }
+
+ /* Release pin parent node */
+ fwnode_handle_put(pins_node);
+
+ /* Save found node */
+ props->fwnode = pin_node;
+
+ dev_dbg(zldev->dev, "Firmware node for %s %sfound\n",
+ props->package_label, pin_node ? "" : "NOT ");
+
+ return pin_node ? 0 : -ENOENT;
+}
+
+/**
+ * zl3073x_pin_props_get - get pin properties
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * The function looks for the firmware node for the given pin if it is
+ * provided by the system firmware (DT or ACPI), allocates a pin properties
+ * structure, generates the package label string according to the pin type,
+ * and optionally fetches the board label, connection type, supported
+ * frequencies and esync capability from the firmware node if it exists.
+ *
+ * The pointer returned by this function should be freed using
+ * zl3073x_pin_props_put().
+ *
+ * Return:
+ * * pointer to allocated pin properties structure on success
+ * * error pointer in case of error
+ */
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index)
+{
+ struct dpll_pin_frequency *ranges;
+ struct zl3073x_pin_props *props;
+ int i, j, num_freqs, rc;
+ const char *type;
+ u64 *freqs;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set default pin type and capabilities */
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ props->dpll_props.capabilities =
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ } else {
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+ }
+
+ props->dpll_props.phase_range.min = S32_MIN;
+ props->dpll_props.phase_range.max = S32_MAX;
+
+ zl3073x_prop_pin_package_label_set(zldev, props, dir, index);
+
+ /* Get firmware node for the given pin */
+ rc = zl3073x_prop_pin_fwnode_get(zldev, props, dir, index);
+ if (rc)
+ return props; /* Return if it does not exist */
+
+ /* Look for label property and store the value as board label */
+ fwnode_property_read_string(props->fwnode, "label",
+ &props->dpll_props.board_label);
+
+ /* Look for pin type property and translate its value to DPLL
+ * pin type enum if it is present.
+ */
+ if (!fwnode_property_read_string(props->fwnode, "connection-type",
+ &type)) {
+ if (!strcmp(type, "ext"))
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ else if (!strcmp(type, "gnss"))
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+ else if (!strcmp(type, "int"))
+ props->dpll_props.type = DPLL_PIN_TYPE_INT_OSCILLATOR;
+ else if (!strcmp(type, "synce"))
+ props->dpll_props.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ else
+ dev_warn(zldev->dev,
+ "Unknown or unsupported pin type '%s'\n",
+ type);
+ }
+
+ /* Check if the pin supports embedded sync control */
+ props->esync_control = fwnode_property_read_bool(props->fwnode,
+ "esync-control");
+
+ /* Read supported frequencies property if it is specified */
+ num_freqs = fwnode_property_count_u64(props->fwnode,
+ "supported-frequencies-hz");
+ if (num_freqs <= 0)
+ /* Return if the property does not exist or the count is 0 */
+ return props;
+
+ /* The firmware node specifies a list of supported frequencies while
+ * the DPLL core pin properties require a list of frequency ranges,
+ * so read the frequency list into a temporary array.
+ */
+ freqs = kcalloc(num_freqs, sizeof(*freqs), GFP_KERNEL);
+ if (!freqs) {
+ rc = -ENOMEM;
+ goto err_alloc_freqs;
+ }
+
+ /* Read frequencies list from firmware node */
+ fwnode_property_read_u64_array(props->fwnode,
+ "supported-frequencies-hz", freqs,
+ num_freqs);
+
+ /* Allocate frequency ranges list and fill it */
+ ranges = kcalloc(num_freqs, sizeof(*ranges), GFP_KERNEL);
+ if (!ranges) {
+ rc = -ENOMEM;
+ goto err_alloc_ranges;
+ }
+
+ /* Convert the list of frequencies to a list of frequency ranges but
+ * filter out frequencies that are not representable by the device.
+ */
+ for (i = 0, j = 0; i < num_freqs; i++) {
+ struct dpll_pin_frequency freq = DPLL_PIN_FREQUENCY(freqs[i]);
+
+ if (zl3073x_pin_check_freq(zldev, dir, index, freqs[i])) {
+ ranges[j] = freq;
+ j++;
+ }
+ }
+
+ /* Save number of freq ranges and pointer to them into pin properties */
+ props->dpll_props.freq_supported = ranges;
+ props->dpll_props.freq_supported_num = j;
+
+ /* Free temporary array */
+ kfree(freqs);
+
+ return props;
+
+err_alloc_ranges:
+ kfree(freqs);
+err_alloc_freqs:
+ fwnode_handle_put(props->fwnode);
+ kfree(props);
+
+ return ERR_PTR(rc);
+}
+
+/**
+ * zl3073x_pin_props_put - release pin properties
+ * @props: pin properties to free
+ *
+ * The function deallocates the given pin properties structure.
+ */
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props)
+{
+ /* Free supported frequency ranges list if it is present */
+ kfree(props->dpll_props.freq_supported);
+
+ /* Put firmware handle if it is present */
+ if (props->fwnode)
+ fwnode_handle_put(props->fwnode);
+
+ kfree(props);
+}
+
+/**
+ * zl3073x_prop_dpll_type_get - get DPLL channel type
+ * @zldev: pointer to zl3073x device
+ * @index: DPLL channel index
+ *
+ * Return: DPLL type for given DPLL channel
+ */
+enum dpll_type
+zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const char *types[ZL3073X_MAX_CHANNELS];
+ int count;
+
+ /* Read dpll types property from firmware */
+ count = device_property_read_string_array(zldev->dev, "dpll-types",
+ types, ARRAY_SIZE(types));
+
+ /* Return default if property or entry for given channel is missing */
+ if (index >= count)
+ return DPLL_TYPE_PPS;
+
+ if (!strcmp(types[index], "pps"))
+ return DPLL_TYPE_PPS;
+ else if (!strcmp(types[index], "eec"))
+ return DPLL_TYPE_EEC;
+
+ dev_info(zldev->dev, "Unknown DPLL type '%s', using default\n",
+ types[index]);
+
+ return DPLL_TYPE_PPS; /* Default */
+}
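+
+/*
+ * Illustrative firmware node fragment matching the properties consumed
+ * above (property names are taken from this file; the exact bindings are
+ * an assumption):
+ *
+ *   dpll-types = "pps", "eec";
+ *   input-pins {
+ *           pin@0 {
+ *                   reg = <0>;
+ *                   label = "REF0P";
+ *                   connection-type = "ext";
+ *                   esync-control;
+ *                   supported-frequencies-hz = /bits/ 64 <1 1000 10000000>;
+ *           };
+ *   };
+ */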
diff --git a/drivers/dpll/zl3073x/prop.h b/drivers/dpll/zl3073x/prop.h
new file mode 100644
index 000000000000..721a18f05938
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_PROP_H
+#define _ZL3073X_PROP_H
+
+#include <linux/dpll.h>
+
+#include "core.h"
+
+struct fwnode_handle;
+
+/**
+ * struct zl3073x_pin_props - pin properties
+ * @fwnode: pin firmware node
+ * @dpll_props: DPLL core pin properties
+ * @package_label: pin package label
+ * @esync_control: embedded sync support
+ */
+struct zl3073x_pin_props {
+ struct fwnode_handle *fwnode;
+ struct dpll_pin_properties dpll_props;
+ char package_label[8];
+ bool esync_control;
+};
+
+enum dpll_type zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index);
+
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index);
+
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props);
+
+#endif /* _ZL3073X_PROP_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
new file mode 100644
index 000000000000..19a25325bd9c
--- /dev/null
+++ b/drivers/dpll/zl3073x/regs.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REGS_H
+#define _ZL3073X_REGS_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Register address structure:
+ * ===========================
+ * 25 19 18 16 15 7 6 0
+ * +------------------------------------------+
+ * | max_offset | size | page | page_offset |
+ * +------------------------------------------+
+ *
+ * page_offset ... <0x00..0x7F>
+ * page .......... HW page number
+ * size .......... register byte size (1, 2, 4 or 6)
+ * max_offset .... maximal offset for indexed registers
+ * (for non-indexed regs max_offset == page_offset)
+ */
+
+#define ZL_REG_OFFSET_MASK GENMASK(6, 0)
+#define ZL_REG_PAGE_MASK GENMASK(15, 7)
+#define ZL_REG_SIZE_MASK GENMASK(18, 16)
+#define ZL_REG_MAX_OFFSET_MASK GENMASK(25, 19)
+#define ZL_REG_ADDR_MASK GENMASK(15, 0)
+
+#define ZL_REG_OFFSET(_reg) FIELD_GET(ZL_REG_OFFSET_MASK, _reg)
+#define ZL_REG_PAGE(_reg) FIELD_GET(ZL_REG_PAGE_MASK, _reg)
+#define ZL_REG_MAX_OFFSET(_reg) FIELD_GET(ZL_REG_MAX_OFFSET_MASK, _reg)
+#define ZL_REG_SIZE(_reg) FIELD_GET(ZL_REG_SIZE_MASK, _reg)
+#define ZL_REG_ADDR(_reg) FIELD_GET(ZL_REG_ADDR_MASK, _reg)
+
+/**
+ * ZL_REG_IDX - define indexed register
+ * @_idx: index of register to access
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ * @_items: number of register indices
+ * @_stride: stride between items in bytes
+ *
+ * All parameters except @_idx should be constant.
+ */
+#define ZL_REG_IDX(_idx, _page, _offset, _size, _items, _stride) \
+ (FIELD_PREP(ZL_REG_OFFSET_MASK, \
+ (_offset) + (_idx) * (_stride)) | \
+ FIELD_PREP_CONST(ZL_REG_PAGE_MASK, _page) | \
+ FIELD_PREP_CONST(ZL_REG_SIZE_MASK, _size) | \
+ FIELD_PREP_CONST(ZL_REG_MAX_OFFSET_MASK, \
+ (_offset) + ((_items) - 1) * (_stride)))
+
+/**
+ * ZL_REG - define simple (non-indexed) register
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ *
+ * All parameters should be constant.
+ */
+#define ZL_REG(_page, _offset, _size) \
+ ZL_REG_IDX(0, _page, _offset, _size, 1, 0)
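+
+/*
+ * Worked example (illustrative): ZL_REG_REF_MON_STATUS(2) below expands to
+ * ZL_REG_IDX(2, 2, 0x02, 1, ZL3073X_NUM_REFS, 1), i.e. page 2, offset
+ * 0x02 + 2 * 1 = 0x04, size 1 byte, with max_offset covering the last
+ * reference index (enabling validation of indexed accesses).
+ */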
+
+/**************************
+ * Register Page 0, General
+ **************************/
+
+#define ZL_REG_ID ZL_REG(0, 0x01, 2)
+#define ZL_REG_REVISION ZL_REG(0, 0x03, 2)
+#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
+#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+
+#define ZL_REG_RESET_STATUS ZL_REG(0, 0x18, 1)
+#define ZL_REG_RESET_STATUS_RESET BIT(0)
+
+/*************************
+ * Register Page 2, Status
+ *************************/
+
+#define ZL_REG_REF_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x02, 1, ZL3073X_NUM_REFS, 1)
+#define ZL_REF_MON_STATUS_OK 0 /* all bits zeroed */
+
+#define ZL_REG_DPLL_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x10, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_MON_STATUS_STATE GENMASK(1, 0)
+#define ZL_DPLL_MON_STATUS_STATE_ACQUIRING 0
+#define ZL_DPLL_MON_STATUS_STATE_LOCK 1
+#define ZL_DPLL_MON_STATUS_STATE_HOLDOVER 2
+#define ZL_DPLL_MON_STATUS_HO_READY BIT(2)
+
+#define ZL_REG_DPLL_REFSEL_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x30, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_REFSEL_STATUS_REFSEL GENMASK(3, 0)
+#define ZL_DPLL_REFSEL_STATUS_STATE GENMASK(6, 4)
+#define ZL_DPLL_REFSEL_STATUS_STATE_LOCK 4
+
+#define ZL_REG_REF_FREQ(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x44, 4, ZL3073X_NUM_REFS, 4)
+
+/**********************
+ * Register Page 4, Ref
+ **********************/
+
+#define ZL_REG_REF_PHASE_ERR_READ_RQST ZL_REG(4, 0x0f, 1)
+#define ZL_REF_PHASE_ERR_READ_RQST_RD BIT(0)
+
+#define ZL_REG_REF_FREQ_MEAS_CTRL ZL_REG(4, 0x1c, 1)
+#define ZL_REF_FREQ_MEAS_CTRL GENMASK(1, 0)
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ 1
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF 2
+#define ZL_REF_FREQ_MEAS_CTRL_DPLL_FREQ_OFF 3
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_3_0 ZL_REG(4, 0x1d, 1)
+#define ZL_REF_FREQ_MEAS_MASK_3_0(_ref) BIT(_ref)
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_4 ZL_REG(4, 0x1e, 1)
+#define ZL_REF_FREQ_MEAS_MASK_4(_ref) BIT((_ref) - 8)
+
+#define ZL_REG_DPLL_MEAS_REF_FREQ_CTRL ZL_REG(4, 0x1f, 1)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_IDX GENMASK(6, 4)
+
+#define ZL_REG_REF_PHASE(_idx) \
+ ZL_REG_IDX(_idx, 4, 0x20, 6, ZL3073X_NUM_REFS, 6)
+
+/***********************
+ * Register Page 5, DPLL
+ ***********************/
+
+#define ZL_REG_DPLL_MODE_REFSEL(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x04, 1, ZL3073X_MAX_CHANNELS, 4)
+#define ZL_DPLL_MODE_REFSEL_MODE GENMASK(2, 0)
+#define ZL_DPLL_MODE_REFSEL_MODE_FREERUN 0
+#define ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER 1
+#define ZL_DPLL_MODE_REFSEL_MODE_REFLOCK 2
+#define ZL_DPLL_MODE_REFSEL_MODE_AUTO 3
+#define ZL_DPLL_MODE_REFSEL_MODE_NCO 4
+#define ZL_DPLL_MODE_REFSEL_REF GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_CTRL ZL_REG(5, 0x50, 1)
+#define ZL_DPLL_MEAS_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_CTRL_AVG_FACTOR GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_IDX ZL_REG(5, 0x51, 1)
+#define ZL_DPLL_MEAS_IDX GENMASK(2, 0)
+
+#define ZL_REG_DPLL_PHASE_ERR_READ_MASK ZL_REG(5, 0x54, 1)
+
+#define ZL_REG_DPLL_PHASE_ERR_DATA(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x55, 6, ZL3073X_MAX_CHANNELS, 6)
+
+/***********************************
+ * Register Page 9, Synth and Output
+ ***********************************/
+
+#define ZL_REG_SYNTH_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x00, 1, ZL3073X_NUM_SYNTHS, 1)
+#define ZL_SYNTH_CTRL_EN BIT(0)
+#define ZL_SYNTH_CTRL_DPLL_SEL GENMASK(6, 4)
+
+#define ZL_REG_SYNTH_PHASE_SHIFT_CTRL ZL_REG(9, 0x1e, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_MASK ZL_REG(9, 0x1f, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_INTVL ZL_REG(9, 0x20, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_DATA ZL_REG(9, 0x21, 2)
+
+#define ZL_REG_OUTPUT_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x28, 1, ZL3073X_NUM_OUTS, 1)
+#define ZL_OUTPUT_CTRL_EN BIT(0)
+#define ZL_OUTPUT_CTRL_SYNTH_SEL GENMASK(6, 4)
+
+/*******************************
+ * Register Page 10, Ref Mailbox
+ *******************************/
+
+#define ZL_REG_REF_MB_MASK ZL_REG(10, 0x02, 2)
+
+#define ZL_REG_REF_MB_SEM ZL_REG(10, 0x04, 1)
+#define ZL_REF_MB_SEM_WR BIT(0)
+#define ZL_REF_MB_SEM_RD BIT(1)
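+
+/*
+ * Assumed mailbox flow (sketch, mirroring the usual latch-register
+ * pattern): select the reference of interest in ZL_REG_REF_MB_MASK, set
+ * ZL_REF_MB_SEM_RD in ZL_REG_REF_MB_SEM and poll until the bit clears;
+ * the latched per-reference registers below are then valid to read.
+ * Writes fill the registers first and use ZL_REF_MB_SEM_WR the same way.
+ */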
+
+#define ZL_REG_REF_FREQ_BASE ZL_REG(10, 0x05, 2)
+#define ZL_REG_REF_FREQ_MULT ZL_REG(10, 0x07, 2)
+#define ZL_REG_REF_RATIO_M ZL_REG(10, 0x09, 2)
+#define ZL_REG_REF_RATIO_N ZL_REG(10, 0x0b, 2)
+
+#define ZL_REG_REF_CONFIG ZL_REG(10, 0x0d, 1)
+#define ZL_REF_CONFIG_ENABLE BIT(0)
+#define ZL_REF_CONFIG_DIFF_EN BIT(2)
+
+#define ZL_REG_REF_PHASE_OFFSET_COMP ZL_REG(10, 0x28, 6)
+
+#define ZL_REG_REF_SYNC_CTRL ZL_REG(10, 0x2e, 1)
+#define ZL_REF_SYNC_CTRL_MODE GENMASK(2, 0)
+#define ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF 0
+#define ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75 2
+
+#define ZL_REG_REF_ESYNC_DIV ZL_REG(10, 0x30, 4)
+#define ZL_REF_ESYNC_DIV_1HZ 0
+
+/********************************
+ * Register Page 12, DPLL Mailbox
+ ********************************/
+
+#define ZL_REG_DPLL_MB_MASK ZL_REG(12, 0x02, 2)
+
+#define ZL_REG_DPLL_MB_SEM ZL_REG(12, 0x04, 1)
+#define ZL_DPLL_MB_SEM_WR BIT(0)
+#define ZL_DPLL_MB_SEM_RD BIT(1)
+
+#define ZL_REG_DPLL_REF_PRIO(_idx) \
+ ZL_REG_IDX(_idx, 12, 0x52, 1, ZL3073X_NUM_REFS / 2, 1)
+#define ZL_DPLL_REF_PRIO_REF_P GENMASK(3, 0)
+#define ZL_DPLL_REF_PRIO_REF_N GENMASK(7, 4)
+#define ZL_DPLL_REF_PRIO_MAX 14
+#define ZL_DPLL_REF_PRIO_NONE 15
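+
+/*
+ * Illustrative sketch: each priority register packs a P/N reference
+ * pair, so the priority of reference 'r' lives in
+ * ZL_REG_DPLL_REF_PRIO(r / 2); use FIELD_GET(ZL_DPLL_REF_PRIO_REF_N, val)
+ * when (r & 1) is set and FIELD_GET(ZL_DPLL_REF_PRIO_REF_P, val)
+ * otherwise.
+ */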
+
+/*********************************
+ * Register Page 13, Synth Mailbox
+ *********************************/
+
+#define ZL_REG_SYNTH_MB_MASK ZL_REG(13, 0x02, 2)
+
+#define ZL_REG_SYNTH_MB_SEM ZL_REG(13, 0x04, 1)
+#define ZL_SYNTH_MB_SEM_WR BIT(0)
+#define ZL_SYNTH_MB_SEM_RD BIT(1)
+
+#define ZL_REG_SYNTH_FREQ_BASE ZL_REG(13, 0x06, 2)
+#define ZL_REG_SYNTH_FREQ_MULT ZL_REG(13, 0x08, 4)
+#define ZL_REG_SYNTH_FREQ_M ZL_REG(13, 0x0c, 2)
+#define ZL_REG_SYNTH_FREQ_N ZL_REG(13, 0x0e, 2)
+
+/**********************************
+ * Register Page 14, Output Mailbox
+ **********************************/
+#define ZL_REG_OUTPUT_MB_MASK ZL_REG(14, 0x02, 2)
+
+#define ZL_REG_OUTPUT_MB_SEM ZL_REG(14, 0x04, 1)
+#define ZL_OUTPUT_MB_SEM_WR BIT(0)
+#define ZL_OUTPUT_MB_SEM_RD BIT(1)
+
+#define ZL_REG_OUTPUT_MODE ZL_REG(14, 0x05, 1)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE GENMASK(2, 0)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL 0
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT GENMASK(7, 4)
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED 0
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF 2
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM 3
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2 4
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P 5
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N 6
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_INV 7
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV 12
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV 15
+
+#define ZL_REG_OUTPUT_DIV ZL_REG(14, 0x0c, 4)
+#define ZL_REG_OUTPUT_WIDTH ZL_REG(14, 0x10, 4)
+#define ZL_REG_OUTPUT_ESYNC_PERIOD ZL_REG(14, 0x14, 4)
+#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
+#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+
+/*
+ * Register Page 255 - HW registers access
+ */
+#define ZL_REG_HWREG_OP ZL_REG(0xff, 0x00, 1)
+#define ZL_HWREG_OP_WRITE 0x28
+#define ZL_HWREG_OP_READ 0x29
+#define ZL_HWREG_OP_PENDING BIT(1)
+
+#define ZL_REG_HWREG_ADDR ZL_REG(0xff, 0x04, 4)
+#define ZL_REG_HWREG_WRITE_DATA ZL_REG(0xff, 0x08, 4)
+#define ZL_REG_HWREG_READ_DATA ZL_REG(0xff, 0x0c, 4)
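+
+/*
+ * Assumed access flow for the HW register window (sketch only): write
+ * the target address to ZL_REG_HWREG_ADDR, issue ZL_HWREG_OP_READ (or
+ * ZL_HWREG_OP_WRITE with ZL_REG_HWREG_WRITE_DATA filled in) through
+ * ZL_REG_HWREG_OP, poll that register until ZL_HWREG_OP_PENDING clears,
+ * then fetch the result from ZL_REG_HWREG_READ_DATA.
+ */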
+
+/*
+ * Registers available in flash mode
+ */
+#define ZL_REG_FLASH_HASH ZL_REG(0, 0x78, 4)
+#define ZL_REG_FLASH_FAMILY ZL_REG(0, 0x7c, 1)
+#define ZL_REG_FLASH_RELEASE ZL_REG(0, 0x7d, 1)
+
+#define ZL_REG_HOST_CONTROL ZL_REG(1, 0x02, 1)
+#define ZL_HOST_CONTROL_ENABLE BIT(0)
+
+#define ZL_REG_IMAGE_START_ADDR ZL_REG(1, 0x04, 4)
+#define ZL_REG_IMAGE_SIZE ZL_REG(1, 0x08, 4)
+#define ZL_REG_FLASH_INDEX_READ ZL_REG(1, 0x0c, 4)
+#define ZL_REG_FLASH_INDEX_WRITE ZL_REG(1, 0x10, 4)
+#define ZL_REG_FILL_PATTERN ZL_REG(1, 0x14, 4)
+
+#define ZL_REG_WRITE_FLASH ZL_REG(1, 0x18, 1)
+#define ZL_WRITE_FLASH_OP GENMASK(2, 0)
+#define ZL_WRITE_FLASH_OP_DONE 0x0
+#define ZL_WRITE_FLASH_OP_SECTORS 0x2
+#define ZL_WRITE_FLASH_OP_PAGE 0x3
+#define ZL_WRITE_FLASH_OP_COPY_PAGE 0x4
+
+#define ZL_REG_FLASH_INFO ZL_REG(2, 0x00, 1)
+#define ZL_FLASH_INFO_SECTOR_SIZE GENMASK(3, 0)
+#define ZL_FLASH_INFO_SECTOR_4K 0
+#define ZL_FLASH_INFO_SECTOR_64K 1
+
+#define ZL_REG_ERROR_COUNT ZL_REG(2, 0x04, 4)
+#define ZL_REG_ERROR_CAUSE ZL_REG(2, 0x08, 4)
+
+#define ZL_REG_OP_STATE ZL_REG(2, 0x14, 1)
+#define ZL_OP_STATE_NO_COMMAND 0
+#define ZL_OP_STATE_PENDING 1
+#define ZL_OP_STATE_DONE 2
+
+#endif /* _ZL3073X_REGS_H */
diff --git a/drivers/dpll/zl3073x/spi.c b/drivers/dpll/zl3073x/spi.c
new file mode 100644
index 000000000000..af901b4d6dda
--- /dev/null
+++ b/drivers/dpll/zl3073x/spi.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "core.h"
+
+static int zl3073x_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_spi(spi, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, spi_get_device_match_data(spi));
+}
+
+static const struct spi_device_id zl3073x_spi_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info,
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, zl3073x_spi_id);
+
+static const struct of_device_id zl3073x_spi_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_spi_of_match);
+
+static struct spi_driver zl3073x_spi_driver = {
+ .driver = {
+ .name = "zl3073x-spi",
+ .of_match_table = zl3073x_spi_of_match,
+ },
+ .probe = zl3073x_spi_probe,
+ .id_table = zl3073x_spi_id,
+};
+module_spi_driver(zl3073x_spi_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x SPI driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 81af6c344d6b..39352b9b7a7e 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,9 +75,38 @@ config EDAC_GHES
In doubt, say 'Y'.
+config EDAC_SCRUB
+ bool "EDAC scrub feature"
+ help
+ The EDAC scrub feature is optional and is designed to control the
+ memory scrubbers in the system. The common sysfs scrub interface
+ abstracts the control of arbitrary scrubbing functionality into a
+ unified set of functions.
+ Say 'y/n' to enable/disable the EDAC scrub feature.
+
+config EDAC_ECS
+ bool "EDAC ECS (Error Check Scrub) feature"
+ help
+ The EDAC ECS feature is optional and is designed to control on-die
+ error check scrub (e.g., DDR5 ECS) in the system. The common sysfs
+ ECS interface abstracts the control of various ECS functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC ECS feature.
+
+config EDAC_MEM_REPAIR
+ bool "EDAC memory repair feature"
+ help
+ The EDAC memory repair feature is optional and is designed to control
+ memory devices with repair features, such as Post Package Repair
+ (PPR) and memory sparing. The common sysfs memory repair interface
+ abstracts the control of various memory repair functionalities into
+ a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC memory repair feature.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ depends on AMD_NODE
imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
@@ -167,7 +196,7 @@ config EDAC_I3200
config EDAC_IE31200
tristate "Intel e312xx"
- depends on PCI && X86
+ depends on PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
E3-1200 based DRAM controllers.
@@ -303,32 +332,6 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platform without a hypervisor
-
-config EDAC_AMD8131
- tristate "AMD8131 HyperTransport PCI-X Tunnel"
- depends on PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8131 HyperTransport PCI-X Tunnel chip.
- Note, add more Kconfig dependency if it's adopted
- on some machine other than Maple.
-
-config EDAC_AMD8111
- tristate "AMD8111 HyperTransport I/O Hub"
- depends on PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8111 HyperTransport I/O Hub chip.
- Note, add more Kconfig dependency if it's adopted
- on some machine other than Maple.
-
config EDAC_CPC925
tristate "IBM CPC925 Memory Controller (PPC970FX)"
depends on PPC64
@@ -564,5 +567,29 @@ config EDAC_VERSAL
Support injecting both correctable and uncorrectable errors
for debugging purposes.
+config EDAC_LOONGSON
+ tristate "Loongson Memory Controller"
+ depends on LOONGARCH && ACPI
+ help
+ Support for error detection and correction on the Loongson
+ family memory controller. This driver reports single-bit
+ errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
+ are compatible.
+
+config EDAC_CORTEX_A72
+ tristate "ARM Cortex A72"
+ depends on ARM64
+ help
+ Support for L1/L2 cache error detection on the ARM Cortex-A72
+ processor. Errors are detected and reported by reading the CPU/L2
+ memory error syndrome registers.
+
+config EDAC_VERSALNET
+ tristate "AMD VersalNET DDR Controller"
+ depends on CDX_CONTROLLER && ARCH_ZYNQMP
+ help
+ Support for single-bit error correction, double-bit error detection
+ and reporting of other system errors from various IP subsystems such
+ as the RPU, NoCs, HNICX and PL on the AMD Versal NET DDR memory
+ controller.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index faf310eec4a6..1c14796410a3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,9 @@ edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
+edac_core-$(CONFIG_EDAC_SCRUB) += scrub.o
+edac_core-$(CONFIG_EDAC_ECS) += ecs.o
+edac_core-$(CONFIG_EDAC_MEM_REPAIR) += mem_repair.o
ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
@@ -62,10 +65,6 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
-obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
-
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -88,3 +87,6 @@ obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
+obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
+obj-$(CONFIG_EDAC_VERSALNET) += versalnet_edac.o
+obj-$(CONFIG_EDAC_CORTEX_A72) += a72_edac.o
diff --git a/drivers/edac/a72_edac.c b/drivers/edac/a72_edac.c
new file mode 100644
index 000000000000..9262d75c3855
--- /dev/null
+++ b/drivers/edac/a72_edac.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortex A72 EDAC L1 and L2 cache error detection
+ *
+ * Copyright (c) 2020 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (c) 2025 Microsoft Corporation, <vijayb@linux.microsoft.com>
+ *
+ * Based on code from:
+ * Copyright (c) 2018, NXP Semiconductor
+ * Author: York Sun <york.sun@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <asm/smp_plat.h>
+
+#include "edac_module.h"
+
+#define DRVNAME "a72-edac"
+
+#define SYS_CPUMERRSR_EL1 sys_reg(3, 1, 15, 2, 2)
+#define SYS_L2MERRSR_EL1 sys_reg(3, 1, 15, 2, 3)
+
+#define CPUMERRSR_EL1_RAMID GENMASK(30, 24)
+#define L2MERRSR_EL1_CPUID_WAY GENMASK(21, 18)
+
+#define CPUMERRSR_EL1_VALID BIT(31)
+#define CPUMERRSR_EL1_FATAL BIT(63)
+#define L2MERRSR_EL1_VALID BIT(31)
+#define L2MERRSR_EL1_FATAL BIT(63)
+
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define TLB_RAM 0x18
+
+#define MESSAGE_SIZE 64
+
+struct mem_err_synd_reg {
+ u64 cpu_mesr;
+ u64 l2_mesr;
+};
+
+static struct cpumask compat_mask;
+
+static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
+ struct mem_err_synd_reg *mesr)
+{
+ u64 cpu_mesr = mesr->cpu_mesr;
+ u64 l2_mesr = mesr->l2_mesr;
+ char msg[MESSAGE_SIZE];
+
+ if (cpu_mesr & CPUMERRSR_EL1_VALID) {
+ const char *str;
+ bool fatal = cpu_mesr & CPUMERRSR_EL1_FATAL;
+
+ switch (FIELD_GET(CPUMERRSR_EL1_RAMID, cpu_mesr)) {
+ case L1_I_TAG_RAM:
+ str = "L1-I Tag RAM";
+ break;
+ case L1_I_DATA_RAM:
+ str = "L1-I Data RAM";
+ break;
+ case L1_D_TAG_RAM:
+ str = "L1-D Tag RAM";
+ break;
+ case L1_D_DATA_RAM:
+ str = "L1-D Data RAM";
+ break;
+ case TLB_RAM:
+ str = "TLB RAM";
+ break;
+ default:
+ str = "Unspecified";
+ break;
+ }
+
+ snprintf(msg, MESSAGE_SIZE, "%s %s error(s) on CPU %d",
+ str, fatal ? "fatal" : "correctable", cpu);
+
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 0, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 0, msg);
+ }
+
+ if (l2_mesr & L2MERRSR_EL1_VALID) {
+ bool fatal = l2_mesr & L2MERRSR_EL1_FATAL;
+
+ snprintf(msg, MESSAGE_SIZE, "L2 %s error(s) on CPU %d CPUID/WAY 0x%lx",
+ fatal ? "fatal" : "correctable", cpu,
+ FIELD_GET(L2MERRSR_EL1_CPUID_WAY, l2_mesr));
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 1, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 1, msg);
+ }
+}
+
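+/*
+ * Runs on the target CPU via smp_call_function_single(): the syndrome
+ * registers are banked per CPU, so they are read and cleared locally,
+ * with isb() ordering the clear before the cross-call returns.
+ */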
+static void read_errors(void *data)
+{
+ struct mem_err_synd_reg *mesr = data;
+
+ mesr->cpu_mesr = read_sysreg_s(SYS_CPUMERRSR_EL1);
+ if (mesr->cpu_mesr & CPUMERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_CPUMERRSR_EL1);
+ isb();
+ }
+ mesr->l2_mesr = read_sysreg_s(SYS_L2MERRSR_EL1);
+ if (mesr->l2_mesr & L2MERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_L2MERRSR_EL1);
+ isb();
+ }
+}
+
+static void a72_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ struct mem_err_synd_reg mesr;
+ int cpu;
+
+ cpus_read_lock();
+ for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
+ smp_call_function_single(cpu, read_errors, &mesr, true);
+ report_errors(edac_ctl, cpu, &mesr);
+ }
+ cpus_read_unlock();
+}
+
+static int a72_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ edac_ctl = edac_device_alloc_ctl_info(0, "cpu",
+ num_possible_cpus(), "L", 2, 1,
+ edac_device_alloc_index());
+ if (!edac_ctl)
+ return -ENOMEM;
+
+ edac_ctl->edac_check = a72_edac_check;
+ edac_ctl->dev = dev;
+ edac_ctl->mod_name = dev_name(dev);
+ edac_ctl->dev_name = dev_name(dev);
+ edac_ctl->ctl_name = DRVNAME;
+ dev_set_drvdata(dev, edac_ctl);
+
+ rc = edac_device_add_device(edac_ctl);
+ if (rc)
+ goto out_dev;
+
+ return 0;
+
+out_dev:
+ edac_device_free_ctl_info(edac_ctl);
+
+ return rc;
+}
+
+static void a72_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl = dev_get_drvdata(&pdev->dev);
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a72" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver a72_edac_driver = {
+ .probe = a72_edac_probe,
+ .remove = a72_edac_remove,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *a72_pdev;
+
+static int __init a72_edac_driver_init(void)
+{
+ int cpu;
+
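+ /* Build the mask of CPUs whose DT node opts in via "edac-enabled". */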
+ for_each_possible_cpu(cpu) {
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+
+ if (np) {
+ if (of_match_node(cortex_arm64_edac_of_match, np) &&
+ of_property_read_bool(np, "edac-enabled")) {
+ cpumask_set_cpu(cpu, &compat_mask);
+ }
+ } else {
+ pr_warn("failed to find device node for CPU %d\n", cpu);
+ }
+ }
+
+ if (cpumask_empty(&compat_mask))
+ return 0;
+
+ a72_pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+ if (IS_ERR(a72_pdev)) {
+ pr_err("failed to register A72 EDAC device\n");
+ return PTR_ERR(a72_pdev);
+ }
+
+ return platform_driver_register(&a72_edac_driver);
+}
+
+static void __exit a72_edac_driver_exit(void)
+{
+ platform_device_unregister(a72_pdev);
+ platform_driver_unregister(&a72_edac_driver);
+}
+
+module_init(a72_edac_driver_init);
+module_exit(a72_edac_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Cortex A72 L1 and L2 cache EDAC driver");
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index fe89f5c4837f..103b2c2eba2a 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
- if (priv->ecc_uecnt_offset)
+ if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
@@ -482,7 +481,7 @@ static const struct dev_pm_ops altr_sdram_pm_ops = {
static struct platform_driver altr_sdram_edac_driver = {
.probe = altr_sdram_probe,
- .remove_new = altr_sdram_remove,
+ .remove = altr_sdram_remove,
.driver = {
.name = "altr_sdram_edac",
#ifdef CONFIG_PM
@@ -816,7 +815,7 @@ static void altr_edac_device_remove(struct platform_device *pdev)
static struct platform_driver altr_edac_device_driver = {
.probe = altr_edac_device_probe,
- .remove_new = altr_edac_device_remove,
+ .remove = altr_edac_device_remove,
.driver = {
.name = "altr_edac_device",
.of_match_table = altr_edac_device_of_match,
@@ -1005,9 +1004,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
- /* Interrupt mode set to every SBERR */
- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
- ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -1749,9 +1745,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
else
- writel(priv->ce_set_mask, set_addr);
+ writew(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
@@ -1781,7 +1777,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
@@ -2127,11 +2123,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
+ /* Set irq mask for DDR SBE to avoid any pending irq before registration */
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
+
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64, &a10_eccmgr_ic_ops,
+ edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 3727e72c8c2e..7248d24c4908 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
+#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ddfbdb66b794..2f6ab783bf20 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -1170,22 +1172,21 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
#define CS_EVEN_PRIMARY BIT(0)
@@ -1208,7 +1209,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
- /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_SECONDARY;
+
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
@@ -1229,12 +1232,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
-static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
- int csrow_nr, int dimm)
+static int calculate_cs_size(u32 mask, unsigned int cs_mode)
{
- u32 msb, weight, num_zero_bits;
- u32 addr_mask_deinterleaved;
- int size = 0;
+ int msb, weight, num_zero_bits;
+ u32 deinterleaved_mask;
+
+ if (!mask)
+ return 0;
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -1247,19 +1251,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
- msb = fls(addr_mask_orig) - 1;
- weight = hweight_long(addr_mask_orig);
+ msb = fls(mask) - 1;
+ weight = hweight_long(mask);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
- addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+ deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
+
+ return (deinterleaved_mask >> 2) + 1;
+}
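+
+/*
+ * Worked example (illustrative numbers, no 3R interleave): for
+ * mask = 0x7ffa, msb = 14 and weight = 13, so one zero bit comes from
+ * interleaving. Deinterleaving yields GENMASK(13, 1) = 0x3ffe and a
+ * chip-select size of (0x3ffe >> 2) + 1 = 4096 kB.
+ */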
+
+static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
+ unsigned int cs_mode, int csrow_nr, int dimm)
+{
+ int size;
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
- edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
- edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+ edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
- size = (addr_mask_deinterleaved >> 2) + 1;
+ size = calculate_cs_size(addr_mask, cs_mode);
+
+ edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
+ size += calculate_cs_size(addr_mask_sec, cs_mode);
/* Return size in MBs. */
return size >> 10;
@@ -1268,8 +1283,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
+ u32 addr_mask = 0, addr_mask_sec = 0;
int cs_mask_nr = csrow_nr;
- u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -1307,13 +1322,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
- /* Asymmetric dual-rank DIMM support. */
- if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
- else
- addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
+ if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
+ addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
+
+ if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
+ addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -1352,14 +1367,14 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
umc_debug_display_dimm_sizes(pvt, i);
}
@@ -1370,11 +1385,11 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -1397,7 +1412,7 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -2026,15 +2041,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -2941,13 +2956,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt)
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_AMD64_SYSCFG, msr_val);
+ rdmsrq(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
@@ -3207,8 +3222,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3352,46 +3366,31 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en)
- return false;
- else
- return true;
+ return ecc_en && nb_mce_en;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
- u8 umc_en_mask = 0, ecc_en_mask = 0;
- u16 nid = pvt->mc_node_id;
struct amd64_umc *umc;
- u8 ecc_en = 0, i;
+ bool ecc_en = false;
+ int i;
+ /* Check whether at least one UMC is enabled: */
for_each_umc(i) {
umc = &pvt->umc[i];
- /* Only check enabled UMCs. */
- if (!(umc->sdp_ctrl & UMC_SDP_INIT))
- continue;
-
- umc_en_mask |= BIT(i);
-
- if (umc->umc_cap_hi & UMC_ECC_ENABLED)
- ecc_en_mask |= BIT(i);
+ if (umc->sdp_ctrl & UMC_SDP_INIT &&
+ umc->umc_cap_hi & UMC_ECC_ENABLED) {
+ ecc_en = true;
+ break;
+ }
}
- /* Check whether at least one UMC is enabled: */
- if (umc_en_mask)
- ecc_en = umc_en_mask == ecc_en_mask;
- else
- edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
-
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
- if (!ecc_en)
- return false;
- else
- return true;
+ return ecc_en;
}
static inline void
@@ -3527,9 +3526,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3894,6 +3894,7 @@ static int per_family_init(struct amd64_pvt *pvt)
break;
case 0x70 ... 0x7f:
pvt->ctl_name = "F19h_M70h";
+ pvt->max_mcs = 4;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x90 ... 0x9f:
@@ -3922,6 +3923,26 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->ctl_name = "F1Ah_M40h";
pvt->flags.zn_regs_v2 = 1;
break;
+ case 0x50 ... 0x57:
+ pvt->ctl_name = "F1Ah_M50h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ pvt->ctl_name = "F1Ah_M90h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xa0 ... 0xaf:
+ pvt->ctl_name = "F1Ah_MA0h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xc0 ... 0xc7:
+ pvt->ctl_name = "F1Ah_MC0h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
}
break;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 17228d07de4c..d70b8a8d0b09 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 12
+#define NUM_CONTROLLERS 16
#define ON true
#define OFF false
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
deleted file mode 100644
index a6d3013d5823..000000000000
--- a/drivers/edac/amd8111_edac.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-#include <asm/io.h>
-
-#include "edac_module.h"
-#include "amd8111_edac.h"
-
-#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8111_EDAC_MOD_STR "amd8111_edac"
-
-#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
-
-enum amd8111_edac_devs {
- LPC_BRIDGE = 0,
-};
-
-enum amd8111_edac_pcis {
- PCI_BRIDGE = 0,
-};
-
-/* Wrapper functions for accessing PCI configuration space */
-static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-
- return ret;
-}
-
-static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
-{
- int ret;
-
- ret = pci_read_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
-{
- int ret;
-
- ret = pci_write_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-/*
- * device-specific methods for amd8111 PCI Bridge Controller
- *
- * Error Reporting and Handling for amd8111 chipset could be found
- * in its datasheet 3.1.2 section, P37
- */
-static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- /* First clear error detection flags on the host interface */
-
- /* Clear SSE/SMA/STA flags in the global status register*/
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Clear CRC and Link Fail flags in HT Link Control reg */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Second clear all fault on the secondary interface */
-
- /* Clear error flags in the memory-base limit reg. */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- /* Last enable error detections */
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Enable System Error reporting in global status register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 |= PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Enable CRC Sync flood packets to HyperTransport Link */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 |= HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Enable SSE reporting etc in Interrupt control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 |= PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Disable System Error reporting */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 &= ~PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Disable CRC flood packets */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 &= ~HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
- struct pci_dev *dev = pci_info->dev;
- u32 val32;
-
- /* Check out PCI Bridge Status and Command Register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK) {
- printk(KERN_INFO "Error(s) in PCI bridge status and command"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
- (val32 & PCI_STSCMD_SSE) != 0,
- (val32 & PCI_STSCMD_RMA) != 0,
- (val32 & PCI_STSCMD_RTA) != 0);
-
- val32 |= PCI_STSCMD_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out HyperTransport Link Control Register */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_LKFAIL) {
- printk(KERN_INFO "Error(s) in hypertransport link control"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "LKFAIL: %d\n",
- (val32 & HT_LINK_LKFAIL) != 0);
-
- val32 |= HT_LINK_LKFAIL;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Interrupt and Bridge Control Register */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
- printk(KERN_INFO "Error(s) in PCI interrupt and bridge control"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "DTSTAT: %d\n",
- (val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
-
- val32 |= PCI_INTBRG_CTRL_DTSTAT;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Bridge Memory Base-Limit Register */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in mem limit register on %s device\n",
- pci_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
- (val32 & MEM_LIMIT_DPE) != 0,
- (val32 & MEM_LIMIT_RSE) != 0,
- (val32 & MEM_LIMIT_RMA) != 0,
- (val32 & MEM_LIMIT_RTA) != 0,
- (val32 & MEM_LIMIT_STA) != 0,
- (val32 & MEM_LIMIT_MDPE) != 0);
-
- val32 |= MEM_LIMIT_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
-
-static struct resource *legacy_io_res;
-static int at_compat_reg_broken;
-#define LEGACY_NR_PORTS 1
-
-/* device-specific methods for amd8111 LPC Bridge device */
-static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
-{
- u8 val8;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
- legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
- AMD8111_EDAC_MOD_STR);
- if (!legacy_io_res)
- printk(KERN_INFO "%s: failed to request legacy I/O region "
- "start %d, len %d\n", __func__,
- REG_AT_COMPAT, LEGACY_NR_PORTS);
- else {
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 == 0xff) { /* buggy port */
- printk(KERN_INFO "%s: port %d is buggy, not supported"
- " by hardware?\n", __func__, REG_AT_COMPAT);
- at_compat_reg_broken = 1;
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
- legacy_io_res = NULL;
- } else {
- u8 out8 = 0;
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0)
- __do_outb(out8, REG_AT_COMPAT);
- }
- }
-
- /* Second clear error flags on LPC bridge */
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK)
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-}
-
-static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
-{
- if (legacy_io_res)
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
-}
-
-static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
-{
- struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u8 val8;
-
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in IO control register on %s device\n",
- dev_info->ctl_name);
- printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
- (val8 & IO_CTRL_1_LPC_ERR) != 0,
- (val8 & IO_CTRL_1_PW2LPC) != 0);
-
- val8 |= IO_CTRL_1_CLEAR_MASK;
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
- }
-
- if (at_compat_reg_broken == 0) {
- u8 out8 = 0;
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0) {
- __do_outb(out8, REG_AT_COMPAT);
- edac_device_handle_ue(edac_dev, 0, 0,
- edac_dev->ctl_name);
- }
- }
-}
-
-/* General devices represented by edac_device_ctl_info */
-static struct amd8111_dev_info amd8111_devices[] = {
- [LPC_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
- .ctl_name = "lpc",
- .init = amd8111_lpc_bridge_init,
- .exit = amd8111_lpc_bridge_exit,
- .check = amd8111_lpc_bridge_check,
- },
- {0},
-};
-
-/* PCI controllers represented by edac_pci_ctl_info */
-static struct amd8111_pci_info amd8111_pcis[] = {
- [PCI_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
- .ctl_name = "AMD8111_PCI_Controller",
- .init = amd8111_pci_bridge_init,
- .exit = amd8111_pci_bridge_exit,
- .check = amd8111_pci_bridge_check,
- },
- {0},
-};
-
-static int amd8111_dev_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
- int ret = -ENODEV;
-
- dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- dev_info->err_dev, NULL);
-
- if (!dev_info->dev) {
- printk(KERN_ERR "EDAC device not found:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- goto err;
- }
-
- if (pci_enable_device(dev_info->dev)) {
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- goto err_dev_put;
- }
-
- /*
- * we do not allocate extra private structure for
- * edac_device_ctl_info, but make use of existing
- * one instead.
- */
- dev_info->edac_idx = edac_device_alloc_index();
- dev_info->edac_dev =
- edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
- NULL, 0, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev) {
- ret = -ENOMEM;
- goto err_dev_put;
- }
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = dev_info->check;
-
- if (dev_info->init)
- dev_info->init(dev_info);
-
- if (edac_device_add_device(dev_info->edac_dev) > 0) {
- printk(KERN_ERR "failed to add edac_dev for %s\n",
- dev_info->ctl_name);
- goto err_edac_free_ctl;
- }
-
- printk(KERN_INFO "added one edac_dev on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
-
- return 0;
-
-err_edac_free_ctl:
- edac_device_free_ctl_info(dev_info->edac_dev);
-err_dev_put:
- pci_dev_put(dev_info->dev);
-err:
- return ret;
-}
-
-static void amd8111_dev_remove(struct pci_dev *dev)
-{
- struct amd8111_dev_info *dev_info;
-
- for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
- if (dev_info->dev->device == dev->device)
- break;
-
- if (!dev_info->err_dev) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_device_del_device(dev_info->edac_dev->dev);
- edac_device_free_ctl_info(dev_info->edac_dev);
- }
-
- if (dev_info->exit)
- dev_info->exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static int amd8111_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
- int ret = -ENODEV;
-
- pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- pci_info->err_dev, NULL);
-
- if (!pci_info->dev) {
- printk(KERN_ERR "EDAC device not found:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- goto err;
- }
-
- if (pci_enable_device(pci_info->dev)) {
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- goto err_dev_put;
- }
-
- /*
- * we do not allocate extra private structure for
- * edac_pci_ctl_info, but make use of existing
- * one instead.
- */
- pci_info->edac_idx = edac_pci_alloc_index();
- pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
- if (!pci_info->edac_dev) {
- ret = -ENOMEM;
- goto err_dev_put;
- }
-
- pci_info->edac_dev->pvt_info = pci_info;
- pci_info->edac_dev->dev = &pci_info->dev->dev;
- pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- pci_info->edac_dev->ctl_name = pci_info->ctl_name;
- pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci_info->edac_dev->edac_check = pci_info->check;
-
- if (pci_info->init)
- pci_info->init(pci_info);
-
- if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
- printk(KERN_ERR "failed to add edac_pci for %s\n",
- pci_info->ctl_name);
- goto err_edac_free_ctl;
- }
-
- printk(KERN_INFO "added one edac_pci on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
-
- return 0;
-
-err_edac_free_ctl:
- edac_pci_free_ctl_info(pci_info->edac_dev);
-err_dev_put:
- pci_dev_put(pci_info->dev);
-err:
- return ret;
-}
-
-static void amd8111_pci_remove(struct pci_dev *dev)
-{
- struct amd8111_pci_info *pci_info;
-
- for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
- if (pci_info->dev->device == dev->device)
- break;
-
- if (!pci_info->err_dev) /* should never happen */
- return;
-
- if (pci_info->edac_dev) {
- edac_pci_del_device(pci_info->edac_dev->dev);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- }
-
- if (pci_info->exit)
- pci_info->exit(pci_info);
-
- pci_dev_put(pci_info->dev);
-}
-
-/* PCI Device ID talbe for general EDAC device */
-static const struct pci_device_id amd8111_edac_dev_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_LPC),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = LPC_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
-
-static struct pci_driver amd8111_edac_dev_driver = {
- .name = "AMD8111_EDAC_DEV",
- .probe = amd8111_dev_probe,
- .remove = amd8111_dev_remove,
- .id_table = amd8111_edac_dev_tbl,
-};
-
-/* PCI Device ID table for EDAC PCI controller */
-static const struct pci_device_id amd8111_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_PCI),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = PCI_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
-
-static struct pci_driver amd8111_edac_pci_driver = {
- .name = "AMD8111_EDAC_PCI",
- .probe = amd8111_pci_probe,
- .remove = amd8111_pci_remove,
- .id_table = amd8111_edac_pci_tbl,
-};
-
-static int __init amd8111_edac_init(void)
-{
- int val;
-
- printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- val = pci_register_driver(&amd8111_edac_dev_driver);
- val |= pci_register_driver(&amd8111_edac_pci_driver);
-
- return val;
-}
-
-static void __exit amd8111_edac_exit(void)
-{
- pci_unregister_driver(&amd8111_edac_pci_driver);
- pci_unregister_driver(&amd8111_edac_dev_driver);
-}
-
-
-module_init(amd8111_edac_init);
-module_exit(amd8111_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
-MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h
deleted file mode 100644
index 200cab1b3e42..000000000000
--- a/drivers/edac/amd8111_edac.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#ifndef _AMD8111_EDAC_H_
-#define _AMD8111_EDAC_H_
-
-/************************************************************
- * PCI Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_PCI_STSCMD 0x04
-enum pci_stscmd_bits {
- PCI_STSCMD_SSE = BIT(30),
- PCI_STSCMD_RMA = BIT(29),
- PCI_STSCMD_RTA = BIT(28),
- PCI_STSCMD_SERREN = BIT(8),
- PCI_STSCMD_CLEAR_MASK = (PCI_STSCMD_SSE |
- PCI_STSCMD_RMA |
- PCI_STSCMD_RTA)
-};
-
-/************************************************************
- * PCI Bridge Memory Base-Limit Register, DevA:0x1c
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_CLEAR_MASK = (MEM_LIMIT_DPE |
- MEM_LIMIT_RSE |
- MEM_LIMIT_RMA |
- MEM_LIMIT_RTA |
- MEM_LIMIT_STA |
- MEM_LIMIT_MDPE)
-};
-
-/************************************************************
- * HyperTransport Link Control Register, DevA:0xc4
- ************************************************************/
-#define REG_HT_LINK 0xc4
-enum ht_link_bits {
- HT_LINK_LKFAIL = BIT(4),
- HT_LINK_CRCFEN = BIT(1),
- HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL)
-};
-
-/************************************************************
- * PCI Bridge Interrupt and Bridge Control, DevA:0x3c
- ************************************************************/
-#define REG_PCI_INTBRG_CTRL 0x3c
-enum pci_intbrg_ctrl_bits {
- PCI_INTBRG_CTRL_DTSERREN = BIT(27),
- PCI_INTBRG_CTRL_DTSTAT = BIT(26),
- PCI_INTBRG_CTRL_MARSP = BIT(21),
- PCI_INTBRG_CTRL_SERREN = BIT(17),
- PCI_INTBRG_CTRL_PEREN = BIT(16),
- PCI_INTBRG_CTRL_CLEAR_MASK = (PCI_INTBRG_CTRL_DTSTAT),
- PCI_INTBRG_CTRL_POLL_MASK = (PCI_INTBRG_CTRL_DTSERREN |
- PCI_INTBRG_CTRL_MARSP |
- PCI_INTBRG_CTRL_SERREN)
-};
-
-/************************************************************
- * I/O Control 1 Register, DevB:0x40
- ************************************************************/
-#define REG_IO_CTRL_1 0x40
-enum io_ctrl_1_bits {
- IO_CTRL_1_NMIONERR = BIT(7),
- IO_CTRL_1_LPC_ERR = BIT(6),
- IO_CTRL_1_PW2LPC = BIT(1),
- IO_CTRL_1_CLEAR_MASK = (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
-};
-
-/************************************************************
- * Legacy I/O Space Registers
- ************************************************************/
-#define REG_AT_COMPAT 0x61
-enum at_compat_bits {
- AT_COMPAT_SERR = BIT(7),
- AT_COMPAT_IOCHK = BIT(6),
- AT_COMPAT_CLRIOCHK = BIT(3),
- AT_COMPAT_CLRSERR = BIT(2),
-};
-
-struct amd8111_dev_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* device index */
- char *ctl_name;
- struct edac_device_ctl_info *edac_dev;
- void (*init)(struct amd8111_dev_info *dev_info);
- void (*exit)(struct amd8111_dev_info *dev_info);
- void (*check)(struct edac_device_ctl_info *edac_dev);
-};
-
-struct amd8111_pci_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* pci index */
- const char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
- void (*init)(struct amd8111_pci_info *dev_info);
- void (*exit)(struct amd8111_pci_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8111_EDAC_H_ */
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
deleted file mode 100644
index 28610ba514f4..000000000000
--- a/drivers/edac/amd8131_edac.c
+++ /dev/null
@@ -1,358 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-
-#include "edac_module.h"
-#include "amd8131_edac.h"
-
-#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8131_EDAC_MOD_STR "amd8131_edac"
-
-/* Wrapper functions for accessing PCI configuration space */
-static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-/* Support up to two AMD8131 chipsets on a platform */
-static struct amd8131_dev_info amd8131_devices[] = {
- {
- .inst = NORTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
- .ctl_name = "AMD8131_PCIX_NORTH_A",
- },
- {
- .inst = NORTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
- .ctl_name = "AMD8131_PCIX_NORTH_B",
- },
- {
- .inst = SOUTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
- .ctl_name = "AMD8131_PCIX_SOUTH_A",
- },
- {
- .inst = SOUTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
- .ctl_name = "AMD8131_PCIX_SOUTH_B",
- },
- {.inst = NO_BRIDGE,},
-};
-
-static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear error detection flags */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Timedout flag */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS)
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Clear CRC Error flag on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A)
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Clear CRC Error flag on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B)
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- /*
- * Then enable all error detections.
- *
- * Setup Discard Timer Sync Flood Enable,
- * System Error Enable and Parity Error Enable.
- */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Enable overall SERR Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 |= STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Setup CRC Flood Enable for link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Setup CRC Flood Enable for link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* Disable SERR, PERR and DTSE Error detection */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Disable overall System Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 &= ~STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Disable CRC Sync Flood on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Disable CRC Sync Flood on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u32 val32;
-
- /* Check PCI-X Bridge Memory Base-Limit Register for errors */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK) {
- printk(KERN_INFO "Error(s) in mem limit register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
- val32 & MEM_LIMIT_DPE,
- val32 & MEM_LIMIT_RSE,
- val32 & MEM_LIMIT_RMA,
- val32 & MEM_LIMIT_RTA,
- val32 & MEM_LIMIT_STA,
- val32 & MEM_LIMIT_MDPE);
-
- val32 |= MEM_LIMIT_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if Discard Timer timed out */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS) {
- printk(KERN_INFO "Error(s) in interrupt and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
-
- val32 |= INT_CTLR_DTS;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
-
- val32 |= LNK_CTRL_CRCERR_A;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
-
- val32 |= LNK_CTRL_CRCERR_B;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
-
-static struct amd8131_info amd8131_chipset = {
- .err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
- .devices = amd8131_devices,
- .init = amd8131_pcix_init,
- .exit = amd8131_pcix_exit,
- .check = amd8131_pcix_check,
-};
-
-/*
- * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
- * so amd8131_probe() is called by the kernel 4 times, each time with a
- * different pci_dev address.
- */
-static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return -ENODEV;
-
- /*
- * We can't call pci_get_device() as we usually do because there
- * are 4 of them; use pci_dev_get() instead.
- */
- dev_info->dev = pci_dev_get(dev);
-
- if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
- return -ENODEV;
- }
-
- /*
- * We do not allocate an extra private structure for
- * edac_pci_ctl_info, but make use of the existing
- * one instead.
- */
- dev_info->edac_idx = edac_pci_alloc_index();
- dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
- if (!dev_info->edac_dev)
- return -ENOMEM;
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = amd8131_chipset.check;
-
- if (amd8131_chipset.init)
- amd8131_chipset.init(dev_info);
-
- if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
- printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
- dev_info->ctl_name);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
- }
-
- printk(KERN_INFO "added one device on AMD8131 "
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
-
- return 0;
-}
-
-static void amd8131_remove(struct pci_dev *dev)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_pci_del_device(dev_info->edac_dev->dev);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- }
-
- if (amd8131_chipset.exit)
- amd8131_chipset.exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static const struct pci_device_id amd8131_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8131_BRIDGE),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = 0,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
-
-static struct pci_driver amd8131_edac_driver = {
- .name = AMD8131_EDAC_MOD_STR,
- .probe = amd8131_probe,
- .remove = amd8131_remove,
- .id_table = amd8131_edac_pci_tbl,
-};
-
-static int __init amd8131_edac_init(void)
-{
- printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- return pci_register_driver(&amd8131_edac_driver);
-}
-
-static void __exit amd8131_edac_exit(void)
-{
- pci_unregister_driver(&amd8131_edac_driver);
-}
-
-module_init(amd8131_edac_init);
-module_exit(amd8131_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
-MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
deleted file mode 100644
index 5f362abdaf12..000000000000
--- a/drivers/edac/amd8131_edac.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#ifndef _AMD8131_EDAC_H_
-#define _AMD8131_EDAC_H_
-
-#define DEVFN_PCIX_BRIDGE_NORTH_A 8
-#define DEVFN_PCIX_BRIDGE_NORTH_B 16
-#define DEVFN_PCIX_BRIDGE_SOUTH_A 24
-#define DEVFN_PCIX_BRIDGE_SOUTH_B 32
-
-/************************************************************
- * PCI-X Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_STS_CMD 0x04
-enum sts_cmd_bits {
- STS_CMD_SSE = BIT(30),
- STS_CMD_SERREN = BIT(8)
-};
-
-/************************************************************
- * PCI-X Bridge Interrupt and Bridge Control Register,
- ************************************************************/
-#define REG_INT_CTLR 0x3c
-enum int_ctlr_bits {
- INT_CTLR_DTSE = BIT(27),
- INT_CTLR_DTS = BIT(26),
- INT_CTLR_SERR = BIT(17),
- INT_CTLR_PERR = BIT(16)
-};
-
-/************************************************************
- * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
- MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
-};
-
-/************************************************************
- * Link Configuration And Control Register, side A
- ************************************************************/
-#define REG_LNK_CTRL_A 0xc4
-
-/************************************************************
- * Link Configuration And Control Register, side B
- ************************************************************/
-#define REG_LNK_CTRL_B 0xc8
-
-enum lnk_ctrl_bits {
- LNK_CTRL_CRCERR_A = BIT(9),
- LNK_CTRL_CRCERR_B = BIT(8),
- LNK_CTRL_CRCFEN = BIT(1)
-};
-
-enum pcix_bridge_inst {
- NORTH_A = 0,
- NORTH_B = 1,
- SOUTH_A = 2,
- SOUTH_B = 3,
- NO_BRIDGE = 4
-};
-
-struct amd8131_dev_info {
- int devfn;
- enum pcix_bridge_inst inst;
- struct pci_dev *dev;
- int edac_idx; /* pci device index */
- char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
-};
-
-/*
- * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
- * four PCIX Bridges on ATCA-6101 altogether.
- *
- * These PCIX Bridges share the same PCI Device ID and are all of
- * Function Zero; they can be discriminated by their pci_dev->devfn.
- * They share the same set of init/check/exit methods, and their
- * private structures are collected in the devices[] array.
- */
-struct amd8131_info {
- u16 err_dev; /* PCI Device ID for AMD8131 APIC*/
- struct amd8131_dev_info *devices;
- void (*init)(struct amd8131_dev_info *dev_info);
- void (*exit)(struct amd8131_dev_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8131_EDAC_H_ */
-
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index 589bc81f1249..d64248fcf4c0 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -364,7 +364,7 @@ static void axp_mc_remove(struct platform_device *pdev)
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
- .remove_new = axp_mc_remove,
+ .remove = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
@@ -579,7 +579,7 @@ static void aurora_l2_remove(struct platform_device *pdev)
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
- .remove_new = aurora_l2_remove,
+ .remove = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
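Note: the .remove_new -> .remove renames in this and the following hunks are the tail
end of the tree-wide conversion of struct platform_driver's remove callback to return
void; .remove_new was the transitional name while the old int-returning .remove still
existed. A minimal sketch with hypothetical foo_* names:

	static void foo_remove(struct platform_device *pdev)
	{
		/* teardown only; there is nothing useful to return to the core */
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,	/* was .remove_new during the transition */
		.driver	= { .name = "foo" },
	};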
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 157a480eb761..dadb8acbee3d 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -387,7 +387,7 @@ static struct platform_driver aspeed_driver = {
.of_match_table = aspeed_of_match
},
.probe = aspeed_probe,
- .remove_new = aspeed_remove
+ .remove = aspeed_remove
};
module_platform_driver(aspeed_driver);
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index 5b3164560648..ae3bb7afa103 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -47,13 +47,22 @@
#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
#define MLXBF_EDAC_ERROR_GRAIN 8
+#define MLXBF_WRITE_REG_32 (0x82000009)
+#define MLXBF_READ_REG_32 (0x8200000A)
+#define MLXBF_SIP_SVC_VERSION (0x8200ff03)
+
+#define MLXBF_SMCCC_ACCESS_VIOLATION (-4)
+
+#define MLXBF_SVC_REQ_MAJOR 0
+#define MLXBF_SVC_REQ_MINOR 3
+
/*
- * Request MLNX_SIP_GET_DIMM_INFO
+ * Request MLXBF_SIP_GET_DIMM_INFO
*
* Retrieve information about DIMM on a certain slot.
*
* Call register usage:
- * a0: MLNX_SIP_GET_DIMM_INFO
+ * a0: MLXBF_SIP_GET_DIMM_INFO
* a1: (Memory controller index) << 16 | (Dimm index in memory controller)
* a2-7: not used.
*
@@ -61,7 +70,7 @@
* a0: MLXBF_DIMM_INFO defined below describing the DIMM.
* a1-3: not used.
*/
-#define MLNX_SIP_GET_DIMM_INFO 0x82000008
+#define MLXBF_SIP_GET_DIMM_INFO 0x82000008
/* Format for the SMC response about the memory information */
#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
@@ -72,9 +81,15 @@
#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
struct bluefield_edac_priv {
+ /* pointer to device structure */
+ struct device *dev;
int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
void __iomem *emi_base;
int dimm_per_mc;
+ /* access to secure regs supported */
+ bool svc_sreg_support;
+ /* SMC table# for secure regs access */
+ u32 sreg_tbl;
};
static u64 smc_call1(u64 smc_op, u64 smc_arg)
@@ -86,6 +101,71 @@ static u64 smc_call1(u64 smc_op, u64 smc_arg)
return res.a0;
}
+static int secure_readl(void __iomem *addr, u32 *result, u32 sreg_tbl)
+{
+ struct arm_smccc_res res;
+ int status;
+
+ arm_smccc_smc(MLXBF_READ_REG_32, sreg_tbl, (uintptr_t)addr,
+ 0, 0, 0, 0, 0, &res);
+
+ status = res.a0;
+
+ if (status == SMCCC_RET_NOT_SUPPORTED ||
+ status == MLXBF_SMCCC_ACCESS_VIOLATION)
+ return -1;
+
+ *result = (u32)res.a1;
+ return 0;
+}
+
+static int secure_writel(void __iomem *addr, u32 data, u32 sreg_tbl)
+{
+ struct arm_smccc_res res;
+ int status;
+
+ arm_smccc_smc(MLXBF_WRITE_REG_32, sreg_tbl, data, (uintptr_t)addr,
+ 0, 0, 0, 0, &res);
+
+ status = res.a0;
+
+ if (status == SMCCC_RET_NOT_SUPPORTED ||
+ status == MLXBF_SMCCC_ACCESS_VIOLATION)
+ return -1;
+ else
+ return 0;
+}
+
+static int bluefield_edac_readl(struct bluefield_edac_priv *priv, u32 offset, u32 *result)
+{
+ void __iomem *addr;
+ int err = 0;
+
+ addr = priv->emi_base + offset;
+
+ if (priv->svc_sreg_support)
+ err = secure_readl(addr, result, priv->sreg_tbl);
+ else
+ *result = readl(addr);
+
+ return err;
+}
+
+static int bluefield_edac_writel(struct bluefield_edac_priv *priv, u32 offset, u32 data)
+{
+ void __iomem *addr;
+ int err = 0;
+
+ addr = priv->emi_base + offset;
+
+ if (priv->svc_sreg_support)
+ err = secure_writel(addr, data, priv->sreg_tbl);
+ else
+ writel(data, addr);
+
+ return err;
+}
+
/*
* Gather the ECC information from the External Memory Interface registers
* and report it to the edac handler.
@@ -99,7 +179,7 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
enum hw_event_mc_err_type ecc_type;
u64 ecc_dimm_addr;
- int ecc_dimm;
+ int ecc_dimm, err;
ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
HW_EVENT_ERR_UNCORRECTED;
@@ -109,14 +189,21 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
* registers with information about the last ECC error occurrence.
*/
ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
- writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+ err = bluefield_edac_writel(priv, MLXBF_ECC_LATCH_SEL, ecc_latch_select);
+ if (err)
+ dev_err(priv->dev, "ECC latch select write failed.\n");
/*
* Verify that the ECC reported info in the registers is of the
* same type as the one asked to report. If not, just report the
* error without the detailed information.
*/
- dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+ err = bluefield_edac_readl(priv, MLXBF_SYNDROM, &dram_syndrom);
+ if (err) {
+ dev_err(priv->dev, "DRAM syndrom read failed.\n");
+ return;
+ }
+
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
@@ -127,13 +214,27 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
return;
}
- dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+ err = bluefield_edac_readl(priv, MLXBF_ADD_INFO, &dram_additional_info);
+ if (err) {
+ dev_err(priv->dev, "DRAM additional info read failed.\n");
+ return;
+ }
+
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
- edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
- edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+ err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_0, &edea0);
+ if (err) {
+ dev_err(priv->dev, "Error addr 0 read failed.\n");
+ return;
+ }
+
+ err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_1, &edea1);
+ if (err) {
+ dev_err(priv->dev, "Error addr 1 read failed.\n");
+ return;
+ }
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
@@ -147,6 +248,7 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+ int err;
/*
* The memory controller might not be initialized by the firmware
@@ -155,7 +257,12 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
if (mci->edac_cap == EDAC_FLAG_NONE)
return;
- ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+ err = bluefield_edac_readl(priv, MLXBF_ECC_CNT, &ecc_count);
+ if (err) {
+ dev_err(priv->dev, "ECC count read failed.\n");
+ return;
+ }
+
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
@@ -172,15 +279,18 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
}
/* Write to clear reported errors. */
- if (ecc_count)
- writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+ if (ecc_count) {
+ err = bluefield_edac_writel(priv, MLXBF_ECC_ERR, ecc_error);
+ if (err)
+ dev_err(priv->dev, "ECC Error write failed.\n");
+ }
}
/* Initialize the DIMMs information for the given memory controller. */
static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
- int mem_ctrl_idx = mci->mc_idx;
+ u64 mem_ctrl_idx = mci->mc_idx;
struct dimm_info *dimm;
u64 smc_info, smc_arg;
int is_empty = 1, i;
@@ -189,7 +299,7 @@ static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
dimm = mci->dimms[i];
smc_arg = mem_ctrl_idx << 16 | i;
- smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+ smc_info = smc_call1(MLXBF_SIP_GET_DIMM_INFO, smc_arg);
if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
dimm->mtype = MEM_EMPTY;
@@ -244,6 +354,7 @@ static int bluefield_edac_mc_probe(struct platform_device *pdev)
struct bluefield_edac_priv *priv;
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[1];
+ struct arm_smccc_res res;
struct mem_ctl_info *mci;
struct resource *emi_res;
unsigned int mc_idx, dimm_count;
@@ -279,13 +390,43 @@ static int bluefield_edac_mc_probe(struct platform_device *pdev)
return -ENOMEM;
priv = mci->pvt_info;
+ priv->dev = dev;
+
+ /*
+ * The "sec_reg_block" property in the ACPI table determines the method
+ * the driver uses to access the EMI registers:
+ * a) property is not present - directly access registers via readl/writel
+ * b) property is present - indirectly access registers via SMC calls
+ * (assuming required Silicon Provider service version found)
+ */
+ if (device_property_read_u32(dev, "sec_reg_block", &priv->sreg_tbl)) {
+ priv->svc_sreg_support = false;
+ } else {
+ /*
+ * Check for minimum required Arm Silicon Provider (SiP) service
+ * version, ensuring support of required SMC function IDs.
+ */
+ arm_smccc_smc(MLXBF_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == MLXBF_SVC_REQ_MAJOR &&
+ res.a1 >= MLXBF_SVC_REQ_MINOR) {
+ priv->svc_sreg_support = true;
+ } else {
+ dev_err(dev, "Required SMCs are not supported.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
priv->dimm_per_mc = dimm_count;
- priv->emi_base = devm_ioremap_resource(dev, emi_res);
- if (IS_ERR(priv->emi_base)) {
- dev_err(dev, "failed to map EMI IO resource\n");
- ret = PTR_ERR(priv->emi_base);
- goto err;
+ if (!priv->svc_sreg_support) {
+ priv->emi_base = devm_ioremap_resource(dev, emi_res);
+ if (IS_ERR(priv->emi_base)) {
+ dev_err(dev, "failed to map EMI IO resource\n");
+ ret = PTR_ERR(priv->emi_base);
+ goto err;
+ }
+ } else {
+ priv->emi_base = (void __iomem *)emi_res->start;
}
mci->pdev = dev;
@@ -320,7 +461,6 @@ err:
edac_mc_free(mci);
return ret;
-
}
static void bluefield_edac_mc_remove(struct platform_device *pdev)
@@ -344,7 +484,7 @@ static struct platform_driver bluefield_edac_mc_driver = {
.acpi_match_table = bluefield_mc_acpi_ids,
},
.probe = bluefield_edac_mc_probe,
- .remove_new = bluefield_edac_mc_remove,
+ .remove = bluefield_edac_mc_remove,
};
module_platform_driver(bluefield_edac_mc_driver);
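Note: on BlueField parts where the EMI registers are only reachable through secure
firmware, the hunks above route every access through an Arm SMCCC SiP call instead of
readl()/writel(). A condensed sketch of the read side, assuming the calling convention
shown in the diff (a1 = secure register table, a2 = register address, result returned
in res.a1):

	#include <linux/arm-smccc.h>

	/* Illustrative only; mirrors secure_readl() above. */
	static int sip_reg_read(u32 fid, u32 sreg_tbl, unsigned long addr, u32 *val)
	{
		struct arm_smccc_res res;

		arm_smccc_smc(fid, sreg_tbl, addr, 0, 0, 0, 0, 0, &res);
		if ((long)res.a0 < 0)	/* firmware refused the access */
			return -EIO;

		*val = (u32)res.a1;
		return 0;
	}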
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
deleted file mode 100644
index 2000f66fbf5c..000000000000
--- a/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_module.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
-	/* Address decoding is likely a bit bogus; double-check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
-	/* Address decoding is likely a bit bogus; double-check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = mci->csrows[0];
- struct dimm_info *dimm;
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
- int j;
- u32 nr_pages;
-
- for_each_node_by_name(np, "memory") {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + nr_pages - 1;
-
- for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j]->dimm;
- dimm->mtype = MEM_XDR;
- dimm->edac_mode = EDAC_SECDED;
- dimm->nr_pages = nr_pages / csrow->nr_channels;
- }
- dev_dbg(mci->pdev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, nr_pages);
- break;
- }
- of_node_put(np);
-}
-
-static int cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask, num_chans;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- num_chans = chanmask == 3 ? 2 : 1;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = num_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
- sizeof(struct cell_edac_priv));
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static void cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- },
- .probe = cell_edac_probe,
- .remove_new = cell_edac_remove,
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index eb702bc3aa29..9c9e4369c041 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -1027,7 +1027,7 @@ static void cpc925_remove(struct platform_device *pdev)
static struct platform_driver cpc925_edac_driver = {
.probe = cpc925_probe,
- .remove_new = cpc925_remove,
+ .remove = cpc925_remove,
.driver = {
.name = "cpc925_edac",
}
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 4804332d9946..8195fc9c9354 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/string_choices.h>
+
#include "edac_module.h"
static struct dentry *edac_debugfs;
@@ -22,7 +25,7 @@ static ssize_t edac_fake_inject_write(struct file *file,
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
- errcount > 1 ? "s" : "",
+ str_plural(errcount),
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
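Note: str_plural() comes from the newly included <linux/string_choices.h> and replaces
the open-coded ternary; to the best of my knowledge the helper is simply:

	static inline const char *str_plural(size_t num)
	{
		return num == 1 ? "" : "s";
	}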
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index 5e52d31db3b8..64a4d0a07032 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -640,7 +640,7 @@ static struct platform_driver dmc520_edac_driver = {
},
.probe = dmc520_edac_probe,
- .remove_new = dmc520_edac_remove
+ .remove = dmc520_edac_remove
};
module_platform_driver(dmc520_edac_driver);
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
new file mode 100644
index 000000000000..51c451c7f0f0
--- /dev/null
+++ b/drivers/edac/ecs.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic ECS driver is designed to support control of on-die error
+ * check scrub (e.g., DDR5 ECS). The common sysfs ECS interface abstracts
+ * the control of various ECS functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+#define EDAC_ECS_FRU_NAME "ecs_fru"
+
+enum edac_ecs_attributes {
+ ECS_LOG_ENTRY_TYPE,
+ ECS_MODE,
+ ECS_RESET,
+ ECS_THRESHOLD,
+ ECS_MAX_ATTRS
+};
+
+struct edac_ecs_dev_attr {
+ struct device_attribute dev_attr;
+ int fru_id;
+};
+
+struct edac_ecs_fru_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_ecs_dev_attr dev_attr[ECS_MAX_ATTRS];
+ struct attribute *ecs_attrs[ECS_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+struct edac_ecs_context {
+ u16 num_media_frus;
+ struct edac_ecs_fru_context *fru_ctxs;
+};
+
+#define TO_ECS_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_ecs_dev_attr, dev_attr)
+
+#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_ECS_ATTR_SHOW(log_entry_type, get_log_entry_type, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(mode, get_mode, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(threshold, get_threshold, u32, "%u\n")
+
+#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_ECS_ATTR_STORE(log_entry_type, set_log_entry_type, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(mode, set_mode, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(reset, reset, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(threshold, set_threshold, unsigned long, kstrtoul)
+
+static umode_t ecs_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops;
+
+ switch (attr_id) {
+ case ECS_LOG_ENTRY_TYPE:
+ if (ops->get_log_entry_type) {
+ if (ops->set_log_entry_type)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_MODE:
+ if (ops->get_mode) {
+ if (ops->set_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_RESET:
+ if (ops->reset)
+ return a->mode;
+ break;
+ case ECS_THRESHOLD:
+ if (ops->get_threshold) {
+ if (ops->set_threshold)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_ECS_ATTR_RO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_WO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_RW(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .fru_id = _fru_id })
+
+static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{
+ struct edac_ecs_context *ecs_ctx;
+ u32 fru;
+
+ ecs_ctx = devm_kzalloc(ecs_dev, sizeof(*ecs_ctx), GFP_KERNEL);
+ if (!ecs_ctx)
+ return -ENOMEM;
+
+ ecs_ctx->num_media_frus = num_media_frus;
+ ecs_ctx->fru_ctxs = devm_kcalloc(ecs_dev, num_media_frus,
+ sizeof(*ecs_ctx->fru_ctxs),
+ GFP_KERNEL);
+ if (!ecs_ctx->fru_ctxs)
+ return -ENOMEM;
+
+ for (fru = 0; fru < num_media_frus; fru++) {
+ struct edac_ecs_fru_context *fru_ctx = &ecs_ctx->fru_ctxs[fru];
+ struct attribute_group *group = &fru_ctx->group;
+ int i;
+
+ fru_ctx->dev_attr[ECS_LOG_ENTRY_TYPE] = EDAC_ECS_ATTR_RW(log_entry_type, fru);
+ fru_ctx->dev_attr[ECS_MODE] = EDAC_ECS_ATTR_RW(mode, fru);
+ fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
+ fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
+
+ for (i = 0; i < ECS_MAX_ATTRS; i++) {
+ sysfs_attr_init(&fru_ctx->dev_attr[i].dev_attr.attr);
+ fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
+ group->name = fru_ctx->name;
+ group->attrs = fru_ctx->ecs_attrs;
+ group->is_visible = ecs_attr_visible;
+
+ attr_groups[fru] = group;
+ }
+
+ return 0;
+}
+
+/**
+ * edac_ecs_get_desc - get EDAC ECS descriptors
+ * @ecs_dev: client device, supports ECS feature
+ * @attr_groups: pointer to attribute group container
+ * @num_media_frus: number of media FRUs in the device
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups, u16 num_media_frus)
+{
+ if (!ecs_dev || !attr_groups || !num_media_frus)
+ return -EINVAL;
+
+ return ecs_create_desc(ecs_dev, attr_groups, num_media_frus);
+}
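Note: each EDAC_ECS_ATTR_SHOW()/EDAC_ECS_ATTR_STORE() invocation above stamps out one
sysfs accessor that recovers the per-FRU id from the wrapping edac_ecs_dev_attr and
dispatches to the parent driver's ops. For example, EDAC_ECS_ATTR_SHOW(mode, get_mode,
u32, "%u\n") expands to roughly:

	static ssize_t mode_show(struct device *ras_feat_dev,
				 struct device_attribute *attr, char *buf)
	{
		struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr);
		struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
		u32 data;
		int ret;

		ret = ctx->ecs.ecs_ops->get_mode(ras_feat_dev->parent,
						 ctx->ecs.private,
						 dev_attr->fru_id, &data);
		if (ret)
			return ret;

		return sysfs_emit(buf, "%u\n", data);
	}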
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 621dc2a5d034..0734909b08a4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -570,3 +570,188 @@ void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
+
+static void edac_dev_release(struct device *dev)
+{
+ struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
+
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+ kfree(ctx->dev.groups);
+ kfree(ctx);
+}
+
+static const struct device_type edac_dev_type = {
+ .name = "edac_dev",
+ .release = edac_dev_release,
+};
+
+static void edac_dev_unreg(void *data)
+{
+ device_unregister(data);
+}
+
+/**
+ * edac_dev_register - register device for RAS features with EDAC
+ * @parent: parent device.
+ * @name: name for the folder in the /sys/bus/edac/devices/,
+ * which is derived from the parent device.
+ *        e.g. /sys/bus/edac/devices/cxl_mem0/
+ * @private: parent driver's data to store in the context if any.
+ * @num_features: number of RAS features to register.
+ * @ras_features: list of RAS features to register.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ *
+ */
+int edac_dev_register(struct device *parent, char *name,
+ void *private, int num_features,
+ const struct edac_dev_feature *ras_features)
+{
+ const struct attribute_group **ras_attr_groups;
+ struct edac_dev_data *dev_data;
+ struct edac_dev_feat_ctx *ctx;
+ int mem_repair_cnt = 0;
+ int attr_gcnt = 0;
+ int ret = -ENOMEM;
+ int scrub_cnt = 0;
+ int feat;
+
+ if (!parent || !name || !num_features || !ras_features)
+ return -EINVAL;
+
+ /* Double parse to make space for attributes */
+ for (feat = 0; feat < num_features; feat++) {
+ switch (ras_features[feat].ft_type) {
+ case RAS_FEAT_SCRUB:
+ attr_gcnt++;
+ scrub_cnt++;
+ break;
+ case RAS_FEAT_ECS:
+ attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ attr_gcnt++;
+ mem_repair_cnt++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ras_attr_groups = kcalloc(attr_gcnt + 1, sizeof(*ras_attr_groups), GFP_KERNEL);
+ if (!ras_attr_groups)
+ goto ctx_free;
+
+ if (scrub_cnt) {
+ ctx->scrub = kcalloc(scrub_cnt, sizeof(*ctx->scrub), GFP_KERNEL);
+ if (!ctx->scrub)
+ goto groups_free;
+ }
+
+ if (mem_repair_cnt) {
+ ctx->mem_repair = kcalloc(mem_repair_cnt, sizeof(*ctx->mem_repair), GFP_KERNEL);
+ if (!ctx->mem_repair)
+ goto data_mem_free;
+ }
+
+ attr_gcnt = 0;
+ scrub_cnt = 0;
+ mem_repair_cnt = 0;
+ for (feat = 0; feat < num_features; feat++, ras_features++) {
+ switch (ras_features->ft_type) {
+ case RAS_FEAT_SCRUB:
+ if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->scrub[scrub_cnt];
+ dev_data->instance = scrub_cnt;
+ dev_data->scrub_ops = ras_features->scrub_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ scrub_cnt++;
+ attr_gcnt++;
+ break;
+ case RAS_FEAT_ECS:
+ if (!ras_features->ecs_ops) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->ecs;
+ dev_data->ecs_ops = ras_features->ecs_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->ecs_info.num_media_frus);
+ if (ret)
+ goto data_mem_free;
+
+ attr_gcnt += ras_features->ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ if (!ras_features->mem_repair_ops ||
+ mem_repair_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->mem_repair[mem_repair_cnt];
+ dev_data->instance = mem_repair_cnt;
+ dev_data->mem_repair_ops = ras_features->mem_repair_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ mem_repair_cnt++;
+ attr_gcnt++;
+ break;
+ default:
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+ }
+
+ ctx->dev.parent = parent;
+ ctx->dev.bus = edac_get_sysfs_subsys();
+ ctx->dev.type = &edac_dev_type;
+ ctx->dev.groups = ras_attr_groups;
+ ctx->private = private;
+ dev_set_drvdata(&ctx->dev, ctx);
+
+ ret = dev_set_name(&ctx->dev, "%s", name);
+ if (ret)
+ goto data_mem_free;
+
+ ret = device_register(&ctx->dev);
+ if (ret) {
+ put_device(&ctx->dev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
+
+data_mem_free:
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+groups_free:
+ kfree(ras_attr_groups);
+ctx_free:
+ kfree(ctx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(edac_dev_register);
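Note: a parent driver registers its RAS features with edac_dev_register() in one call.
A minimal hypothetical scrub-only caller (the my_* names are placeholders, not part of
this series):

	static const struct edac_scrub_ops my_scrub_ops = {
		/* get/set hooks supplied by the parent driver */
	};

	static const struct edac_dev_feature my_features[] = {
		{
			.ft_type   = RAS_FEAT_SCRUB,
			.instance  = 0,
			.scrub_ops = &my_scrub_ops,
			.ctx       = NULL,	/* or driver private data */
		},
	};

	ret = edac_dev_register(parent, "my_dev0", NULL,
				ARRAY_SIZE(my_features), my_features);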
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d6eed727b0cd..0959320fe51c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,7 +214,7 @@ static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
unsigned int row, chn;
/*
- * Alocate and fill the csrow/channels structs
+ * Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4200aec04831..8689631f1905 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -305,6 +305,14 @@ DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 10);
DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 11);
+DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 12);
+DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 13);
+DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 14);
+DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 15);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
@@ -320,6 +328,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch9_dimm_label.attr.attr,
&dev_attr_legacy_ch10_dimm_label.attr.attr,
&dev_attr_legacy_ch11_dimm_label.attr.attr,
+ &dev_attr_legacy_ch12_dimm_label.attr.attr,
+ &dev_attr_legacy_ch13_dimm_label.attr.attr,
+ &dev_attr_legacy_ch14_dimm_label.attr.attr,
+ &dev_attr_legacy_ch15_dimm_label.attr.attr,
NULL
};
@@ -348,6 +360,14 @@ DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 10);
DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 11);
+DEVICE_CHANNEL(ch12_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 12);
+DEVICE_CHANNEL(ch13_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 13);
+DEVICE_CHANNEL(ch14_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 14);
+DEVICE_CHANNEL(ch15_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 15);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
@@ -363,6 +383,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch9_ce_count.attr.attr,
&dev_attr_legacy_ch10_ce_count.attr.attr,
&dev_attr_legacy_ch11_ce_count.attr.attr,
+ &dev_attr_legacy_ch12_ce_count.attr.attr,
+ &dev_attr_legacy_ch13_ce_count.attr.attr,
+ &dev_attr_legacy_ch14_ce_count.attr.attr,
+ &dev_attr_legacy_ch15_ce_count.attr.attr,
NULL
};
@@ -422,7 +446,7 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
return nr_pages;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
@@ -449,7 +473,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return 0;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
@@ -636,7 +660,7 @@ static void dimm_release(struct device *dev)
*/
}
-/* Create a DIMM object under specifed memory controller device */
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index d148d262d0d4..e4eaec0aa81d 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -31,18 +31,30 @@
static int edac_mc_idx;
-static u32 orig_ddr_err_disable;
-static u32 orig_ddr_err_sbe;
-static bool little_endian;
+static inline void __iomem *ddr_reg_addr(struct fsl_mc_pdata *pdata, unsigned int off)
+{
+ if (pdata->flag == TYPE_IMX9 && off >= FSL_MC_DATA_ERR_INJECT_HI && off <= FSL_MC_ERR_SBE)
+ return pdata->inject_vbase + off - FSL_MC_DATA_ERR_INJECT_HI
+ + IMX9_MC_DATA_ERR_INJECT_OFF;
+
+ if (pdata->flag == TYPE_IMX9 && off >= IMX9_MC_ERR_EN)
+ return pdata->inject_vbase + off - IMX9_MC_ERR_EN;
-static inline u32 ddr_in32(void __iomem *addr)
+ return pdata->mc_vbase + off;
+}
+
+static inline u32 ddr_in32(struct fsl_mc_pdata *pdata, unsigned int off)
{
- return little_endian ? ioread32(addr) : ioread32be(addr);
+ void __iomem *addr = ddr_reg_addr(pdata, off);
+
+ return pdata->little_endian ? ioread32(addr) : ioread32be(addr);
}
-static inline void ddr_out32(void __iomem *addr, u32 value)
+static inline void ddr_out32(struct fsl_mc_pdata *pdata, unsigned int off, u32 value)
{
- if (little_endian)
+ void __iomem *addr = ddr_reg_addr(pdata, off);
+
+ if (pdata->little_endian)
iowrite32(value, addr);
else
iowrite32be(value, addr);
@@ -60,7 +72,7 @@ static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
+ ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_HI));
}
static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
@@ -70,7 +82,7 @@ static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
+ ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_LO));
}
static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
@@ -80,7 +92,7 @@ static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
+ ddr_in32(pdata, FSL_MC_ECC_ERR_INJECT));
}
static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
@@ -97,7 +109,7 @@ static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
+ ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_HI, val);
return count;
}
return 0;
@@ -117,7 +129,7 @@ static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
+ ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_LO, val);
return count;
}
return 0;
@@ -137,7 +149,7 @@ static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
+ ddr_out32(pdata, FSL_MC_ECC_ERR_INJECT, val);
return count;
}
return 0;
@@ -286,7 +298,7 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
int bad_data_bit;
int bad_ecc_bit;
- err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
if (!err_detect)
return;
@@ -295,14 +307,14 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
/* no more processing if not ECC bit errors */
if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
return;
}
- syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
+ syndrome = ddr_in32(pdata, FSL_MC_CAPTURE_ECC);
/* Mask off appropriate bits of syndrome based on bus width */
- bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
+ bus_width = (ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG) &
DSC_DBW_MASK) ? 32 : 64;
if (bus_width == 64)
syndrome &= 0xff;
@@ -310,8 +322,8 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
syndrome &= 0xffff;
err_addr = make64(
- ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
- ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
+ ddr_in32(pdata, FSL_MC_CAPTURE_EXT_ADDRESS),
+ ddr_in32(pdata, FSL_MC_CAPTURE_ADDRESS));
pfn = err_addr >> PAGE_SHIFT;
for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
@@ -320,29 +332,33 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
break;
}
- cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
- cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
+ cap_high = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_HI);
+ cap_low = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_LO);
/*
* Analyze single-bit errors on 64-bit wide buses
* TODO: Add support for 32-bit wide buses
*/
if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
+ u64 cap = (u64)cap_high << 32 | cap_low;
+ u32 s = syndrome;
+
sbe_ecc_decode(cap_high, cap_low, syndrome,
&bad_data_bit, &bad_ecc_bit);
- if (bad_data_bit != -1)
- fsl_mc_printk(mci, KERN_ERR,
- "Faulty Data bit: %d\n", bad_data_bit);
- if (bad_ecc_bit != -1)
- fsl_mc_printk(mci, KERN_ERR,
- "Faulty ECC bit: %d\n", bad_ecc_bit);
+ if (bad_data_bit >= 0) {
+ fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
+ cap ^= 1ULL << bad_data_bit;
+ }
+
+ if (bad_ecc_bit >= 0) {
+ fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
+ s ^= 1 << bad_ecc_bit;
+ }
fsl_mc_printk(mci, KERN_ERR,
"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high ^ (1 << (bad_data_bit - 32)),
- cap_low ^ (1 << bad_data_bit),
- syndrome ^ (1 << bad_ecc_bit));
+ upper_32_bits(cap), lower_32_bits(cap), s);
}
fsl_mc_printk(mci, KERN_ERR,
@@ -367,7 +383,7 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
row_index, 0, -1,
mci->ctl_name, "");
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
}
static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
@@ -376,7 +392,7 @@ static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
struct fsl_mc_pdata *pdata = mci->pvt_info;
u32 err_detect;
- err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
if (!err_detect)
return IRQ_NONE;
@@ -396,7 +412,7 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
u32 cs_bnds;
int index;
- sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
+ sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
sdtype = sdram_ctl & DSC_SDTYPE_MASK;
if (sdram_ctl & DSC_RD_EN) {
@@ -431,6 +447,9 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
case 0x05000000:
mtype = MEM_DDR4;
break;
+ case 0x04000000:
+ mtype = MEM_LPDDR4;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -444,7 +463,7 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
- cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
+ cs_bnds = ddr_in32(pdata, FSL_MC_CS_BNDS_0 +
(index * FSL_MC_CS_BNDS_OFS));
start = (cs_bnds & 0xffff0000) >> 16;
@@ -464,7 +483,9 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
dimm->grain = 8;
dimm->mtype = mtype;
dimm->dtype = DEV_UNKNOWN;
- if (sdram_ctl & DSC_X32_EN)
+ if (pdata->flag == TYPE_IMX9)
+ dimm->dtype = DEV_X16;
+ else if (sdram_ctl & DSC_X32_EN)
dimm->dtype = DEV_X32;
dimm->edac_mode = EDAC_SECDED;
}
@@ -476,6 +497,7 @@ int fsl_mc_err_probe(struct platform_device *op)
struct edac_mc_layer layers[2];
struct fsl_mc_pdata *pdata;
struct resource r;
+ u32 ecc_en_mask;
u32 sdram_ctl;
int res;
@@ -503,11 +525,13 @@ int fsl_mc_err_probe(struct platform_device *op)
mci->ctl_name = pdata->name;
mci->dev_name = pdata->name;
+ pdata->flag = (unsigned long)device_get_match_data(&op->dev);
+
/*
* Get the endianness of DDR controller registers.
* Default is big endian.
*/
- little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
+ pdata->little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
@@ -531,8 +555,23 @@ int fsl_mc_err_probe(struct platform_device *op)
goto err;
}
- sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
- if (!(sdram_ctl & DSC_ECC_EN)) {
+ if (pdata->flag == TYPE_IMX9) {
+ pdata->inject_vbase = devm_platform_ioremap_resource_byname(op, "inject");
+ if (IS_ERR(pdata->inject_vbase)) {
+ res = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (pdata->flag == TYPE_IMX9) {
+ sdram_ctl = ddr_in32(pdata, IMX9_MC_ERR_EN);
+ ecc_en_mask = ERR_ECC_EN | ERR_INLINE_ECC;
+ } else {
+ sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
+ ecc_en_mask = DSC_ECC_EN;
+ }
+
+ if ((sdram_ctl & ecc_en_mask) != ecc_en_mask) {
/* no ECC */
pr_warn("%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
@@ -543,7 +582,8 @@ int fsl_mc_err_probe(struct platform_device *op)
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
- MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
+ MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+ MEM_FLAG_LPDDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
@@ -558,11 +598,11 @@ int fsl_mc_err_probe(struct platform_device *op)
fsl_ddr_init_csrows(mci);
/* store the original error disable bits */
- orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
+ pdata->orig_ddr_err_disable = ddr_in32(pdata, FSL_MC_ERR_DISABLE);
+ ddr_out32(pdata, FSL_MC_ERR_DISABLE, 0);
/* clear all error bits */
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, ~0);
res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
if (res) {
@@ -571,15 +611,15 @@ int fsl_mc_err_probe(struct platform_device *op)
}
if (edac_op_state == EDAC_OPSTATE_INT) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
+ ddr_out32(pdata, FSL_MC_ERR_INT_EN,
DDR_EIE_MBEE | DDR_EIE_SBEE);
/* store the original error management threshold */
- orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
- FSL_MC_ERR_SBE) & 0xff0000;
+ pdata->orig_ddr_err_sbe = ddr_in32(pdata,
+ FSL_MC_ERR_SBE) & 0xff0000;
/* set threshold to 1 error per interrupt */
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
+ ddr_out32(pdata, FSL_MC_ERR_SBE, 0x10000);
/* register interrupts */
pdata->irq = platform_get_irq(op, 0);
@@ -620,12 +660,13 @@ void fsl_mc_err_remove(struct platform_device *op)
edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
+ ddr_out32(pdata, FSL_MC_ERR_INT_EN, 0);
}
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
- orig_ddr_err_disable);
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
+ ddr_out32(pdata, FSL_MC_ERR_DISABLE,
+ pdata->orig_ddr_err_disable);
+ ddr_out32(pdata, FSL_MC_ERR_SBE, pdata->orig_ddr_err_sbe);
+
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index c0994a2a003c..73618f79e587 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -39,6 +39,9 @@
#define FSL_MC_CAPTURE_EXT_ADDRESS 0x0e54
#define FSL_MC_ERR_SBE 0x0e58
+#define IMX9_MC_ERR_EN 0x1000
+#define IMX9_MC_DATA_ERR_INJECT_OFF 0x100
+
#define DSC_MEM_EN 0x80000000
#define DSC_ECC_EN 0x20000000
#define DSC_RD_EN 0x10000000
@@ -46,6 +49,9 @@
#define DSC_DBW_32 0x00080000
#define DSC_DBW_64 0x00000000
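+/* i.MX9 ERR_EN register bits */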
+#define ERR_ECC_EN 0x80000000
+#define ERR_INLINE_ECC 0x40000000
+
#define DSC_SDTYPE_MASK 0x07000000
#define DSC_X32_EN 0x00000020
@@ -65,11 +71,18 @@
#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+#define TYPE_IMX9	0x1	/* MC used by i.MX9, which has a changed register layout */
+
struct fsl_mc_pdata {
char *name;
int edac_idx;
void __iomem *mc_vbase;
+ void __iomem *inject_vbase;
int irq;
+ u32 orig_ddr_err_disable;
+ u32 orig_ddr_err_sbe;
+ bool little_endian;
+ unsigned long flag;
};
int fsl_mc_err_probe(struct platform_device *op);
void fsl_mc_err_remove(struct platform_device *op);
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 282ca6535f8f..24f163ff323f 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -128,7 +128,7 @@ static void highbank_l2_err_remove(struct platform_device *pdev)
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
- .remove_new = highbank_l2_err_remove,
+ .remove = highbank_l2_err_remove,
.driver = {
.name = "hb_l2_edac",
.of_match_table = hb_l2_err_of_match,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 1c5b888ab11d..a8879d72d064 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -261,7 +261,7 @@ static void highbank_mc_remove(struct platform_device *pdev)
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
- .remove_new = highbank_mc_remove,
+ .remove = highbank_mc_remove,
.driver = {
.name = "hb_mc_edac",
.of_match_table = hb_ddr_ctrl_of_match,
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index e2a954de913b..2010a47149f4 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -62,6 +62,7 @@
((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
#define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000
+#define I10NM_GNR_D_IMC_MMIO_OFFSET 0x206000
#define I10NM_GNR_IMC_MMIO_SIZE 0x4000
#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
#define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24)
@@ -72,12 +73,6 @@
#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
-#define RETRY_RD_ERR_LOG_UC BIT(1)
-#define RETRY_RD_ERR_LOG_NOOVER BIT(14)
-#define RETRY_RD_ERR_LOG_EN BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC (BIT(14) | BIT(1))
-#define RETRY_RD_ERR_LOG_OVER_UC_V (BIT(2) | BIT(1) | BIT(0))
-
static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
@@ -85,227 +80,319 @@ static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
-static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
-static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
-static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
-static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
-static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
- u32 *offsets_scrub, u32 *offsets_demand,
- u32 *offsets_demand2)
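+/*
+ * Per-platform retry_rd_err_log (RRL) register layout: offsets[] holds one
+ * row per register set (ordered as modes[]), with columns matching widths[].
+ */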
+static struct reg_rrl icx_reg_rrl_ddr = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
+ },
+ .widths = {4, 4, 4, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_ddr = {
+ .set_num = 3,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
+ {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
+ {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2818, 0x281c, 0x2820, 0x2824},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
+ {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2c18, 0x2c1c, 0x2c20, 0x2c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
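+/* GNR logs both first and last errors for scrub and demand reads; note the enable bit moves to bit 12. */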
+static struct reg_rrl gnr_reg_rrl_ddr = {
+ .set_num = 4,
+ .reg_num = 6,
+ .modes = {FRE_SCRUB, FRE_DEMAND, LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2f10, 0x2f20, 0x2f30, 0x2f50, 0x2f60, 0xba0},
+ {0x2f14, 0x2f24, 0x2f38, 0x2f54, 0x2f64, 0xba8},
+ {0x2f18, 0x2f28, 0x2f40, 0x2f58, 0x2f68, 0xbb0},
+ {0x2f1c, 0x2f2c, 0x2f48, 0x2f5c, 0x2f6c, 0xbb8},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(14),
+ .noover_mask = BIT(15),
+ .en_mask = BIT(12),
+
+ .cecnt_num = 8,
+ .cecnt_offsets = {0x2c10, 0x2c14, 0x2c18, 0x2c1c, 0x2c20, 0x2c24, 0x2c28, 0x2c2c},
+ .cecnt_widths = {4, 4, 4, 4, 4, 4, 4, 4},
+};
+
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
- u32 s, d, d2;
+ switch (width) {
+ case 4:
+ return I10NM_GET_REG32(imc, chan, offset);
+ case 8:
+ return I10NM_GET_REG64(imc, chan, offset);
+ default:
+		i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
+ return 0;
+ }
+}
+
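+/* Only 32-bit registers are ever written (the RRL control words); the 64-bit registers are read-only logs here. */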
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+ switch (width) {
+ case 4:
+ return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+ }
+}
+
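+/*
+ * Enable or disable one RRL register set. The previous control word is
+ * saved in *rrl_ctl on enable and restored from it on disable.
+ */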
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ int rrl_set, bool enable, u32 *rrl_ctl)
+{
+ enum rrl_mode mode = rrl->modes[rrl_set];
+ u32 offset = rrl->offsets[rrl_set][0], v;
+ u8 width = rrl->widths[0];
+ bool first, scrub;
- s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
- if (offsets_demand2)
- d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+ /* First or last read error. */
+ first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+ /* Patrol scrub or on-demand read error. */
+ scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+ v = read_imc_reg(imc, chan, offset, width);
if (enable) {
- /* Save default configurations */
- imc->chan[chan].retry_rd_err_log_s = s;
- imc->chan[chan].retry_rd_err_log_d = d;
- if (offsets_demand2)
- imc->chan[chan].retry_rd_err_log_d2 = d2;
-
- s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- s |= RETRY_RD_ERR_LOG_EN;
- d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- d |= RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- d2 &= ~RETRY_RD_ERR_LOG_UC;
- d2 |= RETRY_RD_ERR_LOG_NOOVER;
- d2 |= RETRY_RD_ERR_LOG_EN;
- }
+ /* Save default configurations. */
+ *rrl_ctl = v;
+ v &= ~rrl->uc_mask;
+
+ if (first)
+ v |= rrl->noover_mask;
+ else
+ v &= ~rrl->noover_mask;
+
+ if (scrub)
+ v |= rrl->en_patspr_mask;
+ else
+ v &= ~rrl->en_patspr_mask;
+
+ v |= rrl->en_mask;
} else {
- /* Restore default configurations */
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
- s |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
- s |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
- s &= ~RETRY_RD_ERR_LOG_EN;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
- d |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
- d |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
- d &= ~RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
- d2 |= RETRY_RD_ERR_LOG_UC;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
- d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
- d2 &= ~RETRY_RD_ERR_LOG_EN;
+ /* Restore default configurations. */
+ if (*rrl_ctl & rrl->uc_mask)
+ v |= rrl->uc_mask;
+
+ if (first) {
+ if (!(*rrl_ctl & rrl->noover_mask))
+ v &= ~rrl->noover_mask;
+ } else {
+ if (*rrl_ctl & rrl->noover_mask)
+ v |= rrl->noover_mask;
}
+
+ if (scrub) {
+ if (!(*rrl_ctl & rrl->en_patspr_mask))
+ v &= ~rrl->en_patspr_mask;
+ } else {
+ if (*rrl_ctl & rrl->en_patspr_mask)
+ v |= rrl->en_patspr_mask;
+ }
+
+ if (!(*rrl_ctl & rrl->en_mask))
+ v &= ~rrl->en_mask;
}
- I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
- if (offsets_demand2)
- I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+ write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ bool enable, u32 *rrl_ctl)
+{
+ for (int i = 0; i < rrl->set_num; i++)
+ enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+ int i, chan_num = res_cfg->ddr_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase)
+ return;
+
+ for (i = 0; i < chan_num; i++)
+ enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+ int i, chan_num = res_cfg->hbm_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+ return;
+
+ for (i = 0; i < chan_num; i++) {
+ enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+ enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+ }
}
static void enable_retry_rd_err_log(bool enable)
{
- int i, j, imc_num, chan_num;
- struct skx_imc *imc;
struct skx_dev *d;
+ int i, imc_num;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list) {
imc_num = res_cfg->ddr_imc_num;
- chan_num = res_cfg->ddr_chan_num;
-
- for (i = 0; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase)
- continue;
-
- for (j = 0; j < chan_num; j++)
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (i = 0; i < imc_num; i++)
+ enable_rrls_ddr(&d->imc[i], enable);
imc_num += res_cfg->hbm_imc_num;
- chan_num = res_cfg->hbm_chan_num;
-
- for (; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase || !imc->hbm_mc)
- continue;
-
- for (j = 0; j < chan_num; j++) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- }
- }
+ for (; i < imc_num; i++)
+ enable_rrls_hbm(&d->imc[i], enable);
}
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
int len, bool scrub_err)
{
+ int i, j, n, ch = res->channel, pch = res->cs & 1;
struct skx_imc *imc = &res->dev->imc[res->imc];
- u32 log0, log1, log2, log3, log4;
- u32 corr0, corr1, corr2, corr3;
- u32 lxg0, lxg1, lxg3, lxg4;
- u32 *xffsets = NULL;
- u64 log2a, log5;
- u64 lxg2a, lxg5;
- u32 *offsets;
- int n, pch;
+ u64 log, corr, status_mask;
+ struct reg_rrl *rrl;
+ bool scrub;
+ u32 offset;
+ u8 width;
if (!imc->mbase)
return;
- if (imc->hbm_mc) {
- pch = res->cs & 1;
+ rrl = imc->hbm_mc ? res_cfg->reg_rrl_hbm[pch] : res_cfg->reg_rrl_ddr;
- if (pch)
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
- res_cfg->offsets_demand_hbm1;
- else
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
- res_cfg->offsets_demand_hbm0;
- } else {
- if (scrub_err) {
- offsets = res_cfg->offsets_scrub;
- } else {
- offsets = res_cfg->offsets_demand;
- xffsets = res_cfg->offsets_demand2;
- }
- }
+ if (!rrl)
+ return;
- log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
- log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
- log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
- log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
- log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
-
- if (xffsets) {
- lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
- lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
- lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
- lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
- lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
- }
+ status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;
- if (res_cfg->type == SPR) {
- log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
- log0, log1, log2a, log3, log4, log5);
+ n = scnprintf(msg, len, " retry_rd_err_log[");
+ for (i = 0; i < rrl->set_num; i++) {
+ scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
+ if (scrub_err != scrub)
+ continue;
- if (len - n > 0) {
- if (xffsets) {
- lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
- n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
- lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
- } else {
- n += snprintf(msg + n, len - n, "]");
- }
- }
- } else {
- log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
- log0, log1, log2, log3, log4, log5);
- }
+ for (j = 0; j < rrl->reg_num && len - n > 0; j++) {
+ offset = rrl->offsets[i][j];
+ width = rrl->widths[j];
+ log = read_imc_reg(imc, ch, offset, width);
- if (imc->hbm_mc) {
- if (pch) {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ if (width == 4)
+ n += scnprintf(msg + n, len - n, "%.8llx ", log);
+ else
+ n += scnprintf(msg + n, len - n, "%.16llx ", log);
+
+			/* Clear the RRL status if RRL is in Linux control mode. */
+ if (retry_rd_err_log == 2 && !j && (log & status_mask))
+ write_imc_reg(imc, ch, offset, width, log & ~status_mask);
}
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
}
- if (len - n > 0)
- snprintf(msg + n, len - n,
- " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
- corr0 & 0xffff, corr0 >> 16,
- corr1 & 0xffff, corr1 >> 16,
- corr2 & 0xffff, corr2 >> 16,
- corr3 & 0xffff, corr3 >> 16);
-
- /* Clear status bits */
- if (retry_rd_err_log == 2) {
- if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ /* Move back one space. */
+ n--;
+ n += scnprintf(msg + n, len - n, "]");
+
+ if (len - n > 0) {
+ n += scnprintf(msg + n, len - n, " correrrcnt[");
+ for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
+ offset = rrl->cecnt_offsets[i];
+ width = rrl->cecnt_widths[i];
+ corr = read_imc_reg(imc, ch, offset, width);
+
+ /* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
+ if (res_cfg->type <= SPR) {
+ n += scnprintf(msg + n, len - n, "%.4llx %.4llx ",
+ corr & 0xffff, corr >> 16);
+ } else {
+ /* CPUs {GNR} encode one counter per CORRERRCNT register. */
+ if (width == 4)
+ n += scnprintf(msg + n, len - n, "%.8llx ", corr);
+ else
+ n += scnprintf(msg + n, len - n, "%.16llx ", corr);
+ }
}
- if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
- }
+ /* Move back one space. */
+ n--;
+ n += scnprintf(msg + n, len - n, "]");
}
}
@@ -381,17 +468,18 @@ static int i10nm_get_imc_num(struct res_config *cfg)
return -ENODEV;
}
- if (imc_num > I10NM_NUM_DDR_IMC) {
- i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
- return -EINVAL;
- }
-
if (cfg->ddr_imc_num != imc_num) {
/*
- * Store the number of present DDR memory controllers.
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
+
+ /* Release and reallocate skx_dev list with the updated number. */
+ skx_remove();
+ if (skx_get_all_bus_mappings(cfg, &i10nm_edac_list) <= 0)
+ return -ENODEV;
}
return 0;
@@ -601,6 +689,14 @@ static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *phy
return NULL;
}
+static u32 get_gnr_imc_mmio_offset(void)
+{
+ if (boot_cpu_data.x86_vfm == INTEL_GRANITERAPIDS_D)
+ return I10NM_GNR_D_IMC_MMIO_OFFSET;
+
+ return I10NM_GNR_IMC_MMIO_OFFSET;
+}
+
/**
* get_ddr_munit() - Get the resource of the i-th DDR memory controller.
*
@@ -629,7 +725,7 @@ static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsi
return NULL;
*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
- I10NM_GNR_IMC_MMIO_OFFSET +
+ get_gnr_imc_mmio_offset() +
physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
*size = I10NM_GNR_IMC_MMIO_SIZE;
@@ -751,6 +847,8 @@ static int i10nm_get_ddr_munits(void)
continue;
} else {
d->imc[lmc].mdev = mdev;
+ if (res_cfg->type == SPR)
+ skx_set_mc_mapping(d, i, lmc);
lmc++;
}
}
@@ -868,8 +966,7 @@ static struct res_config i10nm_cfg0 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config i10nm_cfg1 = {
@@ -887,8 +984,7 @@ static struct res_config i10nm_cfg1 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config spr_cfg = {
@@ -911,13 +1007,9 @@ static struct res_config spr_cfg = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
- .offsets_scrub = offsets_scrub_spr,
- .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
- .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
- .offsets_demand = offsets_demand_spr,
- .offsets_demand2 = offsets_demand2_spr,
- .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
- .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
+ .reg_rrl_ddr = &spr_reg_rrl_ddr,
+ .reg_rrl_hbm[0] = &spr_reg_rrl_hbm_pch0,
+ .reg_rrl_hbm[1] = &spr_reg_rrl_hbm_pch1,
};
static struct res_config gnr_cfg = {
@@ -935,19 +1027,23 @@ static struct res_config gnr_cfg = {
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 5, 1},
.sad_all_offset = 0x300,
+ .reg_rrl_ddr = &gnr_reg_rrl_ddr,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM( INTEL_ICELAKE_D, &i10nm_cfg1),
+
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -962,6 +1058,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
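+	/* An all-ones readback means the channel is absent; MCMTR bit 18 marks it disabled. */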
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -975,6 +1080,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
@@ -1010,7 +1120,7 @@ static struct notifier_block i10nm_mce_dec = {
static int __init i10nm_init(void)
{
- u8 mc = 0, src_id = 0, node_id = 0;
+ u8 mc = 0, src_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
@@ -1036,6 +1146,7 @@ static int __init i10nm_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
res_cfg = cfg;
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
@@ -1069,19 +1180,14 @@ static int __init i10nm_init(void)
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
-
- edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
- d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].src_id = src_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
@@ -1108,7 +1214,7 @@ static int __init i10nm_init(void)
mce_register_decode_chain(&i10nm_mce_dec);
skx_setup_debug("i10nm_test");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
@@ -1128,7 +1234,7 @@ static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(NULL, NULL);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(false);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4b5a71f8739d..4a1bebc1ff14 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -338,11 +338,11 @@ struct i5000_pvt {
u16 mir0, mir1, mir2;
- u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
@@ -1210,7 +1210,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
- /* Only if we have 2 branchs (4 channels) */
+ /* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 49b4499269fb..b5cf25905b05 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -899,7 +900,7 @@ static void decode_mtr(int slot_row, u16 mtr)
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 61adaa872ba7..69068f8d0cad 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -620,7 +621,7 @@ static int decode_mtr(struct i7300_pvt *pvt,
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
@@ -871,9 +872,9 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
edac_dbg(0, "Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 9ef13570f2e5..5a080ab65476 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -19,7 +19,8 @@
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
- * 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 590f: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
* 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers
* 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
@@ -50,6 +51,8 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/mce.h>
+#include <asm/msr.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -67,7 +70,8 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F
#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918
#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x590f
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_12 0x5918
/* Coffee Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
@@ -82,43 +86,46 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
-/* Test if HB is for Skylake or later. */
-#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
- (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
- (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
- PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-
-#define IE31200_DIMMS 4
-#define IE31200_RANKS 8
-#define IE31200_RANKS_PER_CHANNEL 4
+/* Raptor Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703 /* 8P+8E, e.g. i7-13700 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640 /* 6P+8E, e.g. i5-13500, i5-13600, i5-14500 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630 /* 4P+0E, e.g. i3-13100E */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700 /* 8P+16E, e.g. i9-13900, i9-14900 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5 0xa740 /* 8P+12E, e.g. i7-14700 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6 0xa704 /* 6P+8E, e.g. i5-14600 */
+
+/* Raptor Lake-HX */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1 0xa702 /* 8P+16E, e.g. i9-13950HX */
+
+/* Alder Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2 0x4668 /* 8P+4E, e.g. i7-12700K */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3 0x4648 /* 6P+4E, e.g. i5-12600K */
+
+/* Bartlett Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2 0x463c
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3 0x4642
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4 0x4643
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5 0xa731
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6 0xa732
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7 0xa733
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8 0xa741
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9 0xa744
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10 0xa745
+
+#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
+#define IE31200_IMC_NUM 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
-#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
-#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
@@ -127,68 +134,60 @@
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
-/*
- * Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-
-#define IE31200_C0ECCERRLOG 0x40c8
-#define IE31200_C1ECCERRLOG 0x44c8
-#define IE31200_C0ECCERRLOG_SKL 0x4048
-#define IE31200_C1ECCERRLOG_SKL 0x4448
-#define IE31200_ECCERRLOG_CE BIT(0)
-#define IE31200_ECCERRLOG_UE BIT(1)
-#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
-#define IE31200_ECCERRLOG_RANK_SHIFT 27
-#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
-#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
-
-#define IE31200_ECCERRLOG_SYNDROME(log) \
- ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
- IE31200_ECCERRLOG_SYNDROME_SHIFT)
-
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
-#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
-#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
-#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
-#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
-
-/* Skylake reports 1GB increments, everything else is 256MB */
-#define IE31200_PAGES(n, skl) \
- (n << (28 + (2 * skl) - PAGE_SHIFT))
+/* Non-constant mask variant of FIELD_GET(); ffs() is 1-based, hence the "- 1". */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
+struct res_config {
+ enum mem_type mtype;
+ bool cmci;
+ int imc_num;
+ /* Host MMIO configuration register */
+ u64 reg_mchbar_mask;
+ u64 reg_mchbar_window_size;
+ /* ECC error log register */
+ u64 reg_eccerrlog_offset[IE31200_CHANNELS];
+ u64 reg_eccerrlog_ce_mask;
+ u64 reg_eccerrlog_ce_ovfl_mask;
+ u64 reg_eccerrlog_ue_mask;
+ u64 reg_eccerrlog_ue_ovfl_mask;
+ u64 reg_eccerrlog_rank_mask;
+ u64 reg_eccerrlog_syndrome_mask;
+ /* MSR to clear ECC error log register */
+ u32 msr_clear_eccerrlog_offset;
+ /* DIMM characteristics register */
+ u64 reg_mad_dimm_size_granularity;
+ u64 reg_mad_dimm_offset[IE31200_CHANNELS];
+ u32 reg_mad_dimm_size_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_rank_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_width_mask[IE31200_DIMMS_PER_CHANNEL];
+};
+
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
+ struct res_config *cfg;
+ struct mem_ctl_info *mci;
+ struct pci_dev *pdev;
+ struct device dev;
};
+static struct ie31200_pvt {
+ struct ie31200_priv *priv[IE31200_IMC_NUM];
+} ie31200_pvt;
+
enum ie31200_chips {
IE31200 = 0,
+ IE31200_1 = 1,
};
struct ie31200_dev_info {
@@ -199,18 +198,22 @@ struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
+ u64 erraddr;
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
+ [IE31200_1] = {
+ .ctl_name = "IE31200_1"
+ },
};
struct dimm_data {
- u8 size; /* in multiples of 256MB, except Skylake is 1GB */
- u8 dual_rank : 1,
- x16_width : 2; /* 0 means x8 width */
+ u64 size; /* in bytes */
+ u8 ranks;
+ enum dev_type dtype;
};
static int how_many_channels(struct pci_dev *pdev)
@@ -248,29 +251,54 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(u64 log)
-{
- return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
-}
+#define mci_to_pci_dev(mci) (((struct ie31200_priv *)(mci)->pvt_info)->pdev)
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
+
+ /*
+ * The PCI ERRSTS register is deprecated. Write the MSR to clear
+ * the ECC error log registers in all memory controllers.
+ */
+ if (cfg->msr_clear_eccerrlog_offset) {
+ if (wrmsr_safe(cfg->msr_clear_eccerrlog_offset,
+ cfg->reg_eccerrlog_ce_mask |
+ cfg->reg_eccerrlog_ce_ovfl_mask |
+ cfg->reg_eccerrlog_ue_mask |
+ cfg->reg_eccerrlog_ue_ovfl_mask, 0) < 0)
+ ie31200_printk(KERN_ERR, "Failed to wrmsr.\n");
+
+ return;
+ }
+
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
- pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+ pci_write_bits16(mci_to_pci_dev(mci), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = mci_to_pci_dev(mci);
struct ie31200_priv *priv = mci->pvt_info;
- pdev = to_pci_dev(mci->pdev);
+ /*
+ * The PCI ERRSTS register is deprecated, directly read the
+ * MMIO-mapped ECC error log registers.
+ */
+ if (priv->cfg->msr_clear_eccerrlog_offset) {
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
+
+ ie31200_clear_error_info(mci);
+ return;
+ }
/*
* This is a mess because there is no atomic way to read all the
@@ -306,46 +334,56 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
int channel;
u64 log;
- if (!(info->errsts & IE31200_ERRSTS_BITS))
- return;
+ if (!cfg->msr_clear_eccerrlog_offset) {
+ if (!(info->errsts & IE31200_ERRSTS_BITS))
+ return;
- if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "");
- info->errsts = info->errsts2;
+ if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
+ info->errsts = info->errsts2;
+ }
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
- if (log & IE31200_ECCERRLOG_UE) {
+ if (log & cfg->reg_eccerrlog_ue_mask) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- 0, 0, 0,
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0, 0,
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 UE", "");
- } else if (log & IE31200_ECCERRLOG_CE) {
+ } else if (log & cfg->reg_eccerrlog_ce_mask) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0,
- IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0,
+ field_get(cfg->reg_eccerrlog_syndrome_mask, log),
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 CE", "");
}
}
}
-static void ie31200_check(struct mem_ctl_info *mci)
+static void __ie31200_check(struct mem_ctl_info *mci, struct mce *mce)
{
struct ie31200_error_info info;
+ info.erraddr = mce ? mce->addr : 0;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
-static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+ __ie31200_check(mci, NULL);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
union {
u64 mchbar;
@@ -358,7 +396,8 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= IE31200_MCHBAR_MASK;
+ u.mchbar &= cfg->reg_mchbar_mask;
+ u.mchbar += cfg->reg_mchbar_window_size * mc;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
@@ -366,7 +405,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, cfg->reg_mchbar_window_size);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
@@ -374,155 +413,108 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
-static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
+ struct res_config *cfg)
{
- dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
- dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
- (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+ dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) * cfg->reg_mad_dimm_size_granularity;
+ dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
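+	/* The width field encodes x8/x16/x32 as 0/1/2, matching the consecutive DEV_X8..DEV_X32 enum values. */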
+ dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
-static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void ie31200_get_dimm_config(struct mem_ctl_info *mci, void __iomem *window,
+ struct res_config *cfg, int mc)
{
- dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
- dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
-}
+ struct dimm_data dimm_info;
+ struct dimm_info *dimm;
+ unsigned long nr_pages;
+ u32 addr_decode;
+ int i, j, k;
-static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
- bool skl)
-{
- if (skl)
- __skl_populate_dimm_info(dd, addr_decode, chan);
- else
- __populate_dimm_info(dd, addr_decode, chan);
-}
+ for (i = 0; i < IE31200_CHANNELS; i++) {
+ addr_decode = readl(window + cfg->reg_mad_dimm_offset[i]);
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+ for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+ populate_dimm_info(&dimm_info, addr_decode, j, cfg);
+ edac_dbg(0, "mc: %d, channel: %d, dimm: %d, size: %lld MiB, ranks: %d, DRAM chip type: %d\n",
+ mc, i, j, dimm_info.size >> 20,
+ dimm_info.ranks,
+ dimm_info.dtype);
-static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ nr_pages = MiB_TO_PAGES(dimm_info.size >> 20);
+ if (nr_pages == 0)
+ continue;
+
+ nr_pages = nr_pages / dimm_info.ranks;
+ for (k = 0; k < dimm_info.ranks; k++) {
+ dimm = edac_get_dimm(mci, (j * dimm_info.ranks) + k, i, 0);
+ dimm->nr_pages = nr_pages;
+ edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+ dimm->grain = 8; /* just a guess */
+ dimm->mtype = cfg->mtype;
+ dimm->dtype = dimm_info.dtype;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
+ }
+ }
+}
+
+static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
- int i, j, ret;
- struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
- void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
-
- /*
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
- * this logic when adding new CPU support.
- */
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
-
- edac_dbg(0, "MC:\n");
-
- if (!ecc_capable(pdev)) {
- ie31200_printk(KERN_INFO, "No ECC support\n");
- return -ENODEV;
- }
+ struct mem_ctl_info *mci;
+ void __iomem *window;
+ int ret;
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = IE31200_DIMMS;
+ layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
- window = ie31200_map_mchbar(pdev);
+ window = ie31200_map_mchbar(pdev, cfg, mc);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
- mci->pdev = &pdev->dev;
- if (skl)
- mci->mtype_cap = MEM_FLAG_DDR4;
- else
- mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->mtype_cap = BIT(cfg->mtype);
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+ mci->ctl_name = ie31200_devs[mc].ctl_name;
mci->dev_name = pci_name(pdev);
- mci->edac_check = ie31200_check;
+ mci->edac_check = cfg->cmci ? NULL : ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
- if (skl) {
- priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
- priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
- } else {
- priv->c0errlog = window + IE31200_C0ECCERRLOG;
- priv->c1errlog = window + IE31200_C1ECCERRLOG;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET;
- }
-
- /* populate DIMM info */
- for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
- (i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
- }
-
+ priv->c0errlog = window + cfg->reg_eccerrlog_offset[0];
+ priv->c1errlog = window + cfg->reg_eccerrlog_offset[1];
+ priv->cfg = cfg;
+ priv->mci = mci;
+ priv->pdev = pdev;
+ device_initialize(&priv->dev);
/*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
+ * The EDAC core uses mci->pdev (pointer to the structure device)
+ * as the memory controller ID. The SoCs attach one or more memory
+ * controllers to a single pci_dev (a single pci_dev->dev can
+ * correspond to multiple memory controllers).
+ *
+ * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
+ * for the first memory controller and assign a unique priv->dev
+ * to mci->pdev for each additional memory controller.
*/
- for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
- for (j = 0; j < IE31200_CHANNELS; j++) {
- struct dimm_info *dimm;
- unsigned long nr_pages;
-
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
- if (nr_pages == 0)
- continue;
-
- if (dimm_info[j][i].dual_rank) {
- nr_pages = nr_pages / 2;
- dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* just a guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- dimm = edac_get_dimm(mci, i * 2, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* same guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- }
+ mci->pdev = mc ? &priv->dev : &pdev->dev;
+ ie31200_get_dimm_config(mci, window, cfg, mc);
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
@@ -531,16 +523,115 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
goto fail_unmap;
}
- /* get this far and it's successful */
- edac_dbg(3, "MC: success\n");
+ ie31200_pvt.priv[mc] = priv;
return 0;
-
fail_unmap:
iounmap(window);
-
fail_free:
edac_mc_free(mci);
+ return ret;
+}
+
+static void mce_check(struct mce *mce)
+{
+ struct ie31200_priv *priv;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+ __ie31200_check(priv->mci, mce);
+ }
+}
+
+static int mce_handler(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory related error.
+ * Don't check MCI_STATUS_ADDRV since it's not set on some CPUs.
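+	 * Memory errors use MCACOD 000F 0000 1MMM CCCC: bit 7 set, bits 8-11
+	 * and 13-15 clear (the 0xefff mask below ignores the filter bit 12).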
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ type = mce->mcgstatus & MCG_STATUS_MCIP ? "Exception" : "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+ mce_check(mce);
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ie31200_mce_dec = {
+ .notifier_call = mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static void ie31200_unregister_mcis(void)
+{
+ struct ie31200_priv *priv;
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+
+ mci = priv->mci;
+ edac_mc_del_mc(mci->pdev);
+ iounmap(priv->window);
+ edac_mc_free(mci);
+ }
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
+{
+ int i, ret;
+
+ edac_dbg(0, "MC:\n");
+
+ if (!ecc_capable(pdev)) {
+ ie31200_printk(KERN_INFO, "No ECC support\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->imc_num; i++) {
+ ret = ie31200_register_mci(pdev, cfg, i);
+ if (ret)
+ goto fail_register;
+ }
+
+ if (cfg->cmci) {
+ mce_register_decode_chain(&ie31200_mce_dec);
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ /* get this far and it's successful. */
+ edac_dbg(3, "MC: success\n");
+ return 0;
+
+fail_register:
+ ie31200_unregister_mcis();
return ret;
}
@@ -552,7 +643,7 @@ static int ie31200_init_one(struct pci_dev *pdev,
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
- rc = ie31200_probe1(pdev, ent->driver_data);
+ rc = ie31200_probe1(pdev, (struct res_config *)ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
@@ -561,42 +652,129 @@ static int ie31200_init_one(struct pci_dev *pdev,
static void ie31200_remove_one(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct ie31200_priv *priv;
+ struct ie31200_priv *priv = ie31200_pvt.priv[0];
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
- priv = mci->pvt_info;
- iounmap(priv->window);
- edac_mc_free(mci);
+ if (priv->cfg->cmci)
+ mce_unregister_decode_chain(&ie31200_mce_dec);
+ ie31200_unregister_mcis();
}
+static struct res_config snb_cfg = {
+ .mtype = MEM_DDR3,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x40c8,
+ .reg_eccerrlog_offset[1] = 0x44c8,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(28),
+ .reg_mad_dimm_offset[0] = 0x5004,
+ .reg_mad_dimm_offset[1] = 0x5008,
+ .reg_mad_dimm_size_mask[0] = GENMASK(7, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(15, 8),
+ .reg_mad_dimm_rank_mask[0] = BIT(17),
+ .reg_mad_dimm_rank_mask[1] = BIT(18),
+ .reg_mad_dimm_width_mask[0] = BIT(19),
+ .reg_mad_dimm_width_mask[1] = BIT(20),
+};
+
+static struct res_config skl_cfg = {
+ .mtype = MEM_DDR4,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x4048,
+ .reg_eccerrlog_offset[1] = 0x4448,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(30),
+ .reg_mad_dimm_offset[0] = 0x500c,
+ .reg_mad_dimm_offset[1] = 0x5010,
+ .reg_mad_dimm_size_mask[0] = GENMASK(5, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(21, 16),
+ .reg_mad_dimm_rank_mask[0] = BIT(10),
+ .reg_mad_dimm_rank_mask[1] = BIT(26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(9, 8),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
+static struct res_config rpl_s_cfg = {
+ .mtype = MEM_DDR5,
+ .cmci = true,
+ .imc_num = 2,
+ .reg_mchbar_mask = GENMASK_ULL(41, 17),
+ .reg_mchbar_window_size = BIT_ULL(16),
+ .reg_eccerrlog_offset[0] = 0xe048,
+ .reg_eccerrlog_offset[1] = 0xe848,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ce_ovfl_mask = BIT_ULL(1),
+ .reg_eccerrlog_ue_mask = BIT_ULL(2),
+ .reg_eccerrlog_ue_ovfl_mask = BIT_ULL(3),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .msr_clear_eccerrlog_offset = 0x791,
+ .reg_mad_dimm_offset[0] = 0xd80c,
+ .reg_mad_dimm_offset[1] = 0xd810,
+ .reg_mad_dimm_size_granularity = BIT_ULL(29),
+ .reg_mad_dimm_size_mask[0] = GENMASK(6, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(22, 16),
+ .reg_mad_dimm_rank_mask[0] = GENMASK(10, 9),
+ .reg_mad_dimm_rank_mask[1] = GENMASK(27, 26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(8, 7),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
static const struct pci_device_id ie31200_pci_tbl[] = {
- { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_1), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_2), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_3), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_4), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_5), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_6), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_7), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_11), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_12), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10), (kernel_ulong_t)&rpl_s_cfg },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
@@ -613,12 +791,10 @@ static int __init ie31200_init(void)
int pci_rc, i;
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
- goto fail0;
+ return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@@ -629,11 +805,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
+
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
- goto fail1;
+ goto fail0;
}
+
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@@ -641,12 +819,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
- return 0;
+ return 0;
fail1:
- pci_unregister_driver(&ie31200_driver);
-fail0:
pci_dev_put(mci_pdev);
+fail0:
+ pci_unregister_driver(&ie31200_driver);
return pci_rc;
}
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 189a2fc29e74..2fc59f9eed69 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -127,6 +127,7 @@
static struct res_config {
bool machine_check;
+ /* The number of present memory controllers. */
int num_imc;
u32 imc_base;
u32 cmf_base;
@@ -240,6 +241,12 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU11 0x467c
#define DID_ADL_N_SKU12 0x4632
+/* Compute die IDs for Arizona Beach with IBECC */
+#define DID_AZB_SKU1 0x4676
+
+/* Compute die IDs for Amston Lake with IBECC */
+#define DID_ASL_SKU1 0x464a
+
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
#define DID_RPL_P_SKU2 0xa707
@@ -263,6 +270,14 @@ static struct work_struct ecclog_work;
#define DID_ARL_UH_SKU2 0x7d20
#define DID_ARL_UH_SKU3 0x7d30
+/* Compute die IDs for Panther Lake-H with IBECC */
+#define DID_PTL_H_SKU1 0xb000
+#define DID_PTL_H_SKU2 0xb001
+#define DID_PTL_H_SKU3 0xb002
+
+/* Compute die IDs for Wildcat Lake with IBECC */
+#define DID_WCL_SKU1 0xfd00
+
static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
union {
@@ -557,7 +572,18 @@ static struct res_config mtl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct pci_device_id igen6_pci_tbl[] = {
+static struct res_config wcl_cfg = {
+ .machine_check = true,
+ .num_imc = 1,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_p_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
@@ -590,6 +616,8 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_AZB_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ASL_SKU1), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
@@ -605,6 +633,10 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU1), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU2), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU3), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_WCL_SKU1), (kernel_ulong_t)&wcl_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -777,13 +809,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
- /* Clear CE/UE bits by writing 1s */
- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
- return ecclog;
- }
+ /*
+ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain
+ * the invalid value ~0. This will result in a flood of invalid
+ * error reports in polling mode. Skip it.
+ */
+ if (ecclog == ~0)
+ return 0;
- return 0;
+ /* Neither a CE nor a UE. Skip it. */
+ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
+ return 0;
+
+ /* Clear CE/UE bits by writing 1s */
+ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
+
+ return ecclog;
}
static void errsts_clear(struct igen6_imc *imc)
@@ -1170,23 +1211,35 @@ fail:
return -ENODEV;
}
-static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
+static void igen6_check(struct mem_ctl_info *mci)
+{
+ struct igen6_imc *imc = mci->pvt_info;
+ u64 ecclog;
+
+ /* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */
+ ecclog = ecclog_read_and_clear(imc);
+ if (!ecclog)
+ return;
+
+ if (!ecclog_gen_pool_add(imc->mc, ecclog))
+ irq_work_queue(&ecclog_irq_work);
+}
+
+/* Check whether the memory controller is absent. */
+static bool igen6_imc_absent(void __iomem *window)
+{
+ return readl(window + MAD_INTER_CHANNEL_OFFSET) == ~0;
+}
+
+static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct igen6_imc *imc;
- void __iomem *window;
int rc;
edac_dbg(2, "\n");
- mchbar += mc * MCHBAR_SIZE;
- window = ioremap(mchbar, MCHBAR_SIZE);
- if (!window) {
- igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
- return -ENODEV;
- }
-
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
@@ -1211,6 +1264,8 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = igen6_check;
mci->pvt_info = &igen6_pvt->imc[mc];
imc = mci->pvt_info;
@@ -1245,11 +1300,11 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
imc->mci = mci;
return 0;
fail3:
+ mci->pvt_info = NULL;
kfree(mci->ctl_name);
fail2:
edac_mc_free(mci);
fail:
- iounmap(window);
return rc;
}
@@ -1269,11 +1324,64 @@ static void igen6_unregister_mcis(void)
edac_mc_del_mc(mci->pdev);
kfree(mci->ctl_name);
+ mci->pvt_info = NULL;
edac_mc_free(mci);
iounmap(imc->window);
}
}
+static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
+{
+ void __iomem *window;
+ int lmc, pmc, rc;
+ u64 base;
+
+ for (lmc = 0, pmc = 0; pmc < NUM_IMC; pmc++) {
+ base = mchbar + pmc * MCHBAR_SIZE;
+ window = ioremap(base, MCHBAR_SIZE);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx for mc%d\n", base, pmc);
+ rc = -ENOMEM;
+ goto out_unregister_mcis;
+ }
+
+ if (igen6_imc_absent(window)) {
+ iounmap(window);
+ edac_dbg(2, "Skip absent mc%d\n", pmc);
+ continue;
+ }
+
+ rc = igen6_register_mci(lmc, window, pdev);
+ if (rc)
+ goto out_iounmap;
+
+ /* Done if all present MCs have been detected and registered. */
+ if (++lmc >= res_cfg->num_imc)
+ break;
+ }
+
+ if (!lmc) {
+ igen6_printk(KERN_ERR, "No mc found.\n");
+ return -ENODEV;
+ }
+
+ if (lmc < res_cfg->num_imc) {
+ igen6_printk(KERN_DEBUG, "Expected %d mcs, but only %d detected.\n",
+ res_cfg->num_imc, lmc);
+ res_cfg->num_imc = lmc;
+ }
+
+ return 0;
+
+out_iounmap:
+ iounmap(window);
+
+out_unregister_mcis:
+ igen6_unregister_mcis();
+
+ return rc;
+}
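
The loop above is an instance of a general compaction pattern: walk the physical indices, skip the holes, and hand out dense logical indices in order. A minimal sketch of just that pattern, with a hypothetical slot_present() predicate standing in for the igen6_imc_absent() check:

static int compact_slots(int num_phys, bool (*slot_present)(int pmc))
{
	int lmc = 0;

	for (int pmc = 0; pmc < num_phys; pmc++) {
		if (!slot_present(pmc))
			continue;	/* leave a hole in the physical space */

		/* ... register physical slot pmc as logical slot lmc ... */
		lmc++;
	}

	return lmc;	/* number of present (logical) slots */
}

The driver additionally stops once res_cfg->num_imc logical controllers are registered and shrinks num_imc if fewer are found.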
+
static int igen6_mem_slice_setup(u64 mchbar)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
@@ -1348,10 +1456,29 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
+static void opstate_set(const struct res_config *cfg, const struct pci_device_id *ent)
+{
+ /*
+ * Quirk: Certain SoCs' error reporting interrupts don't work.
+ * Force polling mode for them to ensure that memory error
+ * events can be handled.
+ */
+ if (ent->device == DID_ADL_N_SKU4) {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ return;
+ }
+
+ /* Set the mode according to the configuration data. */
+ if (cfg->machine_check)
+ edac_op_state = EDAC_OPSTATE_INT;
+ else
+ edac_op_state = EDAC_OPSTATE_NMI;
+}
+
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
- int i, rc;
+ int rc;
edac_dbg(2, "\n");
@@ -1365,11 +1492,11 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto fail;
- for (i = 0; i < res_cfg->num_imc; i++) {
- rc = igen6_register_mci(i, mchbar, pdev);
- if (rc)
- goto fail2;
- }
+ opstate_set(res_cfg, ent);
+
+ rc = igen6_register_mcis(pdev, mchbar);
+ if (rc)
+ goto fail;
if (res_cfg->num_imc > 1) {
rc = igen6_mem_slice_setup(mchbar);
@@ -1448,8 +1575,6 @@ static int __init igen6_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
- edac_op_state = EDAC_OPSTATE_NMI;
-
rc = pci_register_driver(&igen6_driver);
if (rc)
return rc;
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 0d42c1238908..a2caa7fc5412 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -21,13 +21,14 @@
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
{ .compatible = "fsl,qoriq-memory-controller", },
+ { .compatible = "nxp,imx9-memory-controller", .data = (void *)TYPE_IMX9, },
{},
};
MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
static struct platform_driver fsl_ddr_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove_new = fsl_mc_err_remove,
+ .remove = fsl_mc_err_remove,
.driver = {
.name = "fsl_ddr_mc_err",
.of_match_table = fsl_ddr_mc_err_of_match,
diff --git a/drivers/edac/loongson_edac.c b/drivers/edac/loongson_edac.c
new file mode 100644
index 000000000000..38745800ed01
--- /dev/null
+++ b/drivers/edac/loongson_edac.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/edac.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+
+#define ECC_CS_COUNT_REG 0x18
+
+struct loongson_edac_pvt {
+ void __iomem *ecc_base;
+
+ /*
+ * The ECC register in this controller records the number of errors
+ * encountered since reset and cannot be zeroed so in order to be able
+ * to report the error count at each check, this records the previous
+ * register state.
+ */
+ int last_ce_count;
+};
+
+static int read_ecc(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ u64 ecc;
+ int cs;
+
+ ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
+ /* cs0 -- cs3 */
+ cs = ecc & 0xff;
+ cs += (ecc >> 8) & 0xff;
+ cs += (ecc >> 16) & 0xff;
+ cs += (ecc >> 24) & 0xff;
+
+ return cs;
+}
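
A worked example of the summation (the register value is hypothetical): ECC_CS_COUNT_REG packs one 8-bit correctable-error counter per chip select into its low 32 bits, and read_ecc() simply adds the four counters:

	u64 ecc = 0x04030201;		/* hypothetical readq() result */
	int cs;

	cs  =  ecc        & 0xff;	/* cs0 = 0x01 */
	cs += (ecc >> 8)  & 0xff;	/* cs1 = 0x02 */
	cs += (ecc >> 16) & 0xff;	/* cs2 = 0x03 */
	cs += (ecc >> 24) & 0xff;	/* cs3 = 0x04 */
	/* cs == 10 */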
+
+static void edac_check(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ int new, add;
+
+ new = read_ecc(mci);
+ add = new - pvt->last_ce_count;
+ pvt->last_ce_count = new;
+ if (add <= 0)
+ return;
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add,
+ 0, 0, 0, 0, 0, -1, "error", "");
+}
+
+static void dimm_config_init(struct mem_ctl_info *mci)
+{
+ struct dimm_info *dimm;
+ u32 size, npages;
+
+ /* size not used */
+ size = -1;
+ npages = MiB_TO_PAGES(size);
+
+ dimm = edac_get_dimm(mci, 0, 0, 0);
+ dimm->nr_pages = npages;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "MC#%uChannel#%u_DIMM#%u", mci->mc_idx, 0, 0);
+ dimm->grain = 8;
+}
+
+static void pvt_init(struct mem_ctl_info *mci, void __iomem *vbase)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+
+ pvt->ecc_base = vbase;
+ pvt->last_ce_count = read_ecc(mci);
+}
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ int ret;
+
+ vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbase))
+ return PTR_ERR(vbase);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct loongson_edac_pvt));
+ if (mci == NULL)
+ return -ENOMEM;
+
+ mci->mc_idx = edac_device_alloc_index();
+ mci->mtype_cap = MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "loongson_edac.c";
+ mci->ctl_name = "loongson_edac_ctl";
+ mci->dev_name = "loongson_edac_dev";
+ mci->ctl_page_to_phys = NULL;
+ mci->pdev = &pdev->dev;
+ mci->error_desc.grain = 8;
+ mci->edac_check = edac_check;
+
+ pvt_init(mci, vbase);
+ dimm_config_init(mci);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return ret;
+ }
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+}
+
+static void edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (mci)
+ edac_mc_free(mci);
+}
+
+static const struct acpi_device_id loongson_edac_acpi_match[] = {
+ {"LOON0010", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_edac_acpi_match);
+
+static struct platform_driver loongson_edac_driver = {
+ .probe = edac_probe,
+ .remove = edac_remove,
+ .driver = {
+ .name = "loongson-mc-edac",
+ .acpi_match_table = loongson_edac_acpi_match,
+ },
+};
+module_platform_driver(loongson_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qunqin <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("EDAC driver for Loongson memory controller");
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 194d9fd47d20..af3c12284a1e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
#include "mce_amd.h"
@@ -795,6 +796,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
struct mce *m = (struct mce *)data;
struct mce_hw_err *err = to_mce_hw_err(m);
unsigned int fam = x86_family(m->cpuid);
+ u32 mca_config_lo = 0, dummy;
int ecc;
if (m->kflags & MCE_HANDLED_CEC)
@@ -814,11 +816,9 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"));
if (boot_cpu_has(X86_FEATURE_SMCA)) {
- u32 low, high;
- u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
+ rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(m->bank), &mca_config_lo, &dummy);
- if (!rdmsr_safe(addr, &low, &high) &&
- (low & MCI_CONFIG_MCAX))
+ if (mca_config_lo & MCI_CONFIG_MCAX)
pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-"));
pr_cont("|%s", ((m->status & MCI_STATUS_SYNDV) ? "SyndV" : "-"));
@@ -853,8 +853,15 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_SYNDV) {
pr_cont(", Syndrome: 0x%016llx\n", m->synd);
- pr_emerg(HW_ERR "Syndrome1: 0x%016llx, Syndrome2: 0x%016llx",
- err->vendor.amd.synd1, err->vendor.amd.synd2);
+ if (mca_config_lo & MCI_CONFIG_FRUTEXT) {
+ char frutext[17];
+
+ frutext[16] = '\0';
+ memcpy(&frutext[0], &err->vendor.amd.synd1, 8);
+ memcpy(&frutext[8], &err->vendor.amd.synd2, 8);
+
+ pr_emerg(HW_ERR "FRU Text: %s", frutext);
+ }
}
pr_cont("\n");
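
The FRU-text packing above is easy to demonstrate in isolation. A stand-alone sketch with made-up syndrome contents (synd1/synd2 normally come from the hardware error record, and the byte layout shown assumes a little-endian host as on x86):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t synd1, synd2;
	char frutext[17];

	/* Hypothetical syndrome registers carrying 16 bytes of ASCII. */
	memcpy(&synd1, "DIMM_A1 ", 8);
	memcpy(&synd2, "SLOT_07\0", 8);

	frutext[16] = '\0';
	memcpy(&frutext[0], &synd1, 8);
	memcpy(&frutext[8], &synd2, 8);

	printf("FRU Text: %s\n", frutext);	/* "DIMM_A1 SLOT_07" */
	return 0;
}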
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
new file mode 100644
index 000000000000..108d69209146
--- /dev/null
+++ b/drivers/edac/mem_repair.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC memory repair driver is designed to control memory
+ * devices with memory repair features, such as Post Package Repair (PPR),
+ * memory sparing, etc. The common sysfs memory repair interface abstracts
+ * the control of various arbitrary memory repair functionalities into a
+ * unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_mem_repair_attributes {
+ MR_TYPE,
+ MR_PERSIST_MODE,
+ MR_SAFE_IN_USE,
+ MR_HPA,
+ MR_MIN_HPA,
+ MR_MAX_HPA,
+ MR_DPA,
+ MR_MIN_DPA,
+ MR_MAX_DPA,
+ MR_NIBBLE_MASK,
+ MR_BANK_GROUP,
+ MR_BANK,
+ MR_RANK,
+ MR_ROW,
+ MR_COLUMN,
+ MR_CHANNEL,
+ MR_SUB_CHANNEL,
+ MEM_DO_REPAIR,
+ MR_MAX_ATTRS
+};
+
+struct edac_mem_repair_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_mem_repair_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_mem_repair_dev_attr mem_repair_dev_attr[MR_MAX_ATTRS];
+ struct attribute *mem_repair_attrs[MR_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+const char * const edac_repair_type[] = {
+ [EDAC_REPAIR_PPR] = "ppr",
+ [EDAC_REPAIR_CACHELINE_SPARING] = "cacheline-sparing",
+ [EDAC_REPAIR_ROW_SPARING] = "row-sparing",
+ [EDAC_REPAIR_BANK_SPARING] = "bank-sparing",
+ [EDAC_REPAIR_RANK_SPARING] = "rank-sparing",
+};
+EXPORT_SYMBOL_GPL(edac_repair_type);
+
+#define TO_MR_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
+
+#define MR_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+MR_ATTR_SHOW(repair_type, get_repair_type, const char *, "%s\n")
+MR_ATTR_SHOW(persist_mode, get_persist_mode, bool, "%u\n")
+MR_ATTR_SHOW(repair_safe_when_in_use, get_repair_safe_when_in_use, bool, "%u\n")
+MR_ATTR_SHOW(hpa, get_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_hpa, get_min_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_hpa, get_max_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(dpa, get_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_dpa, get_min_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_dpa, get_max_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(nibble_mask, get_nibble_mask, u32, "0x%x\n")
+MR_ATTR_SHOW(bank_group, get_bank_group, u32, "%u\n")
+MR_ATTR_SHOW(bank, get_bank, u32, "%u\n")
+MR_ATTR_SHOW(rank, get_rank, u32, "%u\n")
+MR_ATTR_SHOW(row, get_row, u32, "0x%x\n")
+MR_ATTR_SHOW(column, get_column, u32, "%u\n")
+MR_ATTR_SHOW(channel, get_channel, u32, "%u\n")
+MR_ATTR_SHOW(sub_channel, get_sub_channel, u32, "%u\n")
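
Each MR_ATTR_SHOW() instantiation above expands to an ordinary sysfs show routine. Reconstructed by hand for reference, MR_ATTR_SHOW(bank, get_bank, u32, "%u\n") yields roughly:

static ssize_t bank_show(struct device *ras_feat_dev,
			 struct device_attribute *attr, char *buf)
{
	u8 inst = TO_MR_DEV_ATTR(attr)->instance;
	struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
	const struct edac_mem_repair_ops *ops =
		ctx->mem_repair[inst].mem_repair_ops;
	u32 data;
	int ret;

	ret = ops->get_bank(ras_feat_dev->parent,
			    ctx->mem_repair[inst].private, &data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", data);
}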
+
+#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_ATTR_STORE(persist_mode, set_persist_mode, unsigned long, kstrtoul)
+MR_ATTR_STORE(hpa, set_hpa, u64, kstrtou64)
+MR_ATTR_STORE(dpa, set_dpa, u64, kstrtou64)
+MR_ATTR_STORE(nibble_mask, set_nibble_mask, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank_group, set_bank_group, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank, set_bank, unsigned long, kstrtoul)
+MR_ATTR_STORE(rank, set_rank, unsigned long, kstrtoul)
+MR_ATTR_STORE(row, set_row, unsigned long, kstrtoul)
+MR_ATTR_STORE(column, set_column, unsigned long, kstrtoul)
+MR_ATTR_STORE(channel, set_channel, unsigned long, kstrtoul)
+MR_ATTR_STORE(sub_channel, set_sub_channel, unsigned long, kstrtoul)
+
+#define MR_DO_OP(attrib, cb) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops; \
+ unsigned long data; \
+ int ret; \
+ \
+ ret = kstrtoul(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_DO_OP(repair, do_repair)
+
+static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ u8 inst = TO_MR_DEV_ATTR(dev_attr)->instance;
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops;
+
+ switch (attr_id) {
+ case MR_TYPE:
+ if (ops->get_repair_type)
+ return a->mode;
+ break;
+ case MR_PERSIST_MODE:
+ if (ops->get_persist_mode) {
+ if (ops->set_persist_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SAFE_IN_USE:
+ if (ops->get_repair_safe_when_in_use)
+ return a->mode;
+ break;
+ case MR_HPA:
+ if (ops->get_hpa) {
+ if (ops->set_hpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_HPA:
+ if (ops->get_min_hpa)
+ return a->mode;
+ break;
+ case MR_MAX_HPA:
+ if (ops->get_max_hpa)
+ return a->mode;
+ break;
+ case MR_DPA:
+ if (ops->get_dpa) {
+ if (ops->set_dpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_DPA:
+ if (ops->get_min_dpa)
+ return a->mode;
+ break;
+ case MR_MAX_DPA:
+ if (ops->get_max_dpa)
+ return a->mode;
+ break;
+ case MR_NIBBLE_MASK:
+ if (ops->get_nibble_mask) {
+ if (ops->set_nibble_mask)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK_GROUP:
+ if (ops->get_bank_group) {
+ if (ops->set_bank_group)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK:
+ if (ops->get_bank) {
+ if (ops->set_bank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_RANK:
+ if (ops->get_rank) {
+ if (ops->set_rank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_ROW:
+ if (ops->get_row) {
+ if (ops->set_row)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_COLUMN:
+ if (ops->get_column) {
+ if (ops->set_column)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_CHANNEL:
+ if (ops->get_channel) {
+ if (ops->set_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SUB_CHANNEL:
+ if (ops->get_sub_channel) {
+ if (ops->set_sub_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MEM_DO_REPAIR:
+ if (ops->do_repair)
+ return a->mode;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
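
Every read/write case in the switch above applies the same rule: hide the attribute when there is no getter, expose it read-only when there is a getter but no setter, and keep the declared mode when both exist. A hypothetical helper capturing that rule (not part of the driver, shown only to summarize it):

static umode_t rw_attr_mode(bool has_get, bool has_set, umode_t full)
{
	if (!has_get)
		return 0;			/* attribute is hidden */

	return has_set ? full : 0444;		/* read-only without a setter */
}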
+
+static const struct device_attribute mem_repair_dev_attr[] = {
+ [MR_TYPE] = __ATTR_RO(repair_type),
+ [MR_PERSIST_MODE] = __ATTR_RW(persist_mode),
+ [MR_SAFE_IN_USE] = __ATTR_RO(repair_safe_when_in_use),
+ [MR_HPA] = __ATTR_RW(hpa),
+ [MR_MIN_HPA] = __ATTR_RO(min_hpa),
+ [MR_MAX_HPA] = __ATTR_RO(max_hpa),
+ [MR_DPA] = __ATTR_RW(dpa),
+ [MR_MIN_DPA] = __ATTR_RO(min_dpa),
+ [MR_MAX_DPA] = __ATTR_RO(max_dpa),
+ [MR_NIBBLE_MASK] = __ATTR_RW(nibble_mask),
+ [MR_BANK_GROUP] = __ATTR_RW(bank_group),
+ [MR_BANK] = __ATTR_RW(bank),
+ [MR_RANK] = __ATTR_RW(rank),
+ [MR_ROW] = __ATTR_RW(row),
+ [MR_COLUMN] = __ATTR_RW(column),
+ [MR_CHANNEL] = __ATTR_RW(channel),
+ [MR_SUB_CHANNEL] = __ATTR_RW(sub_channel),
+ [MEM_DO_REPAIR] = __ATTR_WO(repair)
+};
+
+static int mem_repair_create_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{
+ struct edac_mem_repair_context *ctx;
+ struct attribute_group *group;
+ int i;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < MR_MAX_ATTRS; i++) {
+ ctx->mem_repair_dev_attr[i].dev_attr = mem_repair_dev_attr[i];
+ ctx->mem_repair_dev_attr[i].instance = instance;
+ sysfs_attr_init(&ctx->mem_repair_dev_attr[i].dev_attr.attr);
+ ctx->mem_repair_attrs[i] =
+ &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(ctx->name, "%s%d", "mem_repair", instance);
+ group = &ctx->group;
+ group->name = ctx->name;
+ group->attrs = ctx->mem_repair_attrs;
+ group->is_visible = mem_repair_attr_visible;
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_mem_repair_get_desc - get EDAC memory repair descriptors
+ * @dev: client device with memory repair feature
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's memory repair instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!dev || !attr_groups)
+ return -EINVAL;
+
+ return mem_repair_create_desc(dev, attr_groups, instance);
+}
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index d0266cbcbeda..a45dc6b35ede 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -323,7 +323,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
- .remove_new = mpc85xx_pci_err_remove,
+ .remove = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
@@ -627,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
- .remove_new = mpc85xx_l2_err_remove,
+ .remove = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
.of_match_table = mpc85xx_l2_err_of_match,
@@ -656,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove_new = fsl_mc_err_remove,
+ .remove = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c
index 2e2133b784e9..e60a99eb8cfb 100644
--- a/drivers/edac/npcm_edac.c
+++ b/drivers/edac/npcm_edac.c
@@ -531,7 +531,7 @@ static struct platform_driver npcm_edac_driver = {
.of_match_table = npcm_edac_of_match,
},
.probe = edac_probe,
- .remove_new = edac_remove,
+ .remove = edac_remove,
};
module_platform_driver(npcm_edac_driver);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 2adb9c8093f8..e6b1595a3cb5 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -194,7 +194,7 @@ static void octeon_l2c_remove(struct platform_device *pdev)
static struct platform_driver octeon_l2c_driver = {
.probe = octeon_l2c_probe,
- .remove_new = octeon_l2c_remove,
+ .remove = octeon_l2c_remove,
.driver = {
.name = "octeon_l2c_edac",
}
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 4112c2ee34b8..f7176b95b4fe 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -312,7 +312,7 @@ static void octeon_lmc_edac_remove(struct platform_device *pdev)
static struct platform_driver octeon_lmc_edac_driver = {
.probe = octeon_lmc_edac_probe,
- .remove_new = octeon_lmc_edac_remove,
+ .remove = octeon_lmc_edac_remove,
.driver = {
.name = "octeon_lmc_edac",
}
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index d9eeb40d2784..aa1219db0b17 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -130,7 +130,7 @@ static void co_cache_error_remove(struct platform_device *pdev)
static struct platform_driver co_cache_error_driver = {
.probe = co_cache_error_probe,
- .remove_new = co_cache_error_remove,
+ .remove = co_cache_error_remove,
.driver = {
.name = "octeon_pc_edac",
}
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 4d368af2c5f0..c4f3bc33a971 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -97,7 +97,7 @@ static void octeon_pci_remove(struct platform_device *pdev)
static struct platform_driver octeon_pci_driver = {
.probe = octeon_pci_probe,
- .remove_new = octeon_pci_remove,
+ .remove = octeon_pci_remove,
.driver = {
.name = "octeon_pci_edac",
}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index f93f2f2b1cf2..af14c8a3279f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -372,7 +372,7 @@ static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
- const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ static const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
@@ -489,7 +489,7 @@ static int dnv_get_registers(void)
*/
static int get_registers(void)
{
- const int intlv[] = { 10, 11, 12, 12 };
+ static const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index d3cd4cc54ace..f3da9385ca0d 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
@@ -342,9 +342,11 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
- if (rc)
- return rc;
+ if (!llcc_driv_data->ecc_irq_configured) {
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ if (rc)
+ return rc;
+ }
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
@@ -405,7 +407,7 @@ MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
- .remove_new = qcom_llcc_edac_remove,
+ .remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
new file mode 100644
index 000000000000..f9d02af2fc3a
--- /dev/null
+++ b/drivers/edac/scrub.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC scrub driver controls the memory scrubbers in the
+ * system. The common sysfs scrub interface abstracts the control of
+ * various arbitrary scrubbing functionalities into a unified set of
+ * functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_scrub_attributes {
+ SCRUB_ADDRESS,
+ SCRUB_SIZE,
+ SCRUB_ENABLE_BACKGROUND,
+ SCRUB_MIN_CYCLE_DURATION,
+ SCRUB_MAX_CYCLE_DURATION,
+ SCRUB_CUR_CYCLE_DURATION,
+ SCRUB_MAX_ATTRS
+};
+
+struct edac_scrub_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_scrub_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
+ struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_SCRUB_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)
+
+#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
+
+#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
+EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
+
+static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
+
+ switch (attr_id) {
+ case SCRUB_ADDRESS:
+ if (ops->read_addr) {
+ if (ops->write_addr)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_SIZE:
+ if (ops->read_size) {
+ if (ops->write_size)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_ENABLE_BACKGROUND:
+ if (ops->get_enabled_bg) {
+ if (ops->set_enabled_bg)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_MIN_CYCLE_DURATION:
+ if (ops->get_min_cycle)
+ return a->mode;
+ break;
+ case SCRUB_MAX_CYCLE_DURATION:
+ if (ops->get_max_cycle)
+ return a->mode;
+ break;
+ case SCRUB_CUR_CYCLE_DURATION:
+ if (ops->get_cycle_duration) {
+ if (ops->set_cycle_duration)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int scrub_create_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ struct edac_scrub_context *scrub_ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_scrub_dev_attr dev_attr[] = {
+ [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
+ [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
+ [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
+ [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
+ [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
+ [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
+ };
+
+ scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
+ if (!scrub_ctx)
+ return -ENOMEM;
+
+ group = &scrub_ctx->group;
+ for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
+ memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ sysfs_attr_init(&scrub_ctx->scrub_dev_attr[i].dev_attr.attr);
+ scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
+ }
+ sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
+ group->name = scrub_ctx->name;
+ group->attrs = scrub_ctx->scrub_attrs;
+ group->is_visible = scrub_attr_visible;
+
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_scrub_get_desc - get EDAC scrub descriptors
+ * @scrub_dev: client device, with scrub support
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's scrub instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!scrub_dev || !attr_groups)
+ return -EINVAL;
+
+ return scrub_create_desc(scrub_dev, attr_groups, instance);
+}
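
A caller-side sketch, assuming the descriptor fills one slot of a NULL-terminated attribute-group array (the names and the surrounding device-registration code here are hypothetical):

	const struct attribute_group *scrub_groups[2] = { NULL, NULL };
	int err;

	err = edac_scrub_get_desc(scrub_dev, scrub_groups, 0);
	if (err)
		return err;
	/*
	 * scrub_groups[0] now describes a "scrub0" sysfs group whose
	 * visible attributes match what the scrub_ops actually implement.
	 */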
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 14cfd394b469..078ddf95cc6e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -33,6 +33,15 @@ static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 2,
+ .ddr_chan_num = 3,
+ .ddr_dimm_num = 2,
+};
+
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
@@ -52,7 +61,7 @@ enum munittype {
struct munit {
u16 did;
- u16 devfn[SKX_NUM_IMC];
+ u16 devfn[2];
u8 busidx;
u8 per_socket;
enum munittype mtype;
@@ -89,11 +98,11 @@ static int get_all_munits(const struct munit *m)
if (!pdev)
break;
ndev++;
- if (m->per_socket == SKX_NUM_IMC) {
- for (i = 0; i < SKX_NUM_IMC; i++)
+ if (m->per_socket == skx_cfg.ddr_imc_num) {
+ for (i = 0; i < skx_cfg.ddr_imc_num; i++)
if (m->devfn[i] == pdev->devfn)
break;
- if (i == SKX_NUM_IMC)
+ if (i == skx_cfg.ddr_imc_num)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
@@ -157,14 +166,8 @@ fail:
return -ENODEV;
}
-static struct res_config skx_cfg = {
- .type = SKX,
- .decs_did = 0x2016,
- .busno_cfg_offset = 0xcc,
-};
-
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -186,11 +189,11 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
- for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ for (i = 0; i < cfg->ddr_chan_num; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
- for (j = 0; j < SKX_NUM_DIMMS; j++) {
+ for (j = 0; j < cfg->ddr_dimm_num; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
@@ -600,7 +603,7 @@ static int __init skx_init(void)
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
- u8 mc = 0, src_id, node_id;
+ u8 mc = 0, src_id;
struct skx_dev *d;
edac_dbg(2, "\n");
@@ -620,6 +623,7 @@ static int __init skx_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
@@ -650,15 +654,15 @@ static int __init skx_init(void)
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
- edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
- for (i = 0; i < SKX_NUM_IMC; i++) {
+
+ edac_dbg(2, "src_id = %d\n", src_id);
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
+
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 85713646957b..724842f512ac 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -14,11 +14,14 @@
* Copyright (c) 2018, Intel Corporation.
*/
+#include <linux/topology.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
+#include <linux/overflow.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
+#include <asm/uv/uv.h>
#include "edac_module.h"
#include "skx_common.h"
@@ -47,6 +50,7 @@ static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;
+static struct res_config *skx_res_cfg;
int skx_adxl_get(void)
{
@@ -114,15 +118,51 @@ EXPORT_SYMBOL_GPL(skx_adxl_get);
void skx_adxl_put(void)
{
+ adxl_component_count = 0;
kfree(adxl_values);
kfree(adxl_msg);
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
-static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
+static void skx_init_mc_mapping(struct skx_dev *d)
{
+ /*
+ * By default, the BIOS presents all memory controllers within each
+ * socket to the EDAC driver. The physical indices are the same as
+ * the logical indices of the memory controllers enumerated by the
+ * EDAC driver.
+ */
+ for (int i = 0; i < d->num_imc; i++)
+ d->imc[i].mc_mapping = i;
+}
+
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
+{
+ edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ d->imc[lmc].mc_mapping = pmc;
+}
+EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
+
+static int skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+{
+ for (int lmc = 0; lmc < d->num_imc; lmc++) {
+ if (d->imc[lmc].mc_mapping == pmc) {
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ return lmc;
+ }
+ }
+
+ return -1;
+}
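
A worked example of the mapping (the hidden controller is hypothetical): if the BIOS hides physical mc1 out of three controllers, the defaults from skx_init_mc_mapping() are overridden by two skx_set_mc_mapping() calls, and lookups then behave as follows:

	skx_set_mc_mapping(d, 0, 0);	/* physical mc0 -> logical mc0 */
	skx_set_mc_mapping(d, 2, 1);	/* physical mc2 -> logical mc1 */

	/* With d->num_imc == 2: */
	skx_get_mc_mapping(d, 0);	/* returns 0 */
	skx_get_mc_mapping(d, 2);	/* returns 1 */
	skx_get_mc_mapping(d, 1);	/* returns -1: hidden, error rejected */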
+
+static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
+{
+ int i, lmc, len = 0;
struct skx_dev *d;
- int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
@@ -135,8 +175,24 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
return false;
}
+ /*
+ * GNR with a Flat2LM memory configuration may mistakenly classify
+ * a near-memory error (DDR5) as a far-memory error (CXL), resulting
+ * in the incorrect selection of decoded ADXL components.
+ * To address this, prefetch the decoded far-memory controller ID
+ * and adjust the error source to near-memory if the far-memory
+ * controller ID is invalid.
+ */
+ if (skx_res_cfg && skx_res_cfg->type == GNR && err_src == ERR_SRC_2LM_FM) {
+ res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
+ if (res->imc == -1) {
+ err_src = ERR_SRC_2LM_NM;
+ edac_dbg(0, "Adjust the error source to near-memory.\n");
+ }
+ }
+
res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
- if (error_in_1st_level_mem) {
+ if (err_src == ERR_SRC_2LM_NM) {
res->imc = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
(int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
@@ -152,7 +208,7 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
- if (res->imc > NUM_IMC - 1 || res->imc < 0) {
+ if (res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -170,6 +226,14 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
return false;
}
+ lmc = skx_get_mc_mapping(d, res->imc);
+ if (lmc < 0) {
+ skx_printk(KERN_ERR, "No lmc for imc %d\n", res->imc);
+ return false;
+ }
+
+ res->imc = lmc;
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -191,6 +255,12 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
}
EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
+void skx_set_res_cfg(struct res_config *cfg)
+{
+ skx_res_cfg = cfg;
+}
+EXPORT_SYMBOL_GPL(skx_set_res_cfg);
+
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
driver_decode = decode;
@@ -198,33 +268,51 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
}
EXPORT_SYMBOL_GPL(skx_set_decode);
-int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
{
- u32 reg;
+ int node;
+ int cpu;
- if (pci_read_config_dword(d->util_all, off, &reg)) {
- skx_printk(KERN_ERR, "Failed to read src id\n");
- return -ENODEV;
+ node = pcibus_to_node(d->util_all->bus);
+ if (numa_valid_node(node)) {
+ for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ *id = topology_physical_package_id(cpu);
+ return 0;
+ }
+ }
}
- *id = GET_BITFIELD(reg, 12, 14);
- return 0;
+ skx_printk(KERN_ERR, "Failed to get package ID from NUMA information\n");
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(skx_get_src_id);
-int skx_get_node_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
- skx_printk(KERN_ERR, "Failed to read node id\n");
+ /*
+ * The 3-bit source IDs in PCI configuration space registers are limited
+ * to 8 unique IDs, and each ID is local to a UPI/QPI domain.
+ *
+ * Source IDs cannot be used to map devices to sockets on UV systems
+ * because they can exceed 8 sockets and have multiple UPI/QPI domains
+ * with identical, repeating source IDs.
+ */
+ if (is_uv_system())
+ return skx_get_pkg_id(d, id);
+
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
+ skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
- *id = GET_BITFIELD(reg, 0, 2);
+ *id = GET_BITFIELD(reg, 12, 14);
return 0;
}
-EXPORT_SYMBOL_GPL(skx_get_node_id);
+EXPORT_SYMBOL_GPL(skx_get_src_id);
static int get_width(u32 mtr)
{
@@ -246,10 +334,10 @@ static int get_width(u32 mtr)
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
+ int ndev = 0, imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
- int ndev = 0;
prev = NULL;
for (;;) {
@@ -257,7 +345,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
- d = kzalloc(sizeof(*d), GFP_KERNEL);
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -280,10 +368,14 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->seg = GET_BITFIELD(reg, 16, 23);
}
- edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
- d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ d->num_imc = imc_num;
+
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x, imcs %d\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3], imc_num);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
+
+ skx_init_mc_mapping(d);
}
if (list)
@@ -465,10 +557,10 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
- layers[0].size = NUM_CHANNELS;
+ layers[0].size = imc->num_channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
- layers[1].size = NUM_DIMMS;
+ layers[1].size = imc->num_dimms;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
@@ -484,7 +576,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
- imc->node_id, imc->lmc);
+ imc->src_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
@@ -594,12 +686,12 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
}
if (res->decoded_by_adxl) {
- len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
+ len = scnprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
- len = snprintf(skx_msg, MSG_SIZE,
+ len = scnprintf(skx_msg, MSG_SIZE,
"%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
@@ -620,31 +712,27 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
optype, skx_msg);
}
-static bool skx_error_in_1st_level_mem(const struct mce *m)
+static enum error_source skx_error_source(const struct mce *m)
{
- u32 errcode;
-
- if (!skx_mem_cfg_2lm)
- return false;
+ u32 errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
- errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
-
- return errcode == MCACOD_EXT_MEM_ERR;
-}
+ if (errcode != MCACOD_MEM_CTL_ERR && errcode != MCACOD_EXT_MEM_ERR)
+ return ERR_SRC_NOT_MEMORY;
-static bool skx_error_in_mem(const struct mce *m)
-{
- u32 errcode;
+ if (!skx_mem_cfg_2lm)
+ return ERR_SRC_1LM;
- errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+ if (errcode == MCACOD_EXT_MEM_ERR)
+ return ERR_SRC_2LM_NM;
- return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
+ return ERR_SRC_2LM_FM;
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
+ enum error_source err_src;
struct decoded_addr res;
struct mem_ctl_info *mci;
char *type;
@@ -652,8 +740,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
+ err_src = skx_error_source(mce);
+
/* Ignore unless this is memory related with an address */
- if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
+ if (err_src == ERR_SRC_NOT_MEMORY || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
@@ -667,7 +757,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
/* Then try firmware decoder (ACPI DSM methods) */
- if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
+ if (!(adxl_component_count && skx_adxl_decode(&res, err_src)))
return NOTIFY_DONE;
}
@@ -710,7 +800,7 @@ void skx_remove(void)
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
- for (i = 0; i < NUM_IMC; i++) {
+ for (i = 0; i < d->num_imc; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
@@ -720,7 +810,7 @@ void skx_remove(void)
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
- for (j = 0; j < NUM_CHANNELS; j++) {
+ for (j = 0; j < d->imc[i].num_channels; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index f945c1bf5ca4..73ba89786cdf 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -29,23 +29,18 @@
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
-#define SKX_NUM_IMC 2 /* Memory controllers per socket */
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
-#define I10NM_NUM_HBM_IMC 16
#define I10NM_NUM_HBM_CHANNELS 2
#define I10NM_NUM_HBM_DIMMS 1
-#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -79,6 +74,47 @@
*/
#define MCACOD_EXT_MEM_ERR 0x280
+/* Max RRL register sets per {,sub-,pseudo-}channel. */
+#define NUM_RRL_SET 4
+/* Max RRL registers per set. */
+#define NUM_RRL_REG 6
+/* Max correctable error count registers. */
+#define NUM_CECNT_REG 8
+
+/* Modes of RRL register set. */
+enum rrl_mode {
+ /* Last read error from patrol scrub. */
+ LRE_SCRUB,
+ /* Last read error from demand. */
+ LRE_DEMAND,
+ /* First read error from patrol scrub. */
+ FRE_SCRUB,
+ /* First read error from demand. */
+ FRE_DEMAND,
+};
+
+/* RRL registers per {,sub-,pseudo-}channel. */
+struct reg_rrl {
+ /* RRL register parts. */
+ int set_num, reg_num;
+ enum rrl_mode modes[NUM_RRL_SET];
+ u32 offsets[NUM_RRL_SET][NUM_RRL_REG];
+ /* RRL register widths in bytes per set. */
+ u8 widths[NUM_RRL_REG];
+ /* RRL control bits of the first register per set. */
+ u32 v_mask;
+ u32 uc_mask;
+ u32 over_mask;
+ u32 en_patspr_mask;
+ u32 noover_mask;
+ u32 en_mask;
+
+ /* CORRERRCNT register parts. */
+ int cecnt_num;
+ u32 cecnt_offsets[NUM_CECNT_REG];
+ u8 cecnt_widths[NUM_CECNT_REG];
+};
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -93,6 +129,7 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
+ int num_imc;
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -103,13 +140,25 @@ struct skx_dev {
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
- u8 src_id, node_id;
+ u8 src_id;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error addresses using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used by the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
- u32 retry_rd_err_log_s;
- u32 retry_rd_err_log_d;
- u32 retry_rd_err_log_d2;
+ /*
+ * Two groups of saved RRL control values per channel, holding the
+ * default settings of the two {sub-,pseudo-}channels while Linux
+ * is in RRL control mode.
+ */
+ u32 rrl_ctl[2][NUM_RRL_SET];
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -118,7 +167,7 @@ struct skx_dev {
u8 colbits;
} dimms[NUM_DIMMS];
} chan[NUM_CHANNELS];
- } imc[NUM_IMC];
+ } imc[];
};
struct skx_pvt {
@@ -146,6 +195,13 @@ enum {
INDEX_MAX
};
+enum error_source {
+ ERR_SRC_1LM,
+ ERR_SRC_2LM_NM,
+ ERR_SRC_2LM_FM,
+ ERR_SRC_NOT_MEMORY,
+};
+
#define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL)
#define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL)
#define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM)
@@ -215,14 +271,10 @@ struct res_config {
/* HBM mdev device BDF */
struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
- /* Offsets of retry_rd_err_log registers */
- u32 *offsets_scrub;
- u32 *offsets_scrub_hbm0;
- u32 *offsets_scrub_hbm1;
- u32 *offsets_demand;
- u32 *offsets_demand2;
- u32 *offsets_demand_hbm0;
- u32 *offsets_demand_hbm1;
+ /* RRL register sets per DDR channel */
+ struct reg_rrl *reg_rrl_ddr;
+ /* RRL register sets per HBM channel */
+ struct reg_rrl *reg_rrl_hbm[2];
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
@@ -234,9 +286,10 @@ int skx_adxl_get(void);
void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
+void skx_set_res_cfg(struct res_config *cfg);
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
-int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
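skx_set_mc_mapping() above records, per present controller, the physical index that ADXL will later report. The decode-side lookup is not visible in this hunk; a hedged sketch of what it might look like, assuming mc_mapping holds the physical index of imc[lmc]:

/* Illustrative only: translate an ADXL physical MC index to the
 * logical index used for d->imc[]. Returns -ENODEV for controllers
 * hidden by BIOS (never registered, so never stored in the map).
 */
static int skx_pmc_to_lmc(struct skx_dev *d, u8 pmc)
{
	int lmc;

	for (lmc = 0; lmc < d->num_imc; lmc++)
		if (d->imc[lmc].mc_mapping == pmc)
			return lmc;

	return -ENODEV;
}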
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index d7416166fd8a..51143b3257de 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -332,20 +332,26 @@ struct synps_edac_priv {
#endif
};
+enum synps_platform_type {
+ ZYNQ,
+ ZYNQMP,
+ SYNPS,
+};
+
/**
* struct synps_platform_data - synps platform data structure.
+ * @platform: Identifies the target hardware platform.
* @get_error_info: Get EDAC error info.
* @get_mtype: Get mtype.
* @get_dtype: Get dtype.
- * @get_ecc_state: Get ECC state.
* @get_mem_info: Get EDAC memory info
* @quirks: To differentiate IPs.
*/
struct synps_platform_data {
+ enum synps_platform_type platform;
int (*get_error_info)(struct synps_edac_priv *priv);
enum mem_type (*get_mtype)(const void __iomem *base);
enum dev_type (*get_dtype)(const void __iomem *base);
- bool (*get_ecc_state)(void __iomem *base);
#ifdef CONFIG_EDAC_DEBUG
u64 (*get_mem_info)(struct synps_edac_priv *priv);
#endif
@@ -720,51 +726,38 @@ static enum dev_type zynqmp_get_dtype(const void __iomem *base)
return dt;
}
-/**
- * zynq_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
- *
- * Get the ECC enable/disable status of the controller.
- *
- * Return: true if enabled, otherwise false.
- */
-static bool zynq_get_ecc_state(void __iomem *base)
+static bool get_ecc_state(struct synps_edac_priv *priv)
{
+ u32 ecctype, clearval;
enum dev_type dt;
- u32 ecctype;
-
- dt = zynq_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
- return true;
-
- return false;
-}
-
-/**
- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
- *
- * Get the ECC enable/disable status for the controller.
- *
- * Return: a ECC status boolean i.e true/false - enabled/disabled.
- */
-static bool zynqmp_get_ecc_state(void __iomem *base)
-{
- enum dev_type dt;
- u32 ecctype;
-
- dt = zynqmp_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
-
- ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) &&
- ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
- return true;
+ if (priv->p_data->platform == ZYNQ) {
+ dt = zynq_get_dtype(priv->baseaddr);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK;
+ if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) {
+ clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR;
+ writel(clearval, priv->baseaddr + ECC_CTRL_OFST);
+ writel(0x0, priv->baseaddr + ECC_CTRL_OFST);
+ return true;
+ }
+ } else {
+ dt = zynqmp_get_dtype(priv->baseaddr);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
+ if (ecctype == SCRUB_MODE_SECDED &&
+ (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) {
+ clearval = readl(priv->baseaddr + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(clearval, priv->baseaddr + ECC_CLR_OFST);
+ return true;
+ }
+ }
return false;
}
@@ -934,18 +927,18 @@ static int setup_irq(struct mem_ctl_info *mci,
}
static const struct synps_platform_data zynq_edac_def = {
+ .platform = ZYNQ,
.get_error_info = zynq_get_error_info,
.get_mtype = zynq_get_mtype,
.get_dtype = zynq_get_dtype,
- .get_ecc_state = zynq_get_ecc_state,
.quirks = 0,
};
static const struct synps_platform_data zynqmp_edac_def = {
+ .platform = ZYNQMP,
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
#ifdef CONFIG_EDAC_DEBUG
.get_mem_info = zynqmp_get_mem_info,
#endif
@@ -957,10 +950,10 @@ static const struct synps_platform_data zynqmp_edac_def = {
};
static const struct synps_platform_data synopsys_edac_def = {
+ .platform = SYNPS,
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
.quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
@@ -1390,10 +1383,6 @@ static int mc_probe(struct platform_device *pdev)
if (!p_data)
return -ENODEV;
- if (!p_data->get_ecc_state(baseaddr)) {
- edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
- return -ENXIO;
- }
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = SYNPS_EDAC_NR_CSROWS;
@@ -1413,6 +1402,12 @@ static int mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
+ if (!get_ecc_state(priv)) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
+ rc = -ENODEV;
+ goto free_edac_mc;
+ }
+
spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
@@ -1488,7 +1483,7 @@ static struct platform_driver synps_edac_mc_driver = {
.of_match_table = synps_edac_match,
},
.probe = mc_probe,
- .remove_new = mc_remove,
+ .remove = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 29723c9592f7..39cc2ef9cac4 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -322,7 +322,7 @@ static void ti_edac_remove(struct platform_device *pdev)
static struct platform_driver ti_edac_driver = {
.probe = ti_edac_probe,
- .remove_new = ti_edac_remove,
+ .remove = ti_edac_remove,
.driver = {
.name = EDAC_MOD_NAME,
.of_match_table = ti_edac_of_match,
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index a556d23e8261..5a43b5d43ca2 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -1186,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = {
.of_match_table = xlnx_edac_match,
},
.probe = mc_probe,
- .remove_new = mc_remove,
+ .remove = mc_remove,
};
module_platform_driver(xilinx_ddr_edac_mc_driver);
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
new file mode 100644
index 000000000000..7c5db8bf0595
--- /dev/null
+++ b/drivers/edac/versalnet_edac.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal NET memory controller driver
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cdx/edac_cdx_pcol.h>
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ras.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/sizes.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define MC5_ERR_GRAIN 1
+#define MC_GET_DDR_CONFIG_IN_LEN 4
+
+#define MC5_IRQ_CE_MASK GENMASK(18, 15)
+#define MC5_IRQ_UE_MASK GENMASK(14, 11)
+
+#define MC5_RANK_1_MASK GENMASK(11, 6)
+#define MASK_24 GENMASK(29, 24)
+#define MASK_0 GENMASK(5, 0)
+
+#define MC5_LRANK_1_MASK GENMASK(11, 6)
+#define MC5_LRANK_2_MASK GENMASK(17, 12)
+#define MC5_BANK1_MASK GENMASK(11, 6)
+#define MC5_GRP_0_MASK GENMASK(17, 12)
+#define MC5_GRP_1_MASK GENMASK(23, 18)
+
+#define MC5_REGHI_ROW 7
+#define MC5_EACHBIT 1
+#define MC5_ERR_TYPE_CE 0
+#define MC5_ERR_TYPE_UE 1
+#define MC5_HIGH_MEM_EN BIT(20)
+#define MC5_MEM_MASK GENMASK(19, 0)
+#define MC5_X16_BASE 256
+#define MC5_X16_ECC 32
+#define MC5_X16_SIZE (MC5_X16_BASE + MC5_X16_ECC)
+#define MC5_X32_SIZE 576
+#define MC5_HIMEM_BASE (256 * SZ_1M)
+#define MC5_ILC_HIMEM_EN BIT(28)
+#define MC5_ILC_MEM GENMASK(27, 0)
+#define MC5_INTERLEAVE_SEL GENMASK(3, 0)
+#define MC5_BUS_WIDTH_MASK GENMASK(19, 18)
+#define MC5_NUM_CHANS_MASK BIT(17)
+#define MC5_RANK_MASK GENMASK(15, 14)
+
+#define ERROR_LEVEL 2
+#define ERROR_ID 3
+#define TOTAL_ERR_LENGTH 5
+#define MSG_ERR_OFFSET 8
+#define MSG_ERR_LENGTH 9
+#define ERROR_DATA 10
+#define MCDI_RESPONSE 0xFF
+
+#define REG_MAX 152
+#define ADEC_MAX 152
+#define NUM_CONTROLLERS 8
+#define REGS_PER_CONTROLLER 19
+#define ADEC_NUM 19
+#define BUFFER_SZ 80
+
+#define XDDR5_BUS_WIDTH_64 0
+#define XDDR5_BUS_WIDTH_32 1
+#define XDDR5_BUS_WIDTH_16 2
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: Combined ECC error vector containing the encoded burst position,
+ * logical rank, rank, group, bank, column, and row fields above.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:4;
+ u32 rank:2;
+ u32 group:3;
+ u32 bank:2;
+ u32 col:11;
+ u32 row:7;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
+
+/* Row and column bit positions in the address decoder (ADEC) registers. */
+union row_col_mapping {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
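The union gives two overlapping views of one 32-bit ADEC word: five 6-bit row-bit positions or five 6-bit column-bit positions. A short usage sketch, assuming reg holds a raw ADEC register value:

union row_col_mapping m = { .i = reg };	/* raw 32-bit ADEC word */

/* Viewed as a row mapping, m.row0..m.row4 name the physical-address
 * bit positions of five consecutive row-address bits.
 */
pr_debug("row bits land at PA bits %u %u %u %u %u\n",
	 m.row0, m.row1, m.row2, m.row3, m.row4);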
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable errors.
+ * @ueinfo: Uncorrected errors.
+ * @channel: Channel number.
+ * @error_type: Error type.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct mc_priv - DDR memory controller private instance data.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @error_id: The error id.
+ * @error_level: The error level.
+ * @dwidth: Width of data bus excluding ECC bits.
+ * @part_len: Accumulated length of a message received in parts.
+ * @regs: Register values received over rpmsg.
+ * @adec: Address decode registers.
+ * @mci: Memory controller interface.
+ * @ept: rpmsg endpoint.
+ * @mcdi: The mcdi handle.
+ */
+struct mc_priv {
+ char message[256];
+ struct ecc_status stat;
+ u32 error_id;
+ u32 error_level;
+ u32 dwidth;
+ u32 part_len;
+ u32 regs[REG_MAX];
+ u32 adec[ADEC_MAX];
+ struct mem_ctl_info *mci[NUM_CONTROLLERS];
+ struct rpmsg_endpoint *ept;
+ struct cdx_mcdi *mcdi;
+};
+
+/*
+ * Address decoder (ADEC) registers to match the order in which the register
+ * information is received from the firmware.
+ */
+enum adec_info {
+ CONF = 0,
+ ADEC0,
+ ADEC1,
+ ADEC2,
+ ADEC3,
+ ADEC4,
+ ADEC5,
+ ADEC6,
+ ADEC7,
+ ADEC8,
+ ADEC9,
+ ADEC10,
+ ADEC11,
+ ADEC12,
+ ADEC13,
+ ADEC14,
+ ADEC15,
+ ADEC16,
+ ADECILC,
+};
+
+enum reg_info {
+ ISR = 0,
+ IMR,
+ ECCR0_ERR_STATUS,
+ ECCR0_ADDR_LO,
+ ECCR0_ADDR_HI,
+ ECCR0_DATA_LO,
+ ECCR0_DATA_HI,
+ ECCR0_PAR,
+ ECCR1_ERR_STATUS,
+ ECCR1_ADDR_LO,
+ ECCR1_ADDR_HI,
+ ECCR1_DATA_LO,
+ ECCR1_DATA_HI,
+ ECCR1_PAR,
+ XMPU_ERR,
+ XMPU_ERR_ADDR_L0,
+ XMPU_ERR_ADDR_HI,
+ XMPU_ERR_AXI_ID,
+ ADEC_CHK_ERR_LOG,
+};
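Each controller contributes REGS_PER_CONTROLLER consecutive words to priv->regs[], laid out in the enum reg_info order above, so a single word is addressed as in this small sketch (the helper name is illustrative):

/* Word 'r' of controller 'i' in the flattened register buffer. */
static u32 mc5_reg(const struct mc_priv *priv, int i, enum reg_info r)
{
	return priv->regs[i * REGS_PER_CONTROLLER + r];
}

/* e.g. the interrupt status of controller 3: mc5_reg(priv, 3, ISR) */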
+
+static bool get_ddr_info(u32 *error_data, struct mc_priv *priv)
+{
+ u32 reglo, reghi, parity, eccr0_val, eccr1_val, isr;
+ struct ecc_status *p;
+
+ isr = error_data[ISR];
+
+ if (!(isr & (MC5_IRQ_UE_MASK | MC5_IRQ_CE_MASK)))
+ return false;
+
+ eccr0_val = error_data[ECCR0_ERR_STATUS];
+ eccr1_val = error_data[ECCR1_ERR_STATUS];
+
+ if (!eccr0_val && !eccr1_val)
+ return false;
+
+ p = &priv->stat;
+
+ if (!eccr0_val)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ reglo = error_data[ECCR0_ADDR_LO];
+ reghi = error_data[ECCR0_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[0].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[0].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR0_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ reglo = error_data[ECCR1_ADDR_LO];
+ reghi = error_data[ECCR1_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[1].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[1].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR1_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ return true;
+}
+
+/**
+ * convert_to_physical - Convert @error_data to a physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ * @controller: Controller number of the MC5
+ * @error_data: the DDRMC5 ADEC address decoder register data
+ *
+ * Return: physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct mc_priv *priv,
+ union ecc_error_info pinf,
+ int controller, int *error_data)
+{
+ u32 row, blk, rsh_req_addr, interleave, ilc_base_ctrl_add, ilc_himem_en, reg, offset;
+ u64 high_mem_base, high_mem_offset, low_mem_offset, ilcmem_base;
+ unsigned long err_addr = 0, addr;
+ union row_col_mapping cols;
+ union row_col_mapping rows;
+ u32 col_bit_0;
+
+ row = pinf.rowhi << MC5_REGHI_ROW | pinf.row;
+ offset = controller * ADEC_NUM;
+
+ reg = error_data[ADEC6];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC7];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC8];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+
+ reg = error_data[ADEC9];
+ rows.i = reg;
+
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+
+ col_bit_0 = FIELD_GET(MASK_24, error_data[ADEC9]);
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << col_bit_0;
+
+ cols.i = error_data[ADEC10];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC11];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ reg = error_data[ADEC12];
+ err_addr |= (pinf.bank & BIT(0)) << (reg & MASK_0);
+ pinf.bank >>= MC5_EACHBIT;
+ err_addr |= (pinf.bank & BIT(0)) << FIELD_GET(MC5_BANK1_MASK, reg);
+ pinf.bank >>= MC5_EACHBIT;
+
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_0_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_1_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.group >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC4];
+ err_addr |= (pinf.rank & BIT(0)) << (reg & MASK_0);
+ pinf.rank >>= MC5_EACHBIT;
+ err_addr |= (pinf.rank & BIT(0)) << FIELD_GET(MC5_RANK_1_MASK, reg);
+ pinf.rank >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC5];
+ err_addr |= (pinf.lrank & BIT(0)) << (reg & MASK_0);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_1_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_2_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+
+ high_mem_base = (priv->adec[ADEC2 + offset] & MC5_MEM_MASK) * MC5_HIMEM_BASE;
+ interleave = priv->adec[ADEC13 + offset] & MC5_INTERLEAVE_SEL;
+
+ high_mem_offset = priv->adec[ADEC3 + offset] & MC5_MEM_MASK;
+ low_mem_offset = priv->adec[ADEC1 + offset] & MC5_MEM_MASK;
+ reg = priv->adec[ADEC14 + offset];
+ ilc_himem_en = !!(reg & MC5_ILC_HIMEM_EN);
+ ilcmem_base = (reg & MC5_ILC_MEM) * SZ_1M;
+ if (ilc_himem_en)
+ ilc_base_ctrl_add = ilcmem_base - high_mem_offset;
+ else
+ ilc_base_ctrl_add = ilcmem_base - low_mem_offset;
+
+ if (priv->dwidth == DEV_X16) {
+ blk = err_addr / MC5_X16_SIZE;
+ rsh_req_addr = (blk << 8) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ } else {
+ blk = err_addr / MC5_X32_SIZE;
+ rsh_req_addr = (blk << 9) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ }
+
+ if ((priv->adec[ADEC2 + offset] & MC5_HIGH_MEM_EN) && err_addr >= high_mem_base)
+ addr = err_addr - high_mem_offset;
+ else
+ addr = err_addr - low_mem_offset;
+
+ return addr;
+}
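The repeated shift-and-or blocks above all perform the same operation: scatter successive low bits of a field to the physical-address bit positions packed five per ADEC word. As a reading aid (not a proposed change to the patch), the ADEC6..ADEC9 row handling is equivalent to one loop:

/* Equivalent sketch: place the low 'nbits' bits of 'val' at the
 * positions named by consecutive ADEC words.
 */
static unsigned long scatter_row_bits(u32 val, const int *adec, int nbits)
{
	unsigned long out = 0;
	int w, f, done = 0;

	for (w = 0; done < nbits; w++) {
		union row_col_mapping m = { .i = adec[w] };
		u32 pos[] = { m.row0, m.row1, m.row2, m.row3, m.row4 };

		for (f = 0; f < 5 && done < nbits; f++, done++) {
			out |= (unsigned long)(val & 1) << pos[f];
			val >>= 1;
		}
	}
	return out;
}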
+
+/**
+ * handle_error - Handle errors.
+ * @priv: DDR memory controller private instance data.
+ * @stat: ECC status structure.
+ * @ctl_num: Controller number of the MC5
+ * @error_data: the MC5 ADEC address decoder register data
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
+ int ctl_num, int *error_data)
+{
+ union ecc_error_info pinf;
+ struct mem_ctl_info *mci;
+ unsigned long pa;
+ phys_addr_t pfn;
+ int err;
+
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
+ return;
+
+ mci = priv->mci[ctl_num];
+
+ if (stat->error_type == MC5_ERR_TYPE_CE) {
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "CE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (stat->error_type == MC5_ERR_TYPE_UE) {
+ pinf = stat->ueinfo[stat->channel];
+ pa = convert_to_physical(priv, pinf, ctl_num, error_data);
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s controller %d Addr at %lx\n",
+ "UE", ctl_num, pa);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ pfn = PHYS_PFN(pa);
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ err = memory_failure(pfn, MF_ACTION_REQUIRED);
+ if (err)
+ edac_dbg(2, "memory_failure() error: %d", err);
+ else
+ edac_dbg(2, "Poison page at PA 0x%lx\n", pa);
+ }
+ }
+}
+
+static void mc_init(struct mem_ctl_info *mci, struct device *dev)
+{
+ struct mc_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row;
+ int ch;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR5;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "VersalNET DDR5";
+ mci->dev_name = dev_name(dev);
+ mci->mod_name = "versalnet_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR5;
+ dimm->grain = MC5_ERR_GRAIN;
+ dimm->dtype = priv->dwidth;
+ }
+ }
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static unsigned int mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ void *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx->ept, send_buf, hdr_len + sdu_len);
+ if (ret)
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data: %d\n", ret);
+
+ kfree(send_buf);
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = mcdi_rpc_timeout,
+ .mcdi_request = mcdi_request,
+};
+
+static void get_ddr_config(u32 index, u32 *buffer, struct cdx_mcdi *amd_mcdi)
+{
+ size_t outlen;
+ int ret;
+
+ MCDI_DECLARE_BUF(inbuf, MC_GET_DDR_CONFIG_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, BUFFER_SZ);
+
+ MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, index);
+
+ ret = cdx_mcdi_rpc(amd_mcdi, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (!ret)
+ memcpy(buffer, MCDI_PTR(outbuf, GET_DDR_CONFIG),
+ (ADEC_NUM * 4));
+}
+
+static int setup_mcdi(struct mc_priv *mc_priv)
+{
+ struct cdx_mcdi *amd_mcdi;
+ int ret, i;
+
+ amd_mcdi = kzalloc(sizeof(*amd_mcdi), GFP_KERNEL);
+ if (!amd_mcdi)
+ return -ENOMEM;
+
+ amd_mcdi->mcdi_ops = &mcdi_ops;
+ ret = cdx_mcdi_init(amd_mcdi);
+ if (ret) {
+ kfree(amd_mcdi);
+ return ret;
+ }
+
+ amd_mcdi->ept = mc_priv->ept;
+ mc_priv->mcdi = amd_mcdi;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ get_ddr_config(i, &mc_priv->adec[ADEC_NUM * i], amd_mcdi);
+
+ return 0;
+}
+
+static const guid_t amd_versalnet_guid = GUID_INIT(0x82678888, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+ const guid_t *sec_type = &guid_null;
+ u32 length, offset, error_id;
+ u32 *result = (u32 *)data;
+ struct ecc_status *p;
+ int i, j, k, sec_sev;
+ const char *err_str;
+ u32 *adec_data;
+
+ if (*(u8 *)data == MCDI_RESPONSE) {
+ cdx_mcdi_process_cmd(mc_priv->mcdi, (struct cdx_dword *)data, len);
+ return 0;
+ }
+
+ sec_sev = result[ERROR_LEVEL];
+ error_id = result[ERROR_ID];
+ length = result[MSG_ERR_LENGTH];
+ offset = result[MSG_ERR_OFFSET];
+
+ if (result[TOTAL_ERR_LENGTH] > length) {
+ if (!mc_priv->part_len)
+ mc_priv->part_len = length;
+ else
+ mc_priv->part_len += length;
+ /*
+ * The data can arrive split across two messages. Reassemble the
+ * register array from both parts; 'offset' gives the position in
+ * the array at which this part's data belongs.
+ */
+ for (i = 0 ; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+ if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
+ return 0;
+ mc_priv->part_len = 0;
+ }
+
+ mc_priv->error_id = error_id;
+ mc_priv->error_level = result[ERROR_LEVEL];
+
+ switch (error_id) {
+ case 5: err_str = "General Software Non-Correctable error"; break;
+ case 6: err_str = "CFU error"; break;
+ case 7: err_str = "CFRAME error"; break;
+ case 10: err_str = "DDRMC Microblaze Correctable ECC error"; break;
+ case 11: err_str = "DDRMC Microblaze Non-Correctable ECC error"; break;
+ case 15: err_str = "MMCM error"; break;
+ case 16: err_str = "HNICX Correctable error"; break;
+ case 17: err_str = "HNICX Non-Correctable error"; break;
+
+ case 18:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = MC5_ERR_TYPE_CE;
+ for (i = 0 ; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+ case 19:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = MC5_ERR_TYPE_UE;
+ for (i = 0 ; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+
+ case 21: err_str = "GT Non-Correctable error"; break;
+ case 22: err_str = "PL Sysmon Correctable error"; break;
+ case 23: err_str = "PL Sysmon Non-Correctable error"; break;
+ case 111: err_str = "LPX unexpected dfx activation error"; break;
+ case 114: err_str = "INT_LPD Non-Correctable error"; break;
+ case 116: err_str = "INT_OCM Non-Correctable error"; break;
+ case 117: err_str = "INT_FPD Correctable error"; break;
+ case 118: err_str = "INT_FPD Non-Correctable error"; break;
+ case 120: err_str = "INT_IOU Non-Correctable error"; break;
+ case 123: err_str = "err_int_irq from APU GIC Distributor"; break;
+ case 124: err_str = "fault_int_irq from APU GIC Distribute"; break;
+ case 132 ... 139: err_str = "FPX SPLITTER error"; break;
+ case 140: err_str = "APU Cluster 0 error"; break;
+ case 141: err_str = "APU Cluster 1 error"; break;
+ case 142: err_str = "APU Cluster 2 error"; break;
+ case 143: err_str = "APU Cluster 3 error"; break;
+ case 145: err_str = "WWDT1 LPX error"; break;
+ case 147: err_str = "IPI error"; break;
+ case 152 ... 153: err_str = "AFIFS error"; break;
+ case 154 ... 155: err_str = "LPX glitch error"; break;
+ case 185 ... 186: err_str = "FPX AFIFS error"; break;
+ case 195 ... 199: err_str = "AFIFM error"; break;
+ case 108: err_str = "PSM Correctable error"; break;
+ case 59: err_str = "PMC correctable error"; break;
+ case 60: err_str = "PMC Un correctable error"; break;
+ case 43 ... 47: err_str = "PMC Sysmon error"; break;
+ case 163 ... 184: err_str = "RPU error"; break;
+ case 148: err_str = "OCM0 correctable error"; break;
+ case 149: err_str = "OCM1 correctable error"; break;
+ case 150: err_str = "OCM0 Un-correctable error"; break;
+ case 151: err_str = "OCM1 Un-correctable error"; break;
+ case 189: err_str = "PSX_CMN_3 PD block consolidated error"; break;
+ case 191: err_str = "FPD_INT_WRAP PD block consolidated error"; break;
+ case 232: err_str = "CRAM Un-Correctable error"; break;
+ default: err_str = "VERSAL_EDAC_ERR_ID: %d"; break;
+ }
+
+ snprintf(mc_priv->message,
+ sizeof(mc_priv->message),
+ "[VERSAL_EDAC_ERR_ID: %d] Error type: %s", error_id, err_str);
+
+ /* Convert to bytes */
+ length = result[TOTAL_ERR_LENGTH] * 4;
+ log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
+ sec_sev, (void *)&result[ERROR_DATA], length);
+
+ return 0;
+}
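Stripped of driver state, the partial-message handling above reduces to a small accumulator. This model (names and word-sized units assumed from the code) returns true only once all TOTAL_ERR_LENGTH words have arrived:

/* Model of the reassembly: copy this part into place, track progress. */
static bool mc5_accumulate(u32 *regs, u32 *part_len, const u32 *result)
{
	u32 total  = result[TOTAL_ERR_LENGTH];
	u32 len    = result[MSG_ERR_LENGTH];
	u32 offset = result[MSG_ERR_OFFSET];
	u32 i;

	for (i = 0; i < len; i++)
		regs[offset + i] = result[ERROR_DATA + i];

	*part_len += len;
	if (*part_len < total)
		return false;	/* wait for the remaining part */

	*part_len = 0;		/* complete: reset for the next event */
	return true;
}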
+
+static struct rpmsg_device_id amd_rpmsg_id_table[] = {
+ { .name = "error_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, amd_rpmsg_id_table);
+
+static int rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo;
+ struct mc_priv *pg;
+
+ pg = (struct mc_priv *)amd_rpmsg_id_table[0].driver_data;
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, amd_rpmsg_id_table[0].name, sizeof(chinfo.name));
+
+ pg->ept = rpmsg_create_ept(rpdev, rpmsg_cb, NULL, chinfo);
+ if (!pg->ept)
+ return dev_err_probe(&rpdev->dev, -ENXIO, "Failed to create ept for channel %s\n",
+ chinfo.name);
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ return 0;
+}
+
+static void rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+
+ rpmsg_destroy_ept(mc_priv->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver amd_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_probe,
+ .remove = rpmsg_remove,
+ .callback = rpmsg_cb,
+ .id_table = amd_rpmsg_id_table,
+};
+
+static void versal_edac_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int init_versalnet(struct mc_priv *priv, struct platform_device *pdev)
+{
+ u32 num_chans, rank, dwidth, config;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct device *dev;
+ enum dev_type dt;
+ char *name;
+ int rc, i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ config = priv->adec[CONF + i * ADEC_NUM];
+ num_chans = FIELD_GET(MC5_NUM_CHANS_MASK, config);
+ rank = 1 << FIELD_GET(MC5_RANK_MASK, config);
+ dwidth = FIELD_GET(MC5_BUS_WIDTH_MASK, config);
+
+ switch (dwidth) {
+ case XDDR5_BUS_WIDTH_16:
+ dt = DEV_X16;
+ break;
+ case XDDR5_BUS_WIDTH_32:
+ dt = DEV_X32;
+ break;
+ case XDDR5_BUS_WIDTH_64:
+ dt = DEV_X64;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ if (dt == DEV_UNKNOWN)
+ continue;
+
+ /* Find the first enabled device and register that one. */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = rank;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(i, ARRAY_SIZE(layers), layers,
+ sizeof(struct mc_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed memory allocation for MC%d\n", i);
+ goto err_alloc;
+ }
+
+ priv->mci[i] = mci;
+ priv->dwidth = dt;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto err_alloc;
+ dev->release = versal_edac_release;
+ name = kasprintf(GFP_KERNEL, "versal-net-ddrmc5-edac-%d", i);
+ if (!name) {
+ kfree(dev);
+ goto err_alloc;
+ }
+ dev->init_name = name;
+ rc = device_register(dev);
+ if (rc)
+ goto err_alloc;
+
+ mci->pdev = dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc_init(mci, dev);
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register MC%d with EDAC core\n", i);
+ goto err_alloc;
+ }
+ }
+ return 0;
+
+err_alloc:
+ while (i--) {
+ mci = priv->mci[i];
+ if (!mci)
+ continue;
+
+ if (mci->pdev) {
+ device_unregister(mci->pdev);
+ edac_mc_del_mc(mci->pdev);
+ }
+
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+static void remove_versalnet(struct mc_priv *priv)
+{
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (!priv->mci[i])
+ continue;
+
+ device_unregister(priv->mci[i]->pdev);
+ mci = edac_mc_del_mc(priv->mci[i]->pdev);
+ if (!mci)
+ return;
+
+ edac_mc_free(mci);
+ }
+}
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct mc_priv *priv;
+ struct rproc *rp;
+ int rc;
+
+ r5_core_node = of_parse_phandle(pdev->dev.of_node, "amd,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "amd,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ if (!rp)
+ return -EPROBE_DEFER;
+
+ rc = rproc_boot(rp);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ goto err_rproc_boot;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ amd_rpmsg_id_table[0].driver_data = (kernel_ulong_t)priv;
+
+ rc = register_rpmsg_driver(&amd_rpmsg_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register RPMsg driver: %d\n", rc);
+ goto err_alloc;
+ }
+
+ rc = setup_mcdi(priv);
+ if (rc)
+ goto err_unreg;
+
+ priv->mcdi->r5_rproc = rp;
+
+ rc = init_versalnet(priv, pdev);
+ if (rc)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ cdx_mcdi_finish(priv->mcdi);
+
+err_unreg:
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+
+err_alloc:
+ rproc_shutdown(rp);
+
+err_rproc_boot:
+ rproc_put(rp);
+
+ return rc;
+}
+
+static void mc_remove(struct platform_device *pdev)
+{
+ struct mc_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+ remove_versalnet(priv);
+ rproc_shutdown(priv->mcdi->r5_rproc);
+ cdx_mcdi_finish(priv->mcdi);
+}
+
+static const struct of_device_id amd_edac_match[] = {
+ { .compatible = "xlnx,versal-net-ddrmc5", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, amd_edac_match);
+
+static struct platform_driver amd_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "versal-net-edac",
+ .of_match_table = amd_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(amd_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Versal NET EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index fd87f1b2c145..9955396c9a52 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -1407,7 +1408,7 @@ static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
- info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+ str_read_write(info & REQTYPE_MASK), ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
@@ -1489,19 +1490,19 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
@@ -1560,7 +1561,7 @@ rb_skip:
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
- REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_F2_RD(err_addr_hi)),
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
@@ -1611,7 +1612,7 @@ chk_iob_axi0:
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
@@ -1625,7 +1626,7 @@ chk_iob_axi1:
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
@@ -1989,7 +1990,7 @@ MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
static struct platform_driver xgene_edac_driver = {
.probe = xgene_edac_probe,
- .remove_new = xgene_edac_remove,
+ .remove = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
.of_match_table = xgene_edac_of_match,
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index c9dc78d8c824..cdffc9e4194d 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -455,7 +455,7 @@ static struct platform_driver zynqmp_ocm_edac_driver = {
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
- .remove_new = edac_remove,
+ .remove = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
diff --git a/drivers/eisa/Makefile b/drivers/eisa/Makefile
index a1dd0eaec2d4..552bd9478340 100644
--- a/drivers/eisa/Makefile
+++ b/drivers/eisa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux device tree
+always-$(CONFIG_EISA) += devlist.h
obj-$(CONFIG_EISA) += eisa-bus.o
obj-${CONFIG_EISA_PCI_EISA} += pci_eisa.o
@@ -9,14 +10,11 @@ obj-${CONFIG_EISA_PCI_EISA} += pci_eisa.o
obj-${CONFIG_EISA_VIRTUAL_ROOT} += virtual_root.o
-# Ugly hack to get DEVICE_NAME_SIZE value...
-DEVICE_NAME_SIZE = 50
-
$(obj)/eisa-bus.o: $(obj)/devlist.h
quiet_cmd_eisaid = GEN $@
- cmd_eisaid = sed -e '/^\#/D' -e 's/^\([[:alnum:]]\{7\}\) \+"\([^"]\{1,$(DEVICE_NAME_SIZE)\}\).*"/EISA_DEVINFO ("\1", "\2"),/' $< > $@
+ cmd_eisaid = sed -e '/^\#/D' -e 's/^\([[:alnum:]]\{7\}\) \+"\([^"]*\)"/EISA_DEVINFO ("\1", "\2"),/' $< > $@
clean-files := devlist.h
-$(obj)/devlist.h: $(src)/eisa.ids include/linux/device.h
- $(call cmd,eisaid)
+$(obj)/devlist.h: $(src)/eisa.ids include/linux/device.h FORCE
+ $(call if_changed,eisaid)
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index cb586a362944..edceea083b98 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -21,7 +21,7 @@
struct eisa_device_info {
struct eisa_device_id id;
- char name[50];
+ char name[EISA_DEVICE_INFO_NAME_SIZE];
};
#ifdef CONFIG_EISA_NAMES
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index a6f6d467aacf..aec46bf03302 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -134,6 +134,19 @@ config EXTCON_MAX8997
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX14526
+ tristate "Maxim MAX14526 EXTCON Support"
+ depends on I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Maxim MAX14526
+ MUIC device. The MAX14526 MUIC is a USB port accessory
+ detector and switch. The MAX14526 is designed to simplify
+ interface requirements on portable devices by multiplexing
+ common inputs (USB, UART, Microphone, Stereo Audio and
+ Composite Video) on a single micro/mini USB connector.
+
config EXTCON_PALMAS
tristate "Palmas USB EXTCON support"
depends on MFD_PALMAS
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0d6d23faf748..6482f2bfd661 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+obj-$(CONFIG_EXTCON_MAX14526) += extcon-max14526.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 125016da7fde..7e3c9f38297b 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -164,6 +164,8 @@ static void adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, false);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
}
@@ -196,7 +198,7 @@ static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
- .remove_new = adc_jack_remove,
+ .remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
.pm = &adc_jack_pm_ops,
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index d3bcbe839c09..19856dddade6 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -470,7 +470,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- device_init_wakeup(dev, true);
+ devm_device_init_wakeup(dev);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index e458ce0c45ab..a031eb0914a0 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -317,7 +317,7 @@ static int fsa9480_probe(struct i2c_client *client)
return ret;
}
- device_init_wakeup(info->dev, true);
+ devm_device_init_wakeup(info->dev);
fsa9480_detect_dev(info);
return 0;
@@ -350,7 +350,7 @@ static const struct dev_pm_ops fsa9480_pm_ops = {
};
static const struct i2c_device_id fsa9480_id[] = {
- { "fsa9480", 0 },
+ { "fsa9480" },
{}
};
MODULE_DEVICE_TABLE(i2c, fsa9480_id);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 93552dc3c895..8131a3d7d562 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -627,7 +627,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_extcon_table);
static struct platform_driver cht_wc_extcon_driver = {
.probe = cht_wc_extcon_probe,
- .remove_new = cht_wc_extcon_remove,
+ .remove = cht_wc_extcon_remove,
.id_table = cht_wc_extcon_table,
.driver = {
.name = "cht_wcove_pwrsrc",
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
index a1f737f13d49..9219f4328d70 100644
--- a/drivers/extcon/extcon-intel-mrfld.c
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -275,7 +275,7 @@ static struct platform_driver mrfld_extcon_driver = {
.name = "mrfld_bcove_pwrsrc",
},
.probe = mrfld_extcon_probe,
- .remove_new = mrfld_extcon_remove,
+ .remove = mrfld_extcon_remove,
.id_table = mrfld_extcon_id_table,
};
module_platform_driver(mrfld_extcon_driver);
diff --git a/drivers/extcon/extcon-max14526.c b/drivers/extcon/extcon-max14526.c
new file mode 100644
index 000000000000..3750a5c20612
--- /dev/null
+++ b/drivers/extcon/extcon-max14526.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* I2C addresses of MUIC internal registers */
+#define MAX14526_DEVICE_ID 0x00
+#define MAX14526_ID 0x02
+
+/* CONTROL_1 register masks */
+#define MAX14526_CONTROL_1 0x01
+#define ID_2P2 BIT(6)
+#define ID_620 BIT(5)
+#define ID_200 BIT(4)
+#define VLDO BIT(3)
+#define SEMREN BIT(2)
+#define ADC_EN BIT(1)
+#define CP_EN BIT(0)
+
+/* CONTROL_2 register masks */
+#define MAX14526_CONTROL_2 0x02
+#define INTPOL BIT(7)
+#define INT_EN BIT(6)
+#define MIC_LP BIT(5)
+#define CP_AUD BIT(4)
+#define CHG_TYPE BIT(1)
+#define USB_DET_DIS BIT(0)
+
+/* SW_CONTROL register masks */
+#define MAX14526_SW_CONTROL 0x03
+#define SW_DATA 0x00
+#define SW_UART 0x01
+#define SW_AUDIO 0x02
+#define SW_OPEN 0x07
+
+/* INT_STATUS register masks */
+#define MAX14526_INT_STAT 0x04
+#define CHGDET BIT(7)
+#define MR_COMP BIT(6)
+#define SENDEND BIT(5)
+#define V_VBUS BIT(4)
+
+/* STATUS register masks */
+#define MAX14526_STATUS 0x05
+#define CPORT BIT(7)
+#define CHPORT BIT(6)
+#define C1COMP BIT(0)
+
+enum max14526_idno_resistance {
+ MAX14526_GND,
+ MAX14526_24KOHM,
+ MAX14526_56KOHM,
+ MAX14526_100KOHM,
+ MAX14526_130KOHM,
+ MAX14526_180KOHM,
+ MAX14526_240KOHM,
+ MAX14526_330KOHM,
+ MAX14526_430KOHM,
+ MAX14526_620KOHM,
+ MAX14526_910KOHM,
+ MAX14526_OPEN
+};
+
+enum max14526_field_idx {
+ VENDOR_ID, CHIP_REV, /* DEVID */
+ DM, DP, /* SW_CONTROL */
+ MAX14526_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max14526_reg_field[MAX14526_N_REGMAP_FIELDS] = {
+ [VENDOR_ID] = REG_FIELD(MAX14526_DEVICE_ID, 4, 7),
+ [CHIP_REV] = REG_FIELD(MAX14526_DEVICE_ID, 0, 3),
+ [DM] = REG_FIELD(MAX14526_SW_CONTROL, 0, 2),
+ [DP] = REG_FIELD(MAX14526_SW_CONTROL, 3, 5),
+};
+
+struct max14526_data {
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX14526_N_REGMAP_FIELDS];
+
+ int last_state;
+ int cable;
+};
+
+enum max14526_muic_modes {
+ MAX14526_OTG = MAX14526_GND, /* no power */
+ MAX14526_MHL = MAX14526_56KOHM, /* no power */
+ MAX14526_OTG_Y = MAX14526_GND | V_VBUS,
+ MAX14526_MHL_CHG = MAX14526_GND | V_VBUS | CHGDET,
+ MAX14526_NONE = MAX14526_OPEN,
+ MAX14526_USB = MAX14526_OPEN | V_VBUS,
+ MAX14526_CHG = MAX14526_OPEN | V_VBUS | CHGDET,
+};
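These composite values work because INT_STAT packs the ADC reading (IDNO) into its low nibble alongside the V_VBUS and CHGDET flags, so the raw register value can be compared against the constants directly. A worked example under that layout assumption:

/* An open ID pin (0x0b) with VBUS present (BIT(4)) reads back as: */
u32 state = MAX14526_OPEN | V_VBUS;	/* 0x0b | 0x10 == 0x1b */

/* state == MAX14526_USB, so the interrupt handler below reports EXTCON_USB. */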
+
+static const unsigned int max14526_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_DISP_MHL,
+ EXTCON_NONE,
+};
+
+static int max14526_ap_usb_mode(struct max14526_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* Enable USB Path */
+ ret = regmap_field_write(priv->rfield[DM], SW_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rfield[DP], SW_DATA);
+ if (ret)
+ return ret;
+
+ /* Enable 200K, Charger Pump and ADC */
+ ret = regmap_write(priv->regmap, MAX14526_CONTROL_1,
+ ID_200 | ADC_EN | CP_EN);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "AP USB mode set\n");
+
+ return 0;
+}
+
+static irqreturn_t max14526_interrupt(int irq, void *dev_id)
+{
+ struct max14526_data *priv = dev_id;
+ struct device *dev = &priv->client->dev;
+ int state, ret;
+
+ /*
+ * Upon a MUIC IRQ (MUIC_INT_N falls), wait at least 70 ms
+ * before reading INT_STAT and STATUS. After the reads,
+ * MUIC_INT_N returns to high (but the INT_STAT and STATUS
+ * contents will be held).
+ */
+ msleep(100);
+
+ ret = regmap_read(priv->regmap, MAX14526_INT_STAT, &state);
+ if (ret) {
+ dev_err(dev, "failed to read MUIC state %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ if (state == priv->last_state)
+ return IRQ_HANDLED;
+
+ /* Detach previous device */
+ extcon_set_state_sync(priv->edev, priv->cable, false);
+
+ switch (state) {
+ case MAX14526_USB:
+ priv->cable = EXTCON_USB;
+ break;
+
+ case MAX14526_CHG:
+ priv->cable = EXTCON_CHG_USB_FAST;
+ break;
+
+ case MAX14526_OTG:
+ case MAX14526_OTG_Y:
+ priv->cable = EXTCON_USB_HOST;
+ break;
+
+ case MAX14526_MHL:
+ case MAX14526_MHL_CHG:
+ priv->cable = EXTCON_DISP_MHL;
+ break;
+
+ case MAX14526_NONE:
+ default:
+ priv->cable = EXTCON_NONE;
+ break;
+ }
+
+ extcon_set_state_sync(priv->edev, priv->cable, true);
+
+ priv->last_state = state;
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config max14526_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX14526_STATUS,
+};
+
+static int max14526_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max14526_data *priv;
+ int ret, dev_id, rev, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max14526_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX14526_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap,
+ max14526_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ /* Detect if MUIC version is supported */
+ ret = regmap_field_read(priv->rfield[VENDOR_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC ID\n");
+
+ regmap_field_read(priv->rfield[CHIP_REV], &rev);
+
+ if (dev_id != MAX14526_ID)
+ return dev_err_probe(dev, -EINVAL, "MUIC vendor id 0x%X is not recognized\n", dev_id);
+
+ dev_info(dev, "detected MAX14526 MUIC with id 0x%x, rev 0x%x\n", dev_id, rev);
+
+ priv->edev = devm_extcon_dev_allocate(dev, max14526_extcon_cable);
+ if (IS_ERR(priv->edev))
+ return dev_err_probe(dev, PTR_ERR(priv->edev),
+ "failed to allocate extcon device\n");
+
+ ret = devm_extcon_dev_register(dev, priv->edev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register extcon device\n");
+
+ ret = max14526_ap_usb_mode(priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set AP USB mode\n");
+
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, INT_EN, INT_EN);
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, USB_DET_DIS, (u32)~USB_DET_DIS);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, &max14526_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static int max14526_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max14526_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max14526_pm_ops, NULL, max14526_resume);
+
+static const struct of_device_id max14526_match[] = {
+ { .compatible = "maxim,max14526" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max14526_match);
+
+static const struct i2c_device_id max14526_id[] = {
+ { "max14526" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14526_id);
+
+static struct i2c_driver max14526_driver = {
+ .driver = {
+ .name = "max14526",
+ .of_match_table = max14526_match,
+ .pm = &max14526_pm_ops,
+ },
+ .probe = max14526_probe,
+ .id_table = max14526_id,
+};
+module_i2c_driver(max14526_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX14526 extcon driver to support MUIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index e62ce7a8d131..b2ee4ff8b04d 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -127,7 +127,7 @@ MODULE_DEVICE_TABLE(of, max3355_match_table);
static struct platform_driver max3355_driver = {
.probe = max3355_probe,
- .remove_new = max3355_remove,
+ .remove = max3355_remove,
.driver = {
.name = "extcon-max3355",
.of_match_table = max3355_match_table,
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 9849e3b8327e..2ae9f7f1a67f 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -956,7 +956,7 @@ static struct platform_driver max77843_muic_driver = {
.of_match_table = of_max77843_muic_dt_match,
},
.probe = max77843_muic_probe,
- .remove_new = max77843_muic_remove,
+ .remove = max77843_muic_remove,
.id_table = max77843_muic_id,
};
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 4616da7e5430..78ad86c4a3be 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -338,7 +338,7 @@ static const struct of_device_id ptn5150_dt_match[] = {
MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
static const struct i2c_device_id ptn5150_i2c_id[] = {
- { "ptn5150", 0 },
+ { "ptn5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 53de581a393a..afaba5685c3d 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -155,7 +155,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ devm_device_init_wakeup(dev);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c
index 19a01e663733..82b60b927e41 100644
--- a/drivers/extcon/extcon-rtk-type-c.c
+++ b/drivers/extcon/extcon-rtk-type-c.c
@@ -1369,6 +1369,8 @@ static int extcon_rtk_type_c_probe(struct platform_device *pdev)
}
type_c->type_c_cfg = devm_kzalloc(dev, sizeof(*type_c_cfg), GFP_KERNEL);
+ if (!type_c->type_c_cfg)
+ return -ENOMEM;
memcpy(type_c->type_c_cfg, type_c_cfg, sizeof(*type_c_cfg));
@@ -1778,7 +1780,7 @@ static const struct dev_pm_ops extcon_rtk_type_c_pm_ops = {
static struct platform_driver extcon_rtk_type_c_driver = {
.probe = extcon_rtk_type_c_probe,
- .remove_new = extcon_rtk_type_c_remove,
+ .remove = extcon_rtk_type_c_remove,
.driver = {
.name = "extcon-rtk-type_c",
.of_match_table = extcon_rtk_type_c_match,
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 9b61eb99b7dc..5e8ad21ad206 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -279,7 +279,7 @@ MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
- .remove_new = usb_extcon_remove,
+ .remove = usb_extcon_remove,
.driver = {
.name = "extcon-usb-gpio",
.pm = &usb_extcon_pm_ops,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 805a47230689..1fb627ea8b50 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -529,7 +529,7 @@ static struct platform_driver extcon_cros_ec_driver = {
.of_match_table = of_match_ptr(extcon_cros_ec_of_match),
.pm = DEV_PM_OPS,
},
- .remove_new = extcon_cros_ec_remove,
+ .remove = extcon_cros_ec_remove,
.probe = extcon_cros_ec_probe,
};
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 905c82e26ce7..a5f5e250223a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -83,7 +83,7 @@ config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
- depends on PCI && FIREWIRE && MMU
+ depends on PCI && FIREWIRE
help
Enable this driver if you have a FireWire controller based
on the OHCI specification. For all practical purposes, this
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 01354b9de8b2..e5e0174a0335 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -229,22 +229,21 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
- if (!queue_delayed_work(fw_workqueue, &card->br_work,
- delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
static void br_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+ struct fw_card *card = from_work(card, work, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
- if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
fw_card_put(card);
return;
}
@@ -273,10 +272,6 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
fw_device_set_broadcast_channel);
}
-static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
@@ -284,222 +279,254 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void bm_work(struct work_struct *work)
-{
- struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
- struct fw_device *root_device, *irm_device;
- struct fw_node *root_node;
- int root_id, new_root_id, irm_id, bm_id, local_id;
- int gap_count, generation, grace, rcode;
- bool do_reset = false;
- bool root_device_is_running;
- bool root_device_is_cmc;
- bool irm_is_1394_1995_only;
- bool keep_this_irm;
- __be32 transaction_data[2];
+enum bm_contention_outcome {
+ // The bus management contention window is not expired.
+ BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+ // The IRM node has link off.
+ BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+ // The IRM node complies with IEEE 1394:1995 only.
+ BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+ // Another bus reset, BM work has been rescheduled.
+ BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+ // We have been unable to send the lock request to the IRM node due to some local problem.
+ BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+ // The lock request failed, maybe the IRM isn't really IRM capable after all.
+ BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+ // Somebody else is BM.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+ // The local node won the contention and is now bus manager.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};
- spin_lock_irq(&card->lock);
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
+{
+ int generation = card->generation;
+ int local_id = card->local_node->node_id;
+ __be32 data[2] = {
+ cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+ cpu_to_be32(local_id),
+ };
+ bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+ bool irm_is_1394_1995_only = false;
+ bool keep_this_irm = false;
+ struct fw_node *irm_node;
+ struct fw_device *irm_device;
+ int irm_node_id;
+ int rcode;
+
+ lockdep_assert_held(&card->lock);
+
+ if (!grace) {
+ if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+ return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
+ }
- if (card->local_node == NULL) {
- spin_unlock_irq(&card->lock);
- goto out_put_card;
+ irm_node = card->irm_node;
+ if (!irm_node->link_on) {
+ fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+ return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
}
- generation = card->generation;
+ irm_device = fw_node_get_device(irm_node);
+ if (irm_device && irm_device->config_rom) {
+ irm_is_1394_1995_only = (irm_device->config_rom[2] & 0x000000f0) == 0;
- root_node = card->root_node;
- fw_node_get(root_node);
- root_device = root_node->data;
- root_device_is_running = root_device &&
- atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
- root_device_is_cmc = root_device && root_device->cmc;
+ // Canon MV5i works unreliably if it is not root node.
+ keep_this_irm = irm_device->config_rom[3] >> 8 == CANON_OUI;
+ }
- irm_device = card->irm_node->data;
- irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
- (irm_device->config_rom[2] & 0x000000f0) == 0;
+ if (irm_is_1394_1995_only && !keep_this_irm) {
+ fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+ local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ }
- /* Canon MV5i works unreliably if it is not root node. */
- keep_this_irm = irm_device && irm_device->config_rom &&
- irm_device->config_rom[3] >> 8 == CANON_OUI;
+ irm_node_id = irm_node->node_id;
- root_id = root_node->node_id;
- irm_id = card->irm_node->node_id;
- local_id = card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
- grace = time_after64(get_jiffies_64(),
- card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+ sizeof(data));
- if ((is_next_generation(generation, card->bm_generation) &&
- !card->bm_abdicate) ||
- (card->bm_generation != generation && grace)) {
- /*
- * This first step is to figure out who is IRM and
- * then try to become bus manager. If the IRM is not
- * well defined (e.g. does not have an active link
- * layer or does not responds to our lock request, we
- * will have to do a little vigilante bus management.
- * In that case, we do a goto into the gap count logic
- * so that when we do the reset, we still optimize the
- * gap count. That could well save a reset in the
- * next generation.
- */
+ spin_lock_irq(&card->lock);
- if (!card->irm_node->link_on) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM has link off", new_root_id);
- goto pick_me;
+ switch (rcode) {
+ case RCODE_GENERATION:
+ return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+ case RCODE_SEND_ERROR:
+ return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+ case RCODE_COMPLETE:
+ {
+ int bm_id = be32_to_cpu(data[0]);
+
+ // Used by cdev layer for "struct fw_cdev_event_bus_reset".
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+ card->bm_node_id = 0xffc0 | bm_id;
+ else
+ card->bm_node_id = local_id;
+
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+ else
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+ default:
+ if (!keep_this_irm) {
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ } else {
+ return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
}
+ }
+}
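contend_for_bm() decides bus-manager ownership with a single compare-and-swap lock transaction against the IRM's BUS_MANAGER_ID register: the request offers 0x3f (nothing registered) and the local node ID, and the pre-swap value returned in the response distinguishes the outcomes. A minimal userspace sketch of that protocol, with the CSR register modeled as plain memory rather than a bus transaction:

```c
#include <stdio.h>

#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f

/* The BUS_MANAGER_ID register at the IRM, modeled as plain memory.
 * On the wire this is a TCODE_LOCK_COMPARE_SWAP transaction. */
static unsigned int bus_manager_id = BUS_MANAGER_ID_NOT_REGISTERED;

/* Returns the value the register held before the swap, exactly as the
 * IRM reports it back in the lock response. */
static unsigned int csr_compare_swap(unsigned int expected, unsigned int new_value)
{
	unsigned int old = bus_manager_id;

	if (old == expected)
		bus_manager_id = new_value;
	return old;
}

int main(void)
{
	unsigned int old = csr_compare_swap(BUS_MANAGER_ID_NOT_REGISTERED, 0x01);

	if (old == BUS_MANAGER_ID_NOT_REGISTERED)
		printf("local node won: it is now bus manager\n");
	else
		printf("node 0x%02x already registered as bus manager\n", old);
	return 0;
}
```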
- if (irm_is_1394_1995_only && !keep_this_irm) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM is not 1394a compliant", new_root_id);
- goto pick_me;
- }
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
- transaction_data[0] = cpu_to_be32(0x3f);
- transaction_data[1] = cpu_to_be32(local_id);
+static void bm_work(struct work_struct *work)
+{
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+ struct fw_node *root_node __free(node_unref) = NULL;
+ int root_id, new_root_id, irm_id, local_id;
+ int expected_gap_count, generation;
+ bool stand_for_root = false;
+
+ spin_lock_irq(&card->lock);
+ if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
+ return;
+ }
- rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
- irm_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- transaction_data, 8);
+ generation = card->generation;
- if (rcode == RCODE_GENERATION)
- /* Another bus reset, BM work has been rescheduled. */
- goto out;
+ root_node = fw_node_get(card->root_node);
- bm_id = be32_to_cpu(transaction_data[0]);
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
- scoped_guard(spinlock_irq, &card->lock) {
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- }
+ if (card->bm_generation != generation) {
+ enum bm_contention_outcome result = contend_for_bm(card);
- if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
- /* Somebody else is BM. Only act as IRM. */
- if (local_id == irm_id)
+ switch (result) {
+ case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+ // BM work has been rescheduled.
+ spin_unlock_irq(&card->lock);
+ return;
+ case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+ // Let's try again later and hope that the local problem has gone away by
+ // then.
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+ // Let's do a bus reset and pick the local node as root, and thus, IRM.
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+ if (local_id == irm_id) {
+ // Only acts as IRM.
+ spin_unlock_irq(&card->lock);
allocate_broadcast_channel(card, generation);
-
- goto out;
+ spin_lock_irq(&card->lock);
+ }
+ fallthrough;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+ default:
+ card->bm_generation = generation;
+ break;
}
+ }
- if (rcode == RCODE_SEND_ERROR) {
- /*
- * We have been unable to send the lock request due to
- * some local problem. Let's try again later and hope
- * that the problem has gone away by then.
- */
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
- }
-
- spin_lock_irq(&card->lock);
-
- if (rcode != RCODE_COMPLETE && !keep_this_irm) {
- /*
- * The lock request failed, maybe the IRM
- * isn't really IRM capable after all. Let's
- * do a bus reset and pick the local node as
- * root, and thus, IRM.
- */
- new_root_id = local_id;
- fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
- fw_rcode_string(rcode), new_root_id);
- goto pick_me;
+ // We're bus manager for this generation, so next step is to make sure we have an active
+ // cycle master and do gap count optimization.
+ if (!stand_for_root) {
+ if (card->gap_count == GAP_COUNT_MISMATCHED) {
+ // If self IDs have inconsistent gap counts, do a
+ // bus reset ASAP. The config rom read might never
+ // complete, so don't wait for it. However, still
+ // send a PHY configuration packet prior to the
+ // bus reset. The PHY configuration packet might
+ // fail, but 1394-2008 8.4.5.2 explicitly permits
+ // it in this case, so it should be safe to try.
+ stand_for_root = true;
+
+ // We must always send a bus reset if the gap count
+ // is inconsistent, so bypass the 5-reset limit.
+ card->bm_retries = 0;
+ } else {
+ // Now investigate root node.
+ struct fw_device *root_device = fw_node_get_device(root_node);
+
+ if (root_device == NULL) {
+ // Either link_on is false, or we failed to read the
+ // config rom. In either case, pick another root.
+ stand_for_root = true;
+ } else {
+ bool root_device_is_running =
+ atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+ if (!root_device_is_running) {
+ // If we haven't probed this device yet, bail out now
+ // and let's try again once that's done.
+ spin_unlock_irq(&card->lock);
+ return;
+ } else if (!root_device->cmc) {
+ // Current root has an active link layer and we
+ // successfully read the config rom, but it's not
+ // cycle master capable.
+ stand_for_root = true;
+ }
+ }
}
- } else if (card->bm_generation != generation) {
- /*
- * We weren't BM in the last generation, and the last
- * bus reset is less than 125ms ago. Reschedule this job.
- */
- spin_unlock_irq(&card->lock);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
}
- /*
- * We're bus manager for this generation, so next step is to
- * make sure we have an active cycle master and do gap count
- * optimization.
- */
- card->bm_generation = generation;
-
- if (card->gap_count == 0) {
- /*
- * If self IDs have inconsistent gap counts, do a
- * bus reset ASAP. The config rom read might never
- * complete, so don't wait for it. However, still
- * send a PHY configuration packet prior to the
- * bus reset. The PHY configuration packet might
- * fail, but 1394-2008 8.4.5.2 explicitly permits
- * it in this case, so it should be safe to try.
- */
+ if (stand_for_root) {
new_root_id = local_id;
- /*
- * We must always send a bus reset if the gap count
- * is inconsistent, so bypass the 5-reset limit.
- */
- card->bm_retries = 0;
- } else if (root_device == NULL) {
- /*
- * Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root.
- */
- new_root_id = local_id;
- } else if (!root_device_is_running) {
- /*
- * If we haven't probed this device yet, bail out now
- * and let's try again once that's done.
- */
- spin_unlock_irq(&card->lock);
- goto out;
- } else if (root_device_is_cmc) {
- /*
- * We will send out a force root packet for this
- * node as part of the gap count optimization.
- */
- new_root_id = root_id;
} else {
- /*
- * Current root has an active link layer and we
- * successfully read the config rom, but it's not
- * cycle master capable.
- */
- new_root_id = local_id;
+ // We will send out a force root packet for this node as part of the gap count
+ // optimization.
+ new_root_id = root_id;
}
- pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[root_node->max_hops];
+ expected_gap_count = gap_count_table[root_node->max_hops];
else
- gap_count = 63;
+ expected_gap_count = 63;
- /*
- * Finally, figure out if we should do a reset or not. If we have
- * done less than 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it.
- */
-
- if (card->bm_retries++ < 5 &&
- (card->gap_count != gap_count || new_root_id != root_id))
- do_reset = true;
+ // Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+ // with the same physical topology and we have either a new root or a new gap count
+ // setting, let's do it.
+ if (card->bm_retries++ < 5 && (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+ int card_gap_count = card->gap_count;
- spin_unlock_irq(&card->lock);
+ spin_unlock_irq(&card->lock);
- if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
- new_root_id, gap_count);
- fw_send_phy_config(card, new_root_id, generation, gap_count);
+ new_root_id, expected_gap_count);
+ fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
/*
* Where possible, use a short bus reset to minimize
* disruption to isochronous transfers. But in the event
@@ -512,31 +539,27 @@ static void bm_work(struct work_struct *work)
* may treat it as two, causing a gap count inconsistency
* again. Using a long bus reset prevents this.
*/
- reset_bus(card, card->gap_count != 0);
+ reset_bus(card, card_gap_count != 0);
/* Will allocate broadcast channel after the reset. */
- goto out;
- }
+ } else {
+ struct fw_device *root_device = fw_node_get_device(root_node);
- if (root_device_is_cmc) {
- /*
- * Make sure that the cycle master sends cycle start packets.
- */
- transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
- rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
- root_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_STATE_SET,
- transaction_data, 4);
- if (rcode == RCODE_GENERATION)
- goto out;
- }
+ spin_unlock_irq(&card->lock);
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ if (root_device && root_device->cmc) {
+ // Make sure that the cycle master sends cycle start packets.
+ __be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ &data, sizeof(data));
+ if (rcode == RCODE_GENERATION)
+ return;
+ }
- out:
- fw_node_put(root_node);
- out_put_card:
- fw_card_put(card);
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+ }
}
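bm_work() now takes its fw_card and fw_node references through the __free(card_unref)/__free(node_unref) annotations declared before it, so every return path drops them automatically and the old out/out_put_card labels disappear. A minimal userspace model of DEFINE_FREE()/__free(), assuming only the compiler's cleanup attribute (the real macros live in <linux/cleanup.h>):

```c
#include <stdio.h>
#include <stdlib.h>

/* Userspace model of <linux/cleanup.h>: DEFINE_FREE() names a cleanup
 * routine, __free() attaches it to a variable so the routine runs when
 * the variable goes out of scope on any path. */
#define DEFINE_FREE(name, type, free_expr) \
	static void __free_##name(type *p) { type _T = *p; free_expr; }
#define __free(name) __attribute__((cleanup(__free_##name)))

DEFINE_FREE(buf_free, char *, if (_T) free(_T))

static int use_buffer(int fail_early)
{
	char *buf __free(buf_free) = malloc(64);

	if (!buf)
		return -1;
	if (fail_early)
		return -1;	/* buf is freed here automatically */

	printf("working with buf\n");
	return 0;		/* ...and here as well */
}

int main(void)
{
	use_buffer(1);
	use_buffer(0);
	return 0;
}
```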
void fw_card_initialize(struct fw_card *card,
@@ -548,20 +571,24 @@ void fw_card_initialize(struct fw_card *card,
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
- card->current_tlabel = 0;
- card->tlabel_mask = 0;
- card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
- card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
- card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
- card->split_timeout_jiffies =
- DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+ card->transactions.current_tlabel = 0;
+ card->transactions.tlabel_mask = 0;
+ INIT_LIST_HEAD(&card->transactions.list);
+ spin_lock_init(&card->transactions.lock);
+
+ card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+ spin_lock_init(&card->split_timeout.lock);
+
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
- INIT_LIST_HEAD(&card->transaction_list);
- INIT_LIST_HEAD(&card->phy_receiver_list);
+
spin_lock_init(&card->lock);
card->local_node = NULL;
@@ -571,10 +598,13 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
- struct workqueue_struct *isoc_wq;
+ struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+ struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
int ret;
// This workqueue should be:
@@ -595,22 +625,41 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
if (!isoc_wq)
return -ENOMEM;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for asynchronous context.
+ // * == WQ_MEM_RECLAIM Used for any backend of block device.
+ // * == WQ_FREEZABLE The target device would not be available when frozen.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == 4 A hardIRQ could notify events for the pair of request and
+ // response AR/AT contexts.
+ async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!async_wq)
+ return -ENOMEM;
+
+ card->isoc_wq = isoc_wq;
+ card->async_wq = async_wq;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- guard(mutex)(&card_mutex);
+ scoped_guard(mutex, &card_mutex) {
+ generate_config_rom(card, tmp_config_rom);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
+ if (ret < 0) {
+ card->isoc_wq = NULL;
+ card->async_wq = NULL;
+ return ret;
+ }
+ retain_and_null_ptr(isoc_wq);
+ retain_and_null_ptr(async_wq);
- generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0) {
- destroy_workqueue(isoc_wq);
- return ret;
+ list_add_tail(&card->link, &card_list);
}
- card->isoc_wq = isoc_wq;
- list_add_tail(&card->link, &card_list);
-
return 0;
}
EXPORT_SYMBOL(fw_card_add);
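The reworked error handling in fw_card_add() leans on the same scope-based cleanup: both workqueues are armed for automatic destruction via __free(workqueue_destroy), and retain_and_null_ptr() disarms that cleanup once ownership has safely moved into the card. A userspace sketch of the ownership-transfer idiom; the macro names mirror the kernel's, but the implementations here are simplified:

```c
#include <stdio.h>
#include <stdlib.h>

#define DEFINE_FREE(name, type, free_expr) \
	static void __free_##name(type *p) { type _T = *p; free_expr; }
#define __free(name) __attribute__((cleanup(__free_##name)))

/* Model of retain_and_null_ptr(): keep the object alive but disarm the
 * scope-based cleanup by nulling the guarded pointer. */
#define retain_and_null_ptr(p) ((p) = NULL)

DEFINE_FREE(queue_free, char *, if (_T) free(_T))

static char *card_async_wq;	/* stands in for card->async_wq */

static int card_add(int enable_fails)
{
	char *wq __free(queue_free) = malloc(32);	/* alloc_workqueue() */

	if (!wq)
		return -1;
	if (enable_fails)
		return -1;	/* wq destroyed automatically, no leak */

	card_async_wq = wq;		/* ownership moves into the card */
	retain_and_null_ptr(wq);	/* the cleanup now sees NULL */
	return 0;
}

int main(void)
{
	card_add(1);
	if (card_add(0) == 0) {
		free(card_async_wq);	/* the new owner releases it later */
		printf("ownership transferred without a double free\n");
	}
	return 0;
}
```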
@@ -744,6 +793,7 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
drain_workqueue(card->isoc_wq);
+ drain_workqueue(card->async_wq);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
@@ -753,8 +803,9 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
destroy_workqueue(card->isoc_wq);
+ destroy_workqueue(card->async_wq);
- WARN_ON(!list_empty(&card->transaction_list));
+ WARN_ON(!list_empty(&card->transactions.list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b360dca2c69e..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,12 +41,15 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);
+
struct client {
u32 version;
struct fw_device *device;
@@ -937,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+ r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data),
+ flex_array_size(r, data, a->length))) {
ret = -EFAULT;
goto failed;
}
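struct_size() and flex_array_size() compute sizes for a structure with a trailing flexible array, replacing the open-coded `a->length * 4` arithmetic; their real value is that the kernel versions saturate instead of wrapping on overflow. A simplified, runnable model without the overflow handling:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct descriptor {
	size_t length;
	uint32_t data[];	/* flexible array of quadlets */
};

/* Simplified models: the real macros in <linux/overflow.h> additionally
 * saturate on overflow instead of wrapping, which is the point. */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))
#define flex_array_size(p, member, n) \
	(sizeof((p)->member[0]) * (n))

int main(void)
{
	size_t count = 4;
	struct descriptor *r = malloc(struct_size(r, data, count));

	if (!r)
		return 1;
	r->length = count;
	memset(r->data, 0, flex_array_size(r, data, count));
	printf("allocated %zu bytes for %zu quadlets\n",
	       struct_size(r, data, count), count);
	free(r);
	return 0;
}
```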
@@ -1137,10 +1141,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
- struct {
- struct fw_iso_packet packet;
- u8 header[256];
- } u;
+ DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64);
if (ctx == NULL || a->handle != 0)
return -EINVAL;
@@ -1172,29 +1173,29 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
while (p < end) {
if (get_user(control, &p->control))
return -EFAULT;
- u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
- u.packet.interrupt = GET_INTERRUPT(control);
- u.packet.skip = GET_SKIP(control);
- u.packet.tag = GET_TAG(control);
- u.packet.sy = GET_SY(control);
- u.packet.header_length = GET_HEADER_LENGTH(control);
+ u->payload_length = GET_PAYLOAD_LENGTH(control);
+ u->interrupt = GET_INTERRUPT(control);
+ u->skip = GET_SKIP(control);
+ u->tag = GET_TAG(control);
+ u->sy = GET_SY(control);
+ u->header_length = GET_HEADER_LENGTH(control);
switch (ctx->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- if (u.packet.header_length & 3)
+ if (u->header_length & 3)
return -EINVAL;
- transmit_header_bytes = u.packet.header_length;
+ transmit_header_bytes = u->header_length;
break;
case FW_ISO_CONTEXT_RECEIVE:
- if (u.packet.header_length == 0 ||
- u.packet.header_length % ctx->header_size != 0)
+ if (u->header_length == 0 ||
+ u->header_length % ctx->header_size != 0)
return -EINVAL;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- if (u.packet.payload_length == 0 ||
- u.packet.payload_length & 3)
+ if (u->payload_length == 0 ||
+ u->payload_length & 3)
return -EINVAL;
break;
}
@@ -1204,20 +1205,19 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
if (next > end)
return -EINVAL;
if (copy_from_user
- (u.packet.header, p->header, transmit_header_bytes))
+ (u->header, p->header, transmit_header_bytes))
return -EFAULT;
- if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
- u.packet.header_length + u.packet.payload_length > 0)
+ if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+ u->header_length + u->payload_length > 0)
return -EINVAL;
- if (payload + u.packet.payload_length > buffer_end)
+ if (payload + u->payload_length > buffer_end)
return -EINVAL;
- if (fw_iso_context_queue(ctx, &u.packet,
- &client->buffer, payload))
+ if (fw_iso_context_queue(ctx, u, &client->buffer, payload))
break;
p = next;
- payload += u.packet.payload_length;
+ payload += u->payload_length;
count++;
}
fw_iso_context_queue_flush(ctx);
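DEFINE_RAW_FLEX() replaces the old anonymous struct-plus-header-buffer with correctly typed on-stack storage for a structure that ends in a flexible array member. A userspace approximation, assuming C11 _Alignas; the kernel macro in <linux/overflow.h> differs in detail but has the same shape:

```c
#include <stdio.h>
#include <stdint.h>

struct iso_packet_model {		/* stand-in for struct fw_iso_packet */
	unsigned int payload_length;
	unsigned int header_length;
	uint32_t header[];		/* flexible array member */
};

/* Userspace approximation of DEFINE_RAW_FLEX(): reserve zeroed, suitably
 * aligned stack storage for the struct plus 'count' trailing elements,
 * and expose a typed pointer to it. */
#define DEFINE_RAW_FLEX(type, name, member, count) \
	_Alignas(type) unsigned char _storage_##name[sizeof(type) + \
		sizeof(((type *)0)->member[0]) * (count)] = { 0 }; \
	type *name = (type *)_storage_##name

int main(void)
{
	DEFINE_RAW_FLEX(struct iso_packet_model, u, header, 64);

	u->header_length = 8;
	u->header[0] = 0xdeadbeefu;
	printf("on-stack packet: header[0]=%08x\n", (unsigned)u->header[0]);
	return 0;
}
```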
@@ -1317,8 +1317,7 @@ static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
- struct iso_resource *r =
- container_of(work, struct iso_resource, work.work);
+ struct iso_resource *r = from_work(r, work, work.work);
struct client *client = r->client;
unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
@@ -1329,8 +1328,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
// Allow 1000ms grace period for other reallocations.
if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
+ schedule_iso_resource(r, msecs_to_jiffies(333));
skip = true;
} else {
// We could be called twice within the same generation.
@@ -1674,15 +1673,16 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
- struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
- guard(spinlock_irq)(&card->lock);
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
+ list_move_tail(&client->phy_receiver_link, &phy_receiver_list);
- list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
return 0;
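The registration path above uses scoped_guard()/guard() from <linux/cleanup.h>, which tie the unlock to scope exit so no early return can leak the spinlock. A userspace model of guard() built on pthreads; the kernel generates these per lock class rather than through a single macro like this:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t phy_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Userspace model of guard(): acquire now, release automatically at
 * every exit from the enclosing scope. */
#define guard(lock) \
	pthread_mutex_t *_guard __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(lock), (lock))

static int registered;

static int register_receiver(int ok)
{
	guard(&phy_list_lock);

	if (!ok)
		return -1;	/* lock dropped here, too */
	registered++;
	return 0;		/* and here */
}

int main(void)
{
	register_receiver(1);
	printf("registered=%d\n", registered);
	return 0;
}
```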
@@ -1692,10 +1692,17 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- guard(spinlock_irqsave)(&card->lock);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ guard(spinlock_irqsave)(&phy_receiver_list_lock);
+
+ list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
+ struct inbound_phy_packet_event *e;
+
+ if (client->device->card != card)
+ continue;
- list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1862,7 +1869,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client_resource *resource;
unsigned long index;
- scoped_guard(spinlock_irq, &client->device->card->lock)
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
list_del(&client->phy_receiver_link);
scoped_guard(mutex, &client->device->client_list_mutex)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index a99fe35f1f0d..457a0da024a7 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -847,17 +847,15 @@ static void fw_schedule_device_work(struct fw_device *device,
*/
#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
+#define RETRY_DELAY secs_to_jiffies(3)
+#define INITIAL_DELAY msecs_to_jiffies(500)
+#define SHUTDOWN_DELAY secs_to_jiffies(2)
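The delay macros now spell out their units instead of encoding them as HZ arithmetic. A userspace model of the conversion helpers, assuming HZ=250 purely for illustration; the real helpers in <linux/jiffies.h> also handle overflow and constant folding:

```c
#include <stdio.h>

#define HZ 250	/* assumption: a common CONFIG_HZ value */

/* Simplified userspace models of the kernel helpers. Rounding up
 * ensures a nonzero delay never truncates to zero jiffies. */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

static unsigned long secs_to_jiffies(unsigned int s)
{
	return (unsigned long)s * HZ;
}

int main(void)
{
	/* RETRY_DELAY: same value as the old 3 * HZ, but unit-explicit. */
	printf("3 s    -> %lu jiffies\n", secs_to_jiffies(3));
	/* INITIAL_DELAY: same value as the old HZ / 2. */
	printf("500 ms -> %lu jiffies\n", msecs_to_jiffies(500));
	return 0;
}
```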
static void fw_device_shutdown(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
- if (time_before64(get_jiffies_64(),
- device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
@@ -888,7 +886,7 @@ static void fw_device_release(struct device *dev)
* bus manager work looks at this node.
*/
scoped_guard(spinlock_irqsave, &card->lock)
- device->node->data = NULL;
+ fw_node_set_device(device->node, NULL);
fw_node_put(device->node);
kfree(device->config_rom);
@@ -921,8 +919,7 @@ static int update_unit(struct device *dev, void *data)
static void fw_device_update(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
@@ -988,7 +985,7 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
-static int compare_configuration_rom(struct device *dev, void *data)
+static int compare_configuration_rom(struct device *dev, const void *data)
{
const struct fw_device *old = fw_device(dev);
const u32 *config_rom = data;
@@ -1002,15 +999,14 @@ static int compare_configuration_rom(struct device *dev, void *data)
static void fw_device_init(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
struct device *found;
u32 minor;
int ret;
/*
- * All failure paths here set node->data to NULL, so that we
+ * All failure paths here call fw_node_set_device(node, NULL), so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
@@ -1039,7 +1035,7 @@ static void fw_device_init(struct work_struct *work)
//
// serialize config_rom access.
scoped_guard(rwsem_read, &fw_device_rwsem) {
- found = device_find_child(card->device, (void *)device->config_rom,
+ found = device_find_child(card->device, device->config_rom,
compare_configuration_rom);
}
if (found) {
@@ -1054,9 +1050,9 @@ static void fw_device_init(struct work_struct *work)
struct fw_node *obsolete_node = reused->node;
device->node = obsolete_node;
- device->node->data = device;
+ fw_node_set_device(device->node, device);
reused->node = current_node;
- reused->node->data = reused;
+ fw_node_set_device(reused->node, reused);
reused->max_speed = device->max_speed;
reused->node_id = current_node->node_id;
@@ -1184,8 +1180,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
static void fw_device_refresh(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
int ret, node_id = device->node_id;
bool changed;
@@ -1251,8 +1246,7 @@ static void fw_device_refresh(struct work_struct *work)
static void fw_device_workfn(struct work_struct *work)
{
- struct fw_device *device = container_of(to_delayed_work(work),
- struct fw_device, work);
+ struct fw_device *device = from_work(device, to_delayed_work(work), work);
device->workfn(work);
}
@@ -1297,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
- node->data = device;
+ fw_node_set_device(node, device);
/*
* Many devices are slow to respond after bus resets,
@@ -1312,7 +1306,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
goto create;
@@ -1329,7 +1323,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
break;
@@ -1344,7 +1338,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
- if (!node->data)
+ if (!fw_node_get_device(node))
break;
/*
@@ -1359,7 +1353,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* the device in shutdown state to have that code fail
* to create the device.
*/
- device = node->data;
+ device = fw_node_get_device(node);
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 892b94cfd626..2f73bcd5696f 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -56,7 +56,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
* two cases: either the path goes through this node, in which case
* the hop count is the sum of the two biggest child depths plus 2.
* Or it could be the case that the max hop path is entirely
- * containted in a child tree, in which case the max hop count is just
+ * contained in a child tree, in which case the max hop count is just
* the max hop count of this child.
*/
static void update_hop_count(struct fw_node *node)
@@ -241,7 +241,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
- gap_count = 0;
+ gap_count = GAP_COUNT_MISMATCHED;
update_hop_count(node);
@@ -325,9 +325,11 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
-/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
+__must_hold(&card->lock)
{
+ lockdep_assert_held(&card->lock);
+
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
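The __must_hold() annotation states the locking contract for static analyzers such as sparse, while lockdep_assert_held() enforces it at runtime on lockdep-enabled kernels. A toy userspace analogue of that pairing, with a plain owner flag standing in for lockdep's state tracking:

```c
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t card_lock = PTHREAD_MUTEX_INITIALIZER;
static int card_lock_held;

/* Runtime check standing in for lockdep_assert_held(). */
#define lockdep_assert_held(held_flag) assert(*(held_flag))

static void destroy_nodes(void)	/* contract: caller holds card_lock */
{
	lockdep_assert_held(&card_lock_held);
	/* ... walk and free the topology under the lock ... */
}

int main(void)
{
	pthread_mutex_lock(&card_lock);
	card_lock_held = 1;
	destroy_nodes();
	card_lock_held = 0;
	pthread_mutex_unlock(&card_lock);
	printf("locking contract checked at runtime\n");
	return 0;
}
```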
@@ -435,20 +437,22 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void update_topology_map(struct fw_card *card,
- u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+ const u32 *self_ids, int self_id_count)
{
- int node_count = (card->root_node->node_id & 0x3f) + 1;
- __be32 *map = card->topology_map;
+ __be32 *map = buffer;
+ int node_count = (root_node_id & 0x3f) + 1;
+
+ memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+ *map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
- fw_compute_block_crc(card->topology_map);
+ fw_compute_block_crc(buffer);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -458,46 +462,45 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- guard(spinlock_irqsave)(&card->lock);
-
- /*
- * If the selfID buffer is not the immediate successor of the
- * previously processed one, we cannot reliably compare the
- * old and new topologies.
- */
- if (!is_next_generation(generation, card->generation) &&
- card->local_node != NULL) {
- fw_destroy_nodes(card);
- card->bm_retries = 0;
+ scoped_guard(spinlock, &card->lock) {
+ // If the selfID buffer is not the immediate successor of the
+ // previously processed one, we cannot reliably compare the
+ // old and new topologies.
+ if (!is_next_generation(generation, card->generation) && card->local_node != NULL) {
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
+ card->node_id = node_id;
+ // Update node_id before generation to prevent anybody from using
+ // a stale node_id together with a current generation.
+ smp_wmb();
+ card->generation = generation;
+ card->reset_jiffies = get_jiffies_64();
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
+
+ local_node = build_tree(card, self_ids, self_id_count, generation);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_err(card, "topology build failed\n");
+ // FIXME: We need to issue a bus reset in this case.
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
}
- card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
- card->node_id = node_id;
- /*
- * Update node_id before generation to prevent anybody from using
- * a stale node_id together with a current generation.
- */
- smp_wmb();
- card->generation = generation;
- card->reset_jiffies = get_jiffies_64();
- card->bm_node_id = 0xffff;
- card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count, generation);
-
- update_topology_map(card, self_ids, self_id_count);
-
- card->color++;
-
- if (local_node == NULL) {
- fw_err(card, "topology build failed\n");
- /* FIXME: We need to issue a bus reset in this case. */
- } else if (card->local_node == NULL) {
- card->local_node = local_node;
- for_each_fw_node(card, local_node, report_found_node);
- } else {
- update_tree(card, local_node);
+ // Just used by the transaction layer.
+ scoped_guard(spinlock, &card->topology_map.lock) {
+ update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+ card->root_node->node_id, self_ids, self_id_count);
}
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index e141d24a7644..dd3656a0c1ff 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -39,7 +39,7 @@
static int try_cancel_split_timeout(struct fw_transaction *t)
{
if (t->is_split_transaction)
- return del_timer(&t->split_timeout_timer);
+ return timer_delete(&t->split_timeout_timer);
else
return 1;
}
@@ -49,12 +49,14 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
{
struct fw_transaction *t = NULL, *iter;
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter == transaction) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -114,14 +116,14 @@ EXPORT_SYMBOL(fw_cancel_transaction);
static void split_transaction_timeout_callback(struct timer_list *timer)
{
- struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
+ struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- scoped_guard(spinlock_irqsave, &card->lock) {
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
if (list_empty(&t->link))
return;
list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << t->tlabel);
}
if (!t->with_tstamp) {
@@ -135,14 +137,18 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- guard(spinlock_irqsave)(&card->lock);
+ unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ delta = card->split_timeout.jiffies;
+ mod_timer(&t->split_timeout_timer, jiffies + delta);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -162,8 +168,12 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
- t->split_timeout_cycle =
- compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ }
start_split_transaction_timeout(t, card);
break;
}
@@ -259,18 +269,21 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
+__must_hold(&card->transactions.lock)
{
int tlabel;
- tlabel = card->current_tlabel;
- while (card->tlabel_mask & (1ULL << tlabel)) {
+ lockdep_assert_held(&card->transactions.lock);
+
+ tlabel = card->transactions.current_tlabel;
+ while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
- if (tlabel == card->current_tlabel)
+ if (tlabel == card->transactions.current_tlabel)
return -EBUSY;
}
- card->current_tlabel = (tlabel + 1) & 0x3f;
- card->tlabel_mask |= 1ULL << tlabel;
+ card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
+ card->transactions.tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
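allocate_tlabel() hands out the 6-bit transaction labels round-robin: a 64-bit mask marks labels still in flight, and the search resumes after the most recently issued label so a just-released value is not immediately reused while a stale response could still arrive. The same algorithm as a runnable userspace sketch, without the locking:

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace model of allocate_tlabel(): 64 transaction labels, a
 * bitmask of busy ones, and a round-robin starting point. */
static uint64_t tlabel_mask;
static int current_tlabel;

static int allocate_tlabel(void)
{
	int tlabel = current_tlabel;

	while (tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == current_tlabel)
			return -1;	/* all 64 labels in flight */
	}
	current_tlabel = (tlabel + 1) & 0x3f;
	tlabel_mask |= 1ULL << tlabel;
	return tlabel;
}

static void release_tlabel(int tlabel)
{
	tlabel_mask &= ~(1ULL << tlabel);
}

int main(void)
{
	int a = allocate_tlabel();
	int b = allocate_tlabel();

	release_tlabel(a);
	/* The next allocation continues past b; it does not reuse a. */
	printf("a=%d b=%d next=%d\n", a, b, allocate_tlabel());
	return 0;
}
```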
@@ -331,7 +344,6 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
- unsigned long flags;
int tlabel;
/*
@@ -339,11 +351,11 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
* the list while holding the card spinlock.
*/
- spin_lock_irqsave(&card->lock, flags);
-
- tlabel = allocate_tlabel(card);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ tlabel = allocate_tlabel(card);
if (tlabel < 0) {
- spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
@@ -368,15 +380,22 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
-
- fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
- speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
- list_add_tail(&t->link, &card->transaction_list);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ // The node_id field of fw_card can be updated when handling SelfIDComplete.
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
+ generation, speed, offset, payload, length);
+ }
- spin_unlock_irqrestore(&card->lock, flags);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ list_add_tail(&t->link, &card->transactions.list);
+ // Safe with no lock, since the index field of fw_card is immutable once assigned.
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
@@ -431,7 +450,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
- destroy_timer_on_stack(&t.split_timeout_timer);
+ timer_destroy_on_stack(&t.split_timeout_timer);
return d.rcode;
}
@@ -458,7 +477,7 @@ static struct fw_packet phy_config_packet = {
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- long timeout = DIV_ROUND_UP(HZ, 10);
+ long timeout = msecs_to_jiffies(100);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
@@ -550,6 +569,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static void complete_address_handler(struct kref *kref)
+{
+ struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
+
+ complete(&handler->done);
+}
+
+static void get_address_handler(struct fw_address_handler *handler)
+{
+ kref_get(&handler->kref);
+}
+
+static int put_address_handler(struct fw_address_handler *handler)
+{
+ return kref_put(&handler->kref, complete_address_handler);
+}
+
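These helpers give every address handler a reference count whose final put fires a completion, which lets fw_core_remove_address_handler() below block until in-flight callbacks have drained. A compact userspace model of that shutdown handshake, with an atomic flag standing in for struct completion:

```c
#include <stdio.h>
#include <stdatomic.h>

/* Callers take a reference around each callback; the remover drops the
 * initial reference and, if callbacks are still running, waits until
 * the last put signals completion. */
struct handler {
	atomic_int refcount;
	atomic_int done;	/* stands in for struct completion */
};

static void get_handler(struct handler *h)
{
	atomic_fetch_add(&h->refcount, 1);
}

static int put_handler(struct handler *h)
{
	if (atomic_fetch_sub(&h->refcount, 1) == 1) {
		atomic_store(&h->done, 1);	/* complete(&h->done) */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct handler h = { .refcount = 1, .done = 0 };

	get_handler(&h);	/* a callback is in flight */
	if (!put_handler(&h))	/* remover drops the initial ref */
		printf("remover would wait_for_completion() here\n");
	put_handler(&h);	/* callback finishes, signals done */
	printf("done=%d, safe to free\n", atomic_load(&h.done));
	return 0;
}
```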
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -557,9 +593,10 @@ const struct fw_address_region fw_unit_space_region =
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in the workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
*
* To be called in process context.
* Return value: 0 on success, non-zero otherwise.
@@ -595,6 +632,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
+ init_completion(&handler->done);
+ kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -620,6 +659,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
+
+ if (!put_address_handler(handler))
+ wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
@@ -756,11 +798,14 @@ EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
u32 timestamp;
- cycles = card->split_timeout_cycles;
+ lockdep_assert_held(&card->split_timeout.lock);
+
+ cycles = card->split_timeout.cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
@@ -811,9 +856,12 @@ static struct fw_request *allocate_request(struct fw_card *card,
return NULL;
kref_init(&request->kref);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);
+
request->response.speed = p->speed;
- request->response.timestamp =
- compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -913,22 +961,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ get_address_handler(handler);
}
- if (!handler)
+ if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ // Outside the RCU read-side critical section. Without spinlock. With reference count.
+ handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+ request->data, request->length, handler->callback_data);
+ put_address_handler(handler);
}
+// To use the kmalloc allocator efficiently, this should be a power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
- struct fw_address_handler *handler;
- int tcode, destination, source;
+ struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+ struct fw_address_handler *handler, **handlers;
+ int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -949,15 +1006,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
+ count = 0;
+ handlers = buffer_on_kernel_stack;
+ buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ if (is_enclosing_handler(handler, offset, request->length)) {
+ if (count >= buffer_size) {
+ int next_size = buffer_size * 2;
+ struct fw_address_handler **buffer_on_kernel_heap;
+
+ if (handlers == buffer_on_kernel_stack)
+ buffer_on_kernel_heap = NULL;
+ else
+ buffer_on_kernel_heap = handlers;
+
+ buffer_on_kernel_heap =
+ krealloc_array(buffer_on_kernel_heap, next_size,
+ sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+ // FCP is used for purposes unrelated to significant system
+ // resources (e.g. storage or networking), so allocation
+ // failures are not considered so critical.
+ if (!buffer_on_kernel_heap)
+ break;
+
+ if (handlers == buffer_on_kernel_stack) {
+ memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+ sizeof(buffer_on_kernel_stack));
+ }
+
+ handlers = buffer_on_kernel_heap;
+ buffer_size = next_size;
+ }
+ get_address_handler(handler);
+ handlers[count++] = handler;
+ }
}
}
+ for (i = 0; i < count; ++i) {
+ handler = handlers[i];
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ put_address_handler(handler);
+ }
+
+ if (handlers != buffer_on_kernel_stack)
+ kfree(handlers);
+
fw_send_response(card, request, RCODE_COMPLETE);
}
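The handler-collection loop starts with a small on-stack array and doubles into the heap with krealloc_array() only if more matching handlers turn up, passing NULL on the first spill so the call degenerates to a plain allocation. A userspace rendition of the same grow-and-copy pattern using realloc():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_SLOTS 4	/* mirrors BUFFER_ON_KERNEL_STACK_SIZE */

int main(void)
{
	int on_stack[STACK_SLOTS];
	int *slots = on_stack;
	int capacity = STACK_SLOTS, count = 0, i;

	for (i = 0; i < 10; i++) {
		if (count >= capacity) {
			int next = capacity * 2;
			/* First spill: the heap pointer starts from NULL so
			 * realloc() behaves like malloc(); the stack contents
			 * are copied over exactly once. */
			int *heap = (slots == on_stack) ? NULL : slots;

			heap = realloc(heap, next * sizeof(*heap));
			if (!heap)
				break;	/* keep what was already collected */
			if (slots == on_stack)
				memcpy(heap, on_stack, sizeof(on_stack));
			slots = heap;
			capacity = next;
		}
		slots[count++] = i;
	}

	printf("collected %d items, capacity %d\n", count, capacity);
	if (slots != on_stack)
		free(slots);
	return 0;
}
```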
@@ -1039,12 +1136,14 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -1124,7 +1223,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
start = (offset - topology_map_region.start) / 4;
- memcpy(payload, &card->topology_map[start], length);
+
+ // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+ memcpy(payload, &card->topology_map.buffer[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1139,16 +1242,17 @@ static const struct fw_address_region registers_region =
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
- cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+ cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
- card->split_timeout_cycles = cycles;
- card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+ card->split_timeout.cycles = cycles;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}
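update_split_timeout() folds the two CSR registers into one cycle count: SPLIT_TIMEOUT_HI holds seconds, the top 13 bits of SPLIT_TIMEOUT_LO hold 1/8000-second cycles, and the sum is clamped before isoc_cycles_to_jiffies() converts it for the timer. A worked userspace example of the arithmetic, assuming HZ=250 for illustration:

```c
#include <stdio.h>

#define HZ 250	/* assumption: a common CONFIG_HZ value */

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Register values modeling an 800 ms split timeout: 0 seconds in
	 * SPLIT_TIMEOUT_HI plus 6400 cycles in the top 13 bits of
	 * SPLIT_TIMEOUT_LO (one second is 8000 isochronous cycles). */
	unsigned int hi = 0, lo = 6400u << 19;
	unsigned int cycles = hi * 8000 + (lo >> 19);
	unsigned long long usecs;
	unsigned long jiffies;

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp_uint(cycles, 800u, 3u * 8000u);

	/* isoc_cycles_to_jiffies(): cycles -> microseconds -> jiffies */
	usecs = (unsigned long long)cycles * 1000000 / 8000;
	jiffies = (unsigned long)((usecs * HZ + 999999) / 1000000);

	printf("cycles=%u -> %llu us -> %lu jiffies (HZ=%d)\n",
	       cycles, usecs, jiffies, HZ);
	return 0;
}
```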
static void handle_registers(struct fw_card *card, struct fw_request *request,
@@ -1198,12 +1302,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_hi);
+ *data = cpu_to_be32(card->split_timeout.hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_hi = be32_to_cpu(*data) & 7;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1211,12 +1318,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_lo);
+ *data = cpu_to_be32(card->split_timeout.lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0ae2c84ecafe..e67395ce26b5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,6 +27,11 @@ struct fw_packet;
/* -card */
+// This is the arbitrary value we use to indicate a mismatched gap count.
+#define GAP_COUNT_MISMATCHED 0
+
+#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000))
+
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
@@ -80,7 +85,7 @@ struct fw_card_driver {
/*
* Allow the specified node ID to do direct DMA out and in of
* host memory. The card will disable this for all node when
- * a bus reset happens, so driver need to reenable this after
+ * a bus reset happens, so the driver needs to re-enable this after
* bus reset. Returns 0 on success, -ENODEV if the card
* doesn't support this, -ESTALE if the generation doesn't
* match.
@@ -167,6 +172,9 @@ static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_fun
/* -topology */
+// The initial value of the BUS_MANAGER_ID register, indicating that no bus manager is registered.
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -194,8 +202,8 @@ struct fw_node {
/* For serializing node topology into a list. */
struct list_head link;
- /* Upper layer specific data. */
- void *data;
+ // The device when already associated, else NULL.
+ struct fw_device *device;
struct fw_node *ports[] __counted_by(port_count);
};
@@ -219,6 +227,16 @@ static inline void fw_node_put(struct fw_node *node)
kref_put(&node->kref, release_node);
}
+static inline struct fw_device *fw_node_get_device(struct fw_node *node)
+{
+ return node->device;
+}
+
+static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
+{
+ node->device = device;
+}
+
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
index 2f123c6b0a16..97478a96d1c9 100644
--- a/drivers/firewire/device-attribute-test.c
+++ b/drivers/firewire/device-attribute-test.c
@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1bf0e15c1540..6d6446713539 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1007,7 +1007,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
@@ -1026,7 +1026,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7ee55c2804de..030aed5453a1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -101,7 +101,7 @@ struct ar_context {
void *pointer;
unsigned int last_buffer_index;
u32 regs;
- struct tasklet_struct tasklet;
+ struct work_struct work;
};
struct context;
@@ -128,7 +128,6 @@ struct context {
int total_allocation;
u32 current_bus;
bool running;
- bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
@@ -157,8 +156,12 @@ struct context {
int prev_z;
descriptor_callback_t callback;
+};
- struct tasklet_struct tasklet;
+struct at_context {
+ struct context context;
+ struct work_struct work;
+ bool flushing;
};
struct iso_context {
@@ -204,8 +207,8 @@ struct fw_ohci {
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
- struct context at_request_ctx;
- struct context at_response_ctx;
+ struct at_context at_request_ctx;
+ struct at_context at_response_ctx;
u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
@@ -225,13 +228,10 @@ struct fw_ohci {
__le32 *self_id;
dma_addr_t self_id_bus;
- struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
-static struct workqueue_struct *selfid_workqueue;
-
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -390,225 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
-#define OHCI_PARAM_DEBUG_AT_AR 1
-#define OHCI_PARAM_DEBUG_SELFIDS 2
-#define OHCI_PARAM_DEBUG_IRQS 4
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
- ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
- ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
- ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", or a combination, or all = -1)");
-
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
-static void log_irqs(struct fw_ohci *ohci, u32 evt)
-{
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
- return;
-
- ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
- evt & OHCI1394_selfIDComplete ? " selfID" : "",
- evt & OHCI1394_RQPkt ? " AR_req" : "",
- evt & OHCI1394_RSPkt ? " AR_resp" : "",
- evt & OHCI1394_reqTxComplete ? " AT_req" : "",
- evt & OHCI1394_respTxComplete ? " AT_resp" : "",
- evt & OHCI1394_isochRx ? " IR" : "",
- evt & OHCI1394_isochTx ? " IT" : "",
- evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
- evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
- evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
- evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
- evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
- evt & OHCI1394_busReset ? " busReset" : "",
- evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
- OHCI1394_RSPkt | OHCI1394_reqTxComplete |
- OHCI1394_respTxComplete | OHCI1394_isochRx |
- OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
- OHCI1394_regAccessFail | OHCI1394_busReset)
- ? " ?" : "");
-}
-
-static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
-{
- static const char *const speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
- };
- static const char *const power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
- };
- static const char port[] = {
- [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
- [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
- [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
- [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
- };
- struct self_id_sequence_enumerator enumerator = {
- .cursor = ohci->self_id_buffer,
- .quadlet_count = self_id_count,
- };
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
- return;
-
- ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
-
- while (enumerator.quadlet_count > 0) {
- unsigned int quadlet_count;
- unsigned int port_index;
- const u32 *s;
- int i;
-
- s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
- if (IS_ERR(s))
- break;
-
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s,
- phy_packet_self_id_get_phy_id(*s),
- port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-
- port_index = 3;
- for (i = 1; i < quadlet_count; ++i) {
- ohci_notice(ohci,
- "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- s[i],
- phy_packet_self_id_get_phy_id(s[i]),
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
- );
-
- port_index += 8;
- }
- }
-}
-
-static const char *evts[] = {
- [0x00] = "evt_no_status", [0x01] = "-reserved-",
- [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
- [0x04] = "evt_underrun", [0x05] = "evt_overrun",
- [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
- [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
- [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
- [0x0c] = "-reserved-", [0x0d] = "-reserved-",
- [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
- [0x10] = "-reserved-", [0x11] = "ack_complete",
- [0x12] = "ack_pending ", [0x13] = "-reserved-",
- [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
- [0x16] = "ack_busy_B", [0x17] = "-reserved-",
- [0x18] = "-reserved-", [0x19] = "-reserved-",
- [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
- [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
- [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
- [0x20] = "pending/cancelled",
-};
-
-static void log_ar_at_event(struct fw_ohci *ohci,
- char dir, int speed, u32 *header, int evt)
-{
- static const char *const tcodes[] = {
- [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
- [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
- [TCODE_WRITE_RESPONSE] = "W resp",
- [0x3] = "-reserved-",
- [TCODE_READ_QUADLET_REQUEST] = "QR req",
- [TCODE_READ_BLOCK_REQUEST] = "BR req",
- [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
- [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
- [TCODE_CYCLE_START] = "cycle start",
- [TCODE_LOCK_REQUEST] = "Lk req",
- [TCODE_STREAM_DATA] = "async stream packet",
- [TCODE_LOCK_RESPONSE] = "Lk resp",
- [0xc] = "-reserved-",
- [0xd] = "-reserved-",
- [TCODE_LINK_INTERNAL] = "link internal",
- [0xf] = "-reserved-",
- };
- int tcode = async_header_get_tcode(header);
- char specific[12];
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
- return;
-
- if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
-
- if (evt == OHCI1394_evt_bus_reset) {
- ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
- return;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_READ_QUADLET_RESPONSE:
- case TCODE_CYCLE_START:
- snprintf(specific, sizeof(specific), " = %08x",
- be32_to_cpu((__force __be32)header[3]));
- break;
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_RESPONSE:
- case TCODE_LOCK_REQUEST:
- case TCODE_LOCK_RESPONSE:
- snprintf(specific, sizeof(specific), " %x,%x",
- async_header_get_data_length(header),
- async_header_get_extended_tcode(header));
- break;
- default:
- specific[0] = '\0';
- }
-
- switch (tcode) {
- case TCODE_STREAM_DATA:
- ohci_notice(ohci, "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
- break;
- case TCODE_LINK_INTERNAL:
- ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
- break;
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_QUADLET_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_LOCK_REQUEST:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
- break;
- default:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], specific);
- }
-}
-
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -954,8 +739,6 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
- log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
-
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
@@ -974,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_work).
+ * at a slightly incorrect time (in handle_selfid_complete_event).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1016,9 +799,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
}
}
-static void ar_context_tasklet(unsigned long data)
+static void ohci_ar_context_work(struct work_struct *work)
{
- struct ar_context *ctx = (struct ar_context *)data;
+ struct ar_context *ctx = from_work(ctx, work, work);
unsigned int end_buffer_index, end_buffer_offset;
void *p, *end;
@@ -1026,23 +809,19 @@ static void ar_context_tasklet(unsigned long data)
if (!p)
return;
- end_buffer_index = ar_search_last_active_buffer(ctx,
- &end_buffer_offset);
+ end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
if (end_buffer_index < ar_first_buffer_index(ctx)) {
- /*
- * The filled part of the overall buffer wraps around; handle
- * all packets up to the buffer end here. If the last packet
- * wraps around, its tail will be visible after the buffer end
- * because the buffer start pages are mapped there again.
- */
+ // The filled part of the overall buffer wraps around; handle all packets up to the
+ // buffer end here. If the last packet wraps around, its tail will be visible after
+ // the buffer end because the buffer start pages are mapped there again.
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end);
if (p < buffer_end)
goto error;
- /* adjust p to point back into the actual buffer */
+ // adjust p to point back into the actual buffer
p -= AR_BUFFERS * PAGE_SIZE;
}
@@ -1057,7 +836,6 @@ static void ar_context_tasklet(unsigned long data)
ar_recycle_buffers(ctx, end_buffer_index);
return;
-
error:
ctx->pointer = NULL;
}
@@ -1073,7 +851,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
ctx->regs = regs;
ctx->ohci = ohci;
- tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+ INIT_WORK(&ctx->work, ohci_ar_context_work);
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
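
This hunk shows the recurring shape of the tasklet-to-workqueue conversion in this patch: the context embeds a work_struct, the handler recovers it with from_work() (a typed container_of()), and the IRQ handler queues work instead of scheduling a tasklet. A condensed sketch with illustrative names:

struct my_ctx {
	struct work_struct work;
	/* ... context state ... */
};

static void my_ctx_work(struct work_struct *work)
{
	/* from_work(var, work_ptr, member) wraps container_of() */
	struct my_ctx *ctx = from_work(ctx, work, work);

	/* ... body that used to run in the tasklet ... */
}

/* setup:        INIT_WORK(&ctx->work, my_ctx_work);
 * from hardIRQ: queue_work(wq, &ctx->work);  the wq being the card's
 *               async_wq in this driver
 */
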
@@ -1181,16 +959,16 @@ static void context_retire_descriptors(struct context *ctx)
}
}
-static void context_tasklet(unsigned long data)
+static void ohci_at_context_work(struct work_struct *work)
{
- struct context *ctx = (struct context *) data;
+ struct at_context *ctx = from_work(ctx, work, work);
- context_retire_descriptors(ctx);
+ context_retire_descriptors(&ctx->context);
}
static void ohci_isoc_context_work(struct work_struct *work)
{
- struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct fw_iso_context *base = from_work(base, work, work);
struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
context_retire_descriptors(&isoc_ctx->context);
@@ -1248,7 +1026,6 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci,
ctx->buffer_tail = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
- tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
ctx->callback = callback;
/*
@@ -1384,21 +1161,21 @@ struct driver_data {
};
/*
- * This function apppends a packet to the DMA queue for transmission.
+ * This function appends a packet to the DMA queue for transmission.
* Must always be called with the ochi->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
-static int at_context_queue_packet(struct context *ctx,
- struct fw_packet *packet)
+static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
- struct fw_ohci *ohci = ctx->ohci;
+ struct context *context = &ctx->context;
+ struct fw_ohci *ohci = context->ohci;
dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
- d = context_get_descriptors(ctx, 4, &d_bus);
+ d = context_get_descriptors(context, 4, &d_bus);
if (d == NULL) {
packet->ack = RCODE_SEND_ERROR;
return -1;
@@ -1428,7 +1205,7 @@ static int at_context_queue_packet(struct context *ctx,
ohci1394_at_data_set_destination_id(header,
async_header_get_destination(packet->header));
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
} else {
ohci1394_at_data_set_destination_offset(header,
@@ -1517,37 +1294,42 @@ static int at_context_queue_packet(struct context *ctx,
return -1;
}
- context_append(ctx, d, z, 4 - z);
+ context_append(context, d, z, 4 - z);
- if (ctx->running)
- reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ if (context->running)
+ reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
else
- context_run(ctx, 0);
+ context_run(context, 0);
return 0;
}
-static void at_context_flush(struct context *ctx)
+static void at_context_flush(struct at_context *ctx)
{
- tasklet_disable(&ctx->tasklet);
+	// Avoid deadlock in case of a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return;
+
+ disable_work_sync(&ctx->work);
- ctx->flushing = true;
- context_tasklet((unsigned long)ctx);
- ctx->flushing = false;
+ WRITE_ONCE(ctx->flushing, true);
+ ohci_at_context_work(&ctx->work);
+ WRITE_ONCE(ctx->flushing, false);
- tasklet_enable(&ctx->tasklet);
+ enable_work(&ctx->work);
}
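
The rewritten flush leans on three workqueue primitives with no tasklet-era equivalent: current_work() detects re-entry from the work item itself (disable_work_sync() would otherwise wait on itself forever), disable_work_sync() guarantees no instance runs past the call, and enable_work() re-arms queueing. A generic sketch, reusing the hypothetical my_ctx above and assuming it carries a bool flushing member:

static void flush_my_work(struct my_ctx *ctx)
{
	/* disable_work_sync() flushes a running instance, so calling it
	 * from within that very instance would deadlock */
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return;

	disable_work_sync(&ctx->work);	/* no instance runs past here */
	WRITE_ONCE(ctx->flushing, true);	/* paired with READ_ONCE() readers */
	my_ctx_work(&ctx->work);		/* run the handler synchronously */
	WRITE_ONCE(ctx->flushing, false);
	enable_work(&ctx->work);		/* allow queueing again */
}
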
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
+ struct at_context *ctx = container_of(context, struct at_context, context);
+ struct fw_ohci *ohci = ctx->context.ohci;
struct driver_data *driver_data;
struct fw_packet *packet;
- struct fw_ohci *ohci = context->ohci;
int evt;
- if (last->transfer_status == 0 && !context->flushing)
+ if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
/* This descriptor isn't done yet, stop iteration. */
return 0;
@@ -1564,8 +1346,6 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
-
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1581,7 +1361,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_missing_ack:
- if (context->flushing)
+ if (READ_ONCE(ctx->flushing))
packet->ack = RCODE_GENERATION;
else {
/*
@@ -1603,7 +1383,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_no_status:
- if (context->flushing) {
+ if (READ_ONCE(ctx->flushing)) {
packet->ack = RCODE_GENERATION;
break;
}
@@ -1700,13 +1480,14 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_core_handle_response(&ohci->card, &response);
}
-static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
u64 offset, csr;
- if (ctx == &ctx->ohci->at_request_ctx) {
+ if (ctx == &ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
offset = async_header_get_offset(packet->header);
@@ -1714,60 +1495,80 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
/* Handle config rom reads. */
if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
- handle_local_rom(ctx->ohci, packet, csr);
+ handle_local_rom(ohci, packet, csr);
else switch (csr) {
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
- handle_local_lock(ctx->ohci, packet, csr);
+ handle_local_lock(ohci, packet, csr);
break;
default:
- if (ctx == &ctx->ohci->at_request_ctx)
- fw_core_handle_request(&ctx->ohci->card, packet);
+ if (ctx == &ohci->at_request_ctx)
+ fw_core_handle_request(&ohci->card, packet);
else
- fw_core_handle_response(&ctx->ohci->card, packet);
+ fw_core_handle_response(&ohci->card, packet);
break;
}
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
packet->ack = ACK_COMPLETE;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ spin_lock_irqsave(&ohci->lock, flags);
- if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
- ctx->ohci->generation == packet->generation) {
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ if (async_header_get_destination(packet->header) == ohci->node_id &&
+ ohci->generation == packet->generation) {
+ spin_unlock_irqrestore(&ohci->lock, flags);
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
handle_local_request(ctx, packet);
return;
}
ret = at_context_queue_packet(ctx, packet);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ spin_unlock_irqrestore(&ohci->lock, flags);
if (ret < 0) {
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
+ static const char *const evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+ };
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
@@ -2026,10 +1827,9 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
return self_id_count;
}
-static void bus_reset_work(struct work_struct *work)
+static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
- struct fw_ohci *ohci =
- container_of(work, struct fw_ohci, bus_reset_work);
+ struct fw_ohci *ohci = data;
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2040,11 +1840,11 @@ static void bus_reset_work(struct work_struct *work)
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
- return;
+ goto end;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
ohci_notice(ohci, "malconfigured bus\n");
- return;
+ goto end;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
@@ -2058,8 +1858,11 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
- return;
+ goto end;
}
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
+
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
@@ -2070,7 +1873,7 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
- return;
+ goto end;
}
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
@@ -2097,7 +1900,7 @@ static void bus_reset_work(struct work_struct *work)
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
- return;
+ goto end;
}
ohci->self_id_buffer[j] = id;
}
@@ -2107,13 +1910,13 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
- return;
+ goto end;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
- return;
+ goto end;
}
rmb();
@@ -2135,14 +1938,14 @@ static void bus_reset_work(struct work_struct *work)
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
- return;
+ goto end;
}
// FIXME: Document how the locking works.
scoped_guard(spinlock_irq, &ohci->lock) {
ohci->generation = -1; // prevent AT packet queueing
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
+ context_stop(&ohci->at_request_ctx.context);
+ context_stop(&ohci->at_response_ctx.context);
}
/*
@@ -2192,12 +1995,12 @@ static void bus_reset_work(struct work_struct *work)
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
- log_selfids(ohci, generation, self_id_count);
-
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
+end:
+ return IRQ_HANDLED;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -2211,11 +2014,6 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- if (unlikely(param_debug > 0)) {
- dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superceded by tracepoints events, and deprecated.");
- }
-
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2223,32 +2021,22 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
- log_irqs(ohci, event);
- // The flag is masked again at bus_reset_work() scheduled by selfID event.
+
+	// The flag is masked again at handle_selfid_complete_event(), scheduled by the selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete) {
- if (trace_self_id_complete_enabled()) {
- u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
-
- trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
- has_be_header_quirk(ohci));
- }
- queue_work(selfid_workqueue, &ohci->bus_reset_work);
- }
-
if (event & OHCI1394_RQPkt)
- tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
if (event & OHCI1394_RSPkt)
- tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
if (event & OHCI1394_reqTxComplete)
- tasklet_schedule(&ohci->at_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
if (event & OHCI1394_respTxComplete)
- tasklet_schedule(&ohci->at_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
if (event & OHCI1394_isochRx) {
iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
@@ -2308,7 +2096,10 @@ static irqreturn_t irq_handler(int irq, void *data)
} else
flush_writes(ohci);
- return IRQ_HANDLED;
+ if (event & OHCI1394_selfIDComplete)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
}
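
The bus-reset path thus moves from a self-managed workqueue to the threaded half of the IRQ handler: the hard handler returns IRQ_WAKE_THREAD only for the SelfIDComplete event, and the kernel then runs the thread function registered with request_threaded_irq(). A minimal sketch of the split; the names and the event-read helper are illustrative:

static irqreturn_t my_hard_handler(int irq, void *data)
{
	u32 event = my_read_and_clear_events(data);	/* assumed helper */

	if (!event)
		return IRQ_NONE;

	/* cheap events are handled right here, in hardIRQ context */

	if (event & MY_EXPENSIVE_EVENT)
		return IRQ_WAKE_THREAD;	/* defer to the thread below */
	return IRQ_HANDLED;
}

static irqreturn_t my_thread_handler(int irq, void *data)
{
	/* lengthy processing in a sleepable kernel thread */
	return IRQ_HANDLED;
}

/* err = request_threaded_irq(irq, my_hard_handler, my_thread_handler,
 *			       0, "my-driver", data); */
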
static int software_reset(struct fw_ohci *ohci)
@@ -2528,7 +2319,7 @@ static int ohci_enable(struct fw_card *card,
* They shouldn't do that in this initial case where the link
* isn't enabled. This means we have to use the same
* workaround here, setting the bus header to 0 and then write
- * the right values in the bus reset tasklet.
+ * the right values in the bus reset work item.
*/
if (config_rom) {
@@ -2614,14 +2405,14 @@ static int ohci_set_config_rom(struct fw_card *card,
* ConfigRomHeader and BusOptions doesn't honor the
* noByteSwapData bit, so with a be32 config rom, the
* controller will load be32 values in to these registers
- * during the atomic update, even on litte endian
+ * during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
- * config rom isn't ready yet. In the bus reset tasklet we
+ * config rom isn't ready yet. In the bus reset work item we
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_work).
+ * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2659,7 +2450,7 @@ static int ohci_set_config_rom(struct fw_card *card,
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
- * mappings in the bus reset tasklet, since the OHCI
+ * mappings in the bus reset work item, since the OHCI
* controller could need to access it before the bus reset
* takes effect.
*/
@@ -2686,11 +2477,14 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct context *ctx = &ohci->at_request_ctx;
+ struct at_context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable_in_atomic(&ctx->tasklet);
+	// Avoid deadlock in case of a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+ disable_work_sync(&ctx->work);
if (packet->ack != 0)
goto out;
@@ -2699,7 +2493,6 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
@@ -2709,7 +2502,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
- tasklet_enable(&ctx->tasklet);
+ enable_work(&ctx->work);
return ret;
}
@@ -3301,8 +3094,7 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
}
}
-#ifdef CONFIG_PM
-static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
@@ -3319,7 +3111,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
-#endif
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
@@ -3691,7 +3482,6 @@ static int pci_probe(struct pci_dev *dev,
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, flags, irq, err;
- size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
@@ -3718,20 +3508,17 @@ static int pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
-
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
return -ENXIO;
}
- err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
- if (err) {
+ ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
+ if (IS_ERR(ohci->registers)) {
ohci_err(ohci, "request and map MMIO resource unavailable\n");
return -ENXIO;
}
- ohci->registers = pcim_iomap_table(dev)[0];
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if ((ohci_quirks[i].vendor == dev->vendor) &&
@@ -3770,15 +3557,17 @@ static int pci_probe(struct pci_dev *dev,
if (err < 0)
return err;
- err = context_init(&ohci->at_request_ctx, ohci,
+ err = context_init(&ohci->at_request_ctx.context, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
- err = context_init(&ohci->at_response_ctx, ohci,
+ err = context_init(&ohci->at_response_ctx.context, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
@@ -3786,8 +3575,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
@@ -3801,8 +3589,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
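
The allocation change is behaviour-preserving but safer: devm_kcalloc() checks the n * size multiplication for overflow, which the open-coded sizeof() * n form did not. Schematically:

/* before: the multiplication can silently overflow for absurd n */
list = devm_kzalloc(dev, sizeof(struct iso_context) * n, GFP_KERNEL);

/* after: devm_kcalloc() returns NULL if n * size would overflow */
list = devm_kcalloc(dev, n, sizeof(struct iso_context), GFP_KERNEL);
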
@@ -3827,7 +3614,9 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = request_threaded_irq(irq, irq_handler, NULL,
+	// IRQF_ONESHOT is not applied, so other events are still handled by the hardIRQ
+	// handler while the threaded IRQ handler runs for the SelfIDComplete event.
+ err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
ohci);
if (err < 0) {
@@ -3871,7 +3660,6 @@ static void pci_remove(struct pci_dev *dev)
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
}
- cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
@@ -3889,39 +3677,25 @@ static void pci_remove(struct pci_dev *dev)
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused pci_suspend(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
software_reset(ohci);
- err = pci_save_state(dev);
- if (err) {
- ohci_err(ohci, "pci_save_state failed\n");
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
- pmac_ohci_off(dev);
+ pmac_ohci_off(pdev);
return 0;
}
-static int pci_resume(struct pci_dev *dev)
+
+static int __maybe_unused pci_resume(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
int err;
- pmac_ohci_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- ohci_err(ohci, "pci_enable_device failed\n");
- return err;
- }
+ pmac_ohci_on(pdev);
/* Some systems don't setup GUID register on resume from ram */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
@@ -3938,7 +3712,6 @@ static int pci_resume(struct pci_dev *dev)
return 0;
}
-#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
@@ -3947,30 +3720,24 @@ static const struct pci_device_id pci_table[] = {
MODULE_DEVICE_TABLE(pci, pci_table);
+static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
+
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
-#ifdef CONFIG_PM
- .resume = pci_resume,
- .suspend = pci_suspend,
-#endif
+ .driver.pm = &pci_pm_ops,
};
static int __init fw_ohci_init(void)
{
- selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
- if (!selfid_workqueue)
- return -ENOMEM;
-
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
- destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
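
The suspend/resume conversion follows the standard migration from legacy PCI power-management callbacks to dev_pm_ops: once driver.pm is set, the PCI core itself saves and restores config space and handles the D-state transitions, so the explicit pci_save_state()/pci_set_power_state()/pci_enable_device() calls disappear from the driver. The general shape, with placeholder names:

static int __maybe_unused my_suspend(struct device *dev)
{
	/* quiesce the device; no PCI PM bookkeeping needed here */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	/* re-program device state lost across suspend */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	/* ... probe/remove/id_table ... */
	.driver.pm = &my_pm_ops,	/* replaces .suspend/.resume */
};
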
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 827dee0f57dd..1a19828114cf 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1490,7 +1490,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
return retval;
}
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+static int sbp2_scsi_sdev_init(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1506,8 +1506,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int sbp2_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sbp2_scsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1590,8 +1590,8 @@ static const struct scsi_host_template scsi_driver_template = {
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
- .slave_alloc = sbp2_scsi_slave_alloc,
- .device_configure = sbp2_scsi_device_configure,
+ .sdev_init = sbp2_scsi_sdev_init,
+ .sdev_configure = sbp2_scsi_sdev_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..bbd2155d8483 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
- depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
@@ -106,7 +105,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
@@ -212,9 +211,20 @@ config SYSFB_SIMPLEFB
If unsure, say Y.
+config TH1520_AON_PROTOCOL
+ tristate "Always-On firmware protocol"
+ depends on ARCH_THEAD || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Power, clock, and resource management capabilities on the TH1520 SoC are
+ managed by the E902 core. Firmware running on this core communicates with
+	  the kernel through the Always-On protocol, using a hardware mailbox as the medium.
+ Say yes if you need such capabilities.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
+ default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@@ -257,6 +267,23 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+if TURRIS_MOX_RWTM
+
+config TURRIS_MOX_RWTM_KEYCTL
+ bool "Turris Mox rWTM ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ select CZNIC_PLATFORMS
+ select TURRIS_SIGNING_KEY
+ help
+	  Say Y here to add support for ECDSA message signing with the board's private
+ key (each Turris Mox has an ECDSA private key generated in the secure
+ coprocessor when manufactured). This functionality is exposed via the
+ keyctl() syscall.
+
+endif # TURRIS_MOX_RWTM
+
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/cirrus/Kconfig"
@@ -267,6 +294,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
+source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 7a8d486e718f..4ddec2820c96 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
+obj-$(CONFIG_TH1520_AON_PROTOCOL) += thead,th1520-aon.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
@@ -33,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
+obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index eb17d03b66fe..50bfe56c755e 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,7 +15,7 @@
#include "common.h"
-#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
+#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+ return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
-static void ffa_devices_unregister(void)
+void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
+EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@@ -187,13 +188,17 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
int id, ret;
struct device *dev;
struct ffa_device *ffa_dev;
+ if (!part_info)
+ return NULL;
+
id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
if (id < 0)
return NULL;
@@ -207,12 +212,14 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
- ffa_dev->vm_id = vm_id;
+ ffa_dev->vm_id = part_info->id;
+ ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- uuid_copy(&ffa_dev->uuid, uuid);
+ uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index b14cbdae94e8..65bf1685350a 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -44,7 +44,7 @@
#include "common.h"
-#define FFA_DRIVER_VERSION FFA_VERSION_1_1
+#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@@ -110,11 +110,10 @@ struct ffa_drv_info {
struct work_struct sched_recv_irq_work;
struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
- struct mutex notify_lock; /* lock to protect notifier hashtable */
+ rwlock_t notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
-static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -276,11 +283,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
- for (idx = 0; idx < count; idx++)
- memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
- buf_sz);
+ for (idx = 0; idx < count; idx++) {
+ struct ffa_partition_info_le {
+ __le16 id;
+ __le16 exec_ctxt;
+ __le32 properties;
+ uuid_t uuid;
+ } *rx_buf = drv_info->rx_buffer + idx * sz;
+ struct ffa_partition_info *buf = buffer + idx;
+
+ buf->id = le16_to_cpu(rx_buf->id);
+ buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
+ buf->properties = le32_to_cpu(rx_buf->properties);
+ if (buf_sz > 8)
+ import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
+ }
- ffa_rx_release();
+ if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+ ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);
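
The memcpy() replacement above is an endianness fix: the firmware fills the rx buffer with little-endian fields, so each record is now read through a __le-typed view and converted, rather than copied raw into a native-endian struct. A generic sketch of the idiom; wire_rec and unpack_rec are illustrative names:

struct wire_rec {			/* layout as defined by the firmware */
	__le16 id;
	__le16 exec_ctxt;
	__le32 properties;
	uuid_t uuid;
};

static void unpack_rec(struct ffa_partition_info *out, const void *rx)
{
	const struct wire_rec *in = rx;

	out->id = le16_to_cpu(in->id);
	out->exec_ctxt = le16_to_cpu(in->exec_ctxt);
	out->properties = le32_to_cpu(in->properties);
	import_uuid(&out->uuid, (const u8 *)&in->uuid);
}
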
@@ -295,14 +315,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+#define PART_INFO_ID_MASK GENMASK(15, 0)
+#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
+#define PART_INFO_PROPS_MASK GENMASK(63, 32)
+#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
+#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
+#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
+ __le64 *regs;
+ int idx;
+
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@@ -326,8 +356,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
- memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
- (cur_idx - start_idx + 1) * buf_sz);
+ regs = (void *)&partition_info.a3;
+ for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
+ union {
+ uuid_t uuid;
+ u64 regs[2];
+ } uuid_regs = {
+ .regs = {
+ le64_to_cpu(*(regs + 1)),
+ le64_to_cpu(*(regs + 2)),
+ }
+ };
+ u64 val = *(u64 *)regs;
+
+ buf->id = PART_INFO_ID(val);
+ buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
+ buf->properties = PART_INFO_PROPERTIES(val);
+ uuid_copy(&buf->uuid, &uuid_regs.uuid);
+ regs += 3;
+ }
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
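
To make the register decomposition concrete, here is how the PART_INFO_* helpers above carve one 64-bit register, worked for an arbitrary value:

/*   val = 0x0000beef00020001
 *   PART_INFO_ID(val)         = FIELD_GET(GENMASK(15, 0), val)  = 0x0001
 *   PART_INFO_EXEC_CXT(val)   = FIELD_GET(GENMASK(31, 16), val) = 0x0002
 *   PART_INFO_PROPERTIES(val) = FIELD_GET(GENMASK(63, 32), val) = 0x0000beef
 */
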
@@ -445,9 +492,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
-static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
- u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@@ -463,6 +510,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
+ uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@@ -760,6 +808,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
+enum notify_type {
+ SECURE_PARTITION,
+ NON_SECURE_VM,
+ SPM_FRAMEWORK,
+ NS_HYP_FRAMEWORK,
+};
+
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@@ -783,10 +838,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
-#define SECURE_PARTITION_BITMAP BIT(0)
-#define NON_SECURE_VM_BITMAP BIT(1)
-#define SPM_FRAMEWORK_BITMAP BIT(2)
-#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
+#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
+#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
+#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
+#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
+#define FFA_BITMAP_SECURE_ENABLE_MASK \
+ (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_NS_ENABLE_MASK \
+ (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_ALL_ENABLE_MASK \
+ (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
+
+#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+
+#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
+#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
+#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@@ -852,9 +919,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
- notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
- notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
- notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+ if (flags & SECURE_PARTITION_BITMAP_ENABLE)
+ notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+ if (flags & NON_SECURE_VM_BITMAP_ENABLE)
+ notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+ if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
+ if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
+ ret.a7);
return 0;
}
@@ -863,27 +936,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
+ struct ffa_device *dev;
+ struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
+ struct list_head *phead;
void *cb_data;
- partition = xa_load(&drv_info->partition_info, part_id);
- if (!partition) {
+ phead = xa_load(&drv_info->partition_info, part_id);
+ if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
- read_lock(&partition->rw_lock);
- callback = partition->callback;
- cb_data = partition->cb_data;
- read_unlock(&partition->rw_lock);
+ list_for_each_entry_safe(partition, tmp, phead, node) {
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+ read_unlock(&partition->rw_lock);
- if (callback)
- callback(vcpu, is_per_vcpu, cb_data);
+ if (callback)
+ callback(vcpu, is_per_vcpu, cb_data);
+ }
}
static void ffa_notification_info_get(void)
@@ -899,7 +977,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
- if (ret.a2 != FFA_RET_NO_DATA)
+ if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@@ -935,7 +1013,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
- for (idx = 0; idx < ids_count[list]; idx++) {
+ for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@@ -1015,17 +1093,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
- return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+ return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
-static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
- uuid, data);
+ &dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@@ -1051,35 +1129,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
-#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
-
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
-enum notify_type {
- NON_SECURE_VM,
- SECURE_PARTITION,
- FRAMEWORK,
-};
-
struct notifier_cb_info {
struct hlist_node hnode;
+ struct ffa_device *dev;
+ ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
- enum notify_type type;
};
-static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
- void *cb_data, bool is_registration)
+static int
+ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
+ void *cb_data, bool is_registration)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
+ struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
- partition = xa_load(&drv_info->partition_info, part_id);
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (!phead) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(partition, tmp, phead, node)
+ if (partition->dev == dev)
+ break;
+
if (!partition) {
- pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@@ -1101,12 +1183,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
- return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+ return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
- return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+ return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@@ -1119,61 +1201,87 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
-/* Should be called while the notify_lock is taken */
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+ if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+ return SECURE_PARTITION;
+ else
+ return NON_SECURE_VM;
+}
+
+/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
-notifier_hash_node_get(u16 notify_id, enum notify_type type)
+notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
- if (type == node->type)
+ if (node->fwk_cb && vmid == node->dev->vm_id)
return node;
return NULL;
}
-static int
-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
- void *cb_data, bool is_registration)
+static struct notifier_cb_info *
+notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
+{
+ struct notifier_cb_info *node;
+
+ if (uuid_is_null(uuid))
+ return notifier_hnode_get_by_vmid(notify_id, vmid);
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->fwk_cb && vmid == node->dev->vm_id &&
+ uuid_equal(&node->dev->uuid, uuid))
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
+{
+ struct notifier_cb_info *node;
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
+ return node;
+
+ return NULL;
+}
+
+static int update_notifier_cb(struct ffa_device *dev, int notify_id,
+ struct notifier_cb_info *cb, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
- bool cb_found;
+ enum notify_type type = ffa_notify_type_get(dev->vm_id);
+ bool cb_found, is_registration = !!cb;
+
+ if (is_framework)
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
+ &dev->uuid);
+ else
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
- cb_info = notifier_hash_node_get(notify_id, type);
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
return -EINVAL;
if (is_registration) {
- cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
- if (!cb_info)
- return -ENOMEM;
-
- cb_info->type = type;
- cb_info->cb = cb;
- cb_info->cb_data = cb_data;
-
- hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
+ hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
} else {
hash_del(&cb_info->hnode);
+ kfree(cb_info);
}
return 0;
}
-static enum notify_type ffa_notify_type_get(u16 vm_id)
-{
- if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
- return SECURE_PARTITION;
- else
- return NON_SECURE_VM;
-}
-
-static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
+ bool is_framework)
{
int rc;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1181,28 +1289,40 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
- mutex_lock(&drv_info->notify_lock);
+ write_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+ rc = update_notifier_cb(dev, notify_id, NULL, is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
- mutex_unlock(&drv_info->notify_lock);
+ write_unlock(&drv_info->notify_lock);
return rc;
}
- rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
- mutex_unlock(&drv_info->notify_lock);
+ write_unlock(&drv_info->notify_lock);
return rc;
}
-static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
- ffa_notifier_cb cb, void *cb_data, int notify_id)
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, false);
+}
+
+static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, true);
+}
+
+static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ void *cb, void *cb_data,
+ int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
+ struct notifier_cb_info *cb_info = NULL;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1210,28 +1330,58 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
- mutex_lock(&drv_info->notify_lock);
+ cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
+ if (!cb_info)
+ return -ENOMEM;
- if (is_per_vcpu)
- flags = PER_VCPU_NOTIFICATION_FLAG;
+ cb_info->dev = dev;
+ cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
- rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
+ write_lock(&drv_info->notify_lock);
+
+ if (!is_framework) {
+ if (is_per_vcpu)
+ flags = PER_VCPU_NOTIFICATION_FLAG;
+
+ rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+ if (rc)
+ goto out_unlock_free;
}
- rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+ rc = update_notifier_cb(dev, notify_id, cb_info, is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
- ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
- mutex_unlock(&drv_info->notify_lock);
+
+out_unlock_free:
+ write_unlock(&drv_info->notify_lock);
+ if (rc)
+ kfree(cb_info);
return rc;
}
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
+ false);
+}
+
+static int
+ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
+ void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
+}
+
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
@@ -1257,30 +1407,77 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
if (!(bitmap & 1))
continue;
- mutex_lock(&drv_info->notify_lock);
- cb_info = notifier_hash_node_get(notify_id, type);
- mutex_unlock(&drv_info->notify_lock);
+ read_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
+ read_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
cb_info->cb(notify_id, cb_info->cb_data);
}
}
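The hunks in this file also convert notify_lock from a mutex to an rwlock: notification delivery paths such as the one above only look entries up, so they can share the read side concurrently, while registration and unregistration take the write side. A userspace sketch of the same split using a pthread rwlock (the callback is invoked outside the lock, exactly as the driver does):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t notify_lock = PTHREAD_RWLOCK_INITIALIZER;
    static void (*registered_cb)(int);  /* stands in for the hashed cb_info */

    static void deliver(int notify_id)
    {
        void (*cb)(int);

        pthread_rwlock_rdlock(&notify_lock);  /* delivery shares the lock */
        cb = registered_cb;
        pthread_rwlock_unlock(&notify_lock);

        if (cb)
            cb(notify_id);  /* run outside the lock, as in the driver */
    }

    static void hello(int id)
    {
        printf("notification %d\n", id);
    }

    int main(void)
    {
        pthread_rwlock_wrlock(&notify_lock);  /* registration is the writer */
        registered_cb = hello;
        pthread_rwlock_unlock(&notify_lock);

        deliver(3);
        return 0;
    }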
-static void notif_get_and_handle(void *unused)
+static void handle_fwk_notif_callbacks(u32 bitmap)
+{
+ void *buf;
+ uuid_t uuid;
+ int notify_id = 0, target;
+ struct ffa_indirect_msg_hdr *msg;
+ struct notifier_cb_info *cb_info = NULL;
+
+ /* Only one framework notification defined and supported for now */
+ if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
+ return;
+
+ mutex_lock(&drv_info->rx_lock);
+
+ msg = drv_info->rx_buffer;
+ buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&drv_info->rx_lock);
+ return;
+ }
+
+ target = SENDER_ID(msg->send_recv_id);
+ if (msg->offset >= sizeof(*msg))
+ uuid_copy(&uuid, &msg->uuid);
+ else
+ uuid_copy(&uuid, &uuid_null);
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ ffa_rx_release();
+
+ read_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
+ read_unlock(&drv_info->notify_lock);
+
+ if (cb_info && cb_info->fwk_cb)
+ cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
+ kfree(buf);
+}
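Note the ordering in handle_fwk_notif_callbacks(): the message is snapshotted out of the shared RX buffer with kmemdup() while rx_lock is held, the lock is dropped and the buffer handed back via ffa_rx_release(), and only then is the framework callback invoked on the private copy. A small userspace model of that copy-then-release pattern:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
    static char rx_buffer[64] = "framework notification payload";

    int main(void)
    {
        char *copy;

        pthread_mutex_lock(&rx_lock);
        copy = strdup(rx_buffer);       /* kmemdup() equivalent */
        pthread_mutex_unlock(&rx_lock);
        /* ffa_rx_release() would hand the shared buffer back here */

        if (copy) {
            printf("dispatching: %s\n", copy);  /* fwk_cb(id, data, copy) */
            free(copy);
        }
        return 0;
    }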
+
+static void notif_get_and_handle(void *cb_data)
{
int rc;
- struct ffa_notify_bitmaps bitmaps;
+ u32 flags;
+ struct ffa_drv_info *info = cb_data;
+ struct ffa_notify_bitmaps bitmaps = { 0 };
- rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
- SPM_FRAMEWORK_BITMAP, &bitmaps);
+ if (info->vm_id == 0) /* Non-secure physical instance */
+ flags = FFA_BITMAP_SECURE_ENABLE_MASK;
+ else
+ flags = FFA_BITMAP_ALL_ENABLE_MASK;
+
+ rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
+ handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
+ handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
- handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
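The flag selection above means the non-secure physical instance (vm_id 0) only queries the secure-side bitmaps, while a VM enables them all. A compact model of that decision; the mask bit values below are purely illustrative, the real ones live in the FF-A driver headers:

    #include <assert.h>
    #include <stdint.h>

    #define SP_BITMAP_ENABLE    (1U << 0)   /* illustrative values */
    #define SPM_BITMAP_ENABLE   (1U << 1)
    #define VM_BITMAP_ENABLE    (1U << 2)
    #define HYP_BITMAP_ENABLE   (1U << 3)
    #define SECURE_ENABLE_MASK  (SP_BITMAP_ENABLE | SPM_BITMAP_ENABLE)
    #define ALL_ENABLE_MASK     (SECURE_ENABLE_MASK | VM_BITMAP_ENABLE | \
                                 HYP_BITMAP_ENABLE)

    static uint32_t notification_get_flags(uint16_t vm_id)
    {
        /* NS physical instance (vm_id 0) queries only the secure bitmaps */
        return vm_id == 0 ? SECURE_ENABLE_MASK : ALL_ENABLE_MASK;
    }

    int main(void)
    {
        assert(notification_get_flags(0) == SECURE_ENABLE_MASK);
        assert(notification_get_flags(1) == ALL_ENABLE_MASK);
        return 0;
    }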
static void
@@ -1329,6 +1526,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
+ .fwk_notify_request = ffa_fwk_notify_request,
+ .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@@ -1384,12 +1583,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
+static int ffa_xa_add_partition_info(struct ffa_device *dev)
+{
+ struct ffa_dev_part_info *info;
+ struct list_head *head, *phead;
+ int ret = -ENOMEM;
+
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (phead) {
+ head = phead;
+ list_for_each_entry(info, head, node) {
+ if (info->dev == dev) {
+ pr_err("%s: duplicate dev %p part ID 0x%x\n",
+ __func__, dev, dev->vm_id);
+ return -EEXIST;
+ }
+ }
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ret;
+
+ rwlock_init(&info->rw_lock);
+ info->dev = dev;
+
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead)
+ goto free_out;
+
+ INIT_LIST_HEAD(phead);
+
+ ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
+ __func__, dev->vm_id, ret);
+ goto free_out;
+ }
+ }
+ list_add(&info->node, phead);
+ return 0;
+
+free_out:
+ kfree(phead);
+ kfree(info);
+ return ret;
+}
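With this change the partition_info xarray no longer maps a partition ID to a single ffa_dev_part_info, but to a list head, so several devices (distinct UUIDs) can share one partition ID while duplicates are rejected with -EEXIST. A userspace model, with a fixed-size bucket array standing in for the xarray:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev_info {
        int dev_handle;             /* stands in for struct ffa_device * */
        struct dev_info *next;
    };

    #define MAX_VM_ID 8
    static struct dev_info *partition_info[MAX_VM_ID]; /* models the xarray */

    static int add_partition_info(int vm_id, int dev_handle)
    {
        struct dev_info *info;

        for (info = partition_info[vm_id]; info; info = info->next)
            if (info->dev_handle == dev_handle)
                return -1;          /* -EEXIST: duplicate device */

        info = calloc(1, sizeof(*info));
        if (!info)
            return -2;              /* -ENOMEM */

        info->dev_handle = dev_handle;
        info->next = partition_info[vm_id];     /* list_add() equivalent */
        partition_info[vm_id] = info;
        return 0;
    }

    int main(void)
    {
        int a = add_partition_info(3, 100); /* first UUID for partition 3 */
        int b = add_partition_info(3, 101); /* second UUID, same partition */
        int c = add_partition_info(3, 100); /* duplicate: rejected */

        printf("%d %d %d\n", a, b, c);      /* prints: 0 0 -1 */
        return 0;
    }

ffa_partitions_cleanup() below walks the same structure, using list_for_each_entry_safe() so each node can be freed while the list is still being traversed.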
+
+static int ffa_setup_host_partition(int vm_id)
+{
+ struct ffa_partition_info buf = { 0 };
+ struct ffa_device *ffa_dev;
+ int ret;
+
+ buf.id = vm_id;
+ ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register host partition ID 0x%x\n",
+ __func__, vm_id);
+ return -EINVAL;
+ }
+
+ ret = ffa_xa_add_partition_info(ffa_dev);
+ if (ret)
+ return ret;
+
+ if (ffa_notifications_disabled())
+ return 0;
+
+ ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
+ drv_info, true);
+ if (ret)
+ pr_info("Failed to register driver sched callback %d\n", ret);
+
+ return ret;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+ struct list_head *phead;
+ unsigned long idx;
+
+ /* Clean up/free all registered devices */
+ ffa_devices_unregister();
+
+ xa_for_each(&drv_info->partition_info, idx, phead) {
+ struct ffa_dev_part_info *info, *tmp;
+
+ xa_erase(&drv_info->partition_info, idx);
+ list_for_each_entry_safe(info, tmp, phead, node) {
+ list_del(&info->node);
+ kfree(info);
+ }
+ kfree(phead);
+ }
+
+ xa_destroy(&drv_info->partition_info);
+}
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
- uuid_t uuid;
struct ffa_device *ffa_dev;
- struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@@ -1406,80 +1703,47 @@ static int ffa_setup_partitions(void)
xa_init(&drv_info->partition_info);
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- import_uuid(&uuid, (u8 *)tpbuf->uuid);
-
/* Note that if the UUID will be uuid_null, that will require
* ffa_bus_notifier() to find the UUID of this partition id
* with help of ffa_device_match_uuid(). FF-A v1.1 and above
* provides UUID here for each partition as part of the
* discovery API and the same is passed.
*/
- ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+ ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
- ffa_dev->properties = tpbuf->properties;
-
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
+ if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, tpbuf->id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
- __func__, tpbuf->id, ret);
- ffa_device_unregister(ffa_dev);
- kfree(info);
- }
}
kfree(pbuf);
- /* Allocate for the host */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- /* Already registered devices are freed on bus_exit */
- ffa_partitions_cleanup();
- return -ENOMEM;
- }
+ /*
+	 * Check if the host is already added as part of the partition info.
+	 * The host cannot have multiple UUIDs, so checking for an existing
+	 * entry is sufficient.
+ */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
- __func__, drv_info->vm_id, ret);
- kfree(info);
- /* Already registered devices are freed on bus_exit */
+ /* Allocate for the host */
+ ret = ffa_setup_host_partition(drv_info->vm_id);
+ if (ret)
ffa_partitions_cleanup();
- }
return ret;
}
-static void ffa_partitions_cleanup(void)
-{
- struct ffa_dev_part_info *info;
- unsigned long idx;
-
- xa_for_each(&drv_info->partition_info, idx, info) {
- xa_erase(&drv_info->partition_info, idx);
- kfree(info);
- }
-
- xa_destroy(&drv_info->partition_info);
-}
-
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@@ -1710,7 +1974,7 @@ static void ffa_notifications_setup(void)
goto cleanup;
hash_init(drv_info->notifier_hash);
- mutex_init(&drv_info->notify_lock);
+ rwlock_init(&drv_info->notify_lock);
drv_info->notif_enabled = true;
return;
@@ -1782,19 +2046,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
- if (ret) {
- pr_err("failed to setup partitions\n");
- goto cleanup_notifs;
- }
-
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (ret)
- pr_info("Failed to register driver sched callback %d\n", ret);
-
- return 0;
+ if (!ret)
+ return ret;
-cleanup_notifs:
+ pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
@@ -1804,7 +2059,7 @@ free_drv_info:
kfree(drv_info);
return ret;
}
-module_init(ffa_init);
+rootfs_initcall(ffa_init);
static void __exit ffa_exit(void)
{
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index dabd874641d0..e3fb36825978 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS
such useful debug counters. This can be helpful for debugging and
SCMI monitoring.
+config ARM_SCMI_QUIRKS
+ bool "Enable SCMI Quirks framework"
+ depends on JUMP_LABEL || COMPILE_TEST
+ default y
+ help
+	  Enables support for the SCMI Quirks framework to work around SCMI
+	  platform firmware bugs on systems already deployed in the wild.
+
+	  The framework allows the definition of platform-specific code quirks
+	  that are associated with, and enabled only on, the desired platforms,
+	  depending on the advertised SCMI firmware versions and/or machine
+	  compatibles.
+
source "drivers/firmware/arm_scmi/transports/Kconfig"
source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ac81adff567..780cd62b2f78 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -3,6 +3,7 @@ scmi-bus-y = bus.o
scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 96b2e5f9a8ef..c7698cfaa4e8 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -17,6 +17,8 @@
#include "common.h"
+#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
+
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT be already existent for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+	 * Find the matching protocol rdev list and then search for any
+	 * existing device with the same name...fails if any duplicate is found.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -206,60 +201,59 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
scmi_protocol_device_unrequest(entry);
}
-static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
+static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev,
+ const struct scmi_device_id *id_table)
{
- const struct scmi_device_id *id = scmi_drv->id_table;
-
- if (!id)
- return NULL;
-
- for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id) {
- if (!id->name)
- return id;
- else if (!strcmp(id->name, scmi_dev->name))
- return id;
- }
+ if (!id_table || !id_table->name)
+ return 0;
+
+ /* Always skip transport devices from matching */
+ for (; id_table->protocol_id && id_table->name; id_table++)
+ if (id_table->protocol_id == scmi_dev->protocol_id &&
+ strncmp(scmi_dev->name, "__scmi_transport_device", 23) &&
+ !strcmp(id_table->name, scmi_dev->name))
+ return 1;
+ return 0;
+}
- return NULL;
+static int scmi_dev_match_id(struct scmi_device *scmi_dev,
+ const struct scmi_driver *scmi_drv)
+{
+ return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table);
}
static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (id)
- return 1;
- return 0;
+ return scmi_dev_match_id(scmi_dev, scmi_drv);
}
-static int scmi_match_by_id_table(struct device *dev, void *data)
+static int scmi_match_by_id_table(struct device *dev, const void *data)
{
- struct scmi_device *sdev = to_scmi_dev(dev);
- struct scmi_device_id *id_table = data;
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id_table = data;
- return sdev->protocol_id == id_table->protocol_id &&
- (id_table->name && !strcmp(sdev->name, id_table->name));
+ return scmi_dev_match_by_id_table(scmi_dev, id_table);
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
- struct scmi_device_id id_table;
+ struct scmi_device_id id_table[2] = { 0 };
struct device *dev;
- id_table.protocol_id = prot_id;
- id_table.name = name;
+ id_table[0].protocol_id = prot_id;
+ id_table[0].name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
return NULL;
+ /* Drop the refcnt bumped implicitly by device_find_child */
+ put_device(dev);
+
return to_scmi_dev(dev);
}
@@ -283,11 +277,85 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
+static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+static DEVICE_ATTR_RO(modalias);
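The uevent and modalias sysfs attribute share SCMI_UEVENT_MODALIAS_FMT, i.e. device name, protocol ID, and protocol device name separated by colons. A quick illustration of the resulting string shape; the device name "scmi_dev.14", protocol 0x15, and name "cpufreq" here are hypothetical values, not taken from this patch:

    #include <stdio.h>

    int main(void)
    {
        /* "%s:%02x:%s" = dev_name : protocol_id : protocol device name */
        printf("MODALIAS=%s:%02x:%s\n", "scmi_dev.14", 0x15, "cpufreq");
        return 0;
    }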
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "%s\n", scmi_dev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *scmi_device_attributes_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scmi_device_attributes);
+
+static int scmi_pm_suspend(struct device *dev)
+{
+ const struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->suspend)
+ return drv->pm->suspend(dev);
+
+ return 0;
+}
+
+static int scmi_pm_resume(struct device *dev)
+{
+ const struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->resume)
+ return drv->pm->resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops scmi_dev_pm_ops = {
+ .suspend = pm_sleep_ptr(scmi_pm_suspend),
+ .resume = pm_sleep_ptr(scmi_pm_resume),
+};
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
+ .uevent = scmi_device_uevent,
+ .dev_groups = scmi_device_attributes_groups,
+ .pm = &scmi_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
@@ -325,20 +393,22 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister);
static void scmi_device_release(struct device *dev)
{
- kfree(to_scmi_dev(dev));
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
}
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
- pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(scmi_dev->dev.parent->of_node),
+ pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ scmi_dev->dev.parent->of_node,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
atomic_set(&scmi_syspower_registered, 0);
- kfree_const(scmi_dev->name);
ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
@@ -404,18 +474,30 @@ __scmi_device_create(struct device_node *np, struct device *parent,
if (retval)
goto put_dev;
- pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- dev_name(&scmi_dev->dev), protocol, name);
+ pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ parent->of_node, dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
- kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_free(&scmi_bus_id, id);
return NULL;
}
+static struct scmi_device *
+_scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent, protocol, name);
+ if (!sdev)
+ pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n",
+ parent->of_node, protocol, name);
+
+ return sdev;
+}
+
/**
* scmi_device_create - A method to create one or more SCMI devices
*
@@ -448,7 +530,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
struct scmi_device *scmi_dev = NULL;
if (name)
- return __scmi_device_create(np, parent, protocol, name);
+ return _scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
@@ -462,18 +544,13 @@ struct scmi_device *scmi_device_create(struct device_node *np,
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
- sdev = __scmi_device_create(np, parent,
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- /* Report errors and carry on... */
+ sdev = _scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
if (sdev)
scmi_dev = sdev;
- else
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- rdev->id_table->protocol_id,
- rdev->id_table->name);
}
+
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 2ed2279388f0..afa7981efe82 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -11,6 +11,7 @@
#include "protocols.h"
#include "notify.h"
+#include "quirks.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
@@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message,
msg->rate_index = cpu_to_le32(desc_index);
}
+#define QUIRK_OUT_OF_SPEC_TRIPLET \
+ ({ \
+ /* \
+ * A known quirk: a triplet is returned but num_returned != 3 \
+ * Check for a safe payload size and fix. \
+ */ \
+ if (st->num_returned != 3 && st->num_remaining == 0 && \
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
+ st->num_returned = 3; \
+ st->num_remaining = 0; \
+ } else { \
+ dev_err(p->dev, \
+ "Cannot fix out-of-spec reply !\n"); \
+ return -EPROTO; \
+ } \
+ })
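QUIRK_OUT_OF_SPEC_TRIPLET is a statement-expression snippet handed to SCMI_QUIRK(), which (per the quirks framework introduced later in this series) only runs the snippet when the matching static key was enabled at probe time. A userspace sketch of that gating, with a plain boolean standing in for the static key:

    #include <stdbool.h>
    #include <stdio.h>

    static bool quirk_triplet_enabled;  /* models the static key */

    #define SCMI_QUIRK_MODEL(flag, snippet) \
        do { if (flag) snippet; } while (0)

    int main(void)
    {
        quirk_triplet_enabled = true;   /* set when fw version matches */
        SCMI_QUIRK_MODEL(quirk_triplet_enabled,
                         printf("fixing out-of-spec triplet\n"));
        return 0;
    }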
+
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
@@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
- /*
- * A known quirk: a triplet is returned but num_returned != 3
- * Check for a safe payload size and fix.
- */
- if (st->num_returned != 3 && st->num_remaining == 0 &&
- st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
- st->num_returned = 3;
- st->num_remaining = 0;
- } else {
- dev_err(p->dev,
- "Cannot fix out-of-spec reply !\n");
- return -EPROTO;
- }
+ SCMI_QUIRK(clock_rates_triplet_out_of_spec,
+ QUIRK_OUT_OF_SPEC_TRIPLET);
}
return 0;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c4b8e7ff88aa..07b9e629276d 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -31,6 +31,8 @@
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+#define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104
+
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
@@ -163,7 +165,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
+ * @is_p2a: A flag to identify a channel as P2A (RX)
* @rx_timeout_ms: The configured RX timeout in milliseconds.
+ * @max_msg_size: Maximum size of message payload.
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
* interrupt mechanism for synchronous commands.
@@ -174,7 +178,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
struct scmi_chan_info {
int id;
struct device *dev;
+ bool is_p2a;
unsigned int rx_timeout_ms;
+ unsigned int max_msg_size;
struct scmi_handle *handle;
bool no_completion_irq;
void *transport_info;
@@ -222,7 +228,13 @@ struct scmi_transport_ops {
* @max_msg: Maximum number of messages for a channel type (tx or rx) that can
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
- * @max_msg_size: Maximum size of data per message that can be handled.
+ * @max_msg_size: Maximum size of data payload per message that can be handled.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ *                    to have an execution latency less than or equal to the threshold
+ * should be considered for atomic mode operation: such
+ * decision is finally left up to the SCMI drivers.
* @force_polling: Flag to force this whole transport to use SCMI core polling
* mechanism instead of completion interrupts even if available.
* @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
@@ -241,6 +253,7 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
+ unsigned int atomic_threshold;
const bool force_polling;
const bool sync_cmds_completed_on_ret;
const bool atomic_enabled;
@@ -292,6 +305,7 @@ enum debug_counters {
ERR_MSG_INVALID,
ERR_MSG_NOMEM,
ERR_PROTOCOL,
+ XFERS_INFLIGHT,
SCMI_DEBUG_COUNTERS_LAST
};
@@ -301,6 +315,12 @@ static inline void scmi_inc_count(atomic_t *arr, int stat)
atomic_inc(&arr[stat]);
}
+static inline void scmi_dec_count(atomic_t *arr, int stat)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
+ atomic_dec(&arr[stat]);
+}
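Because IS_ENABLED() folds to a compile-time constant, these counter helpers compile to nothing when CONFIG_ARM_SCMI_DEBUG_COUNTERS is off. A trivial model of the same zero-cost pattern:

    #include <stdio.h>

    #define CONFIG_DEBUG_COUNTERS 1 /* flip to 0: the ifs fold away */

    static int counters[1];

    static inline void inc_count(int stat)
    {
        if (CONFIG_DEBUG_COUNTERS)
            counters[stat]++;       /* atomic_inc() in the driver */
    }

    static inline void dec_count(int stat)
    {
        if (CONFIG_DEBUG_COUNTERS)
            counters[stat]--;       /* atomic_dec() in the driver */
    }

    int main(void)
    {
        inc_count(0);               /* xfer goes in-flight */
        inc_count(0);
        dec_count(0);               /* one xfer completes */
        printf("inflight: %d\n", counters[0]);
        return 0;
    }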
+
enum scmi_bad_msg {
MSG_UNEXPECTED = -1,
MSG_INVALID = -2,
@@ -309,6 +329,26 @@ enum scmi_bad_msg {
MSG_MBOX_SPURIOUS = -5,
};
+/* Used for compactness and signature validation of the function pointers being
+ * passed.
+ */
+typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from,
+ size_t count);
+typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from,
+ size_t count);
+
+/**
+ * struct scmi_shmem_io_ops - I/O operations to read from/write to
+ * Shared Memory
+ *
+ * @toio: Copy data to the shared memory area
+ * @fromio: Copy data from the shared memory area
+ */
+struct scmi_shmem_io_ops {
+ shmem_copy_fromio_t fromio;
+ shmem_copy_toio_t toio;
+};
+
/* shmem related declarations */
struct scmi_shared_mem;
@@ -329,13 +369,16 @@ struct scmi_shared_mem;
struct scmi_shared_mem_operations {
void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo);
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t toio);
u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);
void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer);
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer);
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
@@ -343,7 +386,8 @@ struct scmi_shared_mem_operations {
bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
struct device *dev,
- bool tx, struct resource *res);
+ bool tx, struct resource *res,
+ struct scmi_shmem_io_ops **ops);
};
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);
@@ -405,7 +449,7 @@ struct scmi_transport_core_operations {
*/
struct scmi_transport {
struct device *supplier;
- struct scmi_desc *desc;
+ struct scmi_desc desc;
struct scmi_transport_core_operations **core_ops;
};
@@ -431,13 +475,14 @@ static int __tag##_probe(struct platform_device *pdev) \
device_set_of_node_from_dev(&spdev->dev, dev); \
\
strans.supplier = dev; \
- strans.desc = &(__desc); \
+ memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \
strans.core_ops = &(__core_ops); \
\
ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
if (ret) \
goto err; \
\
+ spdev->dev.parent = dev; \
ret = platform_device_add(spdev); \
if (ret) \
goto err; \
@@ -460,4 +505,5 @@ static struct platform_driver __drv = { \
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
+int scmi_inflight_count(const struct scmi_handle *handle);
#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index a477b5ade38d..bd56a877fdfc 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2024 ARM Ltd.
+ * Copyright (C) 2018-2025 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
@@ -37,12 +38,15 @@
#include "common.h"
#include "notify.h"
+#include "quirks.h"
#include "raw_mode.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
+#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
+
static DEFINE_IDA(scmi_id);
static DEFINE_XARRAY(scmi_protocols);
@@ -149,12 +153,6 @@ struct scmi_debug_info {
* base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
- * @atomic_threshold: Optional system wide DT-configured threshold, expressed
- * in microseconds, for atomic operations.
- * Only SCMI synchronous commands reported by the platform
- * to have an execution latency lesser-equal to the threshold
- * should be considered for atomic mode operation: such
- * decision is finally left up to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -180,7 +178,6 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
- unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -193,6 +190,7 @@ struct scmi_info {
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo)
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
@@ -283,6 +281,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
}
static const struct scmi_protocol *
+scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto;
+
+ proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ if (!proto) {
+ int ret;
+
+ pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
+ protocol_id, version->vendor_id);
+
+ /* Note that vendor_id is mandatory for vendor protocols */
+ ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
+ protocol_id, version->vendor_id);
+ if (ret) {
+ pr_warn("Problem loading module for protocol 0x%x\n",
+ protocol_id);
+ return NULL;
+ }
+
+ /* Lookup again, once modules loaded */
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ }
+
+ if (proto)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
+ return proto;
+}
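The request_module() call above relies on vendor protocol modules advertising a MODULE_ALIAS built from SCMI_VENDOR_MODULE_ALIAS_FMT, i.e. "scmi-protocol-0x%02x-%s" with the protocol ID and vendor string. A sketch of the alias construction; protocol 0x80 and vendor "VendorX" are hypothetical placeholders:

    #include <stdio.h>

    int main(void)
    {
        char alias[64];

        /* Mirrors SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s" */
        snprintf(alias, sizeof(alias), "scmi-protocol-0x%02x-%s",
                 0x80, "VendorX");
        printf("request_module(\"%s\")\n", alias);
        return 0;
    }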
+
+static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
const struct scmi_protocol *proto = NULL;
@@ -290,10 +326,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
proto = xa_load(&scmi_protocols, protocol_id);
else
- proto = scmi_vendor_protocol_lookup(protocol_id,
- version->vendor_id,
- version->sub_vendor_id,
- version->impl_ver);
+ proto = scmi_vendor_protocol_get(protocol_id, version);
+
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -301,11 +335,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
- if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
- pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
- protocol_id, proto->vendor_id ?: "",
- proto->sub_vendor_id ?: "", proto->impl_ver);
-
return proto;
}
@@ -373,7 +402,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return ret;
}
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+ pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
+ proto->id, proto->vendor_id, proto->sub_vendor_id,
+ proto->impl_ver);
return 0;
}
@@ -410,14 +441,8 @@ static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
- struct scmi_device *sdev;
-
mutex_lock(&info->devreq_mtx);
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (name && !sdev)
- dev_err(info->dev,
- "failed to create device for protocol 0x%X (%s)\n",
- prot_id, name);
+ scmi_device_create(np, info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
@@ -579,9 +604,14 @@ static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
struct scmi_xfers_info *minfo)
{
+ /* In this context minfo will be tx_minfo due to the xfer pending */
+ struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
+
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+
xfer->pending = true;
}
@@ -783,9 +813,13 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
spin_lock_irqsave(&minfo->xfer_lock, flags);
if (refcount_dec_and_test(&xfer->users)) {
if (xfer->pending) {
+ struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
+
scmi_xfer_token_clear(minfo, xfer);
hash_del(&xfer->node);
xfer->pending = false;
+
+ scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
}
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
@@ -1048,6 +1082,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info,
static inline void scmi_clear_channel(struct scmi_info *info,
struct scmi_chan_info *cinfo)
{
+ if (!cinfo->is_p2a) {
+ dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
+ return;
+ }
+
if (info->desc->ops->clear_channel)
info->desc->ops->clear_channel(cinfo);
}
@@ -1156,7 +1195,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* RX path since it will be already queued at the end of the TX
* poll loop.
*/
- if (!xfer->hdr.poll_completion)
+ if (!xfer->hdr.poll_completion ||
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1214,7 +1254,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer, ktime_t stop)
+ struct scmi_xfer *xfer, ktime_t stop,
+ bool *ooo)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -1223,7 +1264,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
- try_wait_for_completion(&xfer->done) ||
+ (*ooo = try_wait_for_completion(&xfer->done)) ||
ktime_after(ktime_get(), stop);
}
@@ -1240,15 +1281,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
+ bool ooo = false;
+
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
- xfer, stop));
- if (ktime_after(ktime_get(), stop)) {
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
+ stop, &ooo));
+ if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
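Previously a timeout was declared purely on ktime expiry, which could misfire when try_wait_for_completion() had just consumed an out-of-order delayed response at the deadline. The new *ooo flag records that consumption, so the exit test only reports a timeout when neither the completion nor a final poll succeeded. A model of the fixed predicate:

    #include <assert.h>
    #include <stdbool.h>

    static bool timed_out(bool ooo_completed, bool poll_done)
    {
        /* Mirrors: if (!ooo && !poll_done(...)) -> timeout */
        return !ooo_completed && !poll_done;
    }

    int main(void)
    {
        assert(!timed_out(true, false));  /* delayed resp raced in: OK */
        assert(!timed_out(false, true));  /* poll succeeded at expiry: OK */
        assert(timed_out(false, false));  /* genuine timeout */
        return 0;
    }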
@@ -1400,7 +1443,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.poll_completion);
+ xfer->hdr.poll_completion,
+ scmi_inflight_count(&info->handle));
/* Clear any stale status */
xfer->hdr.status = SCMI_SUCCESS;
@@ -1436,7 +1480,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret,
+ scmi_inflight_count(&info->handle));
return ret;
}
@@ -1701,6 +1746,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
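The TX payload of scmi_protocol_msg_check() is a single little-endian u32 carrying the message ID; the returned attributes are then tested with MSG_SUPPORTS_FASTCHANNEL (BIT(0), per the protocols.h hunk below) to gate fastchannel setup. A userspace equivalent of the put_unaligned_le32() encoding:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Userspace stand-in for put_unaligned_le32() on the TX buffer */
    static void put_le32(uint32_t v, void *buf)
    {
        uint8_t b[4] = {
            (uint8_t)v, (uint8_t)(v >> 8),
            (uint8_t)(v >> 16), (uint8_t)(v >> 24),
        };

        memcpy(buf, b, sizeof(b));
    }

    int main(void)
    {
        uint8_t tx[4];

        put_le32(0x8, tx);  /* e.g. querying PERF_LEVEL_GET attributes */
        assert(tx[0] == 0x8 && tx[1] == 0 && tx[2] == 0 && tx[3] == 0);
        return 0;
    }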
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1832,6 +1910,13 @@ struct scmi_msg_resp_desc_fc {
__le32 db_preserve_hmask;
};
+#define QUIRK_PERF_FC_FORCE \
+ ({ \
+ if (pi->proto->id == SCMI_PROTOCOL_PERF && \
+ message_id == 0x8 /* PERF_LEVEL_GET */) \
+ attributes |= BIT(0); \
+ })
+
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
@@ -1841,6 +1926,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
int ret;
u32 flags;
u64 phys_addr;
+ u32 attributes;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
@@ -1849,6 +1935,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ /* Check if the MSG_ID supports fastchannel */
+ ret = scmi_protocol_msg_check(ph, message_id, &attributes);
+ SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
+ if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
+ dev_dbg(ph->dev,
+ "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
+ pi->proto->id, message_id, domain, ret);
+ return;
+ }
+
if (!p_addr) {
ret = -EINVAL;
goto err_out;
@@ -1963,50 +2059,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
-#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
-}
-
-/**
- * scmi_protocol_msg_check - Check protocol message attributes
- *
- * @ph: A reference to the protocol handle.
- * @message_id: The ID of the message to check.
- * @attributes: A parameter to optionally return the retrieved message
- * attributes, in case of Success.
- *
- * An helper to check protocol message attributes for a specific protocol
- * and message pair.
- *
- * Return: 0 on SUCCESS
- */
-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
- u32 message_id, u32 *attributes)
-{
- int ret;
- struct scmi_xfer *t;
-
- ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
- sizeof(__le32), 0, &t);
- if (ret)
- return ret;
-
- put_unaligned_le32(message_id, t->tx.buf);
- ret = do_xfer(ph, t);
- if (!ret && attributes)
- *attributes = get_unaligned_le32(t->rx.buf);
- xfer_put(ph, t);
-
- return ret;
}
static const struct scmi_proto_helpers_ops helpers_ops = {
@@ -2440,7 +2493,7 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
ret = info->desc->atomic_enabled &&
is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
- *atomic_threshold = info->atomic_threshold;
+ *atomic_threshold = info->desc->atomic_threshold;
return ret;
}
@@ -2638,7 +2691,9 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
if (!cinfo)
return -ENOMEM;
+ cinfo->is_p2a = !tx;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ cinfo->max_msg_size = info->desc->max_msg_size;
/* Create a unique name for this transport device */
snprintf(name, 32, "__scmi_transport_device_%s_%02X",
@@ -2799,9 +2854,8 @@ static int scmi_bus_notifier(struct notifier_block *nb,
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
- /* Skip transport devices and devices of different SCMI instances */
- if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
- sdev->dev.parent != info->dev)
+ /* Skip devices of different SCMI instances */
+ if (sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
@@ -2870,6 +2924,7 @@ static const char * const dbg_counter_strs[] = {
"err_msg_invalid",
"err_msg_nomem",
"err_protocol",
+ "xfers_inflight",
};
static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
@@ -2952,7 +3007,7 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
(char **)&dbg->name);
debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
- &info->atomic_threshold);
+ (u32 *)&info->desc->atomic_threshold);
debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
@@ -3028,7 +3083,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
int ret;
trans = dev_get_platdata(dev);
- if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ if (!trans || !trans->supplier || !trans->core_ops)
return NULL;
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
@@ -3042,15 +3097,46 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
- ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms",
- &trans->desc->max_rx_timeout_ms);
+ ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
+ &trans->desc.max_rx_timeout_ms);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
+
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
+ &trans->desc.max_msg_size);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
+
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg",
+ &trans->desc.max_msg);
if (ret && ret != -EINVAL)
- dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n");
+ dev_err(dev, "Malformed arm,max-msg DT property.\n");
+
+ dev_info(dev,
+ "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
+ trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
+ trans->desc.max_msg);
+
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
+ &trans->desc.atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %u us\n",
+ trans->desc.atomic_threshold);
+
+ return &trans->desc;
+}
+
+static void scmi_enable_matching_quirks(struct scmi_info *info)
+{
+ struct scmi_revision_info *rev = &info->version;
- dev_info(dev, "SCMI max-rx-timeout: %dms\n",
- trans->desc->max_rx_timeout_ms);
+ dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
+ rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
- return trans->desc;
+ /* Enable applicable quirks */
+ scmi_quirks_enable(info->dev, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
}
static int scmi_probe(struct platform_device *pdev)
@@ -3099,13 +3185,6 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
-
- /* System wide atomic threshold for atomic ops .. if any */
- if (!of_property_read_u32(np, "atomic-threshold-us",
- &info->atomic_threshold))
- dev_info(dev,
- "SCMI System wide atomic threshold set to %d us\n",
- info->atomic_threshold);
handle->is_transport_atomic = scmi_is_transport_atomic;
/* Setup all channels described in the DT at first */
@@ -3181,6 +3260,8 @@ static int scmi_probe(struct platform_device *pdev)
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
+ scmi_enable_matching_quirks(info);
+
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -3321,7 +3402,7 @@ static struct platform_driver scmi_driver = {
.dev_groups = versions_groups,
},
.probe = scmi_probe,
- .remove_new = scmi_remove,
+ .remove = scmi_remove,
};
static struct dentry *scmi_debugfs_init(void)
@@ -3337,8 +3418,21 @@ static struct dentry *scmi_debugfs_init(void)
return d;
}
+int scmi_inflight_count(const struct scmi_handle *handle)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
+ } else {
+ return 0;
+ }
+}
+
static int __init scmi_driver_init(void)
{
+ scmi_quirks_initialize();
+
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index e160ecb22948..dee9f238f6fd 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -318,6 +318,9 @@ struct scmi_registered_events_desc {
* customized event report
* @num_sources: The number of possible sources for this event as stated at
* events' registration time
+ * @not_supported_by_platform: A flag to indicate that not even one source was
+ * found to be supported by the platform for this
+ * event
* @sources: A reference to a dynamically allocated array used to refcount the
* events' enable requests for all the existing sources
* @sources_mtx: A mutex to serialize the access to @sources
@@ -334,6 +337,7 @@ struct scmi_registered_event {
const struct scmi_event *evt;
void *report;
u32 num_sources;
+ bool not_supported_by_platform;
refcount_t *sources;
/* locking to serialize the access to sources */
struct mutex sources_mtx;
@@ -811,10 +815,19 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
if (!r_evt->report)
return -ENOMEM;
- for (id = 0; id < r_evt->num_sources; id++)
- if (ee->ops->is_notify_supported &&
- !ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
- refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+ if (ee->ops->is_notify_supported) {
+ int supported = 0;
+
+ for (id = 0; id < r_evt->num_sources; id++) {
+ if (!ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
+ refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+ else
+ supported++;
+ }
+
+ /* Not even one source has been found to be supported */
+ r_evt->not_supported_by_platform = !supported;
+ }
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
@@ -936,6 +949,11 @@ static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
* of protocol instance.
*/
hash_del(&hndl->hash);
+
+ /* Bailout if event is not supported at all */
+ if (r_evt->not_supported_by_platform)
+ return -EOPNOTSUPP;
+
/*
* Acquire protocols only for NON pending handlers, so as NOT to trigger
* protocol initialization when a notifier is registered against a still
@@ -1060,6 +1078,9 @@ __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt && r_evt->not_supported_by_platform)
+ return ERR_PTR(-EOPNOTSUPP);
+
mutex_lock(&ni->pending_mtx);
/* Search registered events at first ... if possible at all */
if (r_evt) {
@@ -1087,7 +1108,7 @@ __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
hndl->key);
/* this hndl can be only a pending one */
scmi_put_handler_unlocked(ni, hndl);
- hndl = NULL;
+ hndl = ERR_PTR(-EINVAL);
}
}
mutex_unlock(&ni->pending_mtx);
@@ -1370,8 +1391,8 @@ static int scmi_notifier_register(const struct scmi_handle *handle,
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_or_create_handler(ni, evt_key);
- if (!hndl)
- return -EINVAL;
+ if (IS_ERR(hndl))
+ return PTR_ERR(hndl);
blocking_notifier_chain_register(&hndl->chain, nb);
@@ -1416,8 +1437,8 @@ static int scmi_notifier_unregister(const struct scmi_handle *handle,
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_handler(ni, evt_key);
- if (!hndl)
- return -EINVAL;
+ if (IS_ERR(hndl))
+ return PTR_ERR(hndl);
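Switching these paths from NULL to ERR_PTR() lets callers forward the precise errno (-EOPNOTSUPP for unsupported events, -EINVAL otherwise) instead of collapsing every failure to -EINVAL. A userspace model of the kernel's ERR_PTR/PTR_ERR convention; 95 is EOPNOTSUPP on Linux:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *err_ptr(long err)        { return (void *)err; }
    static inline long  ptr_err(const void *p)   { return (long)p; }
    static inline int   is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
        void *hndl = err_ptr(-95);  /* -EOPNOTSUPP */

        assert(is_err(hndl));
        assert(ptr_err(hndl) == -95);
        return 0;
    }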
/*
* Note that this chain unregistration call is safe on its own
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 2d77b5f40ca7..683fd9b85c5c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
return 0;
}
-static inline void
+static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
@@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ return 0;
}
-static inline void
+static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
GFP_KERNEL);
- if (ret)
+ if (ret) {
dev_warn(dev,
"Failed to add opps_by_idx at %d for %s - ret:%d\n",
opp->level_index, dom->info.name, ret);
+ /* Cleanup by_lvl too */
+ xa_erase(&dom->opps_by_lvl, opp->perf);
+
+ return ret;
+ }
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
+
+ return 0;
}
static int
@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
+ int ret;
struct scmi_opp *opp;
struct scmi_perf_ipriv *p = priv;
- opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
else
- process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
+
+ /* Skip BAD duplicates received from firmware */
+ if (ret)
+ return ret == -EBUSY ? 0 : ret;
+
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
@@ -872,7 +892,7 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
freq = dom->opp[idx].indicative_freq * dom->mult_factor;
/* All OPPs above the sustained frequency are treated as turbo */
- data.turbo = freq > dom->sustained_freq_khz * 1000;
+ data.turbo = freq > dom->sustained_freq_khz * 1000UL;
data.level = dom->opp[idx].perf;
data.freq = freq;
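The 1000UL suffix matters because, assuming sustained_freq_khz is a 32-bit quantity as the fix suggests, multiplying it by a plain int 1000 wraps in 32-bit arithmetic before any widening to the comparison type. A small demonstration (the wrap shows on LP64 targets, where unsigned int is 32 bits):

    #include <stdio.h>

    int main(void)
    {
        unsigned int khz = 5000000;          /* a 5 GHz value, fits in 32 bits */
        unsigned long wraps = khz * 1000;    /* 32-bit multiply: wraps first */
        unsigned long exact = khz * 1000UL;  /* widened before multiplying */

        /* prints 705032704 vs 5000000000 on LP64 */
        printf("%lu vs %lu\n", wraps, exact);
        return 0;
    }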
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index aaee57cdcd55..d62c4469d1fd 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -31,6 +31,8 @@
#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0))
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
new file mode 100644
index 000000000000..03848283c2a0
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of operation
+ *
+ * A framework to define SCMI quirks and their activation conditions based on
+ * existing static_keys kernel facilities.
+ *
+ * Quirks are named and their activation conditions defined using the macro
+ * DEFINE_SCMI_QUIRK() in this file.
+ *
+ * After a quirk is defined, a corresponding entry must also be added to the
+ * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY().
+ *
+ * Additionally a corresponding quirk declaration must be added also to the
+ * quirk.h file using DECLARE_SCMI_QUIRK().
+ *
+ * The needed quirk code-snippet itself is defined locally to the SCMI code
+ * that it is meant to fix, and is associated with the previously defined
+ * quirk and its activation conditions using the macro SCMI_QUIRK().
+ *
+ * At runtime, during the SCMI stack probe sequence, once the SCMI Server has
+ * advertised the running platform Vendor, SubVendor and Implementation Version
+ * data, all the defined quirks matching the activation conditions will be
+ * enabled.
+ *
+ * Example
+ *
+ * quirk.c
+ * -------
+ * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000",
+ * "someone,plat_A", "another,plat_b", "vend,sku");
+ *
+ * static struct scmi_quirk *scmi_quirks_table[] = {
+ * ...
+ * __DECLARE_SCMI_QUIRK_ENTRY(fix_me),
+ * NULL
+ * };
+ *
+ * quirk.h
+ * -------
+ * DECLARE_SCMI_QUIRK(fix_me);
+ *
+ * <somewhere_in_the_scmi_stack.c>
+ * ------------------------------
+ *
+ * #define QUIRK_CODE_SNIPPET_FIX_ME() \
+ * ({ \
+ * if (p->condition) \
+ * a_ptr->calculated_val = 123; \
+ * })
+ *
+ *
+ * int some_function_to_fix(int param, struct something *p)
+ * {
+ * struct some_struct *a_ptr;
+ *
+ * a_ptr = some_load_func(p);
+ * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME);
+ * some_more_func(a_ptr);
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/kstrtox.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+
+#include "quirks.h"
+
+#define SCMI_QUIRKS_HT_SZ 4
+
+struct scmi_quirk {
+ bool enabled;
+ const char *name;
+ const char *vendor;
+ const char *sub_vendor_id;
+ const char *impl_ver_range;
+ u32 start_range;
+ u32 end_range;
+ struct static_key_false *key;
+ struct hlist_node hash;
+ unsigned int hkey;
+ const char *const compats[];
+};
+
+#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \
+ static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \
+ .name = __stringify(quirk_ ## _qn), \
+ .vendor = _ven, \
+ .sub_vendor_id = _sub, \
+ .impl_ver_range = _impl, \
+ .key = &(scmi_quirk_ ## _qn), \
+ .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \
+ }
+
+#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn))
+
+/*
+ * Define a quirk by name and provide the matching tokens where:
+ *
+ * _qn: A string which will be used to build the quirk and the global
+ * static_key names.
+ * _ven : SCMI Vendor ID string match, NULL means any.
+ * _sub : SCMI SubVendor ID string match, NULL means any.
+ * _impl : SCMI Implementation Version string match, NULL means any.
+ * This string can be used to express version ranges which will be
+ * interpreted as follows:
+ *
+ * NULL [0, 0xFFFFFFFF]
+ * "X" [X, X]
+ * "X-" [X, 0xFFFFFFFF]
+ * "-X" [0, X]
+ * "X-Y" [X, Y]
+ *
+ * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y
+ *
+ * ... : An optional variadic macro argument used to provide a comma-separated
+ * list of compatible-string matches; when no variadic argument is
+ * provided, ANY compatible will match this quirk.
+ *
+ * This also implicitly defines a properly named global static-key that
+ * will be used to dynamically enable the quirk at initialization time.
+ *
+ * Note that it is possible to associate multiple quirks to the same
+ * matching pattern, if your firmware quality is really astounding :P
+ *
+ * Example:
+ *
+ * Compatibles list NOT provided, so ANY compatible will match:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000");
+ *
+ *
+ * A few compatibles provided to match against:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000",
+ * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name");
+ */
+#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/*
+ * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant for quirks that
+ * reside in code included in loadable kernel modules, which need to access
+ * the global static keys at runtime to determine whether they are enabled.
+ * (see SCMI_QUIRK to understand usage)
+ */
+#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/* Global Quirks Definitions */
+DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL);
+DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-");
+
+/*
+ * Quirks Pointers Array
+ *
+ * This is filled at compile-time with the list of pointers to all the currently
+ * defined quirks descriptors.
+ */
+static struct scmi_quirk *scmi_quirks_table[] = {
+ __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec),
+ __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force),
+ NULL
+};
+
+/*
+ * Quirks HashTable
+ *
+ * A run-time populated hashtable containing all the defined quirks descriptors
+ * hashed by matching pattern.
+ */
+static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ);
+
+static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
+{
+ char *signature, *p;
+ unsigned int hash32;
+ unsigned long hash = 0;
+
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: "");
+ if (!signature)
+ return 0;
+
+ pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature);
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash32 = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash32;
+}
+
+static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
+{
+ const char *last, *first __free(kfree) = NULL;
+ size_t len;
+ char *sep;
+ int ret;
+
+ quirk->start_range = 0;
+ quirk->end_range = 0xFFFFFFFF;
+ len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0;
+ if (!len)
+ return 0;
+
+ first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL);
+ if (!first)
+ return -ENOMEM;
+
+ last = first + len - 1;
+ sep = strchr(first, '-');
+ if (sep)
+ *sep = '\0';
+
+ if (sep == first) /* -X */
+ ret = kstrtouint(first + 1, 0, &quirk->end_range);
+ else /* X OR X- OR X-y */
+ ret = kstrtouint(first, 0, &quirk->start_range);
+ if (ret)
+ return ret;
+
+ if (!sep)
+ quirk->end_range = quirk->start_range;
+ else if (sep != last) /* x-Y */
+ ret = kstrtouint(sep + 1, 0, &quirk->end_range);
+
+ if (quirk->start_range > quirk->end_range)
+ return -EINVAL;
+
+ return ret;
+}
+
+void scmi_quirks_initialize(void)
+{
+ struct scmi_quirk *quirk;
+ int i;
+
+ for (i = 0, quirk = scmi_quirks_table[0]; quirk;
+ i++, quirk = scmi_quirks_table[i]) {
+ int ret;
+
+ ret = scmi_quirk_range_parse(quirk);
+ if (ret) {
+ pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n",
+ quirk->name, quirk->impl_ver_range);
+ continue;
+ }
+ quirk->hkey = scmi_quirk_signature(quirk->vendor,
+ quirk->sub_vendor_id);
+
+ hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey);
+
+ pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n",
+ quirk->name, quirk, quirk->hkey,
+ quirk->vendor, quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+ }
+
+ pr_debug("SCMI Quirks initialized\n");
+}
+
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl)
+{
+ for (int i = 3; i >= 0; i--) {
+ struct scmi_quirk *quirk;
+ unsigned int hkey;
+
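+ /*
+ * Walk from the most to the least specific signature: i == 3
+ * probes (vend, subv), i == 2 probes (vend, NULL) and i <= 1
+ * probes the catch-all (NULL, NULL) key, so quirks defined at
+ * any level of specificity get a chance to match.
+ */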
+ hkey = scmi_quirk_signature(i > 1 ? vend : NULL,
+ i > 2 ? subv : NULL);
+
+ /*
+ * Note that there could be multiple matches, so we may
+ * enable multiple quirks that are part of the same hash
+ * collision domain...BUT we cannot assume that ALL quirks
+ * in that collision domain are a full match.
+ */
+ hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) {
+ if (quirk->enabled || quirk->hkey != hkey ||
+ impl < quirk->start_range ||
+ impl > quirk->end_range)
+ continue;
+
+ if (quirk->compats[0] &&
+ !of_machine_compatible_match(quirk->compats))
+ continue;
+
+ dev_info(dev, "Enabling SCMI Quirk [%s]\n",
+ quirk->name);
+
+ dev_dbg(dev,
+ "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n",
+ quirk->compats[0], quirk->vendor,
+ quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+
+ static_branch_enable(quirk->key);
+ quirk->enabled = true;
+ }
+ }
+}
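As a quick illustration of the impl_ver_range grammar handled by
scmi_quirk_range_parse() above, here is a hedged standalone equivalent
(userspace C, not part of the patch; buffer size and test value are
arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_range(const char *s, unsigned int *start, unsigned int *end)
{
	char buf[32];
	char *sep;

	*start = 0;
	*end = 0xFFFFFFFF;
	if (!s || !*s)
		return;				/* NULL  -> [0, 0xFFFFFFFF] */

	strncpy(buf, s, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	sep = strchr(buf, '-');
	if (sep)
		*sep = '\0';

	if (sep == buf)				/* "-X"  -> [0, X] */
		*end = strtoul(buf + 1, NULL, 0);
	else					/* "X", "X-", "X-Y" */
		*start = strtoul(buf, NULL, 0);

	if (!sep)				/* "X"   -> [X, X] */
		*end = *start;
	else if (sep[1])			/* "X-Y" -> [X, Y] */
		*end = strtoul(sep + 1, NULL, 0);
}

int main(void)
{
	unsigned int s, e;

	parse_range("0x12000-0x30000", &s, &e);
	printf("[0x%X, 0x%X]\n", s, e);		/* [0x12000, 0x30000] */
	return 0;
}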
diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h
new file mode 100644
index 000000000000..a71fde85a527
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+#ifndef _SCMI_QUIRKS_H
+#define _SCMI_QUIRKS_H
+
+#include <linux/static_key.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM_SCMI_QUIRKS
+
+#define DECLARE_SCMI_QUIRK(_qn) \
+ DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn)
+
+/*
+ * A helper to associate the actual code snippet to use as a quirk
+ * named as _qn.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \
+ (_blk); \
+ } while (0)
+
+void scmi_quirks_initialize(void);
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl);
+
+#else
+
+#define DECLARE_SCMI_QUIRK(_qn)
+/*
+ * Force quirk snippets to still be compiled (and then discarded as dead
+ * code) even when SCMI Quirks are disabled, so they cannot bitrot.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (0) \
+ (_blk); \
+ } while (0)
+
+static inline void scmi_quirks_initialize(void) { }
+static inline void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *sub_vend, const u32 impl) { }
+
+#endif /* CONFIG_ARM_SCMI_QUIRKS */
+
+/* Quirk declarations */
+DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec);
+DECLARE_SCMI_QUIRK(perf_level_get_fc_force);
+
+#endif /* _SCMI_QUIRKS_H */
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 9e89a6a763da..73db5492ab44 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -475,7 +475,8 @@ static void scmi_xfer_raw_worker(struct work_struct *work)
raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ ret, scmi_inflight_count(raw->handle));
/* Wait also for an async delayed response if needed */
if (!ret && xfer->async_done) {
@@ -642,7 +643,8 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.poll_completion);
+ xfer->hdr.poll_completion,
+ scmi_inflight_count(raw->handle));
ret = raw->desc->ops->send_message(rw->cinfo, xfer);
if (ret) {
@@ -671,11 +673,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
+ * @poll: A flag stating if a polling transmission is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
- void *buf, size_t len, u8 chan_id, bool async)
+ void *buf, size_t len, u8 chan_id,
+ bool async, bool poll)
{
int ret;
struct scmi_xfer *xfer;
@@ -684,6 +688,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
if (ret)
return ret;
+ if (poll) {
+ if (is_transport_polling_capable(raw->desc)) {
+ xfer->hdr.poll_completion = true;
+ } else {
+ dev_err(raw->handle->dev,
+ "Failed to send RAW message - Polling NOT supported\n");
+ return -EINVAL;
+ }
+ }
+
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
@@ -801,7 +815,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
- bool async)
+ bool async, bool poll)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
@@ -831,7 +845,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
- rd->chan_id, async);
+ rd->chan_id, async, poll);
/* Reset ppos for next message ... */
rd->tx_size = 0;
@@ -875,7 +889,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
@@ -886,10 +901,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
- u8 id;
struct scmi_raw_mode_info *raw;
struct scmi_dbg_raw_data *rd;
- const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
if (!inode->i_private)
return -ENODEV;
@@ -915,8 +928,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
}
/* Grab channel ID from debugfs entry naming if any */
- if (!kstrtou8(id_str, 16, &id))
- rd->chan_id = id;
+ /* If not set, this just reassigns the 0 we already had from kzalloc() */
+ rd->chan_id = debugfs_get_aux_num(filp);
rd->raw = raw;
filp->private_data = rd;
@@ -966,7 +979,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, false);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
@@ -978,6 +992,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.owner = THIS_MODULE,
};
+static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
@@ -1201,6 +1249,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
+ debugfs_create_file("message_poll", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file("message_poll_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_async_fops);
+
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
@@ -1225,11 +1279,21 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
snprintf(cdir, 8, "0x%02X", channels[i]);
chd = debugfs_create_dir(cdir, top_chans);
- debugfs_create_file("message", 0600, chd, raw,
+ debugfs_create_file_aux_num("message", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_fops);
- debugfs_create_file("message_async", 0600, chd, raw,
+ debugfs_create_file_aux_num("message_async", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file_aux_num("message_poll", 0600, chd,
+ raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file_aux_num("message_poll_async", 0600,
+ chd, raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_async_fops);
}
}
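For context, a hedged userspace sketch of exercising the new message_poll
entry (the debugfs path, instance number and header packing are assumptions
based on the SCMI spec and raw-mode ABI; little-endian host; error handling
trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 32-bit SCMI header: protocol_id 0x10 (Base) << 10 | msg_id 0x0 */
	uint32_t msg = (0x10 << 10) | 0x0;	/* PROTOCOL_VERSION */
	uint32_t rsp[8];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/scmi/0/raw/message_poll", O_RDWR);
	if (fd < 0)
		return 1;

	/* Completion is awaited by polling instead of interrupts */
	write(fd, &msg, sizeof(msg));

	n = read(fd, rsp, sizeof(rsp));	/* header, status, payload */
	if (n >= 12)
		printf("status %d version 0x%08x\n", (int32_t)rsp[1], rsp[2]);

	close(fd);
	return 0;
}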
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
index 21f467a92942..955736336061 100644
--- a/drivers/firmware/arm_scmi/scmi_power_control.c
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -46,6 +46,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/scmi_protocol.h>
@@ -324,12 +325,7 @@ static int scmi_userspace_notifier(struct notifier_block *nb,
static void scmi_suspend_work_func(struct work_struct *work)
{
- struct scmi_syspower_conf *sc =
- container_of(work, struct scmi_syspower_conf, suspend_work);
-
pm_suspend(PM_SUSPEND_MEM);
-
- sc->state = SCMI_SYSPOWER_IDLE;
}
static int scmi_syspower_probe(struct scmi_device *sdev)
@@ -354,6 +350,7 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
sc->required_transition = SCMI_SYSTEM_MAX;
sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
sc->dev = &sdev->dev;
+ dev_set_drvdata(&sdev->dev, sc);
INIT_WORK(&sc->suspend_work, scmi_suspend_work_func);
@@ -363,6 +360,18 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
NULL, &sc->userspace_nb);
}
+static int scmi_system_power_resume(struct device *dev)
+{
+ struct scmi_syspower_conf *sc = dev_get_drvdata(dev);
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ return 0;
+}
+
+static const struct dev_pm_ops scmi_system_power_pmops = {
+ SYSTEM_SLEEP_PM_OPS(NULL, scmi_system_power_resume)
+};
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_SYSTEM, "syspower" },
{ },
@@ -370,6 +379,9 @@ static const struct scmi_device_id scmi_id_table[] = {
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_system_power_driver = {
+ .driver = {
+ .pm = pm_sleep_ptr(&scmi_system_power_pmops),
+ },
.name = "scmi-system-power",
.probe = scmi_syspower_probe,
.id_table = scmi_id_table,
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 01d8a9398fe8..11c347bff766 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -16,6 +16,8 @@
#include "common.h"
+#define SCMI_SHMEM_LAYOUT_OVERHEAD 24
+
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
@@ -34,9 +36,59 @@ struct scmi_shared_mem {
u8 msg_payload[];
};
+static inline void shmem_memcpy_fromio32(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)from, 4) ||
+ !IS_ALIGNED((unsigned long)to, 4) ||
+ count % 4);
+
+ __ioread32_copy(to, from, count / 4);
+}
+
+static inline void shmem_memcpy_toio32(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)to, 4) ||
+ !IS_ALIGNED((unsigned long)from, 4) ||
+ count % 4);
+
+ __iowrite32_copy(to, from, count / 4);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops32 = {
+ .fromio = shmem_memcpy_fromio32,
+ .toio = shmem_memcpy_toio32,
+};
+
+/*
+ * Wrappers are needed for proper memcpy_{from,to}io expansion by the
+ * pre-processor.
+ */
+static inline void shmem_memcpy_fromio(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ memcpy_fromio(to, from, count);
+}
+
+static inline void shmem_memcpy_toio(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ memcpy_toio(to, from, count);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops_default = {
+ .fromio = shmem_memcpy_fromio,
+ .toio = shmem_memcpy_toio,
+};
+
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo)
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t copy_toio)
{
ktime_t stop;
@@ -73,7 +125,7 @@ static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
if (xfer->tx.buf)
- memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+ copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
@@ -82,7 +134,8 @@ static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
}
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer)
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -91,11 +144,12 @@ static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer)
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -103,7 +157,7 @@ static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
@@ -139,7 +193,8 @@ static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct device *dev, bool tx,
- struct resource *res)
+ struct resource *res,
+ struct scmi_shmem_io_ops **ops)
{
struct device_node *shmem __free(device_node);
const char *desc = tx ? "Tx" : "Rx";
@@ -148,6 +203,7 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct resource lres = {};
resource_size_t size;
void __iomem *addr;
+ u32 reg_io_width = 0; /* of_property_read_u32() leaves it untouched on error */
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
if (!shmem)
@@ -167,12 +223,27 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
}
size = resource_size(res);
+ if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) {
+ dev_err(dev, "misconfigured SCMI shared memory\n");
+ return IOMEM_ERR_PTR(-ENOSPC);
+ }
+
addr = devm_ioremap(dev, res->start, size);
if (!addr) {
dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
}
+ of_property_read_u32(shmem, "reg-io-width", &reg_io_width);
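+ /*
+ * An optional DT "reg-io-width" of 4 selects the strict 32-bit
+ * accessors; any other value, or an absent property, falls back
+ * to plain memcpy_{from,to}io().
+ */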
+ switch (reg_io_width) {
+ case 4:
+ *ops = &shmem_io_ops32;
+ break;
+ default:
+ *ops = &shmem_io_ops_default;
+ break;
+ }
+
return addr;
}
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index e3d5f7560990..ae0f67e6cc45 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -26,6 +26,7 @@
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @chan_lock: Lock that prevents multiple xfers from being queued
+ * @io_ops: Transport specific I/O operations
*/
struct scmi_mailbox {
struct mbox_client cl;
@@ -35,6 +36,7 @@ struct scmi_mailbox {
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
struct mutex chan_lock;
+ struct scmi_shmem_io_ops *io_ops;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@@ -45,7 +47,8 @@ static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
- core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo);
+ core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
+ smbox->io_ops->toio);
}
static void rx_callback(struct mbox_client *cl, void *m)
@@ -124,8 +127,8 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
(num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
- "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
- of_node_full_name(np), num_mb, num_sh);
+ "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
+ np, num_mb, num_sh);
return -EINVAL;
}
@@ -137,8 +140,7 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
- dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
- of_node_full_name(np));
+ dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np);
ret = -EINVAL;
}
}
@@ -197,7 +199,8 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!smbox)
return -ENOMEM;
- smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL);
+ smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
+ &smbox->io_ops);
if (IS_ERR(smbox->shmem))
return PTR_ERR(smbox->shmem);
@@ -305,7 +308,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_response(smbox->shmem, xfer);
+ core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}
static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
@@ -313,7 +316,8 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_notification(smbox->shmem, max_len, xfer);
+ core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
+ smbox->io_ops->fromio);
}
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
@@ -366,13 +370,14 @@ static struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
scmi_mailbox_desc, scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 56fc63edf51e..dc0f46340153 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -17,8 +17,6 @@
#include "../common.h"
-#define SCMI_OPTEE_MAX_MSG_SIZE 128
-
enum scmi_optee_pta_cmd {
/*
* PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
@@ -114,6 +112,7 @@ enum scmi_optee_pta_cmd {
* @req.shmem: Virtual base address of the shared memory
* @req.msg: Shared memory protocol handle for SCMI request and
* synchronous response
+ * @io_ops: Transport specific I/O operations
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
@@ -128,6 +127,7 @@ struct scmi_optee_channel {
struct scmi_shared_mem __iomem *shmem;
struct scmi_msg_payld *msg;
} req;
+ struct scmi_shmem_io_ops *io_ops;
struct tee_shm *tee_shm;
struct list_head link;
};
@@ -297,7 +297,7 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[2].u.memref.shm = channel->tee_shm;
- param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+ param[2].u.memref.size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
@@ -330,7 +330,7 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
{
- const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+ const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
void *shbuf;
channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
@@ -350,7 +350,8 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
- channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL);
+ channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL,
+ &channel->io_ops);
if (IS_ERR(channel->req.shmem))
return PTR_ERR(channel->req.shmem);
@@ -465,7 +466,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
ret = invoke_process_msg_channel(channel,
core->msg->command_size(xfer));
} else {
- core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo);
+ core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo,
+ channel->io_ops->toio);
ret = invoke_process_smt_channel(channel);
}
@@ -484,7 +486,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
core->msg->fetch_response(channel->req.msg,
channel->rx_len, xfer);
else
- core->shmem->fetch_response(channel->req.shmem, xfer);
+ core->shmem->fetch_response(channel->req.shmem, xfer,
+ channel->io_ops->fromio);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -495,7 +498,7 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
mutex_unlock(&channel->mu);
}
-static struct scmi_transport_ops scmi_optee_ops = {
+static const struct scmi_transport_ops scmi_optee_ops = {
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
@@ -514,7 +517,7 @@ static struct scmi_desc scmi_optee_desc = {
.ops = &scmi_optee_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
.sync_cmds_completed_on_ret = true,
};
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f8dd108777f9..21abb571e4f2 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -45,6 +45,7 @@
* @irq: An optional IRQ for completion
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @io_ops: Transport specific I/O operations
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
@@ -60,6 +61,7 @@ struct scmi_smc {
int irq;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ struct scmi_shmem_io_ops *io_ops;
/* Protect access to shmem area */
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
@@ -144,7 +146,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!scmi_info)
return -ENOMEM;
- scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res);
+ scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res,
+ &scmi_info->io_ops);
if (IS_ERR(scmi_info->shmem))
return PTR_ERR(scmi_info->shmem);
@@ -229,7 +232,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
*/
smc_channel_lock_acquire(scmi_info, xfer);
- core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo);
+ core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo,
+ scmi_info->io_ops->toio);
if (scmi_info->cap_id != ULONG_MAX)
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
@@ -253,7 +257,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- core->shmem->fetch_response(scmi_info->shmem, xfer);
+ core->shmem->fetch_response(scmi_info->shmem, xfer,
+ scmi_info->io_ops->fromio);
}
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -277,7 +282,7 @@ static struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
/*
* Setting .sync_cmds_atomic_replies to true for SMC assumes that,
* once the SMC instruction has completed successfully, the issued
@@ -296,6 +301,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "qcom,scmi-smc" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index d349766bc0b2..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -32,8 +32,8 @@
#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
-#define VIRTIO_SCMI_MAX_PDU_SIZE \
- (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \
+ ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2
/**
@@ -90,6 +90,7 @@ enum poll_states {
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
+ * @max_len: Maximum allowed SDU size in bytes
* @poll_idx: Last used index registered for polling purposes if this message
* transaction reply was configured for polling.
* @poll_status: Polling state for this message.
@@ -102,6 +103,7 @@ struct scmi_vio_msg {
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
+ unsigned int max_len;
unsigned int poll_idx;
enum poll_states poll_status;
/* Lock to protect access to poll_status */
@@ -234,7 +236,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
unsigned long flags;
struct device *dev = &vioch->vqueue->vdev->dev;
- sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+ sg_init_one(&sg_in, msg->input, msg->max_len);
spin_lock_irqsave(&vioch->lock, flags);
@@ -439,9 +441,9 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!msg)
return -ENOMEM;
+ msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo);
if (tx) {
- msg->request = devm_kzalloc(dev,
- VIRTIO_SCMI_MAX_PDU_SIZE,
+ msg->request = devm_kzalloc(dev, msg->max_len,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
@@ -449,8 +451,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
refcount_set(&msg->users, 1);
}
- msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
- GFP_KERNEL);
+ msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL);
if (!msg->input)
return -ENOMEM;
@@ -870,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
@@ -920,6 +924,7 @@ static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index 2883ed24a84d..c34c8c837441 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -12,9 +12,34 @@ config IMX_SCMI_BBM_EXT
To compile this driver as a module, choose M here: the
module will be called imx-sm-bbm.
+config IMX_SCMI_CPU_EXT
+ tristate "i.MX SCMI CPU EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_CPU_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System CPU Protocol, used to manage CPU
+ start, stop, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-cpu.
+
+config IMX_SCMI_LMM_EXT
+ tristate "i.MX SCMI LMM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_LMM_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System Logical Machine Protocol, used to
+ manage Logical Machine boot, shutdown, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-lmm.
+
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_MISC_DRV
default y if ARCH_MXC
help
This enables i.MX System MISC control logic such as gpio expander
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
index d3ee6d544924..e3a5ea46345c 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Makefile
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
+obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
index 17799eacf06c..aa176c1a5eef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -374,10 +374,11 @@ static const struct scmi_protocol scmi_imx_bbm = {
.ops = &scmi_imx_bbm_proto_ops,
.events = &scmi_imx_bbm_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_bbm);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI BBM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
new file mode 100644
index 000000000000..66f47f5371e5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP CPU Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_cpu_protocol_cmd {
+ SCMI_IMX_CPU_ATTRIBUTES = 0x3,
+ SCMI_IMX_CPU_START = 0x4,
+ SCMI_IMX_CPU_STOP = 0x5,
+ SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6,
+ SCMI_IMX_CPU_INFO_GET = 0xC,
+};
+
+struct scmi_imx_cpu_info {
+ u32 nr_cpu;
+};
+
+#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0)
+struct scmi_msg_imx_cpu_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_cpu_attributes_out {
+ __le32 attributes;
+#define CPU_MAX_NAME 16
+ u8 name[CPU_MAX_NAME];
+};
+
+struct scmi_imx_cpu_reset_vector_set_in {
+ __le32 cpuid;
+#define CPU_VEC_FLAGS_RESUME BIT(31)
+#define CPU_VEC_FLAGS_START BIT(30)
+#define CPU_VEC_FLAGS_BOOT BIT(29)
+ __le32 flags;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_cpu_info_get_out {
+#define CPU_RUN_MODE_START 0
+#define CPU_RUN_MODE_HOLD 1
+#define CPU_RUN_MODE_STOP 2
+#define CPU_RUN_MODE_SLEEP 3
+ __le32 runmode;
+ __le32 sleepmode;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_imx_cpu_info *info = ph->get_priv(ph);
+
+ if (cpuid >= info->nr_cpu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph,
+ u32 cpuid, bool start)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ if (start)
+ msg_id = SCMI_IMX_CPU_START;
+ else
+ msg_id = SCMI_IMX_CPU_STOP;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ struct scmi_imx_cpu_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ if (start)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START);
+ if (boot)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT);
+ if (resume)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started)
+{
+ struct scmi_imx_cpu_info_get_out *out;
+ struct scmi_xfer *t;
+ u32 mode;
+ int ret;
+
+ if (!started)
+ return -EINVAL;
+
+ *started = false;
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ mode = le32_to_cpu(out->runmode);
+ if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP)
+ *started = true;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = {
+ .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set,
+ .cpu_start = scmi_imx_cpu_start,
+ .cpu_started = scmi_imx_cpu_started,
+};
+
+static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_cpu_info *info)
+{
+ struct scmi_msg_imx_cpu_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK);
+ dev_info(ph->dev, "i.MX SM CPU: %d cpus\n",
+ info->nr_cpu);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_msg_imx_cpu_attributes_out *out;
+ char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'};
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE);
+ dev_info(ph->dev, "i.MX CPU: name: %s\n", name);
+ } else {
+ dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_cpu_info *info;
+ u32 version;
+ int ret, i;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_cpu_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nr_cpu; i++) {
+ ret = scmi_imx_cpu_attributes_get(ph, i);
+ if (ret)
+ return ret;
+ }
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_cpu = {
+ .id = SCMI_PROTOCOL_IMX_CPU,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_cpu_protocol_init,
+ .ops = &scmi_imx_cpu_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_cpu);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
new file mode 100644
index 000000000000..b519c67fe920
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP LMM Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_lmm_protocol_cmd {
+ SCMI_IMX_LMM_ATTRIBUTES = 0x3,
+ SCMI_IMX_LMM_BOOT = 0x4,
+ SCMI_IMX_LMM_RESET = 0x5,
+ SCMI_IMX_LMM_SHUTDOWN = 0x6,
+ SCMI_IMX_LMM_WAKE = 0x7,
+ SCMI_IMX_LMM_SUSPEND = 0x8,
+ SCMI_IMX_LMM_NOTIFY = 0x9,
+ SCMI_IMX_LMM_RESET_REASON = 0xA,
+ SCMI_IMX_LMM_POWER_ON = 0xB,
+ SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC,
+};
+
+struct scmi_imx_lmm_priv {
+ u32 nr_lmm;
+};
+
+#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0)
+#define SCMI_IMX_LMM_NR_MAX 16
+struct scmi_msg_imx_lmm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_lmm_attributes_out {
+ __le32 lmid;
+ __le32 attributes;
+ __le32 state;
+ __le32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_reset_vector_set_in {
+ __le32 lmid;
+ __le32 cpuid;
+ __le32 flags; /* reserved for future extension */
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_lmm_shutdown_in {
+ __le32 lmid;
+#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0)
+ __le32 flags;
+};
+
+static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid)
+{
+ struct scmi_imx_lmm_priv *priv = ph->get_priv(ph);
+
+ if (lmid >= priv->nr_lmm)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph,
+ u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ struct scmi_msg_imx_lmm_attributes_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ info->lmid = le32_to_cpu(out->lmid);
+ info->state = le32_to_cpu(out->state);
+ info->errstatus = le32_to_cpu(out->errstatus);
+ strscpy(info->name, out->name);
+ dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n",
+ info->lmid, info->name);
+ } else {
+ dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ if (boot)
+ msg_id = SCMI_IMX_LMM_BOOT;
+ else
+ msg_id = SCMI_IMX_LMM_POWER_ON;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ struct scmi_imx_lmm_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags)
+{
+ struct scmi_imx_lmm_shutdown_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL)
+ in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
+ else
+ in->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = {
+ .lmm_power_boot = scmi_imx_lmm_power_boot,
+ .lmm_info = scmi_imx_lmm_attributes,
+ .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set,
+ .lmm_shutdown = scmi_imx_lmm_shutdown,
+};
+
+static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_lmm_priv *priv)
+{
+ struct scmi_msg_imx_lmm_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK);
+ if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) {
+ dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n",
+ priv->nr_lmm);
+ ret = -EINVAL;
+ } else {
+ dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_lmm_priv *info;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM LMM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_lmm_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_lmm = {
+ .id = SCMI_PROTOCOL_IMX_LMM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_lmm_protocol_init,
+ .ops = &scmi_imx_lmm_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_lmm);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a86ab9b35953..700a3f24f4ef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -25,7 +25,10 @@
enum scmi_imx_misc_protocol_cmd {
SCMI_IMX_MISC_CTRL_SET = 0x3,
SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6,
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+ SCMI_IMX_MISC_CFG_INFO_GET = 0xC,
+ SCMI_IMX_MISC_BOARD_INFO = 0xE,
};
struct scmi_imx_misc_info {
@@ -65,6 +68,27 @@ struct scmi_imx_misc_ctrl_get_out {
__le32 val[];
};
+struct scmi_imx_misc_buildinfo_out {
+ __le32 buildnum;
+ __le32 buildcommit;
+#define MISC_MAX_BUILDDATE 16
+ u8 builddate[MISC_MAX_BUILDDATE];
+#define MISC_MAX_BUILDTIME 16
+ u8 buildtime[MISC_MAX_BUILDTIME];
+};
+
+struct scmi_imx_misc_board_info_out {
+ __le32 attributes;
+#define MISC_MAX_BRDNAME 16
+ u8 brdname[MISC_MAX_BRDNAME];
+};
+
+struct scmi_imx_misc_cfg_info_out {
+ __le32 msel;
+#define MISC_MAX_CFGNAME 16
+ u8 cfgname[MISC_MAX_CFGNAME];
+};
+
static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_imx_misc_info *mi)
{
@@ -254,8 +278,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
if (num > max_num)
return -EINVAL;
- ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
- 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
+ sizeof(*in) + num * sizeof(__le32), 0, &t);
if (ret)
return ret;
@@ -272,6 +296,81 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph)
+{
+ char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME];
+ struct scmi_imx_misc_buildinfo_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0,
+ sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(date, out->builddate, MISC_MAX_BUILDDATE);
+ strscpy(time, out->buildtime, MISC_MAX_BUILDTIME);
+ dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n",
+ le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit),
+ date, time);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_board_info(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_board_info_out *out;
+ char name[MISC_MAX_BRDNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->brdname, MISC_MAX_BRDNAME);
+ dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n",
+ name, le32_to_cpu(out->attributes));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_cfg_info_out *out;
+ char name[MISC_MAX_CFGNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->cfgname, MISC_MAX_CFGNAME);
+ dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n",
+ name, le32_to_cpu(out->msel));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
.misc_ctrl_set = scmi_imx_misc_ctrl_set,
.misc_ctrl_get = scmi_imx_misc_ctrl_get,
@@ -299,6 +398,18 @@ static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
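+ /*
+ * The build/board/config info queries below are optional in the SM
+ * firmware, so -EOPNOTSUPP is tolerated; any other failure is fatal.
+ */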
+ ret = scmi_imx_misc_build_info_discover(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_board_info(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_cfg_info_get(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
return ph->set_priv(ph, minfo, version);
}
@@ -309,10 +420,11 @@ static const struct scmi_protocol scmi_imx_misc = {
.ops = &scmi_imx_misc_proto_ops,
.events = &scmi_imx_misc_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_misc);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI MISC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index b2dfd6c46ca2..741f4eace350 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -32,6 +32,518 @@ port, and deploy the SM on supported processors.
The SM implements an interface compliant with the Arm SCMI Specification
with additional vendor specific extensions.
+System Control and Management Logical Machine Management Vendor Protocol
+========================================================================
+
+The SM adds the concept of logical machines (LMs). These are analogous to
+VMs and each has its own instance of SCMI. All normal SCMI calls only apply
+to the LM running the calling agent. That includes boot, shutdown, reset,
+suspend, wake, etc. If a caller makes the SCMI base call to get a list
+of agents, it will only get those on that LM. Each LM is completely isolated
+from the others. This is mandatory for them to operate independently.
+
+This protocol is intended to support boot, shutdown, and reset of other logical
+machines (LMs). It is usually used to allow one LM (e.g. OSPM) to manage
+another LM which is usually an offload or accelerator engine. Notifications
+from this protocol can also be used to manage a communication link to another
+LM. The LMM protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover all the LMs defined in the system.
+- Boot a target LM.
+- Shutdown a target LM (gracefully or forcibly).
+- Reset a target LM (gracefully or forcibly).
+- Wake a target LM from suspend.
+- Suspend a target LM (gracefully).
+- Read boot/shutdown/reset information for a target LM.
+- Get notifications when a target LM boots or shuts down (e.g. if LM 'X'
+  requested notifications about LM 'Y', the SCMI firmware will notify LM 'X'
+  whenever LM 'Y' boots or shuts down).
+
+'Graceful' means asking the LM itself to shut down/reset/etc. (e.g. by
+sending a notification to Linux, which then reboots or powers down itself).
+This is an asynchronous command: SUCCESS only means the command returned
+successfully, not that the reboot/reset has actually finished.
+
+'Forceful' means the SM forcibly shuts down/resets/etc. the LM. This is a
+synchronous command: SUCCESS means the LM has been successfully shut
+down/reset/etc.
+Commands without a Graceful/Forceful flag, such as WAKE and SUSPEND, are
+Graceful commands.
+
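+As an illustration only (this struct is a sketch inferred from the tables
+below, not a definitive SM ABI definition), a C view of the LMM_RESET
+payload and its graceful flag might look like::
+
+  #define LMM_FLAGS_GRACEFUL  BIT(0)  /* 1 = graceful, 0 = forceful */
+
+  struct scmi_imx_lmm_reset_in {
+          __le32 lmid;   /* ID of the target Logical Machine */
+          __le32 flags;  /* Bit[0] graceful request, Bits[31:1] zero */
+  };
+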
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:5] Reserved, must be zero. |
+| |Bits[4:0] Number of Logical Machines |
+| |Note that due to both hardware limitations and reset reason|
+| |field limitations, the max number of LM is 16. The minimum |
+| |is 1. |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+LMM_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |DENIED: if the agent does not have permission to get info |
+| |for the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier of the LM whose identification is requested. |
+| |This field is populated with the lmid of the calling |
+| |agent when the lmid parameter passed via the command is |
+| |0xFFFFFFFF, and is identical to the lmid field passed |
+| |via the calling parameters in all other cases. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes | Bits[31:0] Reserved, must be zero. |
++------------------+-----------------------------------------------------------+
+|uint32 state | Current state of the LM |
++------------------+-----------------------------------------------------------+
+|uint32 errStatus | Last error status recorded |
++------------------+-----------------------------------------------------------+
+|char name[16] | A NULL terminated ASCII string with the LM name, of up |
+| | to 16 bytes |
++------------------+-----------------------------------------------------------+
+
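+As a usage sketch (the helper and macro names here are hypothetical, the
+wildcard behaviour comes from the table above), an agent can discover its
+own LM by passing the wildcard ID::
+
+  #define LMM_ID_DISCOVER  0xFFFFFFFFU  /* "who am I" query */
+
+  /* The lmid field of the response then holds the caller's own LM ID */
+  scmi_send(LMM_ATTRIBUTES, LMM_ID_DISCOVER);
+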
+LMM_BOOT
+~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM boot was successfully started. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET
+~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: the reset request completed successfully for a |
+| |graceful reset, or the LM was successfully reset for a |
+| |forceful reset. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SHUTDOWN
+~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Shutdown flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: the shutdown request completed successfully |
+| |for a graceful request, or the LM was successfully shut |
+| |down for a forceful request. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_WAKE
+~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM wake command successfully returned. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SUSPEND
+~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM suspend command successfully |
+| |returned. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_NOTIFY
+~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[2] Suspend (sleep) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Shutdown (off) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Boot (on) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the notification state was successfully |
+| |updated. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if the input flags specify an |
+| |unsupported or invalid configuration. |
+| |DENIED: if the agent does not have permission to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
+
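+A minimal sketch of composing the notification flags above (the macro names
+are illustrative; the bit positions come from the table)::
+
+  #define LMM_NOTIFY_BOOT      BIT(0)
+  #define LMM_NOTIFY_SHUTDOWN  BIT(1)
+  #define LMM_NOTIFY_SUSPEND   BIT(2)
+  #define LMM_NOTIFY_WAKE      BIT(3)
+
+  /* Request boot and shutdown events for the LM passed in lmid */
+  u32 flags = LMM_NOTIFY_BOOT | LMM_NOTIFY_SHUTDOWN;
+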
+LMM_RESET_REASON
+~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x80
+This command is mandatory.
+
+This command returns the reset reason that caused the last reset, such as
+POR, WDOG, JTAG, etc.
+
++---------------------+--------------------------------------------------------+
+|Parameters |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++---------------------+--------------------------------------------------------+
+|Return values |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|int32 status |SUCCESS: if the reset reason of the LM was |
+| |successfully returned. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |DENIED: if the agent does not have permission to request|
+| |the reset reason. |
++---------------------+--------------------------------------------------------+
+|uint32 bootflags |Boot reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Reserved, must be zero. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the boot of this LM|
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason (WDOG, POR, FCCU, etc.): |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. One reason maps to BIT(reason) in SRESR. |
+|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Number of valid extended info words. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the shutdown of |
+| |this LM. |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason: |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. One reason maps to BIT(reason) in SRESR. |
+|uint32 extinfo[3] |Array of extended info words (e.g. fault PC) |
++---------------------+--------------------------------------------------------+
+
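+A hedged decoding sketch for the bootflags word above (the mask names are
+hypothetical; the bit positions follow the table, using the BIT()/GENMASK()
+helpers from <linux/bits.h>)::
+
+  #define LMM_REASON_VALID  BIT(31)
+  #define LMM_ORIGIN_VALID  BIT(28)
+  #define LMM_ERR_ID_VALID  BIT(23)
+
+  u32 reason = bootflags & GENMASK(7, 0);
+  u32 err_id = (bootflags & GENMASK(22, 8)) >> 8;
+  u32 origin = (bootflags & GENMASK(27, 24)) >> 24;
+
+  if (bootflags & LMM_REASON_VALID)
+          pr_info("reset reason %u (BIT(reason) in SRESR)\n", reason);
+  if (bootflags & LMM_ORIGIN_VALID)
+          pr_info("boot requested by LM %u\n", origin);
+  if (bootflags & LMM_ERR_ID_VALID)
+          pr_info("error ID (agent) %u\n", err_id);
+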
+LMM_POWER_ON
+~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM successfully powers on. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x80
+This command is mandatory.
+
++-----------------------+------------------------------------------------------+
+|Parameters |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++-----------------------+------------------------------------------------------+
+|uint32 cpuid |ID of the CPU inside the LM |
++-----------------------+------------------------------------------------------+
+|uint32 flags |Reset vector flags |
+| |Bits[31:0] Reserved, must be zero. |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorLow |Lower 32 bits of the reset vector |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorHigh |Upper 32 bits of the reset vector |
++-----------------------+------------------------------------------------------+
+|Return values |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|int32 status |SUCCESS: if the reset vector is set successfully. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine, or cpuid is not valid. |
+| |INVALID_PARAMETERS: if the reset vector is invalid. |
+| |DENIED: if the agent does not have permission to set |
+| |the reset vector for the CPU in the LM. |
++-----------------------+------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x80
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+LMM_EVENT
+~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier for the LM that caused the transition. |
++------------------+-----------------------------------------------------------+
+|uint32 eventlm |Identifier of the LM this event refers to. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |LM events: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) event: |
+| |1 LM has awakened. |
+| |0 not a wake event. |
+| |Bit[2] Suspend (sleep) event: |
+| |1 LM has suspended. |
+| |0 not a suspend event. |
+| |Bit[1] Shutdown (off) event: |
+| |1 LM has shutdown. |
+| |0 not a shutdown event. |
+| |Bit[0] Boot (on) event: |
+| |1 LM has booted. |
+| |0 not a boot event. |
++------------------+-----------------------------------------------------------+
+
SCMI_BBM: System Control and Management BBM Vendor Protocol
==============================================================
@@ -436,6 +948,322 @@ protocol_id: 0x81
| |0 no button change detected. |
+------------------+-----------------------------------------------------------+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in an AP cluster, PSCI should be used and the PSCI firmware will
+ use the CPU protocol to handle them. For cores in a non-AP cluster, the
+ operating system (e.g. Linux) can use the CPU protocol to control Cortex-M7
+ cores.
+ - A CPU means the core and its auxiliary peripherals (e.g. TCM) inside the
+ i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is more restricted, covering just
+boot, shutdown, etc. So an agent might boot another logical machine but not
+be able to directly mess with the state of its CPUs. This is also the reason
+there is an LMM power on command even though that could have been done
+through the power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+
+The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+Interface. They can also be found in the SoC RM; see the CPU Mode Control
+(CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated and setting their state is normally exclusive
+to one client.
+
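+For illustration, a typical bring-up of an auxiliary Cortex-M7 core maps to
+the commands below (the scmi_send() helper is hypothetical; only the command
+names come from this document)::
+
+  /* Point the core at its firmware, then release it */
+  scmi_send(CPU_RESET_VECTOR_SET, cpuid, flags, vector_lo, vector_hi);
+  scmi_send(CPU_START, cpuid);
+  /* ... and later halt it again */
+  scmi_send(CPU_STOP, cpuid);
+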
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU is started successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU is stopped successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the lower 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorHigh|Upper vector: |
+| |If bit[0] of flags is 0, the upper 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the upper 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU reset vector is set successfully. |
+| |NOT_FOUND: if cpuid does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the requested vector type is not |
+| |supported by this CPU. |
+| |DENIED: the calling agent is not allowed to set the |
+| |reset vector of this CPU |
++----------------------+-------------------------------------------------------+
+
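+A sketch of composing the flags above (the macro names are illustrative; the
+bit positions follow the table)::
+
+  #define CPU_VEC_FLAG_RESUME  BIT(31)  /* update the resume vector */
+  #define CPU_VEC_FLAG_BOOT    BIT(30)  /* update the boot vector */
+  #define CPU_VEC_FLAG_TABLE   BIT(0)   /* vector is a vector table base */
+
+  /* Set the boot vector to a vector table base at phys_addr */
+  u32 flags = CPU_VEC_FLAG_BOOT | CPU_VEC_FLAG_TABLE;
+  u32 vector_lo = lower_32_bits(phys_addr);
+  u32 vector_hi = upper_32_bits(phys_addr);
+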
+CPU_SLEEP_MODE_SET
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Sleep mode flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] IRQ mux: |
+| |If set to 1 the wakeup mux source is the GIC, else if 0|
+| |then the GPC |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Target sleep mode. When the CPU executes WFI, the GPC |
+| |will be triggered to enter one of the following modes: |
+| |RUN: (0) |
+| |WAIT: (1) |
+| |STOP: (2) |
+| |SUSPEND: (3) |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU sleep mode is set successfully. |
+| |NOT_FOUND: if cpuid does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the sleepmode or flags is invalid. |
+| |DENIED: the calling agent is not allowed to configure |
+| |the CPU |
++----------------------+-------------------------------------------------------+
+
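+For example (a sketch; the macro names are hypothetical, the values follow
+the tables above), waking via the GIC while entering SUSPEND on WFI::
+
+  #define CPU_SLEEP_IRQ_MUX_GIC   BIT(0)  /* wake mux: 1 = GIC, 0 = GPC */
+  #define CPU_SLEEP_MODE_SUSPEND  3
+
+  u32 flags = CPU_SLEEP_IRQ_MUX_GIC;
+  u32 sleepmode = CPU_SLEEP_MODE_SUSPEND;
+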
+CPU_INFO_GET
+~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully.|
+| |NOT_FOUND: if the cpuid is not valid. |
++----------------------+-------------------------------------------------------+
+|uint32 runmode |Run mode for the CPU: |
+| |RUN (0): CPU started |
+| |HOLD (1): CPU powered up with reset asserted |
+| |STOP (2): CPU reset and held |
+| |SUSPEND (3): in cpuidle state |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
SCMI_MISC: System Control and Management MISC Vendor Protocol
================================================================
@@ -832,6 +1660,7 @@ protocol_id: 0x84
|Name |Description |
+--------------------+---------------------------------------------------------+
|int32 status |SUCCESS: system log return |
+| |NOT_SUPPORTED: system log not available |
+--------------------+---------------------------------------------------------+
|uint32 numLogflags |Descriptor for the log data returned by this call. |
| |Bits[31:20] Number of remaining log words. |
@@ -842,6 +1671,30 @@ protocol_id: 0x84
|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
+--------------------+---------------------------------------------------------+
+MISC_BOARD_INFO
+~~~~~~~~~~~~~~~
+
+message_id: 0xE
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: board name returned |
+| |NOT_SUPPORTED: board name not available |
++--------------------+---------------------------------------------------------+
+|uint32 attributes |Board-specific attributes reserved for future expansion |
+| |without breaking backwards compatibility. The firmware |
+| |sets the value to 0 |
++--------------------+---------------------------------------------------------+
+|uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes|
+| |in length. This is the System Manager (SM) firmware- |
+| |exported board name and may not align with the board |
+| |name in the device tree. |
++--------------------+---------------------------------------------------------+
+
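+A sketch of how an OS driver might view the MISC_BOARD_INFO response payload
+(the struct name is hypothetical; the layout follows the table above)::
+
+  struct scmi_imx_misc_board_info_out {
+          __le32 attributes;   /* reserved, firmware sets this to 0 */
+          u8 boardname[16];    /* NULL terminated ASCII board name */
+  };
+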
NEGOTIATE_PROTOCOL_VERSION
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index fda6a1573609..17127880e10a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -393,7 +393,7 @@ static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
return vinfo->num_domains;
}
-static struct scmi_voltage_proto_ops voltage_proto_ops = {
+static const struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 94a6b4e667de..87c323de17b9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (ret)
return ERR_PTR(ret);
+ if (!buf.opp_count)
+ return ERR_PTR(-ENOENT);
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -1046,7 +1049,7 @@ static struct platform_driver scpi_driver = {
.dev_groups = versions_groups,
},
.probe = scpi_probe,
- .remove_new = scpi_remove,
+ .remove = scpi_remove,
};
module_platform_driver(scpi_driver);
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 285fe7ad490d..71e2a9a89f6a 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(sdei_entry_point);
+ cpuhp_remove_state(sdei_hp_state);
err = sdei_unregister_shared();
if (err)
@@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void)
return true;
}
-void __init sdei_init(void)
+void __init acpi_sdei_init(void)
{
struct platform_device *pdev;
int ret;
- ret = platform_driver_register(&sdei_driver);
- if (ret || !sdei_present_acpi())
+ if (!sdei_present_acpi())
return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
@@ -1081,6 +1080,12 @@ void __init sdei_init(void)
}
}
+static int __init sdei_init(void)
+{
+ return platform_driver_register(&sdei_driver);
+}
+arch_initcall(sdei_init);
+
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 14fbcd11657c..fdcd3a07abcd 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -404,7 +404,7 @@ static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
+#undef ENTRY /* It's specific, uses local variable, don't use it (again). */
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..e3c2e38b746d 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,18 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT && REGMAP && FW_CS_DSP
+ default KUNIT_ALL_TESTS
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 419220fa42fd..f51047d8ea64 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -311,6 +311,11 @@ static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
static const struct cs_dsp_ops cs_dsp_halo_ops;
static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
+struct cs_dsp_alg_region_list_item {
+ struct list_head list;
+ struct cs_dsp_alg_region alg_region;
+};
+
struct cs_dsp_buf {
struct list_head list;
void *buf;
@@ -378,7 +383,7 @@ const char *cs_dsp_mem_region_name(unsigned int type)
return NULL;
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP");
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
@@ -519,7 +524,7 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
dsp->debugfs_root = root;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
/**
* cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
@@ -531,17 +536,17 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
debugfs_remove_recursive(dsp->debugfs_root);
dsp->debugfs_root = ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
#else
void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
const char *s)
@@ -749,7 +754,7 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, "FW_CS_DSP");
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, const void *buf, size_t len)
@@ -827,7 +832,7 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
return 1;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control
@@ -926,7 +931,7 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer
@@ -1609,8 +1614,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,20 +1630,14 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0) {
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
- goto out_fw;
- }
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0;
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -1679,7 +1678,7 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in
return rslt;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, "FW_CS_DSP");
static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
const struct cs_dsp_alg_region *alg_region)
@@ -1758,52 +1757,52 @@ static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
int type, unsigned int id)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
lockdep_assert_held(&dsp->pwr_lock);
- list_for_each_entry(alg_region, &dsp->alg_regions, list) {
- if (id == alg_region->alg && type == alg_region->type)
- return alg_region;
+ list_for_each_entry(item, &dsp->alg_regions, list) {
+ if (id == item->alg_region.alg && type == item->alg_region.type)
+ return &item->alg_region;
}
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, "FW_CS_DSP");
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
__be32 ver, __be32 base)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
- alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
- if (!alg_region)
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
return ERR_PTR(-ENOMEM);
- alg_region->type = type;
- alg_region->alg = be32_to_cpu(id);
- alg_region->ver = be32_to_cpu(ver);
- alg_region->base = be32_to_cpu(base);
+ item->alg_region.type = type;
+ item->alg_region.alg = be32_to_cpu(id);
+ item->alg_region.ver = be32_to_cpu(ver);
+ item->alg_region.base = be32_to_cpu(base);
- list_add_tail(&alg_region->list, &dsp->alg_regions);
+ list_add_tail(&item->list, &dsp->alg_regions);
if (dsp->wmfw_ver > 0)
- cs_dsp_ctl_fixup_base(dsp, alg_region);
+ cs_dsp_ctl_fixup_base(dsp, &item->alg_region);
- return alg_region;
+ return &item->alg_region;
}
static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
while (!list_empty(&dsp->alg_regions)) {
- alg_region = list_first_entry(&dsp->alg_regions,
- struct cs_dsp_alg_region,
- list);
- list_del(&alg_region->list);
- kfree(alg_region);
+ item = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region_list_item,
+ list);
+ list_del(&item->list);
+ kfree(item);
}
}
@@ -2326,8 +2325,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,18 +2338,14 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0)
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0;
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2404,7 +2399,7 @@ int cs_dsp_adsp1_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_up() - Load and start the named firmware
@@ -2496,7 +2491,7 @@ err_mutex:
mutex_unlock(&dsp->pwr_lock);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_down() - Halts the DSP
@@ -2528,7 +2523,7 @@ void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
{
@@ -2561,8 +2556,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
@@ -2680,7 +2675,7 @@ int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, "FW_CS_DSP");
static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
{
@@ -2770,7 +2765,7 @@ err_mutex:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, "FW_CS_DSP");
/**
* cs_dsp_power_down() - Powers-down the DSP
@@ -2804,7 +2799,7 @@ void cs_dsp_power_down(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Shutdown complete\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
{
@@ -2890,7 +2885,7 @@ err:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_run, "FW_CS_DSP");
/**
* cs_dsp_stop() - Stops the firmware
@@ -2929,7 +2924,7 @@ void cs_dsp_stop(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Execution stopped\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, "FW_CS_DSP");
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
@@ -2991,7 +2986,7 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, "FW_CS_DSP");
/**
* cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
@@ -3008,7 +3003,7 @@ int cs_dsp_halo_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, "FW_CS_DSP");
/**
* cs_dsp_remove() - Clean a cs_dsp before deletion
@@ -3028,7 +3023,7 @@ void cs_dsp_remove(struct cs_dsp *dsp)
cs_dsp_free_ctl_blk(ctl);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, "FW_CS_DSP");
/**
* cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
@@ -3065,7 +3060,7 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, "FW_CS_DSP");
/**
* cs_dsp_read_data_word() - Reads a word from DSP memory
@@ -3089,7 +3084,7 @@ int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_add
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, "FW_CS_DSP");
/**
* cs_dsp_write_data_word() - Writes a word to DSP memory
@@ -3115,7 +3110,7 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad
return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, "FW_CS_DSP");
/**
* cs_dsp_remove_padding() - Convert unpacked words to packed bytes
@@ -3139,7 +3134,7 @@ void cs_dsp_remove_padding(u32 *buf, int nwords)
*pack_out++ = (u8)(word >> 16);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, "FW_CS_DSP");
/**
* cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
@@ -3209,7 +3204,7 @@ void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
error:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
@@ -3269,7 +3264,7 @@ void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
exit_unlock:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
@@ -3289,7 +3284,7 @@ void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, "FW_CS_DSP");
static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
.validate_version = cs_dsp_validate_version,
@@ -3419,7 +3414,7 @@ int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, "FW_CS_DSP");
/**
* cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
@@ -3438,7 +3433,7 @@ int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, "FW_CS_DSP");
/**
* cs_dsp_chunk_read() - Parse data from a DSP memory chunk
@@ -3480,7 +3475,7 @@ int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
return result;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, "FW_CS_DSP");
struct cs_dsp_wseq_op {
@@ -3605,7 +3600,7 @@ int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, "FW_CS_DSP");
static struct cs_dsp_wseq_op *cs_dsp_wseq_find_op(u32 addr, u8 op_code,
struct list_head *wseq_ops)
@@ -3720,7 +3715,7 @@ op_new_free:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, "FW_CS_DSP");
/**
* cs_dsp_wseq_multi_write() - Add or update multiple entries in a write sequence
@@ -3752,7 +3747,7 @@ int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, "FW_CS_DSP");
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..3f8777ee4dc0
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, copy_len);
+ info = tmp;
+ }
+
+ cs_dsp_mock_bin_add_raw_block(builder, 0, 0, type, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+ KUNIT_ASSERT_LE(priv->test, fw_version, 0xffffff);
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
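+
+/*
+ * Example usage (editorial sketch, not part of the driver): assuming a
+ * struct cs_dsp_test fixture named "priv" from the test harness and a
+ * hypothetical payload buffer "payload", a bin file is typically built
+ * and wrapped for cs_dsp_power_up() like this:
+ *
+ *   struct cs_dsp_mock_bin_builder *bin;
+ *   struct firmware *fw;
+ *
+ *   bin = cs_dsp_mock_bin_init(priv, 1, 0x10000);
+ *   cs_dsp_mock_bin_add_patch(bin, 0xfafa, 0x100000, WMFW_ADSP2_YM, 0,
+ *                             payload, sizeof(payload));
+ *   fw = cs_dsp_mock_bin_get_firmware(bin);
+ */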
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..95946fac5563
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* XM */
+ 0x2000, /* YM */
+ 0x2000, /* ZM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
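+
+/*
+ * Editorial example: for unpacked XM word index n, the register address
+ * is xm_base + (2 * n) on ADSP2 and xm_base + (4 * n) on Halo Core,
+ * where xm_base comes from cs_dsp_mock_base_addr_for_mem().
+ */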
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
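+
+/*
+ * Editorial worked example: four 24-bit DSP words occupy 96 bits, which
+ * is exactly three 32-bit registers, hence the 3/4 ratio. So 10 DSP
+ * words -> (10 * 3) / 4 = 7 whole packed registers.
+ */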
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ KUNIT_ASSERT_EQ(priv->test, 0,
+ regmap_raw_read(priv->dsp->regmap,
+ xm +
+ (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32)));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ KUNIT_ASSERT_EQ(priv->test, 0,
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs));
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+ /* Assume we're starting with bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+ header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
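+
+/*
+ * Example usage (editorial sketch): a header for one dummy algorithm
+ * can be created and committed to the mock regmap like this:
+ *
+ *   static const struct cs_dsp_mock_alg_def algs[] = {
+ *     { .id = 0xfafa, .ver = 0x100000, .xm_size_words = 164 },
+ *   };
+ *   struct cs_dsp_mock_xm_header *hdr;
+ *
+ *   hdr = cs_dsp_create_mock_xm_header(priv, algs, ARRAY_SIZE(algs));
+ *   KUNIT_ASSERT_EQ(priv->test, 0,
+ *                   cs_dsp_mock_xm_header_write_to_regmap(hdr));
+ */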
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0xffe30, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0x1130, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+ { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true the DSP system regs will be dropped from
+ * the cache before checking for dirty.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Return: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
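+
+/*
+ * Example usage (editorial sketch): after the code under test has run,
+ * drop every register it was expected to write (reg_addr and
+ * payload_len_bytes are hypothetical names here), then check that
+ * nothing else was written:
+ *
+ *   cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_len_bytes);
+ *   KUNIT_EXPECT_FALSE(priv->test,
+ *                      cs_dsp_mock_regmap_is_dirty(priv, true));
+ */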
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success the priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5e1d5a810afe
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for wmfw file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, copy_len);
+ info = tmp;
+ }
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ KUNIT_ASSERT_LE(builder->test_priv->test, alg_id, 0xffffff);
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+ bytes_needed = sizeof(__le32) * 2; /* type, offset and size */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32) * 2; /* flags, type and length */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
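+
+/*
+ * Example usage (editorial sketch): a coefficient control description
+ * is bracketed by the start/end calls; "wmfw" is a hypothetical builder
+ * created by cs_dsp_mock_wmfw_init():
+ *
+ *   struct cs_dsp_mock_coeff_def def = {
+ *     .shortname = "CTL1",
+ *     .mem_type = WMFW_ADSP2_YM,
+ *     .length_bytes = 4,
+ *   };
+ *
+ *   cs_dsp_mock_wmfw_start_alg_info_block(wmfw, 0xfafa, "ALG", NULL);
+ *   cs_dsp_mock_wmfw_add_coeff_desc(wmfw, &def);
+ *   cs_dsp_mock_wmfw_end_alg_info_block(wmfw);
+ */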
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+
+ /* If format version isn't given use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
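+
+/*
+ * Typical usage from a KUnit test (an illustrative sketch; it assumes the
+ * cs_dsp_mock_wmfw_get_firmware() helper declared alongside this builder
+ * in cs_dsp_test_utils.h):
+ *
+ *	builder = cs_dsp_mock_wmfw_init(priv, -1);
+ *	wmfw = cs_dsp_mock_wmfw_get_firmware(builder);
+ *
+ * Passing a negative format_version selects the default version for the
+ * target core.
+ */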
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..163b7faecff4
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create an XM header with an algorithm list in the cached regmap.
+ * 3) Create a dummy wmfw file to satisfy cs_dsp.
+ * 4) Create bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Read back the cached values of the registers that should have been
+ *    written and check they have the correct values.
+ * 7) All the registers that are expected to have been written are dropped from
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP; pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address is different from unpacked registers.
+ * Packed registers can only be accessed as a group of
+ * multiple registers, and can therefore only read/write a
+ * group of multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
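+
+/*
+ * Worked example of the addressing schemes (illustrative values only;
+ * the tests obtain the real base addresses and strides from
+ * cs_dsp_mock_base_addr_for_mem() and
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word()):
+ *
+ * If a memory region starts at register address B and the register
+ * increment per unpacked DSP word is I, unpacked DSP word N of that
+ * region is at register address B + (N * I). In a packed Halo Core
+ * region, every 4 DSP words occupy 3 consecutive 32-bit registers, so
+ * DSP word N starts in packed register (N * 3) / 4 relative to the
+ * packed base.
+ */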
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
+struct bin_test_param {
+	const char *name;		/* name reported for the test case */
+	int mem_type;			/* WMFW_* memory region to patch */
+	unsigned int offset_words;	/* patch offset from the alg base, in DSP words */
+	int alg_idx;			/* index into bin_test_mock_algs[] */
+};
+
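+/*
+ * Mock algorithms used to build the XM header. Sizes are in DSP words.
+ * Tests pick an entry through the alg_idx test parameter.
+ */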
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
+
+/*
+ * Convert a number of DSP words to a number of packed registers, rounded
+ * down to the nearest whole register.
+ * There are 3 registers for every 4 DSP words (four 24-bit words pack
+ * into three 32-bit registers).
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
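+
+/*
+ * For example, _num_words_to_num_packed_regs(10) == (10 * 3) / 4 == 7.
+ * Only word offsets that are a multiple of 4 line up with the start of
+ * a packed register group, which is why the tests below round patch
+ * positions with round_up(x, 4).
+ */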
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of consecutive
+ * words, but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+	/* Add one payload per word, in shuffled address order */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+	/* Add one payload for each entry in word_offsets[] */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+	/* Add words to XM, YM and, if the core has it, ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
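+	/*
+	 * The patch offset is relative to the algorithm's base within the
+	 * packed region, so both positions are converted to packed register
+	 * addresses (4 bytes apart on Halo Core) before taking the
+	 * difference.
+	 */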
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+	/* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
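+	/*
+	 * The extra + 4 skips to the next packed boundary so that the
+	 * leading unpacked word(s) can be patched immediately before the
+	 * packed block.
+	 */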
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
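+ /*
+ * Each iteration above advances the patch position by 4 DSP words,
+ * which is 3 packed registers and exactly one payload, so the eight
+ * payloads land contiguously and are read back below in one raw read.
+ */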
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /* For each algorithm patch one DSP word to a value from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values at the expected locations in readback[] so that the
+ * content of readback[] should match packed_payload[].
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+ * For each algorithm index in alg_order[] patch one DSP word in
+ * that algorithm to a value from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values at the expected locations in readback[] so that the
+ * content of readback[] should match packed_payload[].
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that are on a packed boundary
+ * are written as a packed block. Offsets that are not on a packed boundary
+ * are written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+ * If the offset is on a packed boundary use a packed payload,
+ * else use a single unpacked word.
+ */
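+ /*
+ * Note the boundary test below is on alg_base_words + offset_words[i],
+ * not on the offset alone: e.g. if (hypothetically) the base were 0,
+ * offsets 8, 36 and 48 would take the packed path while 7, 10 and 11
+ * would be written as single unpacked words.
+ */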
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+ * Read back each packed block that should have been written and
+ * check that its contents match the corresponding entry in
+ * payload->packed[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+ * Read back each unpacked register that should have been written
+ * and check that its value matches the corresponding entry in
+ * payload->unpacked[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Bin file with a name block and multiple info blocks */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
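+ /* Repeatedly append until strlcat() reports the 512-byte buffer is full */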
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version(xm_hdr));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
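+ /*
+ * (Passing -1 as the format version is assumed here to make the mock
+ * builder pick its default wmfw version; the helper is defined in the
+ * shared mock utilities.)
+ */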
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/* Parameterize on choice of packed XM or YM with a range of word offsets */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
+
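+/*
+ * KUNIT_ARRAY_PARAM(name, array, desc_fn) generates a name##_gen_params
+ * generator that yields one parameter per array entry and uses desc_fn
+ * to build the case description. The generators are referenced by the
+ * KUNIT_CASE_PARAM() entries in the test case lists below.
+ */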
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory tests */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..a7ec956d2724
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
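+/*
+ * These wrappers adapt put_device() and cs_dsp_remove() to the
+ * void (*)(void *) signature used by KUnit deferred actions, so that
+ * cleanup runs automatically when each test case exits.
+ */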
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+
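+ /*
+ * A valid bin starts with the 4-byte magic "WMDR". Corrupt it in
+ * several ways, including "WMFW" (the magic of a wmfw file), and
+ * check that each corrupted file is rejected.
+ */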
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
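+ /* Append one block header with a zero-length payload after the file header */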
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
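+ /*
+ * Replace the block length with garbage values that run past the end
+ * of the file; the larger values presumably also guard against
+ * arithmetic overflow in the parser. Every load attempt must fail.
+ */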
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate-limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use and extract it as a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version(local->xm_header));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
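+/*
+ * KUNIT_ARRAY_PARAM() generates a bin_test_block_types_gen_params()
+ * helper which KUNIT_CASE_PARAM() uses below to run each test case
+ * once per entry in bin_test_block_types_cases[].
+ */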
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
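+
+/*
+ * A minimal sketch of running these suites with the KUnit wrapper,
+ * assuming a .kunitconfig that enables the cs_dsp test Kconfig symbols
+ * (symbol names are not shown in this patch):
+ *
+ *   ./tools/testing/kunit/kunit.py run 'cs_dsp_bin_err*'
+ */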
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
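+/*
+ * KUNIT_DEFINE_ACTION_WRAPPER() generates kunit_action_t-compatible
+ * wrappers around put_device() and cs_dsp_remove() so they can be
+ * registered as deferred cleanup actions with
+ * kunit_add_action_or_reset().
+ */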
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
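+/*
+ * Template coefficient definition. Tests copy this into a local
+ * cs_dsp_mock_coeff_def and override individual fields before adding
+ * the control to the mock wmfw.
+ */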
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
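+/*
+ * All entries left NULL: used by the *_no_callbacks test cases to
+ * check that cs_dsp treats every client callback as optional.
+ */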
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+	/* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+	/* Simulate an interrupt; the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+	/* Simulate an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+	/* Simulate an interrupt; the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+	/* Simulate an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least 1 algorithm,
+	 * so create a dummy one and pre-populate the XM registers with
+	 * its content.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
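+	/*
+	 * cs_dsp parses the algorithm list from XM memory during
+	 * cs_dsp_power_up(), so the header is written to the mock regmap
+	 * here, before any test downloads a firmware.
+	 */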
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..ebca3a4ab0f1
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
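+/*
+ * Build a wmfw containing just the dummy XM header; callers add their
+ * own alg info blocks and coefficients to the returned builder.
+ */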
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Memory allocated for the control cache must be large enough for the
+ * whole control. This creates multiple controls of different sizes, so
+ * it only works with wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
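+	/*
+	 * ksize() returns the usable size of the allocation, which may be
+	 * larger than the requested size, hence >= rather than ==.
+	 */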
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled so should be
+ * readable through the control.
+ */
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmwares with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
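+	/*
+	 * The static_asserts above keep ctl[], reg_vals[] and builder[]
+	 * the same size (and ensure there are enough firmware names), so
+	 * the loops below can share one index.
+	 */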
+
+	/* Create an identical control in each firmware */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmwares with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms.
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
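+	/*
+	 * Any bytes still dirty after the drops would be register writes
+	 * that cs_dsp was not expected to make.
+	 */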
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
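+/*
+ * Note on the pattern used throughout (a description of the mock
+ * harness, not of cs_dsp itself): after dropping every register the
+ * test legitimately wrote from the regmap cache,
+ * cs_dsp_mock_regmap_is_dirty() returning false proves that any
+ * subsequent control read was satisfied from the control cache without
+ * touching the (mock) hardware.
+ */
+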
+/*
+ * Read from a cached control after the DSP has been powered up and
+ * then powered down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, and then the DSP has been powered down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data in the cache, not from the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+	 * Change the values in the registers backing the control, then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Control should read back the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+	/* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This is for backwards compatibility with old firmware versions.)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+	/* Control should read back the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+	/* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
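+/*
+ * Illustrative sketch only, an assumption about the behaviour the test
+ * above exercises rather than code from cs_dsp: with flags == 0 a
+ * control is treated as volatile while the firmware runs, so the read
+ * path could be modelled as:
+ *
+ *	static bool ctl_read_hits_hardware(const struct cs_dsp_coeff_ctl *ctl,
+ *					   bool fw_running)
+ *	{
+ *		// Legacy firmware: zero flags behave as volatile when running
+ *		if (!ctl->flags)
+ *			return fw_running;
+ *
+ *		return ctl->flags & WMFW_CTL_FLAG_VOLATILE;
+ *	}
+ */
+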
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/* Write new data to the control; it should be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
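+/*
+ * Illustrative sketch (hypothetical helper, not part of cs_dsp or of
+ * these tests) of how a caller might use the return value checked
+ * above: cs_dsp_coeff_lock_and_write_ctrl() returns 1 if the value
+ * changed, 0 if it was unchanged and a negative errno on failure, so
+ * redundant downstream work can be skipped:
+ *
+ *	static int apply_coeffs(struct cs_dsp_coeff_ctl *ctl,
+ *				const void *buf, size_t len)
+ *	{
+ *		int ret = cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, buf, len);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *		if (ret > 0)
+ *			coeffs_changed_notify();	// hypothetical
+ *		return 0;
+ *	}
+ */
+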
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+	 * If the control is write-only, the cache will have been zero-initialized,
+	 * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+	 * Write the same data to the control; cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+	 * If the control is write-only, the cache will have been zero-initialized,
+	 * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+	 * Write the same data to the control; cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * and then the DSP powered down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started, stopped, and then the DSP powered down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+	 * the first firmware, but it should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+	/* Drop the expected writes; the regmap cache should now be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+	 * the first firmware, but it should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
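+/*
+ * Sketch only (an assumption about intended usage, not library code):
+ * the ctl->enabled flag checked by the two tests above lets a caller
+ * distinguish a live write from a cache-only deferred write:
+ *
+ *	static bool write_reaches_hardware(const struct cs_dsp_coeff_ctl *ctl)
+ *	{
+ *		// A disabled control belongs to a firmware that is not
+ *		// currently loaded; writes land in its cache and are only
+ *		// synced out when that firmware next runs.
+ *		return ctl->enabled && ctl->dsp->running;
+ *	}
+ */
+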
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Start the firmware; the cached data should be written to the registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
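+/*
+ * For reference, the call sequence the sync tests exercise, roughly as
+ * a driver would issue it (firmware names are illustrative):
+ *
+ *	cs_dsp_power_up(dsp, wmfw, "fw", NULL, NULL, "misc");
+ *	cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, buf, len);	// cached only
+ *	cs_dsp_run(dsp);	// cache synced out to the control's registers
+ */
+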
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP and start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop firmware and zero the registers backing the control */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+	/* Start the firmware; the cached data should be written to the registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+	/* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Start the firmware; the cached data should be written to the registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers that will be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
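+ /* (pointer stays valid across power-down; cs_dsp frees controls only in cs_dsp_remove()) */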
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power-down DSP then power-up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
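+ /* kunit_release_action() runs the deferred _cs_dsp_stop_wrapper immediately and unregisters it */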
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the firmware again; the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run,
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
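+ /* ctl->set marks that the control holds a cached value waiting to be synced to the DSP */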
+ KUNIT_EXPECT_TRUE(test, ctl->set);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
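+/* Each KUNIT_ARRAY_PARAM() defines a <name>_gen_params() generator used by the case tables below */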
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
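+/* ZM exists only on ADSP2 cores, so the Halo (wmfw v3) case table omits these cases */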
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile write-only control of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..942ba1af5e7c
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1838 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
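+/* Template coeff descriptor; each test copies it and overrides the fields under test */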
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static char *cs_dsp_ctl_alloc_test_string(struct kunit *test, char c, size_t len)
+{
+ char *str;
+
+ str = kunit_kmalloc(test, len + 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ memset(str, c, len);
+ str[len] = '\0';
+
+ return str;
+}
+
+/* Algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a maximum-length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Short name from coeff descriptor should be used as control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a maximum-length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name and description from coeff descriptor are variable-length
+ * fields, so they affect the position of subsequent fields.
+ * Test with a maximum-length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
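+/* Names of every length from 1 to 15 characters, covering all residues of the 4-byte padding */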
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to look up various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmware and
+ * test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() would mark the test case skipped for all parameters, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the (alg id, memory type) tuple is parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() would mark the test case skipped for all parameters, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This creates multiple algorithms with one control each, which will
+ * work on both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This tests >=V2 firmware that can have multiple named controls in
+ * the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames the full length of both strings is
+ * considered, not only the characters of the shorter string, so
+ * that "LEFT" is not the same as "LEFT2".
+ * This is specifically to test for the bug that was fixed by commit:
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check")
+ */
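+/* e.g. "LEFT" and "LEFT2" must be treated as distinct controls */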
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
+
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+/* Note: some control types mandate specific flag settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOSTBUFFER must be system + volatile + readable or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
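+/* Cleanup actions, deferred via kunit_add_action_or_reset() */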
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
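+/* Template control definition; tests copy this and override fields as needed */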
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Write new data to the control, it should be written to the registers
+ * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate
+ * that the control content changed.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control, it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /*
+ * Change the register content and read the control, it should return
+ * the new register content
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
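+ /* Check every valid seek offset; readback should match the data from that word on */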
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
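+ /* For each seek offset, only data from that word onwards should change */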
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so writes and reads go via the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so writes and reads go via the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
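+ /* A seek of length_bytes / sizeof(u32) words is the first offset beyond the end of the control */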
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
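+ /* Attempt to write one byte more than the control length */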
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag merely indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
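+ /* cs_dsp_coeff_lock_and_write_ctrl() returns 1 when the value changed */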
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
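+ /* Take a device reference; dropped by the _put_device_wrapper cleanup action */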
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use, and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Read back the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 5) All the registers that are expected to have been written are dropped from
+ * the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty there have been unexpected writes.
+ */
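+
+/*
+ * Steps 5 and 6 correspond to the cs_dsp_mock_*_drop_*() helpers and the
+ * cs_dsp_mock_regmap_is_dirty() check at the end of each test case.
+ */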
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/*
+ * wmfw that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+ /* XM header payload was added to wmfw by test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload of length param->num_blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
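+ /* Note: the do-while always adds at least one extra block, even if already aligned */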
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write several smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several smallest possible payloads of the given memory type
+ * in reverse address order
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads of length param->num_blocks.
+ * The payloads are not in address order and collectively do not cover
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
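+ /* A fixed shuffle keeps the "random" payload placement deterministic between runs */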
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each at "random" locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < num_payloads; ++i) {
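+ /* Convert the payload's block offset into a regmap register address */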
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
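+ *
+ * As an illustrative example (the block sizes here are made up; the
+ * real ones come from cs_dsp_mock_reg_block_length_*()): if one block
+ * of a region were 4 registers holding 4 DSP words, the entry
+ * { WMFW_ADSP2_YM, 32, 74 } would describe a payload of 296 DSP words
+ * written starting at DSP word 128, i.e. register 128 of the region.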
+ */
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
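+ *
+ * For a rough packed illustration (assuming a Halo-style layout in
+ * which one packed block is 3 registers holding 4 packed 24-bit DSP
+ * words): the entry { WMFW_HALO_PM_PACKED, 11, 60 } would then
+ * describe 240 DSP words starting at DSP word 44, i.e. register 33
+ * of the region.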
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+		/* Round up to a multiple of the packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
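+ *
+ * For example (numbers illustrative): a packed payload of 48 DSP
+ * words starting at word 0 leaves the trailing word at unpacked DSP
+ * word 48, which the arithmetic below converts into a register
+ * offset from the unpacked region base.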
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+		/* Round up to a multiple of the packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+		/* Round up to a multiple of the packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+		/* Round up to a multiple of the packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+		/* Round up to a multiple of the packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to multiple of packed block length.
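+ *
+ * E.g. assuming 4 DSP words per packed block: a start offset of 25
+ * words becomes roundup(25 + 1, 4) = 28, leaving DSP word 27 free
+ * for the single unpacked word.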
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+	readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+	 * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+	/* Add a bigger info block then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
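+	/* Fill the 512-byte buffer with repeated filler text until it is full */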
+ for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512;)
+ ;
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least one algorithm, so
+	 * create a dummy one that tests can use and export it as a data
+	 * payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
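+ /* Cover 1..6 and 12..16 data blocks for each unpacked ADSP2 memory region */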
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
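+ /* Action wrappers so that put_device() and cs_dsp_remove() can be deferred to test cleanup */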
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
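+ /* Also add an unknown block with a zero-length payload */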
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
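+ /* "WMDR" is the magic of a coefficient bin file, so it is a realistic wrong magic */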
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
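+ /* Progressively truncate the file; every length short of a full header must be rejected */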
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(alg_data->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set description string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
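+ /* ncoeff follows the id word and the one-word "abc" name and "de" description fields */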
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
+ coeff = (void *)alg_data->data;
+
+ /* Write a string to the coeff name that overflows the array */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow coeff->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(coeff->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+ /*
+ * V1 controls do not have names, but cs_dsp parses the name
+ * field. It should load the file successfully and create the
+ * control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of coeff name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
+ shortlen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
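+ /*
+ * With the 11-character "Dummy Coeff" shortname this rounds to 12 bytes
+ * (three __le32 words), placing fullname at coeff[5].
+ */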
+
+ /* Fullname increases in blocks of __le32 so increase past the current __le32 */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+ /* Description increases in blocks of __le32 so increase past the current __le32 */
+ description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+ description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
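+ /* Arbitrary values that do not match any block type known to cs_dsp */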
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
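For readers tracing the _gen_params references in the case tables above: KUNIT_ARRAY_PARAM() emits the generator functions that KUNIT_CASE_PARAM() consumes. A minimal sketch of that wiring, assuming the description helper follows the usual snprintf pattern (the helper body here is illustrative, not the driver's actual code):

    #include <kunit/test.h>

    struct cs_dsp_wmfw_test_param {
            int block_type;
    };

    /* Stringify one parameter entry for the test report. */
    static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
                                                 char *desc)
    {
            snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x",
                     param->block_type);
    }

    /*
     * KUNIT_ARRAY_PARAM(name, array, desc_fn) defines a generator named
     * name##_gen_params -- e.g. wmfw_invalid_block_types_gen_params --
     * which KUNIT_CASE_PARAM() calls to walk the array entry by entry.
     */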
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8d91997036e4..9cc963b2edc0 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -431,9 +431,9 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
}
}
-static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_event_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
struct dmi_read_state state = {
@@ -445,10 +445,7 @@ static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}
-static struct bin_attribute dmi_sel_raw_attr = {
- .attr = {.name = "raw_event_log", .mode = 0400},
- .read = dmi_sel_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw_event_log, 0);
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
@@ -464,7 +461,7 @@ static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
if (ret)
goto out_free;
- ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+ ret = sysfs_create_bin_file(entry->child, &bin_attr_raw_event_log);
if (ret)
goto out_del;
@@ -537,10 +534,10 @@ static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
&state->pos, dh, entry_length);
}
-static ssize_t dmi_entry_raw_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_read_state state = {
@@ -552,10 +549,7 @@ static ssize_t dmi_entry_raw_read(struct file *filp,
return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
}
-static const struct bin_attribute dmi_entry_raw_attr = {
- .attr = {.name = "raw", .mode = 0400},
- .read = dmi_entry_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw, 0);
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
@@ -630,7 +624,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
goto out_err;
/* Create the raw binary file to access the entry */
- *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+ *ret = sysfs_create_bin_file(&entry->kobj, &bin_attr_raw);
if (*ret)
goto out_err;
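The renames above (dmi_sel_raw_read() becoming raw_event_log_read(), dmi_entry_raw_read() becoming raw_read()) follow from the helper macro's token pasting: BIN_ATTR_ADMIN_RO(_name, _size) binds bin_attr_##_name to a reader called _name##_read. Rough shape of the macro, condensed from include/linux/sysfs.h:

    #define BIN_ATTR_ADMIN_RO(_name, _size)                                \
    struct bin_attribute bin_attr_##_name = {                              \
            .attr = { .name = __stringify(_name), .mode = 0400 },          \
            .read = _name##_read,                                          \
            .size = _size,                                                 \
    }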
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fde0656481cc..70d39adf50dc 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -761,8 +761,8 @@ static void __init dmi_scan_machine(void)
pr_info("DMI not present or invalid.\n");
}
-static BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
-static BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
static int __init dmi_init(void)
{
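__ro_after_init suits these two attributes because dmi_init() has to fill them in once from the scanned tables, after which they are never written again; plain const would forbid even that one write. A hypothetical sketch of the init-time assignment (field values are illustrative):

    static int __init dmi_init(void)
    {
            /* one-time writes; the structures become read-only afterwards */
            bin_attr_DMI.private = dmi_table;
            bin_attr_DMI.size = dmi_len;
            bin_attr_smbios_entry_point.private = smbios_entry_point;
            bin_attr_smbios_entry_point.size = smbios_entry_point_size;
            /* ... sysfs registration ... */
            return 0;
    }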
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 72f2537d90ca..29e0729299f5 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -76,20 +76,14 @@ config EFI_ZBOOT
bool "Enable the generic EFI decompressor"
depends on EFI_GENERIC_STUB && !ARM
select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_LZ4
- select HAVE_KERNEL_LZMA
- select HAVE_KERNEL_LZO
- select HAVE_KERNEL_XZ
select HAVE_KERNEL_ZSTD
help
Create the bootable image as an EFI application that carries the
actual kernel image in compressed form, and decompresses it into
- memory before executing it via LoadImage/StartImage EFI boot service
- calls. For compatibility with non-EFI loaders, the payload can be
- decompressed and executed by the loader as well, provided that the
- loader implements the decompression algorithm and that non-EFI boot
- is supported by the encapsulated image. (The compression algorithm
- used is described in the zboot image header)
+ memory before executing it. For compatibility with non-EFI loaders,
+ the payload can be decompressed and executed by the loader as well,
+ provided that the loader implements the decompression algorithm.
+ (The compression algorithm used is described in the zboot header)
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
@@ -269,6 +263,15 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config OVMF_DEBUG_LOG
+ bool "Expose OVMF firmware debug log via sysfs"
+ depends on EFI
+ help
+ Recent versions of the Open Virtual Machine Firmware
+ (edk2-stable202508 and newer) can write their debug log to a memory
+ buffer. This driver exposes the log content via sysfs
+ (/sys/firmware/efi/ovmf_debug_log).
+
config UNACCEPTED_MEMORY
bool
depends on EFI_STUB
@@ -287,6 +290,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT || (EFI_STUB && X86)
+ help
+ The SBAT section provides a way to improve SecureBoot revocations of
+ UEFI binaries by introducing a generation-based mechanism. With SBAT,
+ older UEFI binaries can be prevented from booting by bumping the
+ minimal required generation for the specific component in the
+ bootloader.
+
+ Note: SBAT information is distribution specific, i.e. the owner of the
+ signing SecureBoot certificate must define the SBAT policy. Linux
+ kernel upstream does not define SBAT components and their generations.
+
+ See https://github.com/rhboot/shim/blob/main/SBAT.md for additional
+ details.
+
+ Specify a file with SBAT data, which will be embedded as a '.sbat'
+ section in the kernel.
+
+ If unsure, leave blank.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index a2d0009560d0..8efbcf699e4f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index fa9c1c3bf168..f0a63d09d3c4 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
index 438ed9eff6d0..3949d7b5e808 100644
--- a/drivers/firmware/efi/cper-x86.c
+++ b/drivers/firmware/efi/cper-x86.c
@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
- int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index b69e68ef3f02..928409199a1a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -24,7 +24,7 @@
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
-#include "cper_cxl.h"
+#include <cxl/event.h>
/*
* CPER record ID need to be unique even after reboot, because record
@@ -624,11 +624,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
- struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: CXL Protocol Error\n", newpfx);
if (gdata->error_data_length >= sizeof(*prot_err))
- cper_print_prot_err(newpfx, prot_err);
+ cxl_cper_print_prot_err(newpfx, prot_err);
else
goto err_section_too_small;
} else {
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index a55771b99a97..8a7667faf953 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -8,26 +8,7 @@
*/
#include <linux/cper.h>
-#include "cper_cxl.h"
-
-#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
-#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
-#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
-#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
-#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
-#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
-#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
-
-/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
-struct cxl_ras_capability_regs {
- u32 uncor_status;
- u32 uncor_mask;
- u32 uncor_severity;
- u32 cor_status;
- u32 cor_mask;
- u32 cap_control;
- u32 header_log[16];
-};
+#include <cxl/event.h>
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
@@ -40,22 +21,8 @@ static const char * const prot_err_agent_type_strs[] = {
"CXL Upstream Switch Port",
};
-/*
- * The layout of the enumeration and the values matches CXL Agent Type
- * field in the UEFI 2.10 Section N.2.13,
- */
-enum {
- RCD, /* Restricted CXL Device */
- RCH_DP, /* Restricted CXL Host Downstream Port */
- DEVICE, /* CXL Device */
- LD, /* CXL Logical Device */
- FMLD, /* CXL Fabric Manager managed Logical Device */
- RP, /* CXL Root Port */
- DSP, /* CXL Downstream Switch Port */
- USP, /* CXL Upstream Switch Port */
-};
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err)
{
if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
deleted file mode 100644
index 86bfcf7909ec..000000000000
--- a/drivers/firmware/efi/cper_cxl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UEFI Common Platform Error Record (CPER) support for CXL Section.
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
- */
-
-#ifndef LINUX_CPER_CXL_H
-#define LINUX_CPER_CXL_H
-
-/* CXL Protocol Error Section */
-#define CPER_SEC_CXL_PROT_ERR \
- GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
- 0x4B, 0x77, 0x10, 0x48)
-
-#pragma pack(1)
-
-/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
-struct cper_sec_prot_err {
- u64 valid_bits;
- u8 agent_type;
- u8 reserved[7];
-
- /*
- * Except for RCH Downstream Port, all the remaining CXL Agent
- * types are uniquely identified by the PCIe compatible SBDF number.
- */
- union {
- u64 rcrb_base_addr;
- struct {
- u8 function;
- u8 device;
- u8 bus;
- u16 segment;
- u8 reserved_1[3];
- };
- } agent_addr;
-
- struct {
- u16 vendor_id;
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_id;
- u8 class_code[2];
- u16 slot;
- u8 reserved_1[4];
- } device_id;
-
- struct {
- u32 lower_dw;
- u32 upper_dw;
- } dev_serial_num;
-
- u8 capability[60];
- u16 dvsec_len;
- u16 err_len;
- u8 reserved_2[4];
-};
-
-#pragma pack()
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
-
-#endif //__CPER_CXL_
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 937be269fee8..13ea141c0def 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
return 0;
}
-static int __init match_pci_dev(struct device *dev, void *data)
+static int __init match_pci_dev(struct device *dev, const void *data)
{
- unsigned int devfn = *(unsigned int *)data;
+ unsigned int devfn = *(const unsigned int *)data;
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kexec_handover.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
pr_info("Processing EFI memory map:\n");
/*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
+ * Discard memblocks discovered so far except for KHO scratch
+ * regions. Most memblocks at this point originate from memory nodes
+ * in the DT, and UEFI uses its own memory map instead. However, if
+ * KHO is enabled, scratch regions, which are known good memory,
+ * must be preserved.
*/
memblock_dump_all();
- memblock_remove(0, PHYS_ADDR_MAX);
+
+ if (is_kho_boot()) {
+ struct memblock_region *r;
+
+ /* Remove all non-KHO regions */
+ for_each_mem_region(r) {
+ if (!memblock_is_kho_scratch(r)) {
+ memblock_remove(r->base, r->size);
+ r--;
+ }
+ }
+ } else {
+ /*
+ * KHO is disabled. Discard memblocks discovered so far:
+ * if there are any at this point, they originate from memory
+ * nodes in the DT, and UEFI uses its own memory map instead.
+ */
+ memblock_remove(0, PHYS_ADDR_MAX);
+ }
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 552c78f5f059..a253b6144945 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -6,7 +6,7 @@
#include <linux/slab.h>
#include <linux/ucs2_string.h>
-MODULE_IMPORT_NS(EFIVAR);
+MODULE_IMPORT_NS("EFIVAR");
#define DUMP_NAME_LEN 66
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 70490bf2697b..1ce428e2ac8a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -45,6 +45,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .ovmf_debug_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
@@ -148,9 +149,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_X86))
- str = efi_systab_show_arch(str);
-
return str - buf;
}
@@ -273,6 +271,7 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name = NULL;
efi_status_t status;
efi_guid_t guid;
+ int ret = 0;
if (!efivar_ssdt[0])
return 0;
@@ -294,8 +293,8 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name_tmp =
krealloc(name, name_size, GFP_KERNEL);
if (!name_tmp) {
- kfree(name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
name = name_tmp;
continue;
@@ -309,26 +308,38 @@ static __init int efivar_ssdt_load(void)
pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
+ ret = -EIO;
+ goto out;
+ }
data = kmalloc(data_size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
status = efi.get_variable(name, &guid, NULL, &data_size, data);
if (status == EFI_SUCCESS) {
- acpi_status ret = acpi_load_table(data, NULL);
- if (ret)
- pr_err("failed to load table: %u\n", ret);
- else
+ acpi_status acpi_ret = acpi_load_table(data, NULL);
+ if (ACPI_FAILURE(acpi_ret)) {
+ pr_err("efivar_ssdt: failed to load table: %u\n",
+ acpi_ret);
+ } else {
+ /*
+ * The @data buffer is now in use by the ACPI engine,
+ * do not free it!
+ */
continue;
+ }
} else {
- pr_err("failed to get var data: 0x%lx\n", status);
+ pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
}
kfree(data);
}
- return 0;
+out:
+ kfree(name);
+ return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
@@ -433,7 +444,9 @@ static int __init efisubsys_init(void)
error = generic_ops_register();
if (error)
goto err_put;
- efivar_ssdt_load();
+ error = efivar_ssdt_load();
+ if (error)
+ pr_err("efi: failed to load SSDT, error %d.\n", error);
platform_device_register_simple("efivars", 0, NULL, 0);
}
@@ -461,6 +474,10 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
+ if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
+ efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
+ ovmf_log_probe(efi.ovmf_debug_log);
+
return 0;
err_remove_group:
@@ -546,6 +563,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
__weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
/*
* Calculate the highest address of an efi memory descriptor.
@@ -604,6 +622,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_OVMF_DEBUG_LOG
+ {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" },
+#endif
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
@@ -922,13 +943,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
- EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
+ EFI_MEMORY_RUNTIME))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
attr & EFI_MEMORY_SP ? "SP" : "",
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 4f9fb086eab7..0a7c764dcc61 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -47,7 +47,7 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
if (ret || !data)
return NOTIFY_DONE;
- wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ wdata = kmalloc_array(MAX_DATA_LEN, sizeof(efi_char16_t), GFP_KERNEL);
if (!wdata)
return NOTIFY_DONE;
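The kmalloc_array() conversion is more than style: it fails cleanly if the element count times the element size overflows, instead of silently allocating a short buffer. Roughly, simplified from the slab API:

    static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
    {
            size_t bytes;

            if (unlikely(check_mul_overflow(n, size, &bytes)))
                    return NULL;    /* refuse on overflow, never truncate */
            return kmalloc(bytes, flags);
    }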
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index f5be8e22305b..b49a09d7e665 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, "TEST_FIRMWARE");
bool efi_embedded_fw_checked;
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, "TEST_FIRMWARE");
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 7a81c0ce4780..4bb7b0584bc9 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -75,8 +75,6 @@ static LIST_HEAD(entry_list);
struct esre_attribute {
struct attribute attr;
ssize_t (*show)(struct esre_entry *entry, char *buf);
- ssize_t (*store)(struct esre_entry *entry,
- const char *buf, size_t count);
};
static struct esre_entry *to_entry(struct kobject *kobj)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ed4e8ddbe76a..94b05e4451dd 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
@@ -22,16 +22,16 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_KSTACK_ERASE) \
-fno-unwind-tables -fno-asynchronous-unwind-tables
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base) \
- $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_LOONGARCH) += -fpie
+ $(DISABLE_KSTACK_ERASE)
+cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@@ -62,6 +62,8 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
@@ -89,12 +91,17 @@ lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+zboot-obj-y := zboot-decompress-gzip.o
+CFLAGS_zboot-decompress-gzip.o += -I$(srctree)/lib/zlib_inflate
+zboot-obj-$(CONFIG_KERNEL_ZSTD) := zboot-decompress-zstd.o lib-xxhash.o
+CFLAGS_zboot-decompress-zstd.o += -I$(srctree)/lib/zstd
+
+zboot-obj-$(CONFIG_RISCV) += lib-clz_ctz.o lib-ashldi3.o
lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
-extra-y := $(lib-y)
+targets := $(lib-y)
lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
# Even when -mbranch-protection=none is set, Clang will generate a
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 65ffd0b760b2..832deee36e48 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
$(call if_changed,copy_and_pad)
-comp-type-$(CONFIG_KERNEL_GZIP) := gzip
-comp-type-$(CONFIG_KERNEL_LZ4) := lz4
-comp-type-$(CONFIG_KERNEL_LZMA) := lzma
-comp-type-$(CONFIG_KERNEL_LZO) := lzo
-comp-type-$(CONFIG_KERNEL_XZ) := xzkern
-comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
-
# in GZIP, the appended le32 carrying the uncompressed size is part of the
# format, but in other cases, we just append it at the end for convenience,
# causing the original tools to complain when checking image integrity.
-# So disregard it when calculating the payload size in the zimage header.
-zboot-method-y := $(comp-type-y)_with_size
-zboot-size-len-y := 4
+comp-type-y := gzip
+zboot-method-y := gzip
+zboot-size-len-y := 0
-zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
-zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd
+zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size
+zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4
$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))
@@ -42,7 +36,7 @@ aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
-DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT
AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
- -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_EFI_PATH="\"$(abspath $(obj)/vmlinuz.efi.elf)\"" \
-DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
-DCOMP_TYPE="\"$(comp-type-y)\"" \
$(aflags-zboot-header-y)
@@ -50,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index de659f6a815f..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len;
+ char *buf __free(efi_pool) = NULL;
efi_status_t status;
- char *str, *buf;
+ size_t len;
+ char *str;
if (!cmdline)
return EFI_SUCCESS;
@@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_parse_option_graphics(val + strlen("efifb:"));
}
}
- efi_bs_call(free_pool, buf);
return EFI_SUCCESS;
}
@@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
u64, const union efistub_event *);
struct { u32 hash_log_extend_event; } mixed_mode;
} method;
- struct efistub_measured_event *evt;
+ struct efistub_measured_event *evt __free(efi_pool) = NULL;
int size = struct_size(evt, tagged_event.tagged_event_data,
events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
@@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
if (status == EFI_SUCCESS)
return EFI_SUCCESS;
@@ -327,7 +326,7 @@ fail:
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
+char *efi_convert_cmdline(efi_loaded_image_t *image)
{
const efi_char16_t *options = efi_table_attr(image, load_options);
u32 options_size = efi_table_attr(image, load_options_size);
@@ -405,7 +404,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
options_bytes - 1, options);
- *cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
@@ -603,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
@@ -621,10 +620,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
status = efi_load_initrd_dev_path(&initrd, hard_limit);
if (status == EFI_SUCCESS) {
efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- if (initrd.size > 0 &&
- efi_measure_tagged_event(initrd.base, initrd.size,
- EFISTUB_EVT_INITRD) == EFI_SUCCESS)
- efi_info("Measured initrd data into PCR 9\n");
} else if (status == EFI_NOT_FOUND) {
status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
hard_limit);
@@ -637,6 +632,11 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
if (status != EFI_SUCCESS)
goto failed;
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
(void **)&tbl);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 958a680e0660..874f63b4a383 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,6 +10,7 @@
*/
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si)
static struct screen_info *setup_graphics(void)
{
- efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- struct screen_info *si = NULL;
+ struct screen_info *si, tmp = {};
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info();
- if (!si)
- return NULL;
- status = efi_setup_gop(si, &gop_proto, size);
- if (status != EFI_SUCCESS) {
- free_screen_info(si);
- return NULL;
- }
- }
+ if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ return NULL;
+
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+
+ *si = tmp;
return si;
}
@@ -112,45 +104,40 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
- int cmdline_size = 0;
+ char *cmdline __free(efi_pool) = NULL;
efi_status_t status;
- char *cmdline;
/*
* Get the command line from EFI, using the LOADED_IMAGE
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline = efi_convert_cmdline(image, &cmdline_size);
+ cmdline = efi_convert_cmdline(image);
if (!cmdline) {
efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
return EFI_OUT_OF_RESOURCES;
}
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
- IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
- cmdline_size == 0) {
- status = efi_parse_options(CONFIG_CMDLINE);
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ status = efi_parse_options(cmdline);
if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
- goto fail_free_cmdline;
+ efi_err("Failed to parse EFI load options\n");
+ return status;
}
}
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
- status = efi_parse_options(cmdline);
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline[0] == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
- goto fail_free_cmdline;
+ efi_err("Failed to parse built-in command line\n");
+ return status;
}
}
- *cmdline_ptr = cmdline;
+ *cmdline_ptr = no_free_ptr(cmdline);
return EFI_SUCCESS;
-
-fail_free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
- return status;
}
efi_status_t efi_stub_common(efi_handle_t handle,
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 685098f9626f..f5ba032863a9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -4,6 +4,7 @@
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/kern_levels.h>
@@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
+#define for_each_efi_handle(handle, array, num) \
+ for (int __i = 0; __i < (num) && \
+ ((handle = efi_get_handle_at((array), __i)) || true); \
+ __i++)
static inline
void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
-#define EFI_MMAP_NR_SLACK_SLOTS 8
+#define EFI_MMAP_NR_SLACK_SLOTS 32
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
@@ -314,7 +314,9 @@ union efi_boot_services {
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
- void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t **);
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
@@ -1053,10 +1055,11 @@ void efi_puts(const char *str);
__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T));
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
+char *efi_convert_cmdline(efi_loaded_image_t *image);
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl);
@@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+efi_status_t efi_setup_gop(struct screen_info *si);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
@@ -1232,4 +1234,7 @@ void process_unaccepted_memory(u64 start, u64 end);
void accept_memory(phys_addr_t start, unsigned long size);
void arch_accept_memory(phys_addr_t start, phys_addr_t end);
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size);
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen);
+
#endif
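The DEFINE_FREE(efi_pool, ...) line above is what powers the __free(efi_pool) annotations threaded through this series: the pool allocation is released automatically when the variable leaves scope, and no_free_ptr() disarms that on the success path. A minimal usage sketch (example() is hypothetical):

    static efi_status_t example(void **out)
    {
            void *buf __free(efi_pool) = NULL;
            efi_status_t status;

            status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, 64, &buf);
            if (status != EFI_SUCCESS)
                    return status;          /* buf still NULL, nothing to free */

            *out = no_free_ptr(buf);        /* hand ownership to the caller */
            return EFI_SUCCESS;             /* earlier returns free buf here */
    }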
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index d6a025df07dc..bd626d55dcbc 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -175,6 +175,12 @@ static efi_status_t efi_open_device_path(efi_file_protocol_t **volume,
return status;
}
+#ifndef CONFIG_CMDLINE
+#define CONFIG_CMDLINE
+#endif
+
+static const efi_char16_t builtin_cmdline[] = L"" CONFIG_CMDLINE;
+
/*
* Check the cmdline for LILO-style file= arguments.
*
@@ -189,6 +195,8 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_addr,
unsigned long *load_size)
{
+ const bool ignore_load_options = IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE);
const efi_char16_t *cmdline = efi_table_attr(image, load_options);
u32 cmdline_len = efi_table_attr(image, load_options_size);
unsigned long efi_chunk_size = ULONG_MAX;
@@ -197,6 +205,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long alloc_addr;
unsigned long alloc_size;
efi_status_t status;
+ bool twopass;
int offset;
if (!load_addr || !load_size)
@@ -209,6 +218,16 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_chunk_size = EFI_READ_CHUNK_SIZE;
alloc_addr = alloc_size = 0;
+
+ if (!ignore_load_options && cmdline_len > 0) {
+ twopass = IS_ENABLED(CONFIG_CMDLINE_BOOL) ||
+ IS_ENABLED(CONFIG_CMDLINE_EXTEND);
+ } else {
+do_builtin: cmdline = builtin_cmdline;
+ cmdline_len = ARRAY_SIZE(builtin_cmdline) - 1;
+ twopass = false;
+ }
+
do {
struct finfo fi;
unsigned long size;
@@ -290,6 +309,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_call_proto(volume, close);
} while (offset > 0);
+ if (twopass)
+ goto do_builtin;
+
*load_addr = alloc_addr;
*load_size = alloc_size;
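The builtin_cmdline definition leans on two C rules: an undefined CONFIG_CMDLINE is first defined to nothing, and adjacent string literals concatenate, with the L prefix widening the merged literal (16 bits per element here, since the stub builds with -fshort-wchar). Illustration with a hypothetical value:

    #define EXAMPLE_CMDLINE "console=ttyS0"
    static const efi_char16_t example_cmdline[] = L"" EXAMPLE_CMDLINE;
    /* ARRAY_SIZE(example_cmdline) - 1 == 13 UTF-16 code units (NUL excluded) */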
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index ea5da307d542..3785fb4986b4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option)
static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
unsigned long info_size;
-
u32 max_mode, cur_mode;
+ efi_status_t status;
int pf;
mode = efi_table_attr(gop, mode);
@@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cur_mode;
}
- status = efi_call_proto(gop, query_mode, cmdline.mode,
- &info_size, &info);
+ status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info);
if (status != EFI_SUCCESS) {
efi_err("Couldn't get mode information\n");
return cur_mode;
}
pf = info->pixel_format;
-
- efi_bs_call(free_pool, info);
-
if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
efi_err("Invalid PixelFormat\n");
return cur_mode;
@@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cmdline.mode;
}
+static u32 choose_mode(efi_graphics_output_protocol_t *gop,
+ bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *),
+ void *ctx)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
+
+ for (u32 m = 0; m < max_mode; m++) {
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
+ unsigned long info_size;
+ efi_status_t status;
+
+ status = efi_call_proto(gop, query_mode, m, &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (match(info, m, ctx))
+ return m;
+ }
+ return (unsigned long)ctx;
+}
+
static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
{
if (pixel_format == PIXEL_BIT_MASK) {
@@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
return 32;
}
-static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+static bool match_res(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
{
- efi_status_t status;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- mode = efi_table_attr(gop, mode);
+ return cmdline.res.width == info->horizontal_resolution &&
+ cmdline.res.height == info->vertical_resolution &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi));
+}
- cur_mode = efi_table_attr(mode, mode);
- info = efi_table_attr(mode, info);
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ if (match_res(efi_table_attr(mode, info), cur_mode, NULL))
return cur_mode;
- max_mode = efi_table_attr(mode, max_mode);
-
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return choose_mode(gop, match_res, (void *)cur_mode);
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+struct match {
+ u32 mode;
+ u32 area;
+ u8 depth;
+};
- efi_bs_call(free_pool, info);
+static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ u32 area = info->horizontal_resolution * info->vertical_resolution;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
+ u8 depth = pixel_bpp(pf, pi);
+ struct match *m = ctx;
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
- return m;
- }
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- efi_err("Couldn't find requested mode\n");
+ if (area > m->area || (area == m->area && depth > m->depth))
+ *m = (struct match){ mode, area, depth };
- return cur_mode;
+ return false;
}
static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode, best_mode, area;
- u8 depth;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h, a;
- u8 d;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ struct match match = {};
- info = efi_table_attr(mode, info);
-
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- best_mode = cur_mode;
- area = w * h;
- depth = pixel_bpp(pf, pi);
+ choose_mode(gop, match_auto, &match);
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return match.mode;
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ u32 cur_mode = (unsigned long)ctx;
+ int pf = info->pixel_format;
+ const char *dstr;
+ u8 depth = 0;
+ bool valid;
- efi_bs_call(free_pool, info);
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- a = w * h;
- if (a < area)
- continue;
- d = pixel_bpp(pf, pi);
- if (a > area || d > depth) {
- best_mode = m;
- area = a;
- depth = d;
- }
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ depth = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
- return best_mode;
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ mode,
+ (mode == cur_mode) ? '*' : ' ',
+ !valid ? '-' : ' ',
+ info->horizontal_resolution,
+ info->vertical_resolution,
+ dstr, depth);
+
+ return false;
}
static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
- u8 d;
- const char *dstr;
- bool valid;
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
efi_input_key_t key;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ efi_status_t status;
efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
efi_puts(" * = current mode\n"
" - = unusable mode\n");
- for (m = 0; m < max_mode; m++) {
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- efi_bs_call(free_pool, info);
-
- valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- d = 0;
- switch (pf) {
- case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
- dstr = "rgb";
- break;
- case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
- dstr = "bgr";
- break;
- case PIXEL_BIT_MASK:
- dstr = "";
- d = pixel_bpp(pf, pi);
- break;
- case PIXEL_BLT_ONLY:
- dstr = "blt";
- break;
- default:
- dstr = "xxx";
- break;
- }
-
- efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
- m,
- m == cur_mode ? '*' : ' ',
- !valid ? '-' : ' ',
- w, h, dstr, d);
- }
+ choose_mode(gop, match_list, (void *)cur_mode);
efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
@@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_graphics_output_protocol_t *
-find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *find_gop(unsigned long num,
+ const efi_handle_t handles[])
{
efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
- int i;
first_gop = NULL;
- for_each_efi_handle(h, handles, size, i) {
+ for_each_efi_handle(h, handles, num) {
efi_status_t status;
efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
-
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
void *dummy = NULL;
- status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID,
+ (void **)&gop);
if (status != EFI_SUCCESS)
continue;
@@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
if (status == EFI_SUCCESS)
return gop;
@@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
return first_gop;
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+efi_status_t efi_setup_gop(struct screen_info *si)
{
- efi_graphics_output_protocol_t *gop;
+ efi_handle_t *handles __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
+ efi_graphics_output_protocol_t *gop;
+ efi_status_t status;
+ unsigned long num;
- gop = find_gop(proto, size, handles);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num,
+ &handles);
+ if (status != EFI_SUCCESS)
+ return status;
- /* Did we find any GOPs? */
+ gop = find_gop(num, handles);
if (!gop)
return EFI_NOT_FOUND;
@@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
return EFI_SUCCESS;
}
-
-/*
- * See if we have Graphics Output Protocol
- */
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
-{
- efi_status_t status;
- void **gop_handle = NULL;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&gop_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
- &size, gop_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- status = setup_gop(si, proto, size, gop_handle);
-
-free_handle:
- efi_bs_call(free_pool, gop_handle);
- return status;
-}
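The gop.c refactor folds three near-identical query_mode loops into one walker: choose_mode() takes a match callback plus an opaque ctx that doubles as the fallback return value, and match_list() uses the walk purely for its printing side effect. A hypothetical extra matcher shows the shape:

    /* Accept the first mode with a usable pixel format. */
    static bool match_usable(const efi_graphics_output_mode_info_t *info,
                             u32 mode, void *ctx)
    {
            return info->pixel_format != PIXEL_BLT_ONLY &&
                   info->pixel_format < PIXEL_FORMAT_MAX;
    }

    static u32 choose_mode_usable(efi_graphics_output_protocol_t *gop, u32 cur_mode)
    {
            /* falls back to cur_mode if nothing matches */
            return choose_mode(gop, match_usable, (void *)(unsigned long)cur_mode);
    }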
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
index 965e734f6f98..418cd2e6dccc 100644
--- a/drivers/firmware/efi/libstub/intrinsics.c
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -15,8 +15,31 @@ void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
void *__memset(void *s, int c, size_t count) __alias(memset);
#endif
+static void *efistub_memmove(u8 *dst, const u8 *src, size_t len)
+{
+ if (src > dst || dst >= (src + len))
+ for (size_t i = 0; i < len; i++)
+ dst[i] = src[i];
+ else
+ for (ssize_t i = len - 1; i >= 0; i--)
+ dst[i] = src[i];
+
+ return dst;
+}
+
+static void *efistub_memset(void *dst, int c, size_t len)
+{
+ for (u8 *d = dst; len--; d++)
+ *d = c;
+
+ return dst;
+}
+
void *memcpy(void *dst, const void *src, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memmove(dst, src, len);
+
efi_bs_call(copy_mem, dst, src, len);
return dst;
}
@@ -25,6 +48,9 @@ extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
void *memset(void *dst, int c, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memset(dst, c, len);
+
efi_bs_call(set_mem, dst, len, c & U8_MAX);
return dst;
}
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 6318c40bda38..4bc963e999eb 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
*/
static bool check_image_region(u64 base, u64 size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
bool ret = false;
int map_offset;
@@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, map);
-
return ret;
}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 4f1fa302234d..9c82259eea81 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -20,10 +20,10 @@
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl)
{
+ struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL;
int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
: EFI_LOADER_DATA;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
- struct efi_boot_memmap *m, tmp;
efi_status_t status;
unsigned long size;
@@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
*/
status = efi_bs_call(install_configuration_table, &tbl_guid, m);
if (status != EFI_SUCCESS)
- goto free_map;
+ return status;
}
m->buff_size = m->map_size = size;
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
&m->desc_size, &m->desc_ver);
- if (status != EFI_SUCCESS)
- goto uninstall_table;
+ if (status != EFI_SUCCESS) {
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+ return status;
+ }
- *map = m;
+ *map = no_free_ptr(m);
return EFI_SUCCESS;
-
-uninstall_table:
- if (install_cfg_tbl)
- efi_bs_call(install_configuration_table, &tbl_guid, NULL);
-free_map:
- efi_bs_call(free_pool, m);
- return status;
}
/**
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index 99fb25d2bcf5..1dccf77958d3 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -16,37 +16,20 @@
void efi_pci_disable_bridge_busmaster(void)
{
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long pci_handle_size = 0;
- efi_handle_t *pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
+ unsigned long pci_handle_num;
efi_handle_t handle;
efi_status_t status;
u16 class, command;
- int i;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL) {
- if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- efi_err("Failed to locate PCI I/O handles'\n");
- return;
- }
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
- (void **)&pci_handle);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &pci_handle_num, &pci_handle);
if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to locate PCI I/O handles\n");
return;
}
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, pci_handle);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to locate PCI I/O handles'\n");
- goto free_handle;
- }
-
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
unsigned long segment_nr, bus_nr, device_nr, func_nr;
@@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void)
efi_bs_call(disconnect_controller, handle, NULL, NULL);
}
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
status = efi_bs_call(handle_protocol, handle, &pci_proto,
@@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_SUCCESS)
efi_err("Failed to disable PCI busmastering\n");
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
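
locate_handle_buffer lets the firmware size and allocate the handle array in a single call, replacing the probe, allocate, locate-again dance; combined with __free(efi_pool) there is no error path left to unwind by hand. A sketch of the resulting shape (the GUID and loop body are placeholders):

static void handle_walk_example(void)
{
        efi_guid_t proto = EFI_PCI_IO_PROTOCOL_GUID;
        efi_handle_t *handles __free(efi_pool) = NULL;
        unsigned long num;
        efi_handle_t handle;
        efi_status_t status;

        status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
                             &proto, NULL, &num, &handles);
        if (status != EFI_SUCCESS)
                return;                         /* nothing was allocated */

        for_each_efi_handle(handle, handles, num) {
                /* ... per-handle work ... */
        }
}                                               /* handles freed here */
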
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
index 3a67a2cea7bd..bc599212c05d 100644
--- a/drivers/firmware/efi/libstub/printk.c
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -5,13 +5,13 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/kernel.h>
-#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <linux/kern_levels.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+int efi_loglevel = LOGLEVEL_NOTICE;
/**
* efi_char16_puts() - Write a UCS-2 encoded string to the console
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index c41e7b2091cd..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ return 0;
+
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return 0;
@@ -59,9 +62,9 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long alloc_min,
unsigned long alloc_max)
{
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- struct efi_boot_memmap *map;
efi_status_t status;
int map_offset;
@@ -72,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
@@ -130,7 +137,5 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, map);
-
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index d694bcfa1074..d4264bfb6dc1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,14 +23,14 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
unsigned long nr_pages;
int i;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
/*
* Enforce minimum alignment that EFI or Linux requires when
@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ continue;
+
if (efi_soft_reserve_enabled() &&
(desc->attribute & EFI_MEMORY_SP))
continue;
@@ -79,11 +82,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
if (i == map->map_size / map->desc_size)
- status = EFI_NOT_FOUND;
+ return EFI_NOT_FOUND;
- efi_bs_call(free_pool, map);
-fail:
- return status;
+ return EFI_SUCCESS;
}
/**
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 1fd6823248ab..a5c6c4f163fc 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -57,7 +57,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
struct linux_efi_tpm_eventlog *log_tbl = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
- int final_events_size = 0;
+ u32 final_events_size = 0;
first_entry_addr = (unsigned long) log_location;
@@ -110,9 +110,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
*/
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
- int offset;
+ u32 offset;
void *data;
- int event_size;
+ u32 event_size;
int i = final_events_table->nr_events;
data = (void *)final_events_table;
@@ -124,6 +124,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
event_size = __calc_tpm2_event_size(header,
(void *)(long)log_location,
false);
+ /* If the calculation fails, the log is malformed */
+ if (!event_size)
+ break;
final_events_size += event_size;
i--;
}
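
__calc_tpm2_event_size() returns 0 when an event header does not parse, so the walker must treat 0 as "stop", and the accumulators become u32 to match the unsigned sizes coming out of the log instead of risking a signed overflow. The loop shape in isolation (header, log_base and nr_events are placeholders for the values the hunk above derives from the table):

u32 final_events_size = 0, event_size;
int i = nr_events;

while (i > 0) {
        event_size = __calc_tpm2_event_size(header, log_base, false);
        if (!event_size)        /* malformed entry: stop, keep what we have */
                break;
        final_events_size += event_size;
        header = (void *)header + event_size;
        i--;
}
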
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 77359e802181..f1c5fb45d5f7 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void)
void efi_5level_switch(void)
{
- bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool want_la57 = !efi_no5lvl;
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index f8e465da344d..761121a77f9e 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol {
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
- struct pci_setup_rom *rom = NULL;
+ struct pci_setup_rom *rom __free(efi_pool) = NULL;
efi_status_t status;
unsigned long size;
uint64_t romsize;
@@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
rom->pcilen = romsize;
- *__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->vendor\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->devid\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
&rom->device, &rom->function);
if (status != EFI_SUCCESS)
- goto free_struct;
+ return status;
memcpy(rom->romdata, romimage, romsize);
- return status;
-
-free_struct:
- efi_bs_call(free_pool, rom);
- return status;
+ *__rom = no_free_ptr(rom);
+ return EFI_SUCCESS;
}
/*
@@ -119,38 +115,23 @@ free_struct:
static void setup_efi_pci(struct boot_params *params)
{
efi_status_t status;
- void **pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long size = 0;
struct setup_data *data;
+ unsigned long num;
efi_handle_t h;
- int i;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&pci_handle);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
- return;
- }
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
- }
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &num, &pci_handle);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- for_each_efi_handle(h, pci_handle, size, i) {
+ for_each_efi_handle(h, pci_handle, num) {
efi_pci_io_protocol_t *pci = NULL;
struct pci_setup_rom *rom;
@@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params)
data = (struct setup_data *)rom;
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -322,7 +300,7 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
return EFI_SUCCESS;
/*
- * Don't modify memory region attributes, they are
+ * Don't modify memory region attributes, if they are
* already suitable, to lower the possibility to
* encounter firmware bugs.
*/
@@ -337,11 +315,13 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
next = desc.base_address + desc.length;
/*
- * Only system memory is suitable for trampoline/kernel image placement,
- * so only this type of memory needs its attributes to be modified.
+ * Only system memory and more reliable memory are suitable for
+ * trampoline/kernel image placement. So only those memory types
+ * may need to have attributes modified.
*/
- if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ if ((desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory &&
+ desc.gcd_memory_type != EfiGcdMemoryTypeMoreReliable) ||
(desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
continue;
@@ -405,116 +385,13 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t
-setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- efi_uga_draw_protocol_t *uga = NULL, *first_uga;
- efi_handle_t handle;
- int i;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- first_uga = NULL;
- for_each_efi_handle(handle, uga_handle, size, i) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
-
- status = efi_bs_call(handle_protocol, handle, uga_proto,
- (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- pciio = NULL;
- efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
-
- if (!width && !height)
- goto free_handle;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
-
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
-
-free_handle:
- efi_bs_call(free_pool, uga_handle);
-
- return status;
-}
-
static void setup_graphics(struct boot_params *boot_params)
{
- efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- struct screen_info *si;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- void **uga_handle = NULL;
-
- si = &boot_params->screen_info;
- memset(si, 0, sizeof(*si));
-
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &graphics_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- status = efi_setup_gop(si, &graphics_proto, size);
+ struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
- if (status != EFI_SUCCESS) {
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &uga_proto, NULL, &size, uga_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- setup_uga(si, &uga_proto, size);
- }
+ efi_setup_gop(si);
}
-
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
{
efi_bs_call(exit, handle, status, 0, NULL);
@@ -522,41 +399,30 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
asm("hlt");
}
-void __noreturn efi_stub_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg,
- struct boot_params *boot_params);
-
/*
* Because the x86 boot code expects to be passed a boot_params we
* need to create one ourselves (usually the bootloader would create
* one for us).
*/
-efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg)
+static efi_status_t efi_allocate_bootparams(efi_handle_t handle,
+ struct boot_params **bp)
{
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
struct boot_params *boot_params;
struct setup_header *hdr;
- int options_size = 0;
efi_status_t status;
unsigned long alloc;
char *cmdline_ptr;
- efi_system_table = sys_table_arg;
-
- /* Check if we were booted by the EFI firmware */
- if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- efi_exit(handle, EFI_INVALID_PARAMETER);
-
status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
if (status != EFI_SUCCESS) {
efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
- efi_exit(handle, status);
+ return status;
}
status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
if (status != EFI_SUCCESS)
- efi_exit(handle, status);
+ return status;
boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
hdr = &boot_params->hdr;
@@ -569,17 +435,17 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
- cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ cmdline_ptr = efi_convert_cmdline(image);
if (!cmdline_ptr) {
efi_free(PARAM_SIZE, alloc);
- efi_exit(handle, EFI_OUT_OF_RESOURCES);
+ return EFI_OUT_OF_RESOURCES;
}
efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
&boot_params->ext_cmd_line_ptr);
- efi_stub_entry(handle, sys_table_arg, boot_params);
- /* not reached */
+ *bp = boot_params;
+ return EFI_SUCCESS;
}
static void add_e820ext(struct boot_params *params,
@@ -738,7 +604,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
__u32 nr_desc;
@@ -752,13 +618,14 @@ static efi_status_t allocate_e820(struct boot_params *params,
EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
}
- if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
- status = allocate_unaccepted_bitmap(nr_desc, map);
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return allocate_unaccepted_bitmap(nr_desc, map);
- efi_bs_call(free_pool, map);
- return status;
+ return EFI_SUCCESS;
}
struct exit_boot_struct {
@@ -865,13 +732,16 @@ static efi_status_t parse_options(const char *cmdline)
return efi_parse_options(cmdline);
}
-static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
+ struct boot_params *boot_params)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
unsigned long addr, alloc_size, entry;
efi_status_t status;
u32 seed[2] = {};
+ boot_params_ptr = boot_params;
+
/* determine the required size of the allocation */
alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
MIN_KERNEL_ALIGN);
@@ -902,7 +772,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
seed[0] = 0;
}
- boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+ boot_params->hdr.loadflags |= KASLR_FLAG;
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
@@ -920,7 +790,9 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size) ?:
+ efi_adjust_memory_range_protection(addr + kernel_inittext_offset,
+ kernel_inittext_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
@@ -940,20 +812,27 @@ static void __noreturn enter_kernel(unsigned long kernel_addr,
void __noreturn efi_stub_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
- struct setup_header *hdr = &boot_params->hdr;
const struct linux_efi_initrd *initrd = NULL;
unsigned long kernel_entry;
+ struct setup_header *hdr;
efi_status_t status;
- boot_params_ptr = boot_params;
-
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
+ if (!IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL) || !boot_params) {
+ status = efi_allocate_bootparams(handle, &boot_params);
+ if (status != EFI_SUCCESS)
+ efi_exit(handle, status);
+ }
+
+ hdr = &boot_params->hdr;
+
if (have_unsupported_snp_features())
efi_exit(handle, EFI_UNSUPPORTED);
@@ -995,7 +874,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
if (efi_mem_encrypt > 0)
hdr->xloadflags |= XLF_MEM_ENCRYPTION;
- status = efi_decompress_kernel(&kernel_entry);
+ status = efi_decompress_kernel(&kernel_entry, boot_params);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
goto fail;
@@ -1065,6 +944,12 @@ fail:
efi_exit(handle, status);
}
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ efi_stub_entry(handle, sys_table_arg, NULL);
+}
+
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
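
After this refactor there is a single late entry point; the earlier entry points differ only in who supplies boot_params. The resulting call graph, sketched as a comment:

/*
 * efi_pe_entry(handle, systab)            - native EFI boot, no params
 *     -> efi_stub_entry(handle, systab, NULL)
 *            -> efi_allocate_bootparams() - builds a zeroed boot_params
 *
 * efi_handover_entry(handle, systab, bp)  - legacy handover protocol,
 *     -> efi_stub_entry(handle, systab, bp)  bootloader-built params
 */
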
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-gzip.c b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
new file mode 100644
index 000000000000..e97a7e9d3c98
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zlib.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#include "inftrees.c"
+#include "inffast.c"
+#include "inflate.c"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static struct z_stream_s stream;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+ int rc;
+
+ /* skip the 10 byte header, assume no recorded filename */
+ stream.next_in = _gzdata_start + 10;
+ stream.avail_in = _gzdata_end - stream.next_in;
+
+ status = efi_allocate_pages(zlib_inflate_workspacesize(),
+ (unsigned long *)&stream.workspace,
+ ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ rc = zlib_inflateInit2(&stream, -MAX_WBITS);
+ if (rc != Z_OK) {
+ efi_err("failed to initialize GZIP decompressor: %d\n", rc);
+ status = EFI_LOAD_ERROR;
+ goto out;
+ }
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+out:
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+ return status;
+}
+
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ int rc;
+
+ stream.next_out = out;
+ stream.avail_out = outlen;
+
+ rc = zlib_inflate(&stream, 0);
+ zlib_inflateEnd(&stream);
+
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+
+ if (rc != Z_STREAM_END) {
+ efi_err("GZIP decompression failed with status %d\n", rc);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
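
The per-algorithm decompressors share a two-phase contract: the _init() call reports how large the output buffer must be (and claims any workspace), then _decompress() fills a buffer the caller allocated. The caller side, condensed from efi_zboot_entry() further below:

unsigned long alloc_size, image_base;
efi_status_t status;

status = efi_zboot_decompress_init(&alloc_size);  /* size + workspace */
if (status != EFI_SUCCESS)
        return status;

/* ... allocate alloc_size bytes at image_base, preferred or random ... */

status = efi_zboot_decompress((void *)image_base, alloc_size);
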
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-zstd.c b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
new file mode 100644
index 000000000000..bde9d94dd2e3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zstd.h>
+
+#include <asm/efi.h>
+
+#include "decompress_sources.h"
+#include "efistub.h"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static size_t wksp_size;
+static void *wksp;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+
+ wksp_size = zstd_dctx_workspace_bound();
+ status = efi_allocate_pages(wksp_size, (unsigned long *)&wksp, ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+}
+
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
+ size_t ret;
+ int retval;
+
+ ret = zstd_decompress_dctx(dctx, out, outlen, _gzdata_start,
+ _gzdata_end - _gzdata_start - 4);
+ efi_free(wksp_size, (unsigned long)wksp);
+
+ retval = zstd_get_error_code(ret);
+ if (retval) {
+ efi_err("ZSTD-decompression failed with status %d\n", retval);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index af23b3c50228..c47ace06f010 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -7,36 +7,6 @@
#include "efistub.h"
-static unsigned char zboot_heap[SZ_256K] __aligned(64);
-static unsigned long free_mem_ptr, free_mem_end_ptr;
-
-#define STATIC static
-#if defined(CONFIG_KERNEL_GZIP)
-#include "../../../../lib/decompress_inflate.c"
-#elif defined(CONFIG_KERNEL_LZ4)
-#include "../../../../lib/decompress_unlz4.c"
-#elif defined(CONFIG_KERNEL_LZMA)
-#include "../../../../lib/decompress_unlzma.c"
-#elif defined(CONFIG_KERNEL_LZO)
-#include "../../../../lib/decompress_unlzo.c"
-#elif defined(CONFIG_KERNEL_XZ)
-#undef memcpy
-#define memcpy memcpy
-#undef memmove
-#define memmove memmove
-#include "../../../../lib/decompress_unxz.c"
-#elif defined(CONFIG_KERNEL_ZSTD)
-#include "../../../../lib/decompress_unzstd.c"
-#endif
-
-extern char efi_zboot_header[];
-extern char _gzdata_start[], _gzdata_end[];
-
-static void error(char *x)
-{
- efi_err("EFI decompressor: %s\n", x);
-}
-
static unsigned long alloc_preferred_address(unsigned long alloc_size)
{
#ifdef EFI_KIMG_PREFERRED_ADDRESS
@@ -64,22 +34,17 @@ struct screen_info *alloc_screen_info(void)
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
- unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ char *cmdline_ptr __free(efi_pool) = NULL;
unsigned long image_base, alloc_size;
efi_loaded_image_t *image;
efi_status_t status;
- char *cmdline_ptr;
- int ret;
WRITE_ONCE(efi_system_table, systab);
- free_mem_ptr = (unsigned long)&zboot_heap;
- free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
-
status = efi_bs_call(handle_protocol, handle,
&LOADED_IMAGE_PROTOCOL_GUID, (void **)&image);
if (status != EFI_SUCCESS) {
- error("Failed to locate parent's loaded image protocol");
+ efi_err("Failed to locate parent's loaded image protocol\n");
return status;
}
@@ -89,9 +54,9 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
efi_info("Decompressing Linux Kernel...\n");
- // SizeOfImage from the compressee's PE/COFF header
- alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4),
- EFI_ALLOC_ALIGN);
+ status = efi_zboot_decompress_init(&alloc_size);
+ if (status != EFI_SUCCESS)
+ return status;
// If the architecture has a preferred address for the image,
// try that first.
@@ -122,26 +87,14 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
- goto free_cmdline;
+ return status;
}
}
- // Decompress the payload into the newly allocated buffer.
- ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
- (void *)image_base, alloc_size, NULL, error);
- if (ret < 0) {
- error("Decompression failed");
- status = EFI_DEVICE_ERROR;
- goto free_image;
- }
-
- efi_cache_sync_image(image_base, alloc_size);
-
- status = efi_stub_common(handle, image, image_base, cmdline_ptr);
+ // Decompress the payload into the newly allocated buffer
+ status = efi_zboot_decompress((void *)image_base, alloc_size) ?:
+ efi_stub_common(handle, image, image_base, cmdline_ptr);
-free_image:
efi_free(alloc_size, image_base);
-free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
return status;
}
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index af2c82f7bd90..367907eb7d86 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -17,6 +17,7 @@ SECTIONS
.rodata : ALIGN(8) {
__efistub__gzdata_start = .;
*(.gzdata)
+ __efistub_payload_size = . - 4;
__efistub__gzdata_end = .;
*(.rodata* .init.rodata* .srodata*)
@@ -28,7 +29,15 @@ SECTIONS
. = _etext;
}
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -49,5 +58,6 @@ SECTIONS
PROVIDE(__efistub__gzdata_size =
ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
-PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
-PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__data_rawsize = ABSOLUTE(_edata - _data));
+PROVIDE(__data_size = ABSOLUTE(_end - _data));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 164203429fa7..c38b1a335590 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -22,6 +22,7 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
int __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
+ unsigned long size;
if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
return 0;
@@ -39,7 +40,22 @@ int __init efi_memattr_init(void)
goto unmap;
}
- tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+
+ /*
+ * Sanity check: the Memory Attributes Table contains up to 3 entries
+ * for each entry of type EfiRuntimeServicesCode in the EFI memory map.
+ * So if the size of the table exceeds 3x the size of the entire EFI
+ * memory map, there is clearly something wrong, and the table should
+ * just be ignored altogether.
+ */
+ size = tbl->num_entries * tbl->desc_size;
+ if (size > 3 * efi.memmap.nr_map * efi.memmap.desc_size) {
+ pr_warn(FW_BUG "Corrupted EFI Memory Attributes Table detected! (version == %u, desc_size == %u, num_entries == %u)\n",
+ tbl->version, tbl->desc_size, tbl->num_entries);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + size;
memblock_reserve(efi_mem_attr_table, tbl_size);
set_bit(EFI_MEM_ATTR, &efi.flags);
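
To make the bound concrete, a worked example with hypothetical numbers:

/*
 * An EFI memory map with nr_map == 40 entries of desc_size == 48 bytes
 * allows at most
 *
 *      3 * 40 * 48 == 5760 bytes
 *
 * of Memory Attributes Table payload; a larger num_entries * desc_size
 * is reported as FW_BUG and the table is ignored altogether.
 */
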
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 5ed0602c2f75..aedbbd627706 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
+ struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
- unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
- struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
- mokvar_entry = va + cur_offset;
- map_size_needed = cur_offset + sizeof(*mokvar_entry);
- if (map_size_needed > map_size) {
- if (va)
- early_memunmap(va, map_size);
- /*
- * Map a little more than the fixed size entry
- * header, anticipating some data. It's safe to
- * do so as long as we stay within current memory
- * descriptor.
- */
- map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
- offset_limit);
- va = early_memremap(efi.mokvar_table, map_size);
- if (!va) {
- pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
- efi.mokvar_table, map_size);
- return;
- }
- mokvar_entry = va + cur_offset;
+ if (va)
+ early_memunmap(va, sizeof(*mokvar_entry));
+ va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
+ efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ return;
}
-
+ mokvar_entry = va;
+next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
- /* Sanity check that the name is null terminated */
- size = strnlen(mokvar_entry->name,
- sizeof(mokvar_entry->name));
- if (size >= sizeof(mokvar_entry->name))
- break;
+ /* Enforce that the name is NUL terminated */
+ mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
- cur_offset = map_size_needed + mokvar_entry->data_size;
+ size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ cur_offset += size;
+
+ /*
+ * Don't bother remapping if the current entry header and the
+ * next one end on the same page.
+ */
+ next_entry = (void *)((unsigned long)mokvar_entry + size);
+ if (((((unsigned long)(mokvar_entry + 1) - 1) ^
+ ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
+ mokvar_entry = next_entry;
+ goto next;
+ }
}
if (va)
- early_memunmap(va, map_size);
+ early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
@@ -266,7 +263,7 @@ struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
* amount of data in this mokvar config table entry.
*/
static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
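
The remap-avoidance test in the loop above compares the page holding the last byte of the current entry header with the page holding the last byte of the next one; written out as a helper it reads (an illustrative restatement, not code from the patch):

static inline bool ends_on_same_page(const void *a_end, const void *b_end)
{
        /* last-byte addresses agree in every bit above the page offset */
        return (((unsigned long)a_end ^ (unsigned long)b_end) & PAGE_MASK) == 0;
}

/* usage: ends_on_same_page((void *)(mokvar_entry + 1) - 1,
 *                          (void *)(next_entry + 1) - 1)   */
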
diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c
new file mode 100644
index 000000000000..5b2471ffaeed
--- /dev/null
+++ b/drivers/firmware/efi/ovmf-debug-log.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1"
+#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2"
+
+struct ovmf_debug_log_header {
+ u64 magic1;
+ u64 magic2;
+ u64 hdr_size;
+ u64 log_size;
+ u64 lock; // edk2 spinlock
+ u64 head_off;
+ u64 tail_off;
+ u64 truncated;
+ u8 fw_version[128];
+};
+
+static struct ovmf_debug_log_header *hdr;
+static u8 *logbuf;
+static u64 logbufsize;
+
+static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ u64 start, end;
+
+ start = hdr->head_off + offset;
+ if (hdr->head_off > hdr->tail_off && start >= hdr->log_size)
+ start -= hdr->log_size;
+
+ end = start + count;
+ if (start > hdr->tail_off) {
+ if (end > hdr->log_size)
+ end = hdr->log_size;
+ } else {
+ if (end > hdr->tail_off)
+ end = hdr->tail_off;
+ }
+
+ if (start > logbufsize || end > logbufsize)
+ return 0;
+ if (start >= end)
+ return 0;
+
+ memcpy(buf, logbuf + start, end - start);
+ return end - start;
+}
+
+static struct bin_attribute ovmf_log_bin_attr = {
+ .attr = {
+ .name = "ovmf_debug_log",
+ .mode = 0444,
+ },
+ .read = ovmf_log_read,
+};
+
+int __init ovmf_log_probe(unsigned long ovmf_debug_log_table)
+{
+ int ret = -EINVAL;
+ u64 size;
+
+ /* map + verify header */
+ hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: header map failed\n");
+ return -EINVAL;
+ }
+
+ if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 ||
+ hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) {
+ pr_err("OVMF debug log: magic mismatch\n");
+ goto err_unmap;
+ }
+
+ size = hdr->hdr_size + hdr->log_size;
+ pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version);
+ pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024);
+
+ /* map complete log buffer */
+ memunmap(hdr);
+ hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: buffer map failed\n");
+ return -EINVAL;
+ }
+ logbuf = (void *)hdr + hdr->hdr_size;
+ logbufsize = hdr->log_size;
+
+ ovmf_log_bin_attr.size = size;
+ ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr);
+ if (ret != 0) {
+ pr_err("OVMF debug log: sysfs register failed\n");
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ memunmap(hdr);
+ return ret;
+}
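
ovmf_log_read() treats the log as a circular buffer: head_off marks the oldest byte, tail_off points one past the newest, and a logical read offset wraps past log_size when the producer has lapped the start of the buffer. The offset mapping in isolation (an illustrative helper, not part of the driver):

static u64 ovmf_log_phys_offset(u64 offset, u64 head, u64 tail, u64 size)
{
        u64 pos = head + offset;

        if (head > tail && pos >= size)   /* buffer has wrapped */
                pos -= size;
        return pos;
}
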
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 4fd45d6f69a4..c1bedd244817 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -40,7 +40,7 @@ static u8 *rci2_base;
static u32 rci2_table_len;
unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
-static BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
static u16 checksum(void)
{
diff --git a/drivers/firmware/efi/stmm/tee_stmm_efi.c b/drivers/firmware/efi/stmm/tee_stmm_efi.c
index f741ca279052..65c0fe1ba275 100644
--- a/drivers/firmware/efi/stmm/tee_stmm_efi.c
+++ b/drivers/firmware/efi/stmm/tee_stmm_efi.c
@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
return var_hdr->ret_status;
}
+#define COMM_BUF_SIZE(__payload_size) (MM_COMMUNICATE_HEADER_SIZE + \
+ MM_VARIABLE_COMMUNICATE_SIZE + \
+ (__payload_size))
+
/**
* setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
* header data.
@@ -150,11 +154,9 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
* @dptr: pointer address to store allocated buffer
* @payload_size: payload size
* @func: standAloneMM function number
- * @ret: EFI return code
* Return: pointer to corresponding StandAloneMM function buffer or NULL
*/
-static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
- efi_status_t *ret)
+static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func)
{
const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
struct efi_mm_communicate_header *mm_hdr;
@@ -169,17 +171,13 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
if (max_buffer_size &&
max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
- *ret = EFI_INVALID_PARAMETER;
return NULL;
}
- comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
- MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
- GFP_KERNEL);
- if (!comm_buf) {
- *ret = EFI_OUT_OF_RESOURCES;
+ comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!comm_buf)
return NULL;
- }
mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));
@@ -187,9 +185,7 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
var_hdr->function = func;
- if (dptr)
- *dptr = comm_buf;
- *ret = EFI_SUCCESS;
+ *dptr = comm_buf;
return var_hdr->data;
}
@@ -212,10 +208,9 @@ static efi_status_t get_max_payload(size_t *size)
payload_size = sizeof(*var_payload);
var_payload = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE);
if (!var_payload)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
ret = mm_communicate(comm_buf, payload_size);
if (ret != EFI_SUCCESS)
@@ -239,7 +234,7 @@ static efi_status_t get_max_payload(size_t *size)
*/
*size -= 2;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -259,9 +254,9 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
smm_property = setup_mm_hdr(
&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
+ SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET);
if (!smm_property)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
smm_property->name_size = name_size;
@@ -282,7 +277,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
memcpy(var_property, &smm_property->property, sizeof(*var_property));
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -315,9 +310,9 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_GET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
@@ -347,7 +342,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
var_acc->data_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -380,10 +375,9 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
var_getnext = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME);
if (!var_getnext)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));
@@ -404,7 +398,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
memcpy(name, var_getnext->name, var_getnext->name_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -437,9 +431,9 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
* the properties, if the allocation fails
*/
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_SET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/*
* The API has the ability to override RO flags. If no RO check was
@@ -467,7 +461,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
ret = mm_communicate(comm_buf, payload_size);
dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -492,10 +486,9 @@ static efi_status_t tee_query_variable_info(u32 attributes,
payload_size = sizeof(*mm_query_info);
mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
- &ret);
+ SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO);
if (!mm_query_info)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
mm_query_info->attr = attributes;
ret = mm_communicate(comm_buf, payload_size);
@@ -507,7 +500,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
*max_variable_size = mm_query_info->max_variable_size;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
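
Switching from kzalloc() to alloc_pages_exact() means the free side must know the allocation size again; the COMM_BUF_SIZE() macro keeps both sides computing it the same way. The shape every tee_*() helper now follows (condensed; func stands for one of the SMM_VARIABLE_FUNCTION_* constants):

u8 *comm_buf;
void *payload = setup_mm_hdr(&comm_buf, payload_size, func);

if (!payload)
        return EFI_DEVICE_ERROR;

/* ... fill payload, then mm_communicate(comm_buf, payload_size) ... */

free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
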
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index cc807ed35aed..1e509595ac03 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
_ret_; \
})
+#ifdef CONFIG_EFI
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
-#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(void)
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index e8d69bd548f3..cdd431027065 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(efi_tpm_final_log_size);
static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
- int event_size, size = 0;
+ u32 event_size, size = 0;
while (count > 0) {
header = data + size;
@@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
- int tbl_size;
+ unsigned int tbl_size;
+ int final_tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -60,7 +61,12 @@ int __init efi_tpm_eventlog_init(void)
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
- memblock_reserve(efi.tpm_log, tbl_size);
+ if (memblock_reserve(efi.tpm_log, tbl_size)) {
+ pr_err("TPM Event Log memblock reserve fails (0x%lx, 0x%x)\n",
+ efi.tpm_log, tbl_size);
+ ret = -ENOMEM;
+ goto out;
+ }
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
@@ -80,26 +86,26 @@ int __init efi_tpm_eventlog_init(void)
goto out;
}
- tbl_size = 0;
+ final_tbl_size = 0;
if (final_tbl->nr_events != 0) {
void *events = (void *)efi.tpm_final_log
+ sizeof(final_tbl->version)
+ sizeof(final_tbl->nr_events);
- tbl_size = tpm2_calc_event_log_size(events,
- final_tbl->nr_events,
- log_tbl->log);
+ final_tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
}
- if (tbl_size < 0) {
+ if (final_tbl_size < 0) {
pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
ret = -EINVAL;
goto out_calc;
}
memblock_reserve(efi.tpm_final_log,
- tbl_size + sizeof(*final_tbl));
- efi_tpm_final_log_size = tbl_size;
+ final_tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = final_tbl_size;
out_calc:
early_memunmap(final_tbl, sizeof(*final_tbl));
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 4056ba7f3440..3700e9869767 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -149,7 +149,7 @@ int efivar_lock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_lock, "EFIVAR");
/*
* efivar_lock() - obtain the efivar lock if it is free
@@ -165,7 +165,7 @@ int efivar_trylock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, "EFIVAR");
/*
* efivar_unlock() - release the efivar lock
@@ -174,7 +174,7 @@ void efivar_unlock(void)
{
up(&efivars_lock);
}
-EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, "EFIVAR");
/*
* efivar_get_variable() - retrieve a variable identified by name/vendor
@@ -186,7 +186,7 @@ efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
{
return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, "EFIVAR");
/*
* efivar_get_next_variable() - enumerate the next name/vendor pair
@@ -198,7 +198,7 @@ efi_status_t efivar_get_next_variable(unsigned long *name_size,
{
return __efivars->ops->get_next_variable(name_size, name, vendor);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, "EFIVAR");
/*
* efivar_set_variable_locked() - set a variable identified by name/vendor
@@ -230,7 +230,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
return setvar(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, "EFIVAR");
/*
* efivar_set_variable() - set a variable identified by name/vendor
@@ -252,7 +252,7 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
efivar_unlock();
return status;
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, "EFIVAR");
efi_status_t efivar_query_variable_info(u32 attr,
u64 *storage_space,
@@ -264,4 +264,4 @@ efi_status_t efivar_query_variable_info(u32 attr,
return __efivars->ops->query_variable_info(attr, storage_space,
remaining_space, max_variable_size);
}
-EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, "EFIVAR");
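
These EXPORT_SYMBOL_NS_GPL() changes track the kernel-wide move to string-literal symbol namespaces; importers change the same way. Sketch of the consumer side:

/* before: MODULE_IMPORT_NS(EFIVAR);   after: */
MODULE_IMPORT_NS("EFIVAR");
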
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 66042160b361..54c3b8b05e5d 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -30,7 +30,7 @@ static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
}
static ssize_t mem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -40,7 +40,7 @@ static ssize_t mem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t mem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -53,7 +53,7 @@ static ssize_t mem_write(struct file *filp, struct kobject *kobj,
memcpy(entry->mem_file_buf + pos, buf, count);
return count;
}
-static BIN_ATTR_ADMIN_RW(mem, 0);
+static const BIN_ATTR_ADMIN_RW(mem, 0);
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -79,7 +79,7 @@ static struct attribute *attrs[] = {
NULL,
};
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_mem,
NULL,
};
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 208652a8087c..882db32e51be 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -220,7 +220,7 @@ MODULE_DEVICE_TABLE(of, coreboot_of_match);
static struct platform_driver coreboot_table_driver = {
.probe = coreboot_table_probe,
- .remove_new = coreboot_table_remove,
+ .remove = coreboot_table_remove,
.driver = {
.name = "coreboot_table",
.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index daadd71d8ddd..c68c9f56370f 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/sysfb.h>
#include "coreboot_table.h"
@@ -36,6 +37,19 @@ static int framebuffer_probe(struct coreboot_device *dev)
.format = NULL,
};
+ /*
+ * On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry
+ * in the coreboot table should only be used if the payload did
+ * not pass a framebuffer information to the Linux kernel.
+ *
+ * If the global screen_info data has been filled, the Generic
+ * System Framebuffers (sysfb) will already register a platform
+ * device and pass that screen_info as platform_data to a driver
+ * that can scan-out using the system provided framebuffer.
+ */
+ if (sysfb_handles_screen_info())
+ return -ENODEV;
+
if (!fb->physical_address)
return -ENODEV;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index d304913314e4..0ceccde5a302 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -488,7 +488,7 @@ static const struct efivar_operations efivar_ops = {
#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct gsmi_set_eventlog_param param = {
@@ -528,7 +528,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
}
-static struct bin_attribute eventlog_bin_attr = {
+static const struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
.write = eventlog_write,
};
@@ -918,7 +918,8 @@ static __init int gsmi_init(void)
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
- return PTR_ERR(gsmi_dev.pdev);
+ ret = PTR_ERR(gsmi_dev.pdev);
+ goto out_unregister;
}
/* SMI access needs to be serialized */
@@ -1056,10 +1057,11 @@ out_err:
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
- pr_info("gsmi: failed to load: %d\n", ret);
+out_unregister:
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
+ pr_info("gsmi: failed to load: %d\n", ret);
return ret;
}
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index b9d99fe1ff0f..6138a1653ec5 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -14,7 +14,7 @@
#include "memconsole.h"
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1749529f63d4..339a3f74b247 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -56,7 +56,7 @@ static struct vpd_section ro_vpd;
static struct vpd_section rw_vpd;
static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_attrib_info *info = bin_attr->private;
@@ -156,7 +156,7 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
}
static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_section *sec = bin_attr->private;
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 477d3f32d99a..127ad752acf8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -23,9 +23,31 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+config IMX_SCMI_CPU_DRV
+ tristate "IMX SCMI CPU Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core and provides CPU management features.
+
+ This driver can also be built as a module.
+
+config IMX_SCMI_LMM_DRV
+ tristate "IMX SCMI LMM Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core and provides Logical Machine management features.
+
+ This driver can also be built as a module.
+
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
- depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8d046c341be8..3bbaffa6e347 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o
obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
+obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 01c8ef14eaec..ed79e823157a 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -180,7 +180,7 @@ static struct platform_driver imx_dsp_driver = {
.name = "imx-dsp",
},
.probe = imx_dsp_probe,
- .remove_new = imx_dsp_remove,
+ .remove = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c
new file mode 100644
index 000000000000..091b014f739f
--- /dev/null
+++ b/drivers/firmware/imx/sm-cpu.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start,
+ boot, resume);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set);
+
+int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_start(ph, cpuid, start);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_start);
+
+int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!started)
+ return -EINVAL;
+
+ return imx_cpu_ops->cpu_started(ph, cpuid, started);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_started);
+
+static int scmi_imx_cpu_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_cpu_ops) {
+ dev_err(&sdev->dev, "sm cpu already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
+ if (IS_ERR(imx_cpu_ops))
+ return PTR_ERR(imx_cpu_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_cpu_driver = {
+ .name = "scmi-imx-cpu",
+ .probe = scmi_imx_cpu_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_cpu_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c
new file mode 100644
index 000000000000..6807bf563c03
--- /dev/null
+++ b/drivers/firmware/imx/sm-lmm.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!info)
+ return -EINVAL;
+
+ return imx_lmm_ops->lmm_info(ph, lmid, info);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_info);
+
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set);
+
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ switch (op) {
+ case SCMI_IMX_LMM_BOOT:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, true);
+ case SCMI_IMX_LMM_POWER_ON:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, false);
+ case SCMI_IMX_LMM_SHUTDOWN:
+ return imx_lmm_ops->lmm_shutdown(ph, lmid, flags);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scmi_imx_lmm_operation);
+
+static int scmi_imx_lmm_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_lmm_ops) {
+ dev_err(&sdev->dev, "lmm already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
+ if (IS_ERR(imx_lmm_ops))
+ return PTR_ERR(imx_lmm_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_lmm_driver = {
+ .name = "scmi-imx-lmm",
+ .probe = scmi_imx_lmm_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_lmm_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
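The guard matters because shifting a 32-bit value by 32 or more (or by a negative count, which is what 32 - prefix becomes for prefix > 32) is undefined behaviour in C. As a standalone illustration (not part of the patch), the conversion can be wrapped in a helper that also covers the prefix == 0 case, where the unguarded expression would shift by 32:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static inline __be32 prefix_to_netmask(u8 prefix)
	{
		if (prefix > 32)
			return cpu_to_be32(~0);
		if (prefix == 0)
			return 0;
		return cpu_to_be32(~((1U << (32 - prefix)) - 1));
	}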
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 8e59be3782cb..55b9cfad8a04 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -116,7 +116,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type __refdata memmap_ktype = {
+static const struct kobj_type memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_groups = def_groups,
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index f25a9746249b..3ab67aaa9e5d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
+ struct meson_sm_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return fw;
}
EXPORT_SYMBOL_GPL(meson_sm_get);
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 9ca5ee58edbd..e194f7acb2a9 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -76,14 +76,11 @@
#define AUTO_UPDATE_INFO_SIZE SZ_1M
#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE)
-#define AUTO_UPDATE_TIMEOUT_MS 60000
-
struct mpfs_auto_update_priv {
struct mpfs_sys_controller *sys_controller;
struct device *dev;
struct mtd_info *flash;
struct fw_upload *fw_uploader;
- struct completion programming_complete;
size_t size_per_bitstream;
bool cancel_request;
};
@@ -156,19 +153,6 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader)
static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader)
{
- struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- int ret;
-
- /*
- * There is no meaningful way to get the status of the programming while
- * it is in progress, so attempting anything other than waiting for it
- * to complete would be misplaced.
- */
- ret = wait_for_completion_timeout(&priv->programming_complete,
- msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
- if (!ret)
- return FW_UPLOAD_ERR_TIMEOUT;
-
return FW_UPLOAD_ERR_NONE;
}
@@ -349,33 +333,23 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader,
u32 offset, u32 size, u32 *written)
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- enum fw_upload_err err = FW_UPLOAD_ERR_NONE;
int ret;
- reinit_completion(&priv->programming_complete);
-
ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written);
- if (ret) {
- err = FW_UPLOAD_ERR_RW_ERROR;
- goto out;
- }
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
- if (priv->cancel_request) {
- err = FW_UPLOAD_ERR_CANCELED;
- goto out;
- }
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
if (mpfs_auto_update_is_bitstream_info(data, size))
- goto out;
+ return FW_UPLOAD_ERR_NONE;
ret = mpfs_auto_update_verify_image(fw_uploader);
if (ret)
- err = FW_UPLOAD_ERR_FW_INVALID;
+ return FW_UPLOAD_ERR_FW_INVALID;
-out:
- complete(&priv->programming_complete);
-
- return err;
+ return FW_UPLOAD_ERR_NONE;
}
static const struct fw_upload_ops mpfs_auto_update_ops = {
@@ -428,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
return -EIO;
/*
- * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+ * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is
* not possible.
*/
- if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+ if (((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED)
return -EPERM;
return 0;
@@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"The current bitstream does not support auto-update\n");
- init_completion(&priv->programming_complete);
-
fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update",
&mpfs_auto_update_ops, priv);
if (IS_ERR(fw_uploader))
@@ -486,7 +458,7 @@ static struct platform_driver mpfs_auto_update_driver = {
.name = "mpfs-auto-update",
},
.probe = mpfs_auto_update_probe,
- .remove_new = mpfs_auto_update_remove,
+ .remove = mpfs_auto_update_remove,
};
module_platform_driver(mpfs_auto_update_driver);
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index a762302978de..2b79371c61c9 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -95,10 +95,9 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->idx = i;
adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
- ret = PTR_ERR(adsp_chan->ch);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %s ret %d\n",
- adsp_mbox_ch_names[i], ret);
+ ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch),
+ "Failed to request mbox channel %s\n",
+ adsp_mbox_ch_names[i]);
for (j = 0; j < i; j++) {
adsp_chan = &adsp_ipc->chans[j];
@@ -133,7 +132,7 @@ static struct platform_driver mtk_adsp_ipc_driver = {
.name = "mtk-adsp-ipc",
},
.probe = mtk_adsp_ipc_probe,
- .remove_new = mtk_adsp_ipc_remove,
+ .remove = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);
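The mtk-adsp conversion above leans on dev_err_probe(), which folds the log-and-return pattern into a single call and handles probe deferral specially. A minimal sketch of the idiom (the channel name is a placeholder):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	static int example_request_chan(struct device *dev, struct mbox_client *cl)
	{
		struct mbox_chan *chan;

		chan = mbox_request_channel_byname(cl, "hypothetical-chan");
		if (IS_ERR(chan))
			/*
			 * Logs the message for real errors, records a deferral
			 * reason instead for -EPROBE_DEFER, and returns the
			 * error code either way.
			 */
			return dev_err_probe(dev, PTR_ERR(chan),
					     "Failed to request mbox channel\n");
		return 0;
	}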
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 2328ca58bba6..38ca190d4a22 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
static u32 psci_cpu_suspend_feature;
static bool psci_system_reset2_supported;
+static bool psci_system_off2_hibernate_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -333,6 +334,36 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
+#ifdef CONFIG_HIBERNATION
+static int psci_sys_hibernate(struct sys_off_data *data)
+{
+ /*
+ * If no hibernate type is specified, SYSTEM_OFF2 defaults to selecting
+ * HIBERNATE_OFF.
+ *
+ * There are hypervisors in the wild that do not align with the spec and
+ * reject calls that explicitly provide a hibernate type. For
+ * compatibility with these nonstandard implementations, pass 0 as the
+ * type.
+ */
+ if (system_entering_hibernation())
+ invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0);
+ return NOTIFY_DONE;
+}
+
+static int __init psci_hibernate_init(void)
+{
+ if (psci_system_off2_hibernate_supported) {
+ /* Higher priority than EFI shutdown, but only for hibernate */
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 2,
+ psci_sys_hibernate, NULL);
+ }
+ return 0;
+}
+subsys_initcall(psci_hibernate_init);
+#endif
+
static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
@@ -364,6 +395,7 @@ static const struct {
PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
PSCI_ID(1_1, MEM_PROTECT),
PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+ PSCI_ID_NATIVE(1_3, SYSTEM_OFF2),
};
static int psci_debugfs_read(struct seq_file *s, void *data)
@@ -525,6 +557,18 @@ static void __init psci_init_system_reset2(void)
psci_system_reset2_supported = true;
}
+static void __init psci_init_system_off2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2));
+ if (ret < 0)
+ return;
+
+ if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF)
+ psci_system_off2_hibernate_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -655,6 +699,7 @@ static int __init psci_probe(void)
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
+ psci_init_system_off2();
kvm_init_hyp_services();
}
@@ -759,8 +804,10 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
init_fn = (psci_initcall_t)matched_np->data;
ret = init_fn(np);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 116eb465cdb4..df02a4ec3398 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -342,8 +342,8 @@ static int suspend_test_thread(void *arg)
* Disable the timer to make sure that the timer will not trigger
* later.
*/
- del_timer(&wakeup_timer);
- destroy_timer_on_stack(&wakeup_timer);
+ timer_delete(&wakeup_timer);
+ timer_destroy_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 2b4c2826f572..574930729ddd 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -152,7 +152,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
- struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
void *args_virt __free(qcom_tzmem) = NULL;
@@ -173,6 +172,11 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
+
+ if (!mempool)
+ return -EINVAL;
+
args_virt = qcom_tzmem_alloc(mempool,
SCM_SMC_N_EXT_ARGS * sizeof(u64),
flag);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 10986cb11ec0..e777b7cb9b12 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -112,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info {
};
#define QSEECOM_MAX_APP_NAME_SIZE 64
+#define SHMBRIDGE_RESULT_NOTSUPP 4
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
@@ -216,6 +217,9 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
+ if (!qcom_scm_is_available())
+ return NULL;
+
return __scm->mempool;
}
@@ -545,7 +549,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
- } else {
+ } else if (dload_mode) {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
}
@@ -903,6 +907,32 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
+#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0)
+
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
+
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
+ .arginfo = QCOM_SCM_ARGS(4),
+ .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
+ .args[1] = 0xffffffff,
+ .args[2] = 0xffffffff,
+ .args[3] = 0xffffffff,
+ .owner = ARM_SMCCC_OWNER_SIP
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
+
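A GPU driver would be expected to gate the call on the availability check; roughly (a sketch, with a placeholder context bank number):

	/* Sketch only: context bank 0 is a placeholder value. */
	if (qcom_scm_set_gpu_smmu_aperture_is_available()) {
		int ret = qcom_scm_set_gpu_smmu_aperture(0);

		if (ret)
			pr_warn("failed to set GPU SMMU aperture: %d\n", ret);
	}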
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
struct qcom_scm_desc desc = {
@@ -1089,7 +1119,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
- return -EINVAL;
+ return ret;
}
*srcvm = next_vm;
@@ -1252,6 +1282,220 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
+bool qcom_scm_has_wrapped_key_support(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_DERIVE_SW_SECRET) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_GENERATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_PREPARE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_IMPORT_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
+
+/**
+ * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ * @sw_secret_size: size of the software secret to derive in bytes
+ *
+ * Derive a software secret from an ephemerally-wrapped key for software crypto
+ * operations. This is done by calling into the secure execution environment,
+ * which then calls into the hardware to unwrap and derive the secret.
+ *
+ * For more information on sw_secret, see the "Hardware-wrapped keys" section of
+ * Documentation/block/inline-encryption.rst.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ sw_secret_size,
+ GFP_KERNEL);
+ if (!sw_secret_buf)
+ return -ENOMEM;
+
+ memcpy(eph_key_buf, eph_key, eph_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[1] = eph_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
+ desc.args[3] = sw_secret_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(sw_secret, sw_secret_buf, sw_secret_size);
+
+ memzero_explicit(eph_key_buf, eph_key_size);
+ memzero_explicit(sw_secret_buf, sw_secret_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
+
+/**
+ * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Generate a key using the built-in HW module in the SoC. The resulting key is
+ * returned wrapped with the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
+
+/**
+ * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
+ * added protection. The resulting key will only be valid for the current boot.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ memcpy(lt_key_buf, lt_key, lt_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[3] = eph_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(eph_key, eph_key_buf, eph_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ memzero_explicit(eph_key_buf, eph_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
+
+/**
+ * qcom_scm_import_ice_key() - Import key for storage encryption
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
+ * wrap the raw key using the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ raw_key_size,
+ GFP_KERNEL);
+ if (!raw_key_buf)
+ return -ENOMEM;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ memcpy(raw_key_buf, raw_key, raw_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
+ desc.args[1] = raw_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[3] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(raw_key_buf, raw_key_size);
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
+
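Taken together, the four wrapped-key calls form a lifecycle: generate (or import) a long-term key, re-wrap it per boot, then derive the software secret. A hedged sketch; the buffer sizes below are placeholders and must match the SoC's wrapped-key size in real use:

	static int example_ice_key_lifecycle(void)
	{
		u8 lt_key[64], eph_key[64], sw_secret[32];	/* placeholder sizes */
		int ret;

		if (!qcom_scm_has_wrapped_key_support())
			return -EOPNOTSUPP;

		/* Generate a long-term wrapped key in hardware... */
		ret = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
		if (ret)
			return ret;

		/* ...re-wrap it with the per-boot ephemeral key... */
		ret = qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
					       eph_key, sizeof(eph_key));
		if (ret)
			return ret;

		/* ...and derive the software secret for upper layers. */
		return qcom_scm_derive_sw_secret(eph_key, sizeof(eph_key),
						 sw_secret, sizeof(sw_secret));
	}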
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
@@ -1359,8 +1603,16 @@ bool qcom_scm_lmh_dcvsh_available(void)
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
-int qcom_scm_shm_bridge_enable(void)
+/*
+ * This is only supposed to be called once, by the TZMem module. It takes
+ * the SCM struct device as an argument and uses it to issue the call
+ * because, at the time the SHM bridge is enabled, SCM is not yet fully
+ * set up and doesn't accept global user calls. Don't use the __scm
+ * pointer here.
+ */
+int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
+ int ret;
+
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
@@ -1369,15 +1621,23 @@ int qcom_scm_shm_bridge_enable(void)
struct qcom_scm_res res;
- if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
return -EOPNOTSUPP;
- return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+ ret = qcom_scm_call(scm_dev, &desc, &res);
+
+ if (ret)
+ return ret;
+
+ if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
+ return -EOPNOTSUPP;
+
+ return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
-int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
+int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
u64 ipfn_and_s_perm_flags, u64 size_and_flags,
u64 ns_vmids, u64 *handle)
{
@@ -1405,7 +1665,7 @@ int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
-int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
+int qcom_scm_shm_bridge_delete(u64 handle)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
@@ -1731,14 +1991,30 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "asus,vivobook-s15" },
+ { .compatible = "asus,zenbook-a14-ux3407qa" },
+ { .compatible = "asus,zenbook-a14-ux3407ra" },
+ { .compatible = "dell,inspiron-14-plus-7441" },
+ { .compatible = "dell,latitude-7455" },
+ { .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,elitebook-ultra-g1q" },
+ { .compatible = "hp,omnibook-x14" },
+ { .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkbook-16" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "lenovo,yoga-slim7x" },
+ { .compatible = "microsoft,arcata", },
+ { .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
+ { .compatible = "qcom,hamoa-iot-evk" },
{ .compatible = "qcom,sc8180x-primus" },
+ { .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
+ { .compatible = "qcom,x1p42100-crd" },
{ }
};
@@ -1822,11 +2098,128 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
#endif /* CONFIG_QCOM_QSEECOM */
/**
+ * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
+ * @inbuf: start address of memory area used for inbound buffer.
+ * @inbuf_size: size of the memory area used for inbound buffer.
+ * @outbuf: start address of memory area used for outbound buffer.
+ * @outbuf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @inbuf and @outbuf
+ * should be processed.
+ *
+ * Return: 0 on success, or a negative value on failure.
+ */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = inbuf,
+ .args[1] = inbuf_size,
+ .args[2] = outbuf,
+ .args[3] = outbuf_size,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
+
+/**
+ * qcom_scm_qtee_callback_response() - Submit response for callback request.
+ * @buf: start address of memory area used for outbound buffer.
+ * @buf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @buf should be processed.
+ *
+ * Return: 0 on success, or a negative value on failure.
+ */
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = buf,
+ .args[1] = buf_size,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_callback_response);
+
+static void qcom_scm_qtee_free(void *data)
+{
+ struct platform_device *qtee_dev = data;
+
+ platform_device_unregister(qtee_dev);
+}
+
+static void qcom_scm_qtee_init(struct qcom_scm *scm)
+{
+ struct platform_device *qtee_dev;
+ u64 result, response_type;
+ int ret;
+
+ /*
+ * Probe for smcinvoke support. The call fails because of the invalid
+ * buffers, but QTEE first checks whether the call itself is supported
+ * by its syscall handler; if it is not, -EIO is returned.
+ */
+ ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
+ if (ret == -EIO)
+ return;
+
+ /* Setup QTEE interface device. */
+ qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(qtee_dev))
+ return;
+
+ devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
+}
+
+/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
{
- return !!READ_ONCE(__scm);
+ /* Paired with smp_store_release() in qcom_scm_probe */
+ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
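The acquire/release pairing above is the standard pointer-publication pattern: the writer completes all initialization before releasing the pointer, and any reader that observes the pointer through the acquire load is guaranteed to see that initialization. A generic sketch (not driver code):

	#include <asm/barrier.h>

	struct ctx { int ready; };
	static struct ctx *global_ctx;

	static void publisher(struct ctx *c)
	{
		c->ready = 1;				/* initialize fully first */
		smp_store_release(&global_ctx, c);	/* then publish */
	}

	static int consumer(void)
	{
		struct ctx *c = smp_load_acquire(&global_ctx);

		/* If c is non-NULL, c->ready is guaranteed to read 1. */
		return c ? c->ready : 0;
	}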
@@ -1983,20 +2376,47 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Let all above stores be available after this */
- smp_store_release(&__scm, scm);
+ ret = of_reserved_mem_device_init(scm->dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+
+ ret = qcom_tzmem_enable(scm->dev);
+ if (ret)
+ return dev_err_probe(scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = 0;
+ pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
+ pool_config.max_size = SZ_256K;
+
+ scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
+ if (IS_ERR(scm->mempool))
+ return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
+ "Failed to create the SCM memory pool\n");
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
if (irq != -ENXIO)
return irq;
} else {
- ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
- IRQF_ONESHOT, "qcom-scm", __scm);
+ ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", scm);
if (ret < 0)
- return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ return dev_err_probe(scm->dev, ret,
+ "Failed to request qcom-scm irq\n");
}
+ /*
+ * Paired with smp_load_acquire() in qcom_scm_is_available().
+ *
+ * This marks the SCM API as ready to accept user calls and can only
+ * be called after the TrustZone memory pool is initialized and the
+ * waitqueue interrupt requested.
+ */
+ smp_store_release(&__scm, scm);
+
__get_convention();
/*
@@ -2012,26 +2432,6 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
qcom_scm_disable_sdi();
- ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV)
- return dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
-
- ret = qcom_tzmem_enable(__scm->dev);
- if (ret)
- return dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
-
- memset(&pool_config, 0, sizeof(pool_config));
- pool_config.initial_size = 0;
- pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
- pool_config.max_size = SZ_256K;
-
- __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool))
- return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
-
/*
* Initialize the QSEECOM interface.
*
@@ -2045,6 +2445,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ /* Initialize the QTEE object interface. */
+ qcom_scm_qtee_init(scm);
+
return 0;
}
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 685b8f59e7a6..a56c8212cc0c 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -44,8 +44,11 @@ enum qcom_scm_arg_types {
/**
* struct qcom_scm_desc
+ * @svc: Service identifier
+ * @cmd: Command identifier
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
+ * @owner: Owner identifier
*/
struct qcom_scm_desc {
u32 svc;
@@ -80,6 +83,7 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
+int qcom_scm_shm_bridge_enable(struct device *scm_dev);
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
@@ -116,6 +120,7 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
+#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b
#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c
#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d
#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e
@@ -127,6 +132,10 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07
+#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08
+#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09
+#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
@@ -147,6 +156,13 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+/* ARM_SMCCC_OWNER_TRUSTED_OS calls */
+
+#define QCOM_SCM_SVC_SMCINVOKE 0x06
+#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
+#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
+#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 92b365178235..9f232e53115e 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "qcom_scm.h"
#include "qcom_tzmem.h"
struct qcom_tzmem_area {
@@ -76,9 +77,11 @@ static bool qcom_tzmem_using_shm_bridge;
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc7180", /* hang in rmtfs memory assignment */
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm7150", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
};
@@ -93,7 +96,7 @@ static int qcom_tzmem_init(void)
goto notsupp;
}
- ret = qcom_scm_shm_bridge_enable();
+ ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
if (ret == -EOPNOTSUPP)
goto notsupp;
@@ -107,7 +110,19 @@ notsupp:
return 0;
}
-static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+/**
+ * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
+ * @paddr: Physical address of the memory to share.
+ * @size: Size of the memory to share.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function creates a SHM bridge
+ * for the given memory region with QTEE. The handle returned by this function
+ * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
+ *
+ * Return: 0 on success, or a negative value on failure.
+ */
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
int ret;
@@ -115,17 +130,49 @@ static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
if (!qcom_tzmem_using_shm_bridge)
return 0;
- pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+ pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
+ size_and_flags, QCOM_SCM_VMID_HLOS,
+ handle);
+ if (ret) {
+ dev_err(qcom_tzmem_dev,
+ "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
+ ret, &paddr, size);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
+
+/**
+ * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function deletes the SHM bridge
+ * for the given memory region. The handle must be the same as the one
+ * returned by qcom_tzmem_shm_bridge_create().
+ */
+void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+ if (qcom_tzmem_using_shm_bridge)
+ qcom_scm_shm_bridge_delete(handle);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ int ret;
u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
- ipfn_and_s_perm, size_and_flags,
- QCOM_SCM_VMID_HLOS, handle);
+ ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
if (ret)
return ret;
@@ -138,10 +185,7 @@ static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
u64 *handle = area->priv;
- if (!qcom_tzmem_using_shm_bridge)
- return;
-
- qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
+ qcom_tzmem_shm_bridge_delete(*handle);
kfree(handle);
}
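The newly exported pair lets other modules bridge an arbitrary region they already own; a hedged usage sketch (the physical address and size come from the caller):

	static int example_bridge_region(phys_addr_t paddr, size_t size)
	{
		u64 handle;
		int ret;

		ret = qcom_tzmem_shm_bridge_create(paddr, size, &handle);
		if (ret)
			return ret;

		/* ... let the secure world use the region ... */

		qcom_tzmem_shm_bridge_delete(handle);
		return 0;
	}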
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 85c525745b31..0eebd572f9a5 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -460,7 +460,7 @@ static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
/* raw-read method and attribute */
static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
@@ -474,7 +474,7 @@ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
return fw_cfg_read_blob(entry->select, buf, pos, count);
}
-static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+static const struct bin_attribute fw_cfg_sysfs_attr_raw = {
.attr = { .name = "raw", .mode = S_IRUSR },
.read = fw_cfg_sysfs_read_raw,
};
@@ -757,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
static struct platform_driver fw_cfg_sysfs_driver = {
.probe = fw_cfg_sysfs_probe,
- .remove_new = fw_cfg_sysfs_remove,
+ .remove = fw_cfg_sysfs_remove,
.driver = {
.name = "fw_cfg",
.of_match_table = fw_cfg_sysfs_mmio_match,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 18cc34987108..7ecde6921a0a 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -406,7 +406,7 @@ static struct platform_driver rpi_firmware_driver = {
},
.probe = rpi_firmware_probe,
.shutdown = rpi_firmware_shutdown,
- .remove_new = rpi_firmware_remove,
+ .remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig
new file mode 100644
index 000000000000..16d81aeb1d41
--- /dev/null
+++ b/drivers/firmware/samsung/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EXYNOS_ACPM_PROTOCOL
+ tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Alive Clock and Power Manager (ACPM) Message Protocol is defined for
+ the purpose of communication between the ACPM firmware and masters
+ (AP, AOC, ...). ACPM firmware operates on the Active Power Management
+ (APM) module that handles overall power activities.
+
+ This protocol driver provides an interface for all the client drivers
+ making use of the features offered by the APM.
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
new file mode 100644
index 000000000000..7b4c9f6f34f5
--- /dev/null
+++ b/drivers/firmware/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
new file mode 100644
index 000000000000..961d7599e422
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
+#define ACPM_PMIC_TYPE GENMASK(11, 8)
+#define ACPM_PMIC_REG GENMASK(7, 0)
+
+#define ACPM_PMIC_RETURN GENMASK(31, 24)
+#define ACPM_PMIC_MASK GENMASK(23, 16)
+#define ACPM_PMIC_VALUE GENMASK(15, 8)
+#define ACPM_PMIC_FUNC GENMASK(7, 0)
+
+#define ACPM_PMIC_BULK_SHIFT 8
+#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
+#define ACPM_PMIC_BULK_MAX_COUNT 8
+
+enum exynos_acpm_pmic_func {
+ ACPM_PMIC_READ,
+ ACPM_PMIC_WRITE,
+ ACPM_PMIC_UPDATE,
+ ACPM_PMIC_BULK_READ,
+ ACPM_PMIC_BULK_WRITE,
+};
+
+static const int acpm_pmic_linux_errmap[] = {
+ [0] = 0, /* ACPM_PMIC_SUCCESS */
+ [1] = -EACCES, /* Read failed: register inaccessible or access error. */
+ [2] = -EACCES, /* Write failed: register inaccessible or access error. */
+};
+
+static int acpm_pmic_to_linux_err(int err)
+{
+ if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap))
+ return acpm_pmic_linux_errmap[err];
+ return -EIO;
+}
+
+static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
+{
+ return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
+}
+
+static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
+{
+ return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
+}
+
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
+ unsigned int acpm_chan_id)
+{
+ xfer->txd = cmd;
+ xfer->rxd = cmd;
+ xfer->txlen = cmdlen;
+ xfer->rxlen = cmdlen;
+ xfer->acpm_chan_id = acpm_chan_id;
+}
+
+static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_read_cmd(cmd, type, reg, chan);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
+
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
+}
+
+static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+}
+
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int i, ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
+ else
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
+ }
+
+ return 0;
+}
+
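The bulk helpers pack four 8-bit register values per 32-bit command word; a standalone round-trip illustration of that layout (the values are arbitrary):

	u32 lo = 0, hi = 0;
	u8 in[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }, out[6];
	unsigned int i;

	for (i = 0; i < 6; i++) {
		if (i < 4)
			lo |= acpm_pmic_set_bulk(in[i], i);
		else
			hi |= acpm_pmic_set_bulk(in[i], i - 4);
	}
	/* lo == 0x44332211, hi == 0x00006655 */
	for (i = 0; i < 6; i++)
		out[i] = i < 4 ? acpm_pmic_get_bulk(lo, i)
			       : acpm_pmic_get_bulk(hi, i - 4);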
+static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
+}
+
+static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ int i;
+
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
+ else
+ cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
+ }
+}
+
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
+}
+
+static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value) |
+ FIELD_PREP(ACPM_PMIC_MASK, mask);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
+}
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h
new file mode 100644
index 000000000000..078421888a14
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_PMIC_H__
+#define __EXYNOS_ACPM_PMIC_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+#endif /* __EXYNOS_ACPM_PMIC_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
new file mode 100644
index 000000000000..3a69fe3234c7
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ktime.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
+
+#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC)
+#define ACPM_TX_TIMEOUT_US 500000
+
+#define ACPM_GS101_INITDATA_BASE 0xa000
+
+/**
+ * struct acpm_shmem - shared memory configuration information.
+ * @reserved: unused fields.
+ * @chans: offset to array of struct acpm_chan_shmem.
+ * @reserved1: unused fields.
+ * @num_chans: number of channels.
+ */
+struct acpm_shmem {
+ u32 reserved[2];
+ u32 chans;
+ u32 reserved1[3];
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_chan_shmem - descriptor of a shared memory channel.
+ *
+ * @id: channel ID.
+ * @reserved: unused fields.
+ * @rx_rear: rear pointer of APM RX queue (TX for AP).
+ * @rx_front: front pointer of APM RX queue (TX for AP).
+ * @rx_base: base address of APM RX queue (TX for AP).
+ * @reserved1: unused fields.
+ * @tx_rear: rear pointer of APM TX queue (RX for AP).
+ * @tx_front: front pointer of APM TX queue (RX for AP).
+ * @tx_base: base address of APM TX queue (RX for AP).
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @reserved2: unused fields.
+ * @poll_completion: true when the channel works on polling.
+ */
+struct acpm_chan_shmem {
+ u32 id;
+ u32 reserved[3];
+ u32 rx_rear;
+ u32 rx_front;
+ u32 rx_base;
+ u32 reserved1[3];
+ u32 tx_rear;
+ u32 tx_front;
+ u32 tx_base;
+ u32 qlen;
+ u32 mlen;
+ u32 reserved2[2];
+ u32 poll_completion;
+};
+
+/**
+ * struct acpm_queue - exynos acpm queue.
+ *
+ * @rear: rear address of the queue.
+ * @front: front address of the queue.
+ * @base: base address of the queue.
+ */
+struct acpm_queue {
+ void __iomem *rear;
+ void __iomem *front;
+ void __iomem *base;
+};
+
+/**
+ * struct acpm_rx_data - RX queue data.
+ *
+ * @cmd: pointer to where the data shall be saved.
+ * @n_cmd: number of 32-bit commands.
+ * @response: true if the client expects the RX data.
+ */
+struct acpm_rx_data {
+ u32 *cmd;
+ size_t n_cmd;
+ bool response;
+};
+
+#define ACPM_SEQNUM_MAX 64
+
+/**
+ * struct acpm_chan - driver internal representation of a channel.
+ * @cl: mailbox client.
+ * @chan: mailbox channel.
+ * @acpm: pointer to driver private data.
+ * @tx: TX queue. The enqueue is done by the host.
+ * - front index is written by the host.
+ * - rear index is written by the firmware.
+ *
+ * @rx: RX queue. The enqueue is done by the firmware.
+ * - front index is written by the firmware.
+ * - rear index is written by the host.
+ * @tx_lock: protects TX queue.
+ * @rx_lock: protects RX queue.
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @seqnum: sequence number of the last message enqueued on TX queue.
+ * @id: channel ID.
+ * @poll_completion: indicates if the transfer needs to be polled for
+ * completion or interrupt mode is used.
+ * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
+ * @rx_data: internal buffer used to drain the RX queue.
+ */
+struct acpm_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct acpm_info *acpm;
+ struct acpm_queue tx;
+ struct acpm_queue rx;
+ struct mutex tx_lock;
+ struct mutex rx_lock;
+
+ unsigned int qlen;
+ unsigned int mlen;
+ u8 seqnum;
+ u8 id;
+ bool poll_completion;
+
+ DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
+ struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
+};
+
+/**
+ * struct acpm_info - driver's private data.
+ * @shmem: pointer to the SRAM configuration data.
+ * @sram_base: base address of SRAM.
+ * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
+ * @dev: pointer to the exynos-acpm device.
+ * @handle: instance of acpm_handle to send to clients.
+ * @num_chans: number of channels available for this controller.
+ */
+struct acpm_info {
+ struct acpm_shmem __iomem *shmem;
+ void __iomem *sram_base;
+ struct acpm_chan *chans;
+ struct device *dev;
+ struct acpm_handle handle;
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_match_data - of_device_id data.
+ * @initdata_base: offset in SRAM where the channels configuration resides.
+ */
+struct acpm_match_data {
+ loff_t initdata_base;
+};
+
+#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
+#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
+
+/**
+ * acpm_get_saved_rx() - get the response if it was already saved.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ * @tx_seqnum: xfer TX sequence number.
+ */
+static void acpm_get_saved_rx(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer, u32 tx_seqnum)
+{
+ const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
+ u32 rx_seqnum;
+
+ if (!rx_data->response)
+ return;
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+}
+
+/**
+ * acpm_get_rx() - get response from RX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
+{
+ u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
+ const void __iomem *base, *addr;
+ struct acpm_rx_data *rx_data;
+ u32 i, val, mlen;
+ bool rx_set = false;
+
+ guard(mutex)(&achan->rx_lock);
+
+ rx_front = readl(achan->rx.front);
+ i = readl(achan->rx.rear);
+
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ if (i == rx_front) {
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
+ return 0;
+ }
+
+ base = achan->rx.base;
+ mlen = achan->mlen;
+
+ /* Drain RX queue. */
+ do {
+ /* Read RX seqnum. */
+ addr = base + mlen * i;
+ val = readl(addr);
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
+ if (!rx_seqnum)
+ return -EIO;
+ /*
+ * Message seqnums start at 1, whereas the driver indexes
+ * messages from 0.
+ */
+ seqnum = rx_seqnum - 1;
+ rx_data = &achan->rx_data[seqnum];
+
+ if (rx_data->response) {
+ if (rx_seqnum == tx_seqnum) {
+ __ioread32_copy(xfer->rxd, addr,
+ xfer->rxlen / 4);
+ rx_set = true;
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ } else {
+ /*
+ * The RX data corresponds to another request.
+ * Save the data to drain the queue, but don't
+ * clear the bitmap yet; it will be cleared once
+ * the response is copied to its request.
+ */
+ __ioread32_copy(rx_data->cmd, addr,
+ xfer->rxlen / 4);
+ }
+ } else {
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ }
+
+ i = (i + 1) % achan->qlen;
+ } while (i != rx_front);
+
+ /* We saved all responses, mark RX empty. */
+ writel(rx_front, achan->rx.rear);
+
+ /*
+ * If the response was not in this iteration of the queue, check if the
+ * RX data was previously saved.
+ */
+ if (!rx_set)
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
+
+ return 0;
+}
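The drain loop above walks a single-producer/single-consumer ring: the firmware advances the RX front index as it enqueues, the host advances the rear index as it consumes, and the queue is empty when the two indices meet. A standalone sketch of the index arithmetic, with hypothetical names:

    /* Sketch of the ring convention used by the TX/RX queues. */
    static inline bool ring_empty(u32 front, u32 rear)
    {
            return front == rear;
    }

    static inline u32 ring_next(u32 idx, u32 qlen)
    {
            return (idx + 1) % qlen; /* qlen need not be a power of two */
    }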
+
+/**
+ * acpm_dequeue_by_polling() - RX dequeue by polling.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_dequeue_by_polling(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct device *dev = achan->acpm->dev;
+ ktime_t timeout;
+ u32 seqnum;
+ int ret;
+
+ seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
+ do {
+ ret = acpm_get_rx(achan, xfer);
+ if (ret)
+ return ret;
+
+ if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
+ return 0;
+
+ /* Determined experimentally. */
+ udelay(20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0]);
+
+ return -ETIME;
+}
+
+/**
+ * acpm_wait_for_queue_slots() - wait for queue slots.
+ *
+ * @achan: ACPM channel info.
+ * @next_tx_front: next front index of the TX queue.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Wait for RX front to keep up with TX front. Make sure there's at
+ * least one element between them.
+ */
+ ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret) {
+ dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret)
+ dev_err(achan->acpm->dev, "TX queue is full.\n");
+
+ return ret;
+}
+
+/**
+ * acpm_prepare_xfer() - prepare a transfer before writing the message to the
+ * TX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being prepared.
+ */
+static void acpm_prepare_xfer(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct acpm_rx_data *rx_data;
+ u32 *txd = (u32 *)xfer->txd;
+
+ /* Prevent achan->seqnum from being reused. */
+ do {
+ if (++achan->seqnum == ACPM_SEQNUM_MAX)
+ achan->seqnum = 1;
+ } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
+
+ txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
+
+ /* Clear data for upcoming responses */
+ rx_data = &achan->rx_data[achan->seqnum - 1];
+ memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
+ if (xfer->rxd)
+ rx_data->response = true;
+
+ /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
+ set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
+}
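The allocator above hands out sequence numbers 1..63, wrapping before ACPM_SEQNUM_MAX and skipping numbers whose bitmap bit is still set (i.e. still in flight). A standalone sketch of the same policy, using assumed names:

    #define SEQNUM_MAX 64

    /* Sketch: wrap within 1..SEQNUM_MAX-1 and skip in-flight numbers. */
    static u8 next_seqnum(u8 cur, const unsigned long *in_flight)
    {
            do {
                    if (++cur == SEQNUM_MAX)
                            cur = 1; /* seqnum 0 is never used */
            } while (test_bit(cur - 1, in_flight));

            return cur;
    }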
+
+/**
+ * acpm_wait_for_message_response() - a helper to group all possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_message_response(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ /* Only polling mode is supported for now. */
+ return acpm_dequeue_by_polling(achan, xfer);
+}
+
+/**
+ * acpm_do_xfer() - do one transfer.
+ * @handle: pointer to the acpm handle.
+ * @xfer: transfer to initiate and wait for response.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct exynos_mbox_msg msg;
+ struct acpm_chan *achan;
+ u32 idx, tx_front;
+ int ret;
+
+ if (xfer->acpm_chan_id >= acpm->num_chans)
+ return -EINVAL;
+
+ achan = &acpm->chans[xfer->acpm_chan_id];
+
+ if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
+ return -EINVAL;
+
+ if (!achan->poll_completion) {
+ dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+
+ scoped_guard(mutex, &achan->tx_lock) {
+ tx_front = readl(achan->tx.front);
+ idx = (tx_front + 1) % achan->qlen;
+
+ ret = acpm_wait_for_queue_slots(achan, idx);
+ if (ret)
+ return ret;
+
+ acpm_prepare_xfer(achan, xfer);
+
+ /* Write TX command. */
+ __iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
+ xfer->txd, xfer->txlen / 4);
+
+ /* Advance TX front. */
+ writel(idx, achan->tx.front);
+
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
+
+ mbox_client_txdone(achan->chan, 0);
+ }
+
+ return acpm_wait_for_message_response(achan, xfer);
+}
+
+/**
+ * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
+ * TX/RX queues.
+ * @achan: ACPM channel info.
+ * @chan_shmem: __iomem pointer to a channel described in shared memory.
+ */
+static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
+ struct acpm_chan_shmem __iomem *chan_shmem)
+{
+ void __iomem *base = achan->acpm->sram_base;
+ struct acpm_queue *rx = &achan->rx;
+ struct acpm_queue *tx = &achan->tx;
+
+ achan->mlen = readl(&chan_shmem->mlen);
+ achan->poll_completion = readl(&chan_shmem->poll_completion);
+ achan->id = readl(&chan_shmem->id);
+ achan->qlen = readl(&chan_shmem->qlen);
+
+ tx->base = base + readl(&chan_shmem->rx_base);
+ tx->rear = base + readl(&chan_shmem->rx_rear);
+ tx->front = base + readl(&chan_shmem->rx_front);
+
+ rx->base = base + readl(&chan_shmem->tx_base);
+ rx->rear = base + readl(&chan_shmem->tx_rear);
+ rx->front = base + readl(&chan_shmem->tx_front);
+
+ dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
+ achan->id, achan->poll_completion, achan->mlen, achan->qlen);
+}
+
+/**
+ * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
+ * firmware.
+ * @achan: ACPM channel info.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
+{
+ struct device *dev = achan->acpm->dev;
+ struct acpm_rx_data *rx_data;
+ size_t cmd_size, n_cmd;
+ int i;
+
+ if (achan->mlen == 0)
+ return 0;
+
+ cmd_size = sizeof(*(achan->rx_data[0].cmd));
+ n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
+
+ for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
+ rx_data = &achan->rx_data[i];
+ rx_data->n_cmd = n_cmd;
+ rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
+ if (!rx_data->cmd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_free_mbox_chans() - free mailbox channels.
+ * @acpm: pointer to driver data.
+ */
+static void acpm_free_mbox_chans(struct acpm_info *acpm)
+{
+ int i;
+
+ for (i = 0; i < acpm->num_chans; i++)
+ if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
+ mbox_free_channel(acpm->chans[i].chan);
+}
+
+/**
+ * acpm_channels_init() - initialize channels based on the configuration data in
+ * the shared memory.
+ * @acpm: pointer to driver data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_channels_init(struct acpm_info *acpm)
+{
+ struct acpm_shmem __iomem *shmem = acpm->shmem;
+ struct acpm_chan_shmem __iomem *chans_shmem;
+ struct device *dev = acpm->dev;
+ int i, ret;
+
+ acpm->num_chans = readl(&shmem->num_chans);
+ acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
+ GFP_KERNEL);
+ if (!acpm->chans)
+ return -ENOMEM;
+
+ chans_shmem = acpm->sram_base + readl(&shmem->chans);
+
+ for (i = 0; i < acpm->num_chans; i++) {
+ struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
+ struct acpm_chan *achan = &acpm->chans[i];
+ struct mbox_client *cl = &achan->cl;
+
+ achan->acpm = acpm;
+
+ acpm_chan_shmem_get_params(achan, chan_shmem);
+
+ ret = acpm_achan_alloc_cmds(achan);
+ if (ret)
+ return ret;
+
+ mutex_init(&achan->rx_lock);
+ mutex_init(&achan->tx_lock);
+
+ cl->dev = dev;
+
+ achan->chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(achan->chan)) {
+ acpm_free_mbox_chans(acpm);
+ return PTR_ERR(achan->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_setup_ops() - setup the operations structures.
+ * @acpm: pointer to the driver data.
+ */
+static void acpm_setup_ops(struct acpm_info *acpm)
+{
+ struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+
+ pmic_ops->read_reg = acpm_pmic_read_reg;
+ pmic_ops->bulk_read = acpm_pmic_bulk_read;
+ pmic_ops->write_reg = acpm_pmic_write_reg;
+ pmic_ops->bulk_write = acpm_pmic_bulk_write;
+ pmic_ops->update_reg = acpm_pmic_update_reg;
+}
+
+static int acpm_probe(struct platform_device *pdev)
+{
+ const struct acpm_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem;
+ struct acpm_info *acpm;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
+ if (!acpm)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(dev->of_node, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get shared memory.\n");
+
+ size = resource_size(&res);
+ acpm->sram_base = devm_ioremap(dev, res.start, size);
+ if (!acpm->sram_base)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to ioremap shared memory.\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get match data.\n");
+
+ acpm->shmem = acpm->sram_base + match_data->initdata_base;
+ acpm->dev = dev;
+
+ ret = acpm_channels_init(acpm);
+ if (ret)
+ return ret;
+
+ acpm_setup_ops(acpm);
+
+ platform_set_drvdata(pdev, acpm);
+
+ return devm_of_platform_populate(dev);
+}
+
+/**
+ * acpm_handle_put() - release the handle acquired by acpm_get_by_node().
+ * @handle: Handle acquired by acpm_get_by_node().
+ */
+static void acpm_handle_put(const struct acpm_handle *handle)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct device *dev = acpm->dev;
+
+ module_put(dev->driver->owner);
+ /* Drop reference taken with of_find_device_by_node(). */
+ put_device(dev);
+}
+
+/**
+ * devm_acpm_release() - devres release method.
+ * @dev: pointer to device.
+ * @res: pointer to resource.
+ */
+static void devm_acpm_release(struct device *dev, void *res)
+{
+ acpm_handle_put(*(struct acpm_handle **)res);
+}
+
+/**
+ * acpm_get_by_node() - get the ACPM handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct device_link *link;
+ struct acpm_info *acpm;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ acpm = platform_get_drvdata(pdev);
+ if (!acpm) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s.\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ module_put(pdev->dev.driver->owner);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &acpm->handle;
+}
+
+/**
+ * devm_acpm_get_by_node() - managed get handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
+{
+ const struct acpm_handle **ptr, *handle;
+
+ ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ handle = acpm_get_by_node(dev, np);
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
+
+static const struct acpm_match_data acpm_gs101 = {
+ .initdata_base = ACPM_GS101_INITDATA_BASE,
+};
+
+static const struct of_device_id acpm_match[] = {
+ {
+ .compatible = "google,gs101-acpm-ipc",
+ .data = &acpm_gs101,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, acpm_match);
+
+static struct platform_driver acpm_driver = {
+ .probe = acpm_probe,
+ .driver = {
+ .name = "exynos-acpm-protocol",
+ .of_match_table = acpm_match,
+ },
+};
+module_platform_driver(acpm_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h
new file mode 100644
index 000000000000..2d14cb58f98c
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_H__
+#define __EXYNOS_ACPM_H__
+
+struct acpm_xfer {
+ const u32 *txd;
+ u32 *rxd;
+ size_t txlen;
+ size_t rxlen;
+ unsigned int acpm_chan_id;
+};
+
+struct acpm_handle;
+
+int acpm_do_xfer(const struct acpm_handle *handle,
+ const struct acpm_xfer *xfer);
+
+#endif /* __EXYNOS_ACPM_H__ */
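A hedged sketch of how a client might drive this interface; the channel ID, command words, and buffer sizes below are illustrative assumptions, not part of the interface declared above:

    /* Sketch only: channel ID and command encoding are hypothetical. */
    static int example_acpm_request(const struct acpm_handle *handle)
    {
            u32 txd[4] = { 0 }; /* seqnum is filled in by acpm_do_xfer() */
            u32 rxd[4];
            struct acpm_xfer xfer = {
                    .txd = txd,
                    .rxd = rxd, /* non-NULL: a response is expected */
                    .txlen = sizeof(txd),
                    .rxlen = sizeof(rxd),
                    .acpm_chan_id = 2, /* assumed channel */
            };

            return acpm_do_xfer(handle, &xfer);
    }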
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..49e1de83d2e8 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,25 +6,22 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
void __init kvm_init_hyp_services(void)
{
+ uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
struct arm_smccc_res res;
u32 val[4];
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
- if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
- res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
- res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
- res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
return;
memset(&res, 0, sizeof(res));
@@ -51,3 +48,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+ struct target_impl_cpu *target;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+ if (!target) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_warn("Discovering target implementation CPUs failed\n");
+ goto mem_free;
+ }
+ target[i].midr = res.a1;
+ target[i].revidr = res.a2;
+ target[i].aidr = res.a3;
+ }
+
+ if (!cpu_errata_set_target_impl(max_cpus, target)) {
+ pr_warn("Failed to set target implementation CPUs\n");
+ goto mem_free;
+ }
+
+ pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+ return;
+
+mem_free:
+ memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index d670635914ec..bdee057db2fd 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
-u64 __ro_after_init smccc_has_sve_hint = false;
s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
@@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
smccc_conduit = conduit;
smccc_trng_available = smccc_probe_trng();
- if (IS_ENABLED(CONFIG_ARM64_SVE) &&
- smccc_version >= ARM_SMCCC_VERSION_1_3)
- smccc_has_sve_hint = true;
if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
(smccc_conduit != SMCCC_CONDUIT_NONE)) {
@@ -71,6 +67,20 @@ s32 arm_smccc_get_soc_id_revision(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
+{
+ struct arm_smccc_res res = {};
+ uuid_t uuid;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3);
+ return uuid_equal(&uuid, hyp_uuid);
+}
+EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid);
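kvm_guest.c above is the first user of this helper; a minimal sketch of the same call for any hypervisor UUID known at build time:

    /* Sketch: detect a specific vendor hypervisor by its SMCCC UUID. */
    static bool example_running_under_kvm(void)
    {
            uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;

            return arm_smccc_hypervisor_has_uuid(&kvm_uuid);
    }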
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 1990263fbba0..c24b3fca1cfe 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
+#ifdef CONFIG_ARM64
+
+static char __ro_after_init smccc_soc_id_name[136] = "";
+
+static inline void str_fragment_from_reg(char *dst, unsigned long reg)
+{
+ dst[0] = (reg >> 0) & 0xff;
+ dst[1] = (reg >> 8) & 0xff;
+ dst[2] = (reg >> 16) & 0xff;
+ dst[3] = (reg >> 24) & 0xff;
+ dst[4] = (reg >> 32) & 0xff;
+ dst[5] = (reg >> 40) & 0xff;
+ dst[6] = (reg >> 48) & 0xff;
+ dst[7] = (reg >> 56) & 0xff;
+}
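The byte-wise shifts make the copy endianness-independent: the SoC name string occupies each register from its least significant byte upward, which is what a plain copy would produce only on a little-endian kernel. On a big-endian configuration the shortcut below would reverse each 8-byte fragment, which is why the explicit form above is used:

    /* Little-endian-only equivalent; NOT correct on big-endian. */
    memcpy(dst, &reg, sizeof(reg));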
+
+static char __init *smccc_soc_name_init(void)
+{
+ struct arm_smccc_1_2_regs args;
+ struct arm_smccc_1_2_regs res;
+ size_t len;
+
+ /*
+ * Issue Number 1.6 of the Arm SMC Calling Convention
+ * specification introduces an optional "name" string
+ * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
+ * available.
+ */
+ args.a0 = ARM_SMCCC_ARCH_SOC_ID;
+ args.a1 = 2; /* SOC_ID name */
+ arm_smccc_1_2_invoke(&args, &res);
+
+ if ((u32)res.a0 == 0) {
+ /*
+ * Copy res.a1..res.a17 to the smccc_soc_id_name string
+ * 8 bytes at a time. As per Issue 1.6 of the Arm SMC
+ * Calling Convention, the string will be NUL terminated
+ * and padded, from the end of the string to the end of the
+ * 136 byte buffer, with NULs.
+ */
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
+
+ len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
+ if (len) {
+ if (len == sizeof(smccc_soc_id_name))
+ pr_warn(FW_BUG "Ignoring improperly formatted name\n");
+ else
+ return smccc_soc_id_name;
+ }
+ }
+
+ return NULL;
+}
+
+#else
+
+static char __init *smccc_soc_name_init(void)
+{
+ return NULL;
+}
+
+#endif
+
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
+ soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index e20cee9c2d32..1ea39a0a76c7 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -802,7 +802,7 @@ static void stratix10_rsu_remove(struct platform_device *pdev)
static struct platform_driver stratix10_rsu_driver = {
.probe = stratix10_rsu_probe,
- .remove_new = stratix10_rsu_remove,
+ .remove = stratix10_rsu_remove,
.driver = {
.name = "stratix10-rsu",
.dev_groups = rsu_groups,
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 528f37417aea..e3f990d888d7 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -967,18 +967,15 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
/* first client will create kernel thread */
if (!chan->ctrl->task) {
chan->ctrl->task =
- kthread_create_on_node(svc_normal_to_secure_thread,
- (void *)chan->ctrl,
- cpu_to_node(cpu),
- "svc_smc_hvc_thread");
+ kthread_run_on_cpu(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu, "svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
- kthread_bind(chan->ctrl->task, cpu);
- wake_up_process(chan->ctrl->task);
}
pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
@@ -1227,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
+ ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
+ if (ret)
+ goto err_unregister_fcs_dev;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_dev:
+err_unregister_fcs_dev:
+ platform_device_unregister(svc->intel_svc_fcs);
+err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1256,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ of_platform_depopulate(ctrl->dev);
+
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
@@ -1271,7 +1276,7 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
static struct platform_driver stratix10_svc_driver = {
.probe = stratix10_svc_drv_probe,
- .remove_new = stratix10_svc_drv_remove,
+ .remove = stratix10_svc_drv_remove,
.driver = {
.name = "stratix10-svc",
.of_match_table = stratix10_svc_drv_match,
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index a3df782fa687..889e5b05c739 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -79,6 +79,25 @@ void sysfb_disable(struct device *dev)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+/**
+ * sysfb_handles_screen_info() - reports if sysfb handles the global screen_info
+ *
+ * Callers can use sysfb_handles_screen_info() to determine whether the Generic
+ * System Framebuffers (sysfb) can handle the global screen_info data structure
+ * or not. Drivers might need this information to know if they have to set up
+ * the system framebuffer, or delegate this action to sysfb instead.
+ *
+ * Returns:
+ * True if sysfb handles the global screen_info data structure.
+ */
+bool sysfb_handles_screen_info(void)
+{
+ const struct screen_info *si = &screen_info;
+
+ return !!screen_info_video_type(si);
+}
+EXPORT_SYMBOL_GPL(sysfb_handles_screen_info);
+
#if defined(CONFIG_PCI)
static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
@@ -124,6 +143,7 @@ static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
struct device *parent;
+ unsigned int type;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
@@ -151,17 +171,26 @@ static __init int sysfb_init(void)
goto put_device;
}
+ type = screen_info_video_type(si);
+
/* if the FB is incompatible, create a legacy framebuffer device */
- if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
- name = "efi-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
- name = "vesa-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
- name = "vga-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
+ switch (type) {
+ case VIDEO_TYPE_EGAC:
name = "ega-framebuffer";
- else
+ break;
+ case VIDEO_TYPE_VGAC:
+ name = "vga-framebuffer";
+ break;
+ case VIDEO_TYPE_VLFB:
+ name = "vesa-framebuffer";
+ break;
+ case VIDEO_TYPE_EFI:
+ name = "efi-framebuffer";
+ break;
+ default:
name = "platform-framebuffer";
+ break;
+ }
pd = platform_device_alloc(name, 0);
if (!pd) {
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 75a186bf8f8e..592d8a644619 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
- /*
- * The meaning of depth and bpp for direct-color formats is
- * inconsistent:
- *
- * - DRM format info specifies depth as the number of color
- * bits; including alpha, but not including filler bits.
- * - Linux' EFI platform code computes lfb_depth from the
- * individual color channels, including the reserved bits.
- * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
- * versions use 15.
- * - On the kernel command line, 'bpp' of 32 is usually
- * XRGB8888 including the filler bits, but 15 is XRGB1555
- * not including the filler bit.
- *
- * It's not easily possible to fix this in struct screen_info,
- * as this could break UAPI. The best solution is to compute
- * bits_per_pixel from the color bits, reserved bits and
- * reported lfb_depth, whichever is highest. In the loop below,
- * ignore simplefb formats with alpha bits, as EFI and VESA
- * don't specify alpha channels.
- */
- if (si->lfb_depth > 8) {
- bits_per_pixel = max(max3(si->red_size + si->red_pos,
- si->green_size + si->green_pos,
- si->blue_size + si->blue_pos),
- si->rsvd_size + si->rsvd_pos);
- bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
- } else {
- bits_per_pixel = si->lfb_depth;
- }
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
index cde1ab8bd9d1..91f2320c0d0f 100644
--- a/drivers/firmware/tegra/Kconfig
+++ b/drivers/firmware/tegra/Kconfig
@@ -2,7 +2,7 @@
menu "Tegra firmware driver"
config TEGRA_IVC
- bool "Tegra IVC protocol"
+ bool "Tegra IVC protocol" if COMPILE_TEST
depends on ARCH_TEGRA
help
IVC (Inter-VM Communication) protocol is part of the IPC
@@ -13,8 +13,9 @@ config TEGRA_IVC
config TEGRA_BPMP
bool "Tegra BPMP driver"
- depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX
depends on !CPU_BIG_ENDIAN
+ select TEGRA_IVC
help
 BPMP (Boot and Power Management Processor) is designed to off-load
 the PM functions, which include clock/DVFS/thermal/power, from the CPU.
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 620cf3fdd607..41e2e4dc31d6 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -4,6 +4,7 @@ tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_234_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_264_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
index 182bfe396516..07c3d46abb87 100644
--- a/drivers/firmware/tegra/bpmp-private.h
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -23,13 +23,7 @@ struct tegra_bpmp_ops {
int (*resume)(struct tegra_bpmp *bpmp);
};
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
-#endif
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
-#endif
#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 6f0d0511b486..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -6,7 +6,7 @@
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
@@ -192,18 +192,16 @@ static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
- struct device_node *np;
struct resource res;
size_t size;
int err;
- np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
- if (!np)
- return -ENODEV;
-
- err = of_address_to_resource(np, 0, &res);
+ err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 2bee6e918f81..e74bba7ccc44 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -3,7 +3,6 @@
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
-#include <linux/cleanup.h>
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
@@ -35,24 +34,29 @@ channel_to_ops(struct tegra_bpmp_channel *channel)
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
- struct device_node *np __free(device_node);
struct platform_device *pdev;
struct tegra_bpmp *bpmp;
+ struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
if (!np)
return ERR_PTR(-ENOENT);
pdev = of_find_device_by_node(np);
- if (!pdev)
- return ERR_PTR(-ENODEV);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
bpmp = platform_get_drvdata(pdev);
if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
put_device(&pdev->dev);
- return ERR_PTR(-EPROBE_DEFER);
+ goto put;
}
+put:
+ of_node_put(np);
return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
@@ -832,7 +836,8 @@ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
@@ -880,7 +885,8 @@ static const struct tegra_bpmp_soc tegra210_soc = {
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/firmware/thead,th1520-aon.c b/drivers/firmware/thead,th1520-aon.c
new file mode 100644
index 000000000000..38f812ac9920
--- /dev/null
+++ b/drivers/firmware/thead,th1520-aon.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/device.h>
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/slab.h>
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
+#define MAX_TX_TIMEOUT 500
+
+struct th1520_aon_chan {
+ struct mbox_chan *ch;
+ struct th1520_aon_rpc_ack_common ack_msg;
+ struct mbox_client cl;
+ struct completion done;
+
+ /* make sure only one RPC is performed at a time */
+ struct mutex transaction_lock;
+};
+
+struct th1520_aon_msg_req_set_resource_power_mode {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u16 resource;
+ u16 mode;
+ u16 reserved[10];
+} __packed __aligned(1);
+
+/*
+ * This type is used to indicate error response for most functions.
+ */
+enum th1520_aon_error_codes {
+ LIGHT_AON_ERR_NONE = 0, /* Success */
+ LIGHT_AON_ERR_VERSION = 1, /* Incompatible API version */
+ LIGHT_AON_ERR_CONFIG = 2, /* Configuration error */
+ LIGHT_AON_ERR_PARM = 3, /* Bad parameter */
+ LIGHT_AON_ERR_NOACCESS = 4, /* Permission error (no access) */
+ LIGHT_AON_ERR_LOCKED = 5, /* Permission error (locked) */
+ LIGHT_AON_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ LIGHT_AON_ERR_NOTFOUND = 7, /* Not found */
+ LIGHT_AON_ERR_NOPOWER = 8, /* No power */
+ LIGHT_AON_ERR_IPC = 9, /* Generic IPC error */
+ LIGHT_AON_ERR_BUSY = 10, /* Resource is currently busy/active */
+ LIGHT_AON_ERR_FAIL = 11, /* General I/O failure */
+ LIGHT_AON_ERR_LAST
+};
+
+static int th1520_aon_linux_errmap[LIGHT_AON_ERR_LAST] = {
+ 0, /* LIGHT_AON_ERR_NONE */
+ -EINVAL, /* LIGHT_AON_ERR_VERSION */
+ -EINVAL, /* LIGHT_AON_ERR_CONFIG */
+ -EINVAL, /* LIGHT_AON_ERR_PARM */
+ -EACCES, /* LIGHT_AON_ERR_NOACCESS */
+ -EACCES, /* LIGHT_AON_ERR_LOCKED */
+ -ERANGE, /* LIGHT_AON_ERR_UNAVAILABLE */
+ -EEXIST, /* LIGHT_AON_ERR_NOTFOUND */
+ -EPERM, /* LIGHT_AON_ERR_NOPOWER */
+ -EPIPE, /* LIGHT_AON_ERR_IPC */
+ -EBUSY, /* LIGHT_AON_ERR_BUSY */
+ -EIO, /* LIGHT_AON_ERR_FAIL */
+};
+
+static inline int th1520_aon_to_linux_errno(int errno)
+{
+ if (errno >= LIGHT_AON_ERR_NONE && errno < LIGHT_AON_ERR_LAST)
+ return th1520_aon_linux_errmap[errno];
+
+ return -EIO;
+}
+
+static void th1520_aon_rx_callback(struct mbox_client *c, void *rx_msg)
+{
+ struct th1520_aon_chan *aon_chan =
+ container_of(c, struct th1520_aon_chan, cl);
+ struct th1520_aon_rpc_msg_hdr *hdr =
+ (struct th1520_aon_rpc_msg_hdr *)rx_msg;
+ u8 recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size;
+
+ if (recv_size != sizeof(struct th1520_aon_rpc_ack_common)) {
+ dev_err(c->dev, "Invalid ack size, not completing\n");
+ return;
+ }
+
+ memcpy(&aon_chan->ack_msg, rx_msg, recv_size);
+ complete(&aon_chan->done);
+}
+
+/**
+ * th1520_aon_call_rpc() - Send an RPC request to the TH1520 AON subsystem
+ * @aon_chan: Pointer to the AON channel structure
+ * @msg: Pointer to the message (RPC payload) that will be sent
+ *
+ * This function sends an RPC message to the TH1520 AON subsystem via mailbox.
+ * It takes the provided @msg buffer, formats it with version and service flags,
+ * then blocks until the RPC completes or times out. The completion is signaled
+ * by the `aon_chan->done` completion, which is waited upon for a duration
+ * defined by `MAX_RX_TIMEOUT`.
+ *
+ * Return:
+ * * 0 on success
+ * * -ETIMEDOUT if the RPC call times out
+ * * A negative error code if the mailbox send fails or if AON responds with
+ * a non-zero error code (converted via th1520_aon_to_linux_errno()).
+ */
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg)
+{
+ struct th1520_aon_rpc_msg_hdr *hdr = msg;
+ int ret;
+
+ mutex_lock(&aon_chan->transaction_lock);
+ reinit_completion(&aon_chan->done);
+
+ RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION);
+ RPC_SET_SVC_ID(hdr, hdr->svc);
+ RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA);
+ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK);
+
+ ret = mbox_send_message(aon_chan->ch, msg);
+ if (ret < 0) {
+ dev_err(aon_chan->cl.dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&aon_chan->done, MAX_RX_TIMEOUT)) {
+ dev_err(aon_chan->cl.dev, "RPC send msg timeout\n");
+ mutex_unlock(&aon_chan->transaction_lock);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Convert the firmware status on the ack path only; a mailbox
+ * failure already left a Linux errno in 'ret'.
+ */
+ ret = th1520_aon_to_linux_errno(aon_chan->ack_msg.err_code);
+
+out:
+ mutex_unlock(&aon_chan->transaction_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_call_rpc);
+
+/**
+ * th1520_aon_power_update() - Change power state of a resource via TH1520 AON
+ * @aon_chan: Pointer to the AON channel structure
+ * @rsrc: Resource ID whose power state needs to be updated
+ * @power_on: Boolean indicating whether the resource should be powered on (true)
+ * or powered off (false)
+ *
+ * This function requests the TH1520 AON subsystem to set the power mode of the
+ * given resource (@rsrc) to either on or off. It constructs the message in
+ * `struct th1520_aon_msg_req_set_resource_power_mode` and then invokes
+ * th1520_aon_call_rpc() to make the request. If the AON call fails, an error
+ * message is logged along with the specific return code.
+ *
+ * Return:
+ * * 0 on success
+ * * A negative error code in case of failures (propagated from
+ * th1520_aon_call_rpc()).
+ */
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on)
+{
+ struct th1520_aon_msg_req_set_resource_power_mode msg = {};
+ struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr;
+ int ret;
+
+ hdr->svc = TH1520_AON_RPC_SVC_PM;
+ hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE;
+ hdr->size = TH1520_AON_RPC_MSG_NUM;
+
+ RPC_SET_BE16(&msg.resource, 0, rsrc);
+ RPC_SET_BE16(&msg.resource, 2,
+ (power_on ? TH1520_AON_PM_PW_MODE_ON :
+ TH1520_AON_PM_PW_MODE_OFF));
+
+ ret = th1520_aon_call_rpc(aon_chan, &msg);
+ if (ret)
+ dev_err(aon_chan->cl.dev, "failed to power %s resource %d ret %d\n",
+ power_on ? "on" : "off", rsrc, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_power_update);
+
+/**
+ * th1520_aon_init() - Initialize TH1520 AON firmware protocol interface
+ * @dev: Device pointer for the AON subsystem
+ *
+ * This function initializes the TH1520 AON firmware protocol interface by:
+ * - Allocating and initializing the AON channel structure
+ * - Setting up the mailbox client
+ * - Requesting the AON mailbox channel
+ * - Initializing synchronization primitives
+ *
+ * Return:
+ * * Valid pointer to th1520_aon_chan structure on success
+ * * ERR_PTR(-ENOMEM) if memory allocation fails
+ * * ERR_PTR() with other negative error codes from mailbox operations
+ */
+struct th1520_aon_chan *th1520_aon_init(struct device *dev)
+{
+ struct th1520_aon_chan *aon_chan;
+ struct mbox_client *cl;
+ int ret;
+
+ aon_chan = kzalloc(sizeof(*aon_chan), GFP_KERNEL);
+ if (!aon_chan)
+ return ERR_PTR(-ENOMEM);
+
+ cl = &aon_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = MAX_TX_TIMEOUT;
+ cl->rx_callback = th1520_aon_rx_callback;
+
+ aon_chan->ch = mbox_request_channel_byname(cl, "aon");
+ if (IS_ERR(aon_chan->ch)) {
+ dev_err(dev, "Failed to request aon mbox chan\n");
+ ret = PTR_ERR(aon_chan->ch);
+ kfree(aon_chan);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&aon_chan->transaction_lock);
+ init_completion(&aon_chan->done);
+
+ return aon_chan;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_init);
+
+/**
+ * th1520_aon_deinit() - Clean up TH1520 AON firmware protocol interface
+ * @aon_chan: Pointer to the AON channel structure to clean up
+ *
+ * This function cleans up resources allocated by th1520_aon_init():
+ * - Frees the mailbox channel
+ * - Frees the AON channel
+ */
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan)
+{
+ mbox_free_channel(aon_chan->ch);
+ kfree(aon_chan);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_deinit);
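A sketch of the expected lifecycle from a consumer driver; the resource ID is an illustrative assumption:

    /* Sketch: power on a hypothetical AON-managed resource. */
    static int example_power_on(struct device *dev, u16 rsrc)
    {
            struct th1520_aon_chan *aon;
            int ret;

            aon = th1520_aon_init(dev);
            if (IS_ERR(aon))
                    return PTR_ERR(aon);

            ret = th1520_aon_power_update(aon, rsrc, true);

            th1520_aon_deinit(aon);
            return ret;
    }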
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 Always-On firmware protocol library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 160968301b1f..49fd2ae01055 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,13 +2,14 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bitmap.h>
+#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
@@ -19,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/property.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/suspend.h>
+#include <linux/sys_soc.h>
#include <linux/reboot.h>
#include "ti_sci.h"
@@ -98,6 +102,7 @@ struct ti_sci_desc {
* @minfo: Message info
* @node: list head
* @host_id: Host ID
+ * @fw_caps: FW/SoC low power capabilities
* @users: Number of users of this instance
*/
struct ti_sci_info {
@@ -114,6 +119,7 @@ struct ti_sci_info {
struct ti_sci_xfers_info minfo;
struct list_head node;
u8 host_id;
+ u64 fw_caps;
/* protected by ti_sci_list_mutex */
int users;
};
@@ -1651,6 +1657,405 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
+ * @handle: pointer to TI SCI handle
+ * @mode: Low power mode to enter
+ * @ctx_lo: Low part of address for context save
+ * @ctx_hi: High part of address for context save
+ * @debug_flags: Debug flags to pass to firmware
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
+ u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_prepare_sleep *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
+ req->mode = mode;
+ req->ctx_lo = ctx_lo;
+ req->ctx_hi = ctx_hi;
+ req->debug_flags = debug_flags;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
+ * @handle: Pointer to TI SCI handle
+ * @fw_caps: Each bit in fw_caps indicates one FW/SoC capability
+ *
+ * Check if the firmware supports any optional low power modes.
+ * Old revisions of TIFS (< 08.04) will NACK the request which results in
+ * -ENODEV being returned.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
+ u64 *fw_caps)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_query_fw_caps *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get capabilities\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (fw_caps)
+ *fw_caps = resp->fw_caps;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_io_isolation() - Set the IO isolation state used in LPM
+ * @handle: Pointer to TI SCI handle
+ * @state: The desired state of the IO isolation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
+ u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_io_isolation *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set IO isolation\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
+ * @handle: Pointer to TI SCI handle
+ * @source: The wakeup source that woke the SoC from LPM
+ * @timestamp: Timestamp of the wakeup event
+ * @pin: The pin that has triggered wake up
+ * @mode: The last entered low power mode
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_lpm_wake_reason *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get wake reason\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (source)
+ *source = resp->wake_source;
+ if (timestamp)
+ *timestamp = resp->wake_timestamp;
+ if (pin)
+ *pin = resp->wake_pin;
+ if (mode)
+ *mode = resp->mode;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @state: The desired state of device constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
+ u32 id, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_device_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set device constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
+ * @handle: pointer to TI SCI handle
+ * @latency: maximum acceptable latency (in ms) to wake up from LPM
+ * @state: The desired state of latency constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
+ u16 latency, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_latency_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
+ req->latency = latency;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set device constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_lpm_abort() - Abort LPM entry by clearing the selected low power mode
+ * @dev: Device pointer corresponding to the SCI entity
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_lpm_abort(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct ti_sci_msg_hdr *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ int ret = 0;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -2793,6 +3198,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_pm_ops *pmops = &ops->pm_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
@@ -2832,6 +3238,13 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ pr_debug("detected DM managed LPM in fw_caps\n");
+ pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
+ pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
+ pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
+ }
+
rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
rm_core_ops->get_range_from_shost =
ti_sci_cmd_get_resource_range_from_shost;
@@ -3262,6 +3675,130 @@ static int tisci_reboot_handler(struct sys_off_data *data)
return NOTIFY_BAD;
}
+static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
+{
+ /*
+ * Map and validate the target Linux suspend state to TISCI LPM.
+ * Default is to let Device Manager select the low power mode.
+ */
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_MEM:
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ /*
+ * For the DM_MANAGED mode the context is reserved for
+ * internal use and can be 0
+ */
+ return ti_sci_cmd_prepare_sleep(&info->handle,
+ TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
+ 0, 0, 0);
+ } else {
+ /* DM Managed is not supported by the firmware. */
+ dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ /*
+ * Do not fail if we don't have action to take for a
+ * specific suspend mode.
+ */
+ return 0;
+ }
+}
+
+static int __maybe_unused ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct device *cpu_dev, *cpu_dev_max = NULL;
+ s32 val, cpu_lat = 0;
+ u16 cpu_lat_ms;
+ int i, ret;
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ for_each_possible_cpu(i) {
+ cpu_dev = get_cpu_device(i);
+ val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
+ cpu_lat = max(cpu_lat, val);
+ cpu_dev_max = cpu_dev;
+ }
+ }
+ if (cpu_dev_max) {
+ /*
+ * PM QoS latency unit is usecs, device manager uses msecs.
+ * Convert to msecs and round down for device manager.
+ */
+ cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
+ cpu_lat_ms);
+ ret = ti_sci_cmd_set_latency_constraint(&info->handle,
+ cpu_lat_ms,
+ TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ti_sci_prepare_system_suspend(info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
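The round-down in the usecs-to-msecs conversion errs on the strict side: a 2999 us PM QoS budget becomes a 2 ms firmware constraint rather than a looser 3 ms one. A standalone sketch of the aggregation step, with assumed inputs:

    /* Sketch: pick the largest per-CPU resume latency and convert to ms. */
    static u16 example_max_latency_ms(const s32 *lat_us, int n)
    {
            s32 max_us = 0;
            int i;

            for (i = 0; i < n; i++)
                    max_us = max(max_us, lat_us[i]);

            return max_us / USEC_PER_MSEC; /* e.g. 2999 us -> 2 ms */
    }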
+
+static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 source;
+ u64 time;
+ u8 pin;
+ u8 mode;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+
+ ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
+ /* Do not fail to resume on error as the wake reason is not critical */
+ if (!ret)
+ dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
+ source, pin, mode);
+
+ return 0;
+}
+
+static void __maybe_unused ti_sci_pm_complete(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
+ if (ti_sci_cmd_lpm_abort(dev))
+ dev_err(dev, "LPM clear selection failed.\n");
+ }
+}
+
+static const struct dev_pm_ops ti_sci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ti_sci_suspend,
+ .suspend_noirq = ti_sci_suspend_noirq,
+ .resume_noirq = ti_sci_resume_noirq,
+ .complete = ti_sci_pm_complete,
+#endif
+};
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3390,6 +3927,14 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
+ info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
+ );
+
ti_sci_setup_ops(info);
ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
@@ -3421,8 +3966,9 @@ static struct platform_driver ti_sci_driver = {
.probe = ti_sci_probe,
.driver = {
.name = "ti-sci",
- .of_match_table = of_match_ptr(ti_sci_of_match),
+ .of_match_table = ti_sci_of_match,
.suppress_bind_attrs = true,
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 5846c60220f5..701c416b2e78 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: https://software-dl.ti.com/tisci/esd/latest/index.html for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
@@ -19,6 +19,7 @@
#define TI_SCI_MSG_WAKE_REASON 0x0003
#define TI_SCI_MSG_GOODBYE 0x0004
#define TI_SCI_MSG_SYS_RESET 0x0005
+#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022
/* Device requests */
#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
@@ -35,6 +36,14 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Low Power Mode Requests */
+#define TI_SCI_MSG_PREPARE_SLEEP 0x0300
+#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306
+#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
+#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
+#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+#define TI_SCI_MSG_LPM_ABORT 0x0311
+
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -133,6 +142,29 @@ struct ti_sci_msg_req_reboot {
} __packed;
/**
+ * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps
+ * @hdr: Generic header
+ * @fw_caps: Each bit in fw_caps indicates one FW/SoC capability:
+ * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
+ * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
+ * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
+ *
+ * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
+ * providing the currently available SoC/firmware capabilities. SoCs that do not
+ * support low power modes return only the MSG_FLAG_CAPS_GENERIC capability.
+ */
+struct ti_sci_msg_resp_query_fw_caps {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
+#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
+ u64 fw_caps;
+} __packed;
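A small illustration of how these capability bits are meant to be tested (the helper name is hypothetical; the driver itself open-codes the checks):

static inline bool example_fw_supports_dm_managed_lpm(u64 fw_caps)
{
	return !!(fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED);
}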
+
+/**
* struct ti_sci_msg_req_set_device_state - Set the desired state of the device
* @hdr: Generic header
* @id: Indicates which device to modify
@@ -545,6 +577,118 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+/**
+ * struct ti_sci_msg_req_prepare_sleep - Request for TISCI_MSG_PREPARE_SLEEP.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @mode: Low power mode to enter.
+ * @ctx_lo: Low 32-bits of physical pointer to address to use for context save.
+ * @ctx_hi: High 32-bits of physical pointer to address to use for context save.
+ * @debug_flags: Flags that can be set to halt the sequence during suspend or
+ * resume to allow JTAG connection and debug.
+ *
+ * This message is used as the first step of entering a low power mode. It
+ * allows configurable information, including which state to enter, to be
+ * easily shared from the application, as this is a non-secure message and
+ * therefore can be sent by anyone.
+ */
+struct ti_sci_msg_req_prepare_sleep {
+ struct ti_sci_msg_hdr hdr;
+
+#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
+ u8 mode;
+ u32 ctx_lo;
+ u32 ctx_hi;
+ u32 debug_flags;
+} __packed;
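A sketch of populating this request: a 64-bit context-save address splits across @ctx_lo/@ctx_hi, while the DM-managed path in this patch passes zeros for both halves (the helper is illustrative):

static void example_fill_prepare_sleep(struct ti_sci_msg_req_prepare_sleep *req,
				       u64 ctx_addr)
{
	req->mode = TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED;
	req->ctx_lo = lower_32_bits(ctx_addr);
	req->ctx_hi = upper_32_bits(ctx_addr);
	req->debug_flags = 0;
}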
+
+/**
+ * struct ti_sci_msg_req_set_io_isolation - Request for TI_SCI_MSG_SET_IO_ISOLATION.
+ *
+ * @hdr: Generic header
+ * @state: The desired state of the IO isolation.
+ *
+ * This message is used to enable/disable IO isolation for low power modes.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_io_isolation {
+ struct ti_sci_msg_hdr hdr;
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON.
+ *
+ * @hdr: Generic header.
+ * @wake_source: The wakeup source that woke the SoC from LPM.
+ * @wake_timestamp: Timestamp at which the SoC woke.
+ * @wake_pin: The pin that has triggered wake up.
+ * @mode: The last entered low power mode.
+ * @rsvd: Reserved for future use.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON,
+ * used to query the wake up source, pin and entered low power mode.
+ */
+struct ti_sci_msg_resp_lpm_wake_reason {
+ struct ti_sci_msg_hdr hdr;
+ u32 wake_source;
+ u64 wake_timestamp;
+ u8 wake_pin;
+ u8 mode;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_device_constraint - Request for
+ * TISCI_MSG_LPM_SET_DEVICE_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @id: Device ID of device whose constraint has to be modified.
+ * @state: The desired state of device constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set a constraint on a device. It can be
+ * sent any time after boot and before the prepare sleep message. Any device can set a
+ * constraint on the low power mode that the SoC can enter. It allows
+ * configurable information to be easily shared from the application, as this
+ * is a non-secure message and therefore can be sent by anyone. By setting a
+ * constraint, the device ensures that it will not be powered off or reset in
+ * the selected mode. Note: Access Restriction: Exclusivity flag of Device will
+ * be honored. If some other host already has constraint on this device ID,
+ * NACK will be returned.
+ */
+struct ti_sci_msg_req_lpm_set_device_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u8 state;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_latency_constraint - Request for
+ * TISCI_MSG_LPM_SET_LATENCY_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @latency: The maximum acceptable latency to wake up from low power mode
+ * in milliseconds. The deeper the state, the higher the latency.
+ * @state: The desired state of wakeup latency constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set the wakeup latency from low power mode. It
+ * can be sent any time after boot and before the prepare sleep message, or after the
+ * current low power mode is exited. Any host can set a constraint on the low power
+ * mode that the SoC can enter. It allows configurable information to be easily shared
+ * from the application, as this is a non-secure message and therefore can be sent by
+ * anyone. By setting a wakeup latency constraint, the host ensures that the resume time
+ * from selected low power mode will be less than the constraint value.
+ */
+struct ti_sci_msg_req_lpm_set_latency_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u16 latency;
+ u8 state;
+ u32 rsvd;
+} __packed;
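Since @latency is a u16 in milliseconds while PM QoS publishes s32 microseconds, the value has to be rounded and bounded before it fits this field; a minimal sketch (the clamping policy is an assumption — the driver itself only divides):

static u16 example_qos_us_to_dm_ms(s32 qos_us)
{
	u32 ms = qos_us / USEC_PER_MSEC;	/* round down, as ti_sci_suspend() does */

	return min_t(u32, ms, U16_MAX);		/* avoid silent u16 truncation */
}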
+
#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
/**
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index f3bc0d427825..1eac9948148f 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,29 +2,31 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org>
*/
#include <crypto/sha2.h>
#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/container_of.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/key.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sysfs.h>
+#include <linux/turris-signing-key.h>
#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
@@ -37,10 +39,13 @@
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
-#define MOX_ECC_NUMBER_WORDS 17
-#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
-
-#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+enum {
+ MOX_ECC_NUM_BITS = 521,
+ MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8),
+ MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32),
+ MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN,
+ MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN,
+};
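The derived constants work out as follows; compile-time checks of this arithmetic could look like (illustrative only):

static_assert(MOX_ECC_NUM_LEN == 66);		/* ceil(521 / 8) */
static_assert(MOX_ECC_NUM_WORDS == 17);		/* ceil(521 / 32) */
static_assert(MOX_ECC_SIG_LEN == 132);		/* R and S, 66 bytes each */
static_assert(MOX_ECC_PUBKEY_LEN == 67);	/* 1 prefix byte + 66 */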
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
@@ -61,6 +66,24 @@ enum mbox_cmd {
MBOX_CMD_OTP_WRITE = 8,
};
+/**
+ * struct mox_rwtm - driver private data structure
+ * @mbox_client: rWTM mailbox client
+ * @mbox: rWTM mailbox channel
+ * @hwrng: RNG driver structure
+ * @reply: last mailbox reply, filled in receive callback
+ * @buf: DMA buffer
+ * @buf_phys: physical address of the DMA buffer
+ * @busy: mutex to protect mailbox command execution
+ * @cmd_done: command done completion
+ * @has_board_info: whether board information is present
+ * @serial_number: serial number of the device
+ * @board_version: board version / revision of the device
+ * @ram_size: RAM size of the device
+ * @mac_address1: first MAC address of the device
+ * @mac_address2: second MAC address of the device
+ * @pubkey: board ECDSA public key
+ */
struct mox_rwtm {
struct mbox_client mbox_client;
struct mbox_chan *mbox;
@@ -74,25 +97,13 @@ struct mox_rwtm {
struct mutex busy;
struct completion cmd_done;
- /* board information */
bool has_board_info;
u64 serial_number;
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- /* public key burned in eFuse */
- bool has_pubkey;
- u8 pubkey[135];
-
-#ifdef CONFIG_DEBUG_FS
- /*
- * Signature process. This is currently done via debugfs, because it
- * does not conform to the sysfs standard "one file per attribute".
- * It should be rewritten via crypto API once akcipher API is available
- * from userspace.
- */
- u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
- bool last_sig_done;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
+ u8 pubkey[MOX_ECC_PUBKEY_LEN];
#endif
};
@@ -101,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
return rwtm->mbox_client.dev;
}
-#define MOX_ATTR_RO(name, format, cat) \
+#define MOX_ATTR_RO(name, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
- if (!rwtm->has_##cat) \
+ if (!rwtm->has_board_info) \
return -ENODATA; \
return sysfs_emit(buf, format, rwtm->name); \
} \
static DEVICE_ATTR_RO(name)
-MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
-MOX_ATTR_RO(board_version, "%i\n", board_info);
-MOX_ATTR_RO(ram_size, "%i\n", board_info);
-MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
-MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
-MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+MOX_ATTR_RO(serial_number, "%016llX\n");
+MOX_ATTR_RO(board_version, "%i\n");
+MOX_ATTR_RO(ram_size, "%i\n");
+MOX_ATTR_RO(mac_address1, "%pM\n");
+MOX_ATTR_RO(mac_address2, "%pM\n");
static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_serial_number.attr,
@@ -126,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_ram_size.attr,
&dev_attr_mac_address1.attr,
&dev_attr_mac_address2.attr,
- &dev_attr_pubkey.attr,
NULL
};
ATTRIBUTE_GROUPS(turris_mox_rwtm);
@@ -228,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
- if (ret == -ENODATA) {
- dev_warn(dev, "Board has no public key burned!\n");
- } else if (ret == -EOPNOTSUPP) {
- dev_notice(dev,
- "Firmware does not support the ECDSA_PUB_KEY command\n");
- } else if (ret < 0) {
- return ret;
- } else {
- u32 *s = reply->status;
-
- rwtm->has_pubkey = true;
- sprintf(rwtm->pubkey,
- "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
- ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
- s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
- }
-
return 0;
}
@@ -287,127 +278,139 @@ unlock_mutex:
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int rwtm_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
-
- return nonseekable_open(inode, file);
-}
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
-static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
+static void mox_ecc_number_to_bin(void *dst, const u32 *src)
{
- struct mox_rwtm *rwtm = file->private_data;
- ssize_t ret;
+ __be32 tmp[MOX_ECC_NUM_WORDS];
- /* only allow one read, of whole signature, from position 0 */
- if (*ppos != 0)
- return 0;
+ cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS);
- if (len < sizeof(rwtm->last_sig))
- return -EINVAL;
+ memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN);
+}
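The "+ 2" offset above is safe because seventeen big-endian words occupy 68 bytes while a 521-bit number needs only 66, so the two most-significant bytes are always zero:

/*
 * MOX_ECC_NUM_WORDS * sizeof(u32) = 68 bytes in tmp[]
 * MOX_ECC_NUM_LEN                 = 66 bytes of number
 * 68 - 66                         =  2 leading zero bytes skipped
 */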
- if (!rwtm->last_sig_done)
- return -ENODATA;
+static void mox_ecc_public_key_to_bin(void *dst, u32 src_first,
+ const u32 *src_rest)
+{
+ __be32 tmp[MOX_ECC_NUM_WORDS - 1];
+ u8 *p = dst;
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
- sizeof(rwtm->last_sig));
- rwtm->last_sig_done = false;
+ /* take 3 bytes from the first word */
+ *p++ = src_first >> 16;
+ *p++ = src_first >> 8;
+ *p++ = src_first;
- return ret;
+ /* take the rest of the words */
+ cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1);
+ memcpy(p, tmp, sizeof(tmp));
}
-static ssize_t do_sign_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static int mox_rwtm_sign(const struct key *key, const void *data, void *signature)
{
- struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_tx_msg msg;
- loff_t dummy = 0;
- ssize_t ret;
-
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
-
- /* if last result is not zero user has not read that information yet */
- if (rwtm->last_sig_done)
- return -EBUSY;
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+ struct armada_37xx_rwtm_tx_msg msg = {};
+ u32 offset_r, offset_s;
+ int ret;
- if (!mutex_trylock(&rwtm->busy))
- return -EBUSY;
+ guard(mutex)(&rwtm->busy);
/*
- * Here we have to send:
- * 1. Address of the input to sign.
- * The input is an array of 17 32-bit words, the first (most
- * significat) is 0, the rest 16 words are copied from the SHA-512
- * hash given by the user and converted from BE to LE.
- * 2. Address of the buffer where ECDSA signature value R shall be
- * stored by the rWTM firmware.
- * 3. Address of the buffer where ECDSA signature value S shall be
- * stored by the rWTM firmware.
+ * For MBOX_CMD_SIGN command:
+ * args[0] - must be 1
+ * args[1] - address of message M to sign; message is a 521-bit number
+ * args[2] - address where the R part of the signature will be stored
+ * args[3] - address where the S part of the signature will be stored
+ *
+ * M, R and S are 521-bit numbers encoded as seventeen 32-bit words,
+	 * most significant word first.
+	 * Since the message in @data is a SHA-512 digest, the most significant
+ * word is always zero.
*/
+
+ offset_r = MOX_ECC_NUM_WORDS * sizeof(u32);
+ offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32);
+
memset(rwtm->buf, 0, sizeof(u32));
- ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
- SHA512_DIGEST_SIZE, &dummy, buf, len);
- if (ret < 0)
- goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
+ memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS);
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
- msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
+ msg.args[2] = rwtm->buf_phys + offset_r;
+ msg.args[3] = rwtm->buf_phys + offset_s;
ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
- goto unlock_mutex;
+ return ret;
- /*
- * Here we read the R and S values of the ECDSA signature
- * computed by the rWTM firmware and convert their words from
- * LE to BE.
- */
- memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
- sizeof(rwtm->last_sig));
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
- MOX_ECC_SIGNATURE_WORDS);
- rwtm->last_sig_done = true;
+ /* convert R and S parts of the signature */
+ mox_ecc_number_to_bin(signature, rwtm->buf + offset_r);
+ mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s);
- mutex_unlock(&rwtm->busy);
- return len;
-unlock_mutex:
- mutex_unlock(&rwtm->busy);
- return ret;
+ return 0;
}
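The resulting DMA buffer layout, derived from the offset_r/offset_s computation above:

/*
 * rwtm->buf layout for MBOX_CMD_SIGN (byte offsets):
 *   [  0 ..  67]  message M   (zero word + SHA-512 digest, 17 words)
 *   [ 68 .. 135]  signature R (written back by the rWTM firmware)
 *   [136 .. 203]  signature S (written back by the rWTM firmware)
 */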
-static const struct file_operations do_sign_fops = {
- .owner = THIS_MODULE,
- .open = rwtm_debug_open,
- .read = do_sign_read,
- .write = do_sign_write,
-};
-
-static void rwtm_debugfs_release(void *root)
+static const void *mox_rwtm_get_public_key(const struct key *key)
{
- debugfs_remove_recursive(root);
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return rwtm->pubkey;
}
-static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static const struct turris_signing_key_subtype mox_signing_key_subtype = {
+ .key_size = MOX_ECC_NUM_BITS,
+ .data_size = SHA512_DIGEST_SIZE,
+ .sig_size = MOX_ECC_SIG_LEN,
+ .public_key_size = MOX_ECC_PUBKEY_LEN,
+ .hash_algo = "sha512",
+ .get_public_key = mox_rwtm_get_public_key,
+ .sign = mox_rwtm_sign,
+};
+
+static int mox_register_signing_key(struct mox_rwtm *rwtm)
{
- struct dentry *root;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct device *dev = rwtm_dev(rwtm);
+ int ret;
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
+ if (ret == -ENODATA) {
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ char sn[17] = "unknown";
+ char desc[46];
+
+ if (rwtm->has_board_info)
+ sprintf(sn, "%016llX", rwtm->serial_number);
+
+ sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn);
- debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
+ mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status);
- devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
+ ret = devm_turris_signing_key_create(dev,
+ &mox_signing_key_subtype,
+ desc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot create signing key\n");
+ }
+
+ return 0;
}
-#else
-static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+
+#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */
+
+static inline int mox_register_signing_key(struct mox_rwtm *rwtm)
{
+ return 0;
}
-#endif
+
+#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */
static void rwtm_devm_mbox_release(void *mbox)
{
@@ -458,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = mox_register_signing_key(rwtm);
+ if (ret < 0)
+ return ret;
+
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
@@ -472,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- rwtm_register_debugfs(rwtm);
-
dev_info(dev, "HWRNG successfully registered\n");
/*
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 8528850af889..22853ae0efdf 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,51 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
static struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_ioctl - PM IOCTL for device control and configs
+ * @node: Node ID of the device
+ * @ioctl: ID of the requested IOCTL
+ * @arg1: Argument 1 of requested IOCTL call
+ * @arg2: Argument 2 of requested IOCTL call
+ * @arg3: Argument 3 of requested IOCTL call
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1,
+ const u32 arg2, const u32 arg3, u32 *out)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3);
+}
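A hedged usage sketch for the pass-through helper above, reading general storage register 0 via IOCTL_READ_GGS (the wrapper name is illustrative; payload indexing follows the PM_IOCTL handling below, where the value is printed from index 1):

static int example_read_ggs0(u32 *value)
{
	u32 out[PAYLOAD_ARG_CNT] = {0};
	int ret;

	/* the node id is unused for IOCTL_READ_GGS, so pass 0 */
	ret = zynqmp_pm_ioctl(0, IOCTL_READ_GGS, 0, 0, 0, out);
	if (!ret)
		*value = out[1];	/* out[0] carries the status word */

	return ret;
}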
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -95,6 +133,128 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_FORCE_POWERDOWN:
+ ret = zynqmp_pm_force_pwrdwn(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = zynqmp_pm_request_wake(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = zynqmp_pm_system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = zynqmp_pm_request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = zynqmp_pm_release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = zynqmp_pm_set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = zynqmp_pm_reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = zynqmp_pm_reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = zynqmp_pm_get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = zynqmp_pm_pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = zynqmp_pm_pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = zynqmp_pm_pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = zynqmp_pm_ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ pm_api_arg[4], &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS ||
+ pm_api_arg[1] == IOCTL_READ_REG))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ if (!ret && pm_api_arg[1] == IOCTL_GET_QOS)
+ sprintf(debugfs_buf, "Default QoS: %u\nCurrent QoS: %u\n",
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = zynqmp_pm_clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = zynqmp_pm_clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = zynqmp_pm_clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = zynqmp_pm_clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = zynqmp_pm_clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = zynqmp_pm_clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = zynqmp_pm_clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
@@ -150,7 +310,7 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
char *kern_buff, *tmp_buff;
char *pm_api_req;
u32 pm_id = 0;
- u64 pm_api_arg[4] = {0, 0, 0, 0};
+ u64 pm_api_arg[5] = {0, 0, 0, 0, 0};
/* Return values from PM APIs calls */
u32 pm_api_ret[4] = {0, 0, 0, 0};
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index add8acf66a9c..02da3e48bc8f 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>
@@ -46,6 +47,7 @@ static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
static u32 query_features[FEATURE_PAYLOAD_SIZE];
+static u32 sip_svc_version;
static struct platform_device *em_dev;
/**
@@ -151,6 +153,9 @@ static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -191,6 +196,9 @@ static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -218,11 +226,14 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
* Feature check of TF-A APIs is done in the TF-A layer and it expects for
* MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
*/
- if (module_id == TF_A_MODULE_ID)
+ if (module_id == TF_A_MODULE_ID) {
module_id = PM_MODULE_ID;
+ smc_arg[1] = api_id;
+ } else {
+ smc_arg[1] = (api_id & API_ID_MASK);
+ }
smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
- smc_arg[1] = (api_id & API_ID_MASK);
ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
if (ret)
@@ -332,6 +343,70 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
/**
+ * zynqmp_pm_invoke_fw_fn() - Invoke the system-level platform management layer
+ * caller function depending on the configuration
+ * @pm_api_id: Requested PM-API call
+ * @ret_payload: Returned value array
+ * @num_args: Number of arguments to requested PM-API call
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following SMC Calling Convention (SMCCC) for SMC64:
+ * PM Function Identifier,
+ * PM_SIP_SVC + PASS_THROUGH_FW_CMD_ID =
+ * ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
+ * ((SMC_64) << FUNCID_CC_SHIFT) |
+ * ((SIP_START) << FUNCID_OEN_SHIFT) |
+ * (PASS_THROUGH_FW_CMD_ID))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PASS_THROUGH_FW_CMD_ID - Fixed SiP SVC call ID for FW specific calls.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
+{
+ /*
+	 * The SiP service call Function Identifier is built into smc_arg[0],
+	 * which must end up in register x0 of the SMC call.
+ */
+ u64 smc_arg[SMC_ARG_CNT_64];
+ int ret, i;
+ va_list arg_list;
+ u32 args[SMC_ARG_CNT_32] = {0};
+ u32 module_id;
+
+ if (num_args > SMC_ARG_CNT_32)
+ return -EINVAL;
+
+	/* Check the feature before starting the va_list, so early return is safe */
+	ret = zynqmp_pm_feature(pm_api_id);
+	if (ret < 0)
+		return ret;
+
+	va_start(arg_list, num_args);
+
+	for (i = 0; i < num_args; i++)
+		args[i] = va_arg(arg_list, u32);
+
+	va_end(arg_list);
+
+ module_id = FIELD_GET(PLM_MODULE_ID_MASK, pm_api_id);
+
+ if (module_id == 0)
+ module_id = XPM_MODULE_ID;
+
+ smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
+ smc_arg[1] = ((u64)args[0] << 32U) | FIELD_PREP(PLM_MODULE_ID_MASK, module_id) |
+ (pm_api_id & API_ID_MASK);
+ for (i = 1; i < (SMC_ARG_CNT_64 - 1); i++)
+ smc_arg[i + 1] = ((u64)args[(i * 2)] << 32U) | args[(i * 2) - 1];
+
+ return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+ smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
+}
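A worked example of the packing loop above: with num_args = 3 and args = {A0, A1, A2},

/*
 * smc_arg[1] = ((u64)A0 << 32) | module_id_field | api_id
 * smc_arg[2] = ((u64)A2 << 32) | A1
 *
 * i.e. each 64-bit SMC register carries two 32-bit payload words, with
 * the API identifier occupying the low half of the first register.
 */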
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -489,6 +564,35 @@ int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
/**
+ * zynqmp_pm_get_sip_svc_version() - Get SiP service call version
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_sip_svc_version(u32 *version)
+{
+ struct arm_smccc_res res;
+ u64 args[SMC_ARG_CNT_64] = {0};
+
+ if (!version)
+ return -EINVAL;
+
+	/* Return the cached SiP SVC version if it was already queried */
+ if (sip_svc_version > 0) {
+ *version = sip_svc_version;
+ return 0;
+ }
+
+ args[0] = GET_SIP_SVC_VERSION;
+
+ arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
+
+ *version = ((lower_32_bits(res.a0) << 16U) | lower_32_bits(res.a1));
+
+ return zynqmp_pm_ret_code(XST_PM_SUCCESS);
+}
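The packed version value splits back into major/minor halves; a trivial sketch (helper name illustrative):

static inline void example_unpack_sip_svc_version(u32 version,
						  u16 *major, u16 *minor)
{
	*major = version >> 16;
	*minor = version & 0xffff;
}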
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -552,10 +656,34 @@ static int get_set_conduit_method(struct device_node *np)
*/
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
- int ret;
+ int ret, i = 0;
+ u32 ret_payload[PAYLOAD_ARG_CNT] = {0};
+
+ if (sip_svc_version >= SIP_SVC_PASSTHROUGH_VERSION) {
+ ret = zynqmp_pm_invoke_fw_fn(PM_QUERY_DATA, ret_payload, 4,
+ qdata.qid, qdata.arg1,
+ qdata.arg2, qdata.arg3);
+ /* To support backward compatibility */
+ if (!ret && !ret_payload[0]) {
+ /*
+ * TF-A passes return status on 0th index but
+ * api to get clock name reads data from 0th
+ * index so pass data at 0th index instead of
+ * return status
+ */
+ if (qdata.qid == PM_QID_CLOCK_GET_NAME ||
+ qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME)
+ i = 1;
+
+ for (; i < PAYLOAD_ARG_CNT; i++, out++)
+ *out = ret_payload[i];
- ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
- qdata.arg3);
+ return ret;
+ }
+ }
+
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid,
+ qdata.arg1, qdata.arg2, qdata.arg3);
/*
* For clock name query, all bytes in SMC response are clock name
@@ -920,7 +1048,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
@@ -934,7 +1062,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -1012,17 +1140,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
@@ -1118,8 +1242,11 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
if (pm_family_code == ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
- if (ret < PM_PINCTRL_PARAM_SET_VERSION)
+ if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
+ pr_warn("The requested pinctrl feature is not supported in the current firmware.\n"
+ "Expected firmware version is 2023.1 and above for this feature to work.\r\n");
return -EOPNOTSUPP;
+ }
}
return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
@@ -1173,11 +1300,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
* This API function is to be used for notify the power management controller
* about the completed power management initialization.
*/
-int zynqmp_pm_init_finalize(void)
+static int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, NULL, 0);
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -1887,6 +2013,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Get SiP SVC version number */
+ ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
+ if (ret)
+ return ret;
+
ret = do_feature_check_call(PM_FEATURE_CHECK);
if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
feature_check_enabled = true;
@@ -1969,6 +2100,19 @@ static void zynqmp_firmware_remove(struct platform_device *pdev)
platform_device_unregister(em_dev);
}
+static void zynqmp_firmware_sync_state(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!of_device_is_compatible(np, "xlnx,zynqmp-firmware"))
+ return;
+
+ of_genpd_sync_state(np);
+
+ if (zynqmp_pm_init_finalize())
+ dev_warn(dev, "failed to release power management to firmware\n");
+}
+
static const struct of_device_id zynqmp_firmware_of_match[] = {
{.compatible = "xlnx,zynqmp-firmware"},
{.compatible = "xlnx,versal-firmware"},
@@ -1981,8 +2125,9 @@ static struct platform_driver zynqmp_firmware_driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
.dev_groups = zynqmp_firmware_groups,
+ .sync_state = zynqmp_firmware_sync_state,
},
.probe = zynqmp_firmware_probe,
- .remove_new = zynqmp_firmware_remove,
+ .remove = zynqmp_firmware_remove,
};
module_platform_driver(zynqmp_firmware_driver);
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 6b0914432445..5af0bd33890c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
-#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index f4de3fea0b2d..e41492988dd6 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver altera_fpga_driver = {
.probe = alt_fpga_bridge_probe,
- .remove_new = alt_fpga_bridge_remove,
+ .remove = alt_fpga_bridge_remove,
.driver = {
.name = "altera_fpga2sdram_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 44061cb16f87..594693ff786e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -262,7 +262,7 @@ static void altera_freeze_br_remove(struct platform_device *pdev)
static struct platform_driver altera_freeze_br_driver = {
.probe = altera_freeze_br_probe,
- .remove_new = altera_freeze_br_remove,
+ .remove = altera_freeze_br_remove,
.driver = {
.name = "altera_freeze_br",
.of_match_table = altera_freeze_br_of_match,
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 6f8e24be19c6..f2f1250689cb 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -205,7 +205,7 @@ MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver alt_fpga_bridge_driver = {
.probe = alt_fpga_bridge_probe,
- .remove_new = alt_fpga_bridge_remove,
+ .remove = alt_fpga_bridge_remove,
.driver = {
.name = "altera_hps2fpga_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..5aa7b8884374 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@
#include "dfl-afu.h"
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
afu->dma_regions = RB_ROOT;
}
/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;
ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ unlock_vm:
/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node **new, *parent = NULL;
- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);
new = &afu->dma_regions.rb_node;
@@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
struct dfl_afu *afu;
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}
/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;
while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
rb_erase(node, &afu->dma_regions);
if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);
if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
node = rb_next(node);
kfree(region);
@@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}
/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;
@@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;
/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}
/* Only accept continuous pages, return error else */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}
/* As pages are continuous then start to do DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}
*iova = region->iova;
- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}
return 0;
unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +372,34 @@ free_region:
/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;
- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}
if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);
return 0;
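A minimal sketch of pairing the two helpers above: the iova returned by map is the handle later given to unmap (the function name is illustrative, error paths trimmed):

static int example_afu_dma_roundtrip(struct dfl_feature_dev_data *fdata,
				     u64 user_addr, u64 length)
{
	u64 iova;
	int ret;

	ret = afu_dma_map_region(fdata, user_addr, length, &iova);
	if (ret)
		return ret;

	/* ... program the AFU with iova and run the DMA ... */

	return afu_dma_unmap_region(fdata, iova);
}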
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)
/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;
- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;
/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);
/* Clear errors if err input matches with current port errors.*/
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);
/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);
done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;
return attr->mode;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 6b97c073849e..3bf8e7338dbe 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@
/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);
- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}
@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)
/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, it puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
@@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempt at MMIO access to the AFU while in reset will
* result in errors reported via the port error reporting sub-feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;
- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}
static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
@@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +180,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +221,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -241,15 +242,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +259,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +297,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -317,18 +318,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -338,15 +339,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +357,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);
- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if the related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;
return attr->mode;
@@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
@@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
return 0;
}
-static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
void __user *arg)
{
struct dfl_fpga_port_region_info rinfo;
@@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;
- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
if (ret)
return ret;
@@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
@@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;
- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;
if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}
- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
}
static long
-afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_unmap unmap;
unsigned long minsz;
@@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(fdata, unmap.iova);
}
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *f;
long ret;
dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return afu_ioctl_check_extension(pdata, arg);
+ return afu_ioctl_check_extension(fdata, arg);
case DFL_FPGA_PORT_GET_INFO:
- return afu_ioctl_get_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_GET_REGION_INFO:
- return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_region_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_MAP:
- return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ return afu_ioctl_dma_map(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_UNMAP:
- return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd
@@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or another
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = {
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_feature_dev_data *fdata;
struct dfl_afu_mmio_region region;
u64 offset;
int ret;
@@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
if (ret)
return ret;
@@ -851,46 +854,45 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_afu *afu;
afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
if (!afu)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);
return 0;
}
static int afu_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -947,12 +949,12 @@ static const struct attribute_group *afu_dev_groups[] = {
};
static struct platform_driver afu_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
.dev_groups = afu_dev_groups,
},
- .probe = afu_probe,
- .remove_new = afu_remove,
+ .probe = afu_probe,
+ .remove = afu_remove,
};
static int __init afu_init(void)
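
Two things in dfl-afu-main.c go beyond plain substitution. First, afu_open() now maps the inode straight to the fdata with dfl_fpga_inode_to_feature_dev_data(), so the old WARN_ON(!pdata) guard is gone and the platform device is reached through fdata->dev. Second, the driver struct switches from .remove_new to .remove, matching the platform core renaming the void-returning remove callback back to .remove (my reading of that transition; the patch itself only renames the member). A sketch of the converted open path, using only helpers named in the hunks:

static int example_open(struct inode *inode, struct file *filp)
{
	struct dfl_feature_dev_data *fdata =
		dfl_fpga_inode_to_feature_dev_data(inode);
	struct platform_device *fdev = fdata->dev;	/* pdev via fdata */
	int ret;

	mutex_lock(&fdata->lock);
	/* the use-count helpers implement exclusive open via O_EXCL */
	ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
	if (!ret)
		filp->private_data = fdev;
	mutex_unlock(&fdata->lock);

	return ret;
}
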
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 2e7b41629406..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@
/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
INIT_LIST_HEAD(&afu->regions);
}
@@ -39,7 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @region_size: region size.
* @phys: physical address of this region.
@@ -47,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;
- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
@@ -63,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
/* check if @region_index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -80,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}
/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;
list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}
/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -118,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -127,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -144,7 +145,7 @@ exit:
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -152,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -162,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 7bef3e300aa2..03be4f0969c7 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -76,27 +76,27 @@ struct dfl_afu {
struct rb_root dma_regions;
};
-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when calling __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);
extern const struct dfl_feature_ops port_err_ops;
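
The header keeps the contract that __afu_port_enable()/__afu_port_disable() form a reference-counted pair around fdata->disable_count, which is why the comment insists the caller holds fdata->lock. A sketch of the intended pairing, mirroring __port_reset() and port_enable_set() from dfl-afu-main.c above:

static int example_port_cycle(struct dfl_feature_dev_data *fdata)
{
	int ret;

	mutex_lock(&fdata->lock);		/* required by the contract */
	ret = __afu_port_disable(fdata);	/* first disable sets reset */
	if (!ret)
		ret = __afu_port_enable(fdata);	/* last enable clears it */
	mutex_unlock(&fdata->lock);

	return ret;
}
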
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 0b01b3895277..28b0f9d062ac 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};
static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;
- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;
- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}
- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;
priv->port_ops = ops;
}
- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}
static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -85,18 +85,16 @@ static void fme_br_remove(struct platform_device *pdev)
fpga_bridge_unregister(br);
- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
}
static struct platform_driver fme_br_driver = {
- .driver = {
- .name = DFL_FPGA_FME_BRIDGE,
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
},
- .probe = fme_br_probe,
- .remove_new = fme_br_remove,
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
};
module_platform_driver(fme_br_driver);
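
Note the lifetime change hidden in this file: the bridge used to drop a device reference on the looked-up port (the put_device() removed from fme_br_remove()), but dfl_fpga_cdev_find_port_data() apparently returns a borrowed fdata with no reference to release, so the put_device() call disappears entirely. A sketch of the resulting lazy lookup in the enable_set path, per the hunks above:

static int example_enable_set(struct fpga_bridge *bridge, bool enable)
{
	struct fme_br_priv *priv = bridge->priv;

	if (!priv->port_fdata) {
		/* borrowed handle: no get_device()/put_device() pairing */
		priv->port_fdata =
			dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
						     &priv->pdata->port_id,
						     dfl_fpga_check_port_id);
		if (!priv->port_fdata)
			return -ENODEV;
	}

	if (!priv->port_ops) {
		struct dfl_fpga_port_ops *ops =
			dfl_fpga_port_ops_get(priv->port_fdata);

		if (!ops || !ops->enable_set)
			return -ENOENT;
		priv->port_ops = ops;
	}

	return priv->port_ops->enable_set(priv->port_fdata, enable);
}
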
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if the related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;
return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}
static int fme_global_err_init(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 864924f68f5e..8aca2fb20e87 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -28,10 +28,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -47,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_ID);
@@ -65,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_MD);
@@ -79,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -94,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -109,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -135,10 +141,10 @@ static const struct attribute_group fme_hdr_group = {
.attrs = fme_hdr_attrs,
};
-static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -147,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
return dfl_fpga_cdev_release_port(cdev, port_id);
}
-static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -163,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_FME_PORT_RELEASE:
- return fme_hdr_ioctl_release_port(pdata, arg);
+ return fme_hdr_ioctl_release_port(fdata, arg);
case DFL_FPGA_FME_PORT_ASSIGN:
- return fme_hdr_ioctl_assign_port(pdata, arg);
+ return fme_hdr_ioctl_assign_port(fdata, arg);
}
return -ENODEV;
@@ -411,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;
val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
switch (attr) {
case hwmon_power_max:
@@ -438,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -589,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
},
};
-static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -598,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static int fme_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
- filp->private_data = pdata;
+ dfl_feature_dev_use_count(fdata));
+ filp->private_data = fdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;
@@ -650,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return fme_ioctl_check_extension(pdata, arg);
+ return fme_ioctl_check_extension(fdata, arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd.
@@ -658,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or another
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -672,27 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme *fme;
fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
if (!fme)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);
return 0;
}
static void fme_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}
static const struct file_operations fme_fops = {
@@ -742,12 +745,12 @@ static const struct attribute_group *fme_dev_groups[] = {
};
static struct platform_driver fme_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_FME,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
.dev_groups = fme_dev_groups,
},
- .probe = fme_probe,
- .remove_new = fme_remove,
+ .probe = fme_probe,
+ .remove = fme_remove,
};
module_platform_driver(fme_driver);
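
One asymmetry worth flagging in dfl-fme-main.c: fme_open() stores the fdata itself in filp->private_data (the AFU side stores the platform device), and the hwmon callbacks resolve their fdata from dev->parent, because the hwmon device is registered as a child of the FME feature device while the lock and private data stay with the parent. A sketch of that parent-resolution shape, with the register update elided (see power_hwmon_write() above):

static int example_hwmon_write(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, long val)
{
	/* hwmon dev is a child; feature data hangs off its parent */
	struct dfl_feature_dev_data *fdata =
		to_dfl_feature_dev_data(dev->parent);
	int ret = 0;

	mutex_lock(&fdata->lock);
	/* threshold register update elided */
	mutex_unlock(&fdata->lock);

	return ret;
}
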
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index cdcf6dea4cc9..b878b260af38 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -164,16 +163,16 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
+ * @fdata: fme feature dev data
* @feature: sub feature info
- * @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, or an error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;
@@ -209,11 +208,11 @@ create_mgr_err:
/**
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);
- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;
fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -274,11 +273,11 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
/**
* dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, or an error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;
@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);
/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);
/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;
/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);
/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}
static long fme_pr_ioctl(struct platform_device *pdev,
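Across these dfl-fme-pr.c hunks the pattern is uniform: every dev_get_platdata(&pdev->dev) lookup becomes to_dfl_feature_dev_data(&pdev->dev), and the lock and private data move with it into struct dfl_feature_dev_data. A minimal sketch of a sub-feature init under the new accessors; demo_feature_init is a hypothetical name, not part of the patch:

	static int demo_feature_init(struct platform_device *pdev,
				     struct dfl_feature *feature)
	{
		struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
		void __iomem *base;

		/* register lookup is now keyed by fdata, not by the struct device */
		base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
		if (!base)
			return -ENODEV;

		mutex_lock(&fdata->lock);
		/* ... program the sub-feature ... */
		mutex_unlock(&fdata->lock);

		return 0;
	}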
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 71616f8b4982..c6cd63063c82 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -71,11 +71,11 @@ static void fme_region_remove(struct platform_device *pdev)
}
static struct platform_driver fme_region_driver = {
- .driver = {
- .name = DFL_FPGA_FME_REGION,
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
},
- .probe = fme_region_probe,
- .remove_new = fme_region_remove,
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
};
module_platform_driver(fme_region_driver);
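The one-line .remove_new to .remove hunks here and in the drivers below are part of the tree-wide conversion: .remove now carries the void-returning signature that .remove_new held during the transition. A hedged sketch of the resulting shape, with demo_* as illustrative names only:

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void demo_remove(struct platform_device *pdev)
	{
		/* tear down resources; remove callbacks cannot fail */
	}

	static struct platform_driver demo_driver = {
		.probe	= demo_probe,
		.remove	= demo_remove,	/* void-returning, formerly .remove_new */
		.driver	= {
			.name = "demo",
		},
	};
	module_platform_driver(demo_driver);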
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 80cac3a5f976..602807d6afcc 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -39,14 +39,6 @@ struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
-static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
-{
- if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
- return NULL;
-
- return pcim_iomap_table(pcidev)[0];
-}
-
static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
int ret, nvec = pci_msix_vec_count(pcidev);
@@ -235,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
u64 v;
/* start to find Device Feature List from Bar 0 */
- base = cci_pci_ioremap_bar0(pcidev);
- if (!base)
- return -ENOMEM;
+ base = pcim_iomap_region(pcidev, 0, DRV_NAME);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
/*
* PF device has FME and Ports/AFUs, and VF device only has one
@@ -296,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
}
/* release I/O mappings for next step enumeration */
- pcim_iounmap_regions(pcidev, BIT(0));
+ pcim_iounmap_region(pcidev, 0);
return ret;
}
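The dfl-pci hunk above drops the open-coded pcim_iomap_regions()/pcim_iomap_table() pair for the single-BAR devres helper, which returns a valid __iomem pointer or an ERR_PTR (hence the IS_ERR() check), with pcim_iounmap_region() for early release. A hedged sketch of the same pattern in a hypothetical PCI probe:

	static int demo_pci_probe(struct pci_dev *pcidev,
				  const struct pci_device_id *id)
	{
		void __iomem *base;
		int ret;

		ret = pcim_enable_device(pcidev);
		if (ret)
			return ret;

		base = pcim_iomap_region(pcidev, 0, "demo");	/* ERR_PTR on failure */
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... use base; pcim_iounmap_region(pcidev, 0) releases it early ... */
		return 0;
	}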
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c406b949026f..7022657243c0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -119,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}
-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -156,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list);
/**
* dfl_fpga_port_ops_get - get matched port ops from the global list
- * @pdev: platform device to match with associated port ops.
+ * @fdata: feature dev data to match with associated port ops.
* Return: matched port ops on success, NULL otherwise.
*
* Please note that dfl_fpga_port_ops_put() must be called after use of the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;
@@ -171,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -222,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
* dfl_fpga_check_port_id - check the port id
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
* @pport_id: port id to compare.
*
* Return: 1 if port device matches the given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;
- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;
- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;
- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);
- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
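With the match callbacks retyped to take struct dfl_feature_dev_data, a port lookup no longer hands back a refcounted platform device; the patch drops the get_device()/put_device() pairing entirely. A hedged sketch of the new lookup, where demo_find_port is a hypothetical wrapper:

	static struct dfl_feature_dev_data *
	demo_find_port(struct dfl_fpga_cdev *cdev, int port_id)
	{
		/* dfl_fpga_cdev_find_port_data() takes cdev->lock internally */
		return dfl_fpga_cdev_find_port_data(cdev, &port_id,
						    dfl_fpga_check_port_id);
	}

Because fdata is devm-allocated against the enumeration device, callers no longer drop a device reference after use, which is why the "put_device() after use" comment disappears from dfl.h later in this series.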
@@ -351,10 +339,10 @@ static void release_dfl_dev(struct device *dev)
}
static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -380,11 +368,11 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;
- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
ddev->dfh_version = feature->dfh_version;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->cdev = fdata->dfl_cdev;
if (feature->param_size) {
ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
if (!ddev->params) {
@@ -435,11 +423,11 @@ put_dev:
return ERR_PTR(ret);
}
-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -447,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}
-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;
@@ -462,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}
- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -474,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;
err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}
@@ -504,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
*/
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature *feature;
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -520,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
- struct dfl_feature_platform_data *pdata,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
@@ -579,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature,
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;
while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
- ret = dfl_feature_instance_init(pdev, pdata,
- feature, drv);
+ ret = dfl_feature_instance_init(pdev, feature, drv);
if (ret)
goto exit;
}
@@ -596,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;
@@ -695,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -708,7 +694,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;
- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -743,50 +729,62 @@ struct dfl_feature_info {
u64 params[];
};
-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}
-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static void dfl_id_free_action(void *arg)
+{
+ struct dfl_feature_dev_data *fdata = arg;
+
+ dfl_id_free(fdata->type, fdata->pdev_id);
+}
+
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;
- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->type = type;
- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
/*
@@ -795,25 +793,15 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for port device.
* and it should always be 0 for fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
feature->dfh_version = finfo->dfh_version;
@@ -823,7 +811,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
finfo->params, finfo->param_size,
GFP_KERNEL);
if (!feature->params)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
feature->param_size = finfo->param_size;
}
@@ -840,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ return ERR_CAST(feature->ioaddr);
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
@@ -864,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}
- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;
- return ret;
+ return fdata;
}
-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * Register the current feature device. This is called when we need to switch
+ * to parsing another feature, or when all features on the given device
+ * feature list have been parsed.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = {};
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;
- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;
- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;
- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);
- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;
+
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
+
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;
+
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ struct dfl_feature *feature;
+
+ platform_device_unregister(fdata->dev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+}
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ /* reset the binfo for next FIU */
+ binfo->type = DFL_ID_MAX;
return 0;
}
@@ -921,22 +948,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;
- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}
- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}
@@ -1025,7 +1041,7 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* Instead, features with interrupt functionality provide
* the information in feature specific registers.
*/
- type = feature_dev_id_type(binfo->feature_dev);
+ type = binfo->type;
if (type == PORT_ID) {
switch (fid) {
case PORT_FEATURE_ID_UINT:
@@ -1217,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1227,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}
return 0;
@@ -1273,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1294,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);
ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1515,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
return 0;
}
@@ -1573,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}
+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;
@@ -1614,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);
- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);
remove_feature_devs(cdev);
@@ -1643,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
- * __dfl_fpga_cdev_find_port - find a port under given container device
+ * __dfl_fpga_cdev_find_port_data - find a port under given container device
*
* @cdev: container device
* @data: data passed to match function
@@ -1656,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
* NOTE: you will need to drop the device reference with put_device() after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;
- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
static int __init dfl_fpga_init(void)
{
@@ -1706,33 +1704,28 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;
- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1752,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);
cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1789,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_PORT_OFST(port_id));
@@ -1816,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1842,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;
mutex_lock(&cdev->lock);
@@ -1856,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1993,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -2013,9 +2002,9 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
if (IS_ERR(fds))
return PTR_ERR(fds);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
kfree(fds);
return ret;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5063d73b0d82..95539f1213cb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -206,6 +207,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -219,15 +222,15 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);
/**
* struct dfl_feature_id - dfl private feature id
@@ -300,26 +303,32 @@ struct dfl_feature {
#define FEATURE_DEV_ID_UNUSED (-1)
/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -327,55 +336,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @cdev: cdev of feature dev.
+ * @fdata: dfl enumeration data for the dfl feature device.
+ */
+struct dfl_feature_platform_data {
+ struct cdev cdev;
+ struct dfl_feature_dev_data *fdata;
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;
if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;
- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;
return 0;
}
static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;
- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;
- pdata->open_count--;
+ fdata->open_count--;
}
static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}
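The open-count helpers keep their semantics but now guard fdata, with callers taking fdata->lock around them as the port release path in dfl.c does. A sketch of the exclusive-claim pairing; demo_claim_port is illustrative only:

	static int demo_claim_port(struct dfl_feature_dev_data *fdata)
	{
		int ret;

		mutex_lock(&fdata->lock);
		ret = dfl_feature_dev_use_begin(fdata, true);	/* exclusive */
		mutex_unlock(&fdata->lock);
		if (ret)
			return ret;

		/* ... exclusive work on the port ... */

		mutex_lock(&fdata->lock);
		dfl_feature_dev_use_end(fdata);
		mutex_unlock(&fdata->lock);
		return 0;
	}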
static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}
static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}
struct dfl_feature_ops {
@@ -398,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev,
struct module *owner);
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
-static inline
-struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+static inline struct dfl_feature_dev_data *
+dfl_fpga_inode_to_feature_dev_data(struct inode *inode)
{
struct dfl_feature_platform_data *pdata;
pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata;
}
-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)
-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;
return NULL;
}
-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);
if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -437,10 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
+}
+
static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}
static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -522,26 +551,21 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *));
-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);
- return pdev;
+ return fdata;
}
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
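The private-data helpers are renamed to match the new structure. A hypothetical feature driver stashing its state through them might look like this; struct demo_priv and demo_init are assumptions, not part of the patch:

	struct demo_priv {
		int state;
	};

	static int demo_init(struct platform_device *pdev)
	{
		struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
		struct demo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		mutex_lock(&fdata->lock);
		dfl_fpga_fdata_set_private(fdata, priv);
		mutex_unlock(&fdata->lock);

		return 0;
	}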
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 7ac9f9f5af12..10f678b9ed36 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -759,7 +759,7 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
static struct platform_driver intel_m10bmc_sec_driver = {
.probe = m10bmc_sec_probe,
- .remove_new = m10bmc_sec_remove,
+ .remove = m10bmc_sec_remove,
.driver = {
.name = "intel-m10bmc-sec-update",
.dev_groups = m10bmc_sec_attr_groups,
@@ -771,4 +771,4 @@ module_platform_driver(intel_m10bmc_sec_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
+MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 8526a5a86f0c..43db4bb77138 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -436,7 +436,7 @@ static void of_fpga_region_remove(struct platform_device *pdev)
static struct platform_driver of_fpga_region_driver = {
.probe = of_fpga_region_probe,
- .remove_new = of_fpga_region_remove,
+ .remove = of_fpga_region_remove,
.driver = {
.name = "of-fpga-region",
.of_match_table = of_match_ptr(fpga_region_of_match),
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index 4c03513b8f03..0165a3c86932 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -535,7 +535,7 @@ MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
static struct platform_driver socfpga_a10_fpga_driver = {
.probe = socfpga_a10_fpga_probe,
- .remove_new = socfpga_a10_fpga_remove,
+ .remove = socfpga_a10_fpga_remove,
.driver = {
.name = "socfpga_a10_fpga_manager",
.of_match_table = socfpga_a10_fpga_of_match,
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 2c0def7d7cbb..0a295ccf1644 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -455,7 +455,7 @@ MODULE_DEVICE_TABLE(of, s10_of_match);
static struct platform_driver s10_driver = {
.probe = s10_probe,
- .remove_new = s10_remove,
+ .remove = s10_remove,
.driver = {
.name = "Stratix10 SoC FPGA manager",
.of_match_table = of_match_ptr(s10_of_match),
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
index b9ab29809e96..124ba40e32b1 100644
--- a/drivers/fpga/tests/fpga-bridge-test.c
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = {
kunit_test_suite(fpga_bridge_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 9cb37aefbac4..62975a39ee14 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -263,6 +263,7 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
img_buf = init_test_buffer(test, IMAGE_SIZE);
sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, ret, 0);
sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
@@ -330,4 +331,5 @@ static struct kunit_suite fpga_mgr_suite = {
kunit_test_suite(fpga_mgr_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index 6a108cafded8..020ceac48509 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = {
kunit_test_suite(fpga_region_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Region");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index 3710e8f01be2..e6189106c468 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -69,7 +69,7 @@ static struct platform_driver versal_fpga_driver = {
.probe = versal_fpga_probe,
.driver = {
.name = "versal_fpga_manager",
- .of_match_table = of_match_ptr(versal_fpga_of_match),
+ .of_match_table = versal_fpga_of_match,
},
};
module_platform_driver(versal_fpga_driver);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 788dd2f63a65..822751fad18a 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -162,7 +162,7 @@ static void xlnx_pr_decoupler_remove(struct platform_device *pdev)
static struct platform_driver xlnx_pr_decoupler_driver = {
.probe = xlnx_pr_decoupler_probe,
- .remove_new = xlnx_pr_decoupler_remove,
+ .remove = xlnx_pr_decoupler_remove,
.driver = {
.name = "xlnx_pr_decoupler",
.of_match_table = xlnx_pr_decoupler_of_match,
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 4db3d80e10b0..b7629a0e4813 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
}
- priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
- if (priv->dma_nelms == 0) {
+ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ if (err) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
- return -ENOMEM;
+ return err;
}
+ priv->dma_nelms = sgt->nents;
/* enable clock */
err = clk_enable(priv->clk);
@@ -478,7 +478,7 @@ out_clk:
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
return err;
}
@@ -642,7 +642,7 @@ MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
static struct platform_driver zynq_fpga_driver = {
.probe = zynq_fpga_probe,
- .remove_new = zynq_fpga_remove,
+ .remove = zynq_fpga_remove,
.driver = {
.name = "zynq_fpga_manager",
.of_match_table = of_match_ptr(zynq_fpga_of_match),
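The zynq-fpga hunk above replaces the open-coded dma_map_sg() call with dma_map_sgtable(), which returns 0 or a negative errno (instead of a zero element count) and records the mapped entry count in sgt->nents. A hedged sketch of the adopted pattern:

	static int demo_dma_write(struct device *dev, struct sg_table *sgt)
	{
		int err;

		err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		if (err)
			return err;	/* negative errno, unlike dma_map_sg() */

		/* ... run the transfer over the sgt->nents mapped entries ... */

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}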
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index e2e1e9df6115..c6c115993ebc 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -554,7 +554,7 @@ static unsigned long aligned_access_size(size_t offset, size_t count)
}
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ struct kobject *kobj, const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
@@ -581,7 +581,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
}
static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
@@ -1404,7 +1404,7 @@ void fsi_driver_unregister(struct fsi_driver *fsi_drv)
}
EXPORT_SYMBOL_GPL(fsi_driver_unregister);
-struct bus_type fsi_bus_type = {
+const struct bus_type fsi_bus_type = {
.name = "fsi",
.match = fsi_bus_match,
};
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index 6f5e1bdf7e40..bff897f77fe5 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -666,7 +666,7 @@ static struct platform_driver fsi_master_aspeed_driver = {
.of_match_table = fsi_master_aspeed_match,
},
.probe = fsi_master_aspeed_probe,
- .remove_new = fsi_master_aspeed_remove,
+ .remove = fsi_master_aspeed_remove,
};
module_platform_driver(fsi_master_aspeed_driver);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index a4c37ff8edd6..e67d7cd30fca 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -13,13 +13,13 @@
#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/firmware.h>
#include <linux/gpio/aspeed.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
#include <linux/genalloc.h>
#include "fsi-master.h"
@@ -1285,14 +1285,7 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
master->gpio_mux = gpio;
/* Grab the reserved memory region (use DMA API instead ?) */
- np = of_parse_phandle(mnode, "memory-region", 0);
- if (!np) {
- dev_err(&pdev->dev, "Didn't find reserved memory\n");
- rc = -EINVAL;
- goto err_free;
- }
- rc = of_address_to_resource(np, 0, &res);
- of_node_put(np);
+ rc = of_reserved_mem_region_to_resource(mnode, 0, &res);
if (rc) {
dev_err(&pdev->dev, "Couldn't address to resource for reserved memory\n");
rc = -ENOMEM;
@@ -1434,7 +1427,7 @@ static struct platform_driver fsi_master_acf = {
.of_match_table = fsi_master_acf_match,
},
.probe = fsi_master_acf_probe,
- .remove_new = fsi_master_acf_remove,
+ .remove = fsi_master_acf_remove,
};
module_platform_driver(fsi_master_acf);
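The fsi-master-ast-cf hunk collapses the of_parse_phandle() / of_address_to_resource() / of_node_put() dance into the single reserved-memory helper. A hedged sketch mirroring the call as used above; demo_get_rmem is a hypothetical wrapper:

	static int demo_get_rmem(struct device *dev, struct resource *res)
	{
		int rc;

		/* resolves entry 0 of the node's "memory-region" phandle list */
		rc = of_reserved_mem_region_to_resource(dev->of_node, 0, res);
		if (rc)
			dev_err(dev, "no usable memory-region\n");
		return rc;
	}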
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index f761344f4873..69de0b5b9cbd 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -888,7 +888,7 @@ static struct platform_driver fsi_master_gpio_driver = {
.of_match_table = fsi_master_gpio_match,
},
.probe = fsi_master_gpio_probe,
- .remove_new = fsi_master_gpio_remove,
+ .remove = fsi_master_gpio_remove,
};
module_platform_driver(fsi_master_gpio_driver);
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index a6d4c8f123a5..d3e6bf37878a 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -740,7 +740,7 @@ static struct platform_driver occ_driver = {
.of_match_table = occ_match,
},
.probe = occ_probe,
- .remove_new = occ_remove,
+ .remove = occ_remove,
};
static int occ_init(void)
diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig
new file mode 100644
index 000000000000..b5583b12a011
--- /dev/null
+++ b/drivers/fwctl/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FWCTL
+ tristate "fwctl device firmware access framework"
+ help
+ fwctl provides a userspace API for restricted access to communicate
+ with on-device firmware. The communication channel is intended to
+ support a wide range of lockdown compatible device behaviors including
+ manipulating device FLASH, debugging, and other activities that don't
+ fit neatly into an existing subsystem.
+
+if FWCTL
+config FWCTL_MLX5
+ tristate "mlx5 ConnectX control fwctl driver"
+ depends on MLX5_CORE
+ help
+ MLX5 provides an interface for user processes to access the debug and
+ configuration registers of the ConnectX hardware family
+ (NICs, PCI switches and SmartNIC SoCs).
+ This allows configuration and debug tools to work out of the box on
+ mainstream kernels.
+
+ If you don't know what to do here, say N.
+
+config FWCTL_PDS
+ tristate "AMD/Pensando pds fwctl driver"
+ depends on PDS_CORE
+ help
+ The pds_fwctl driver provides an fwctl interface for a user process
+ to access the debug and configuration information of the AMD/Pensando
+ DSC hardware family.
+
+ If you don't know what to do here, say N.
+endif
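Before the core implementation in main.c below, a hedged userspace view of the contract it enforces: every ioctl struct leads with a __u32 size that fwctl_fops_ioctl() reads with get_user() and validates against the op's min_size. demo_query, the include path, and the exact field layout are assumptions taken from include/uapi/fwctl/fwctl.h, not part of this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <fwctl/fwctl.h>	/* FWCTL_INFO, struct fwctl_info */

	static int demo_query(const char *path)	/* e.g. "/dev/fwctl/fwctl0" */
	{
		struct fwctl_info info;
		int fd;

		fd = open(path, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&info, 0, sizeof(info));
		info.size = sizeof(info);	/* first __u32, checked by the core */

		if (ioctl(fd, FWCTL_INFO, &info) < 0) {
			close(fd);
			return -1;
		}

		printf("device type %u\n", info.out_device_type);
		close(fd);
		return 0;
	}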
diff --git a/drivers/fwctl/Makefile b/drivers/fwctl/Makefile
new file mode 100644
index 000000000000..c093b5f661d6
--- /dev/null
+++ b/drivers/fwctl/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL) += fwctl.o
+obj-$(CONFIG_FWCTL_MLX5) += mlx5/
+obj-$(CONFIG_FWCTL_PDS) += pds/
+
+fwctl-y += main.o
diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
new file mode 100644
index 000000000000..bc6378506296
--- /dev/null
+++ b/drivers/fwctl/main.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define pr_fmt(fmt) "fwctl: " fmt
+#include <linux/fwctl.h>
+
+#include <linux/container_of.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <uapi/fwctl/fwctl.h>
+
+enum {
+ FWCTL_MAX_DEVICES = 4096,
+ MAX_RPC_LEN = SZ_2M,
+};
+static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS));
+
+static dev_t fwctl_dev;
+static DEFINE_IDA(fwctl_ida);
+static unsigned long fwctl_tainted;
+
+struct fwctl_ucmd {
+ struct fwctl_uctx *uctx;
+ void __user *ubuffer;
+ void *cmd;
+ u32 user_size;
+};
+
+static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len)
+{
+ if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
+ min_t(size_t, ucmd->user_size, cmd_len)))
+ return -EFAULT;
+ return 0;
+}
+
+static int copy_to_user_zero_pad(void __user *to, const void *from,
+ size_t from_len, size_t user_len)
+{
+ size_t copy_len;
+
+ copy_len = min(from_len, user_len);
+ if (copy_to_user(to, from, copy_len))
+ return -EFAULT;
+ if (copy_len < user_len) {
+ if (clear_user(to + copy_len, user_len - copy_len))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int fwctl_cmd_info(struct fwctl_ucmd *ucmd)
+{
+ struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+ struct fwctl_info *cmd = ucmd->cmd;
+ size_t driver_info_len = 0;
+
+ if (cmd->flags)
+ return -EOPNOTSUPP;
+
+ if (!fwctl->ops->info && cmd->device_data_len) {
+ if (clear_user(u64_to_user_ptr(cmd->out_device_data),
+ cmd->device_data_len))
+ return -EFAULT;
+ } else if (cmd->device_data_len) {
+ void *driver_info __free(kfree) =
+ fwctl->ops->info(ucmd->uctx, &driver_info_len);
+ if (IS_ERR(driver_info))
+ return PTR_ERR(driver_info);
+
+ if (copy_to_user_zero_pad(u64_to_user_ptr(cmd->out_device_data),
+ driver_info, driver_info_len,
+ cmd->device_data_len))
+ return -EFAULT;
+ }
+
+ cmd->out_device_type = fwctl->ops->device_type;
+ cmd->device_data_len = driver_info_len;
+ return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
+static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
+{
+ struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+ struct fwctl_rpc *cmd = ucmd->cmd;
+ size_t out_len;
+
+ if (cmd->in_len > MAX_RPC_LEN || cmd->out_len > MAX_RPC_LEN)
+ return -EMSGSIZE;
+
+ switch (cmd->scope) {
+ case FWCTL_RPC_CONFIGURATION:
+ case FWCTL_RPC_DEBUG_READ_ONLY:
+ break;
+
+ case FWCTL_RPC_DEBUG_WRITE_FULL:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ fallthrough;
+ case FWCTL_RPC_DEBUG_WRITE:
+ if (!test_and_set_bit(0, &fwctl_tainted)) {
+ dev_warn(
+ &fwctl->dev,
+ "%s(%d): has requested full access to the physical device",
+ current->comm, task_pid_nr(current));
+ add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ void *inbuf __free(kvfree) = kvzalloc(cmd->in_len, GFP_KERNEL_ACCOUNT);
+ if (!inbuf)
+ return -ENOMEM;
+ if (copy_from_user(inbuf, u64_to_user_ptr(cmd->in), cmd->in_len))
+ return -EFAULT;
+
+ out_len = cmd->out_len;
+ void *outbuf __free(kvfree) = fwctl->ops->fw_rpc(
+ ucmd->uctx, cmd->scope, inbuf, cmd->in_len, &out_len);
+ if (IS_ERR(outbuf))
+ return PTR_ERR(outbuf);
+ if (outbuf == inbuf) {
+ /* The driver can re-use inbuf as outbuf */
+ inbuf = NULL;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(cmd->out), outbuf,
+ min(cmd->out_len, out_len)))
+ return -EFAULT;
+
+ cmd->out_len = out_len;
+ return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
+/* On stack memory for the ioctl structs */
+union fwctl_ucmd_buffer {
+ struct fwctl_info info;
+ struct fwctl_rpc rpc;
+};
+
+struct fwctl_ioctl_op {
+ unsigned int size;
+ unsigned int min_size;
+ unsigned int ioctl_num;
+ int (*execute)(struct fwctl_ucmd *ucmd);
+};
+
+#define IOCTL_OP(_ioctl, _fn, _struct, _last) \
+ [_IOC_NR(_ioctl) - FWCTL_CMD_BASE] = { \
+ .size = sizeof(_struct) + \
+ BUILD_BUG_ON_ZERO(sizeof(union fwctl_ucmd_buffer) < \
+ sizeof(_struct)), \
+ .min_size = offsetofend(_struct, _last), \
+ .ioctl_num = _ioctl, \
+ .execute = _fn, \
+ }
+static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = {
+ IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data),
+ IOCTL_OP(FWCTL_RPC, fwctl_cmd_rpc, struct fwctl_rpc, out),
+};
+
+static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fwctl_uctx *uctx = filp->private_data;
+ const struct fwctl_ioctl_op *op;
+ struct fwctl_ucmd ucmd = {};
+ union fwctl_ucmd_buffer buf;
+ unsigned int nr;
+ int ret;
+
+ nr = _IOC_NR(cmd);
+ if ((nr - FWCTL_CMD_BASE) >= ARRAY_SIZE(fwctl_ioctl_ops))
+ return -ENOIOCTLCMD;
+
+ op = &fwctl_ioctl_ops[nr - FWCTL_CMD_BASE];
+ if (op->ioctl_num != cmd)
+ return -ENOIOCTLCMD;
+
+ ucmd.uctx = uctx;
+ ucmd.cmd = &buf;
+ ucmd.ubuffer = (void __user *)arg;
+ ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
+ if (ret)
+ return ret;
+
+ if (ucmd.user_size < op->min_size)
+ return -EINVAL;
+
+ ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
+ ucmd.user_size);
+ if (ret)
+ return ret;
+
+ guard(rwsem_read)(&uctx->fwctl->registration_lock);
+ if (!uctx->fwctl->ops)
+ return -ENODEV;
+ return op->execute(&ucmd);
+}
+
+static int fwctl_fops_open(struct inode *inode, struct file *filp)
+{
+ struct fwctl_device *fwctl =
+ container_of(inode->i_cdev, struct fwctl_device, cdev);
+ int ret;
+
+ guard(rwsem_read)(&fwctl->registration_lock);
+ if (!fwctl->ops)
+ return -ENODEV;
+
+ struct fwctl_uctx *uctx __free(kfree) =
+ kzalloc(fwctl->ops->uctx_size, GFP_KERNEL_ACCOUNT);
+ if (!uctx)
+ return -ENOMEM;
+
+ uctx->fwctl = fwctl;
+ ret = fwctl->ops->open_uctx(uctx);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &fwctl->uctx_list_lock) {
+ list_add_tail(&uctx->uctx_list_entry, &fwctl->uctx_list);
+ }
+
+ get_device(&fwctl->dev);
+ filp->private_data = no_free_ptr(uctx);
+ return 0;
+}
+
+static void fwctl_destroy_uctx(struct fwctl_uctx *uctx)
+{
+ lockdep_assert_held(&uctx->fwctl->uctx_list_lock);
+ list_del(&uctx->uctx_list_entry);
+ uctx->fwctl->ops->close_uctx(uctx);
+}
+
+static int fwctl_fops_release(struct inode *inode, struct file *filp)
+{
+ struct fwctl_uctx *uctx = filp->private_data;
+ struct fwctl_device *fwctl = uctx->fwctl;
+
+ scoped_guard(rwsem_read, &fwctl->registration_lock) {
+ /*
+ * NULL ops means fwctl_unregister() has already removed the
+ * driver and destroyed the uctx.
+ */
+ if (fwctl->ops) {
+ guard(mutex)(&fwctl->uctx_list_lock);
+ fwctl_destroy_uctx(uctx);
+ }
+ }
+
+ kfree(uctx);
+ fwctl_put(fwctl);
+ return 0;
+}
+
+static const struct file_operations fwctl_fops = {
+ .owner = THIS_MODULE,
+ .open = fwctl_fops_open,
+ .release = fwctl_fops_release,
+ .unlocked_ioctl = fwctl_fops_ioctl,
+};
+
+static void fwctl_device_release(struct device *device)
+{
+ struct fwctl_device *fwctl =
+ container_of(device, struct fwctl_device, dev);
+
+ ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev);
+ mutex_destroy(&fwctl->uctx_list_lock);
+ kfree(fwctl);
+}
+
+static char *fwctl_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "fwctl/%s", dev_name(dev));
+}
+
+static struct class fwctl_class = {
+ .name = "fwctl",
+ .dev_release = fwctl_device_release,
+ .devnode = fwctl_devnode,
+};
+
+static struct fwctl_device *
+_alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size)
+{
+ struct fwctl_device *fwctl __free(kfree) = kzalloc(size, GFP_KERNEL);
+ int devnum;
+
+ if (!fwctl)
+ return NULL;
+
+ devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL);
+ if (devnum < 0)
+ return NULL;
+
+ fwctl->dev.devt = fwctl_dev + devnum;
+ fwctl->dev.class = &fwctl_class;
+ fwctl->dev.parent = parent;
+
+ init_rwsem(&fwctl->registration_lock);
+ mutex_init(&fwctl->uctx_list_lock);
+ INIT_LIST_HEAD(&fwctl->uctx_list);
+
+ device_initialize(&fwctl->dev);
+ return_ptr(fwctl);
+}
+
+/* Drivers use the fwctl_alloc_device() wrapper */
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+ const struct fwctl_ops *ops,
+ size_t size)
+{
+ struct fwctl_device *fwctl __free(fwctl) =
+ _alloc_device(parent, ops, size);
+
+ if (!fwctl)
+ return NULL;
+
+ cdev_init(&fwctl->cdev, &fwctl_fops);
+ /*
+	 * The driver module is protected by fwctl_register/unregister();
+ * unregister won't complete until we are done with the driver's module.
+ */
+ fwctl->cdev.owner = THIS_MODULE;
+
+ if (dev_set_name(&fwctl->dev, "fwctl%d", fwctl->dev.devt - fwctl_dev))
+ return NULL;
+
+ fwctl->ops = ops;
+ return_ptr(fwctl);
+}
+EXPORT_SYMBOL_NS_GPL(_fwctl_alloc_device, "FWCTL");
+
+/**
+ * fwctl_register - Register a new device to the subsystem
+ * @fwctl: Previously allocated fwctl_device
+ *
+ * On return the device is visible through sysfs and /dev, and driver ops may
+ * be called.
+ */
+int fwctl_register(struct fwctl_device *fwctl)
+{
+ return cdev_device_add(&fwctl->cdev, &fwctl->dev);
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_register, "FWCTL");
+
+/**
+ * fwctl_unregister - Unregister a device from the subsystem
+ * @fwctl: Previously allocated and registered fwctl_device
+ *
+ * Undoes fwctl_register(). On return no driver ops will be called. The
+ * caller must still call fwctl_put() to free the fwctl.
+ *
+ * Unregister will return even if userspace still has file descriptors open.
+ * This will call ops->close_uctx() on any open FDs and after return no driver
+ * op will be called. The FDs remain open but all fops will return -ENODEV.
+ *
+ * The design of fwctl allows this sort of disassociation of the driver from the
+ * subsystem primarily by keeping memory allocations owned by the core subsystem.
+ * The fwctl_device and fwctl_uctx can both be freed without requiring a driver
+ * callback. This allows the module to remain unlocked while FDs are open.
+ */
+void fwctl_unregister(struct fwctl_device *fwctl)
+{
+ struct fwctl_uctx *uctx;
+
+ cdev_device_del(&fwctl->cdev, &fwctl->dev);
+
+ /* Disable and free the driver's resources for any still open FDs. */
+ guard(rwsem_write)(&fwctl->registration_lock);
+ guard(mutex)(&fwctl->uctx_list_lock);
+ while ((uctx = list_first_entry_or_null(&fwctl->uctx_list,
+ struct fwctl_uctx,
+ uctx_list_entry)))
+ fwctl_destroy_uctx(uctx);
+
+ /*
+	 * The driver module may unload after this returns; the ops pointer
+	 * will no longer be valid.
+ */
+ fwctl->ops = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_unregister, "FWCTL");
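+/*
+ * Sketch of the expected driver call flow (hypothetical "mydrv"; see the
+ * mlx5 and pds drivers later in this patch for real users):
+ *
+ *	md = fwctl_alloc_device(parent, &mydrv_ops, struct mydrv_dev, fwctl);
+ *	if (!md)
+ *		return -ENOMEM;
+ *	err = fwctl_register(&md->fwctl);
+ *	...
+ *	fwctl_unregister(&md->fwctl);
+ *	fwctl_put(&md->fwctl);
+ */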
+
+static int __init fwctl_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&fwctl_dev, 0, FWCTL_MAX_DEVICES, "fwctl");
+ if (ret)
+ return ret;
+
+ ret = class_register(&fwctl_class);
+ if (ret)
+ goto err_chrdev;
+ return 0;
+
+err_chrdev:
+ unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+ return ret;
+}
+
+static void __exit fwctl_exit(void)
+{
+ class_unregister(&fwctl_class);
+ unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+}
+
+module_init(fwctl_init);
+module_exit(fwctl_exit);
+MODULE_DESCRIPTION("fwctl device firmware access framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fwctl/mlx5/Makefile b/drivers/fwctl/mlx5/Makefile
new file mode 100644
index 000000000000..139a23e3c7c5
--- /dev/null
+++ b/drivers/fwctl/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_MLX5) += mlx5_fwctl.o
+
+mlx5_fwctl-y += main.o
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
new file mode 100644
index 000000000000..3dacccf7855c
--- /dev/null
+++ b/drivers/fwctl/mlx5/main.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#include <linux/fwctl.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/driver.h>
+#include <uapi/fwctl/mlx5.h>
+
+#define mlx5ctl_err(mcdev, format, ...) \
+ dev_err(&mcdev->fwctl.dev, format, ##__VA_ARGS__)
+
+#define mlx5ctl_dbg(mcdev, format, ...) \
+ dev_dbg(&mcdev->fwctl.dev, "PID %u: " format, current->pid, \
+ ##__VA_ARGS__)
+
+struct mlx5ctl_uctx {
+ struct fwctl_uctx uctx;
+ u32 uctx_caps;
+ u32 uctx_uid;
+};
+
+struct mlx5ctl_dev {
+ struct fwctl_device fwctl;
+ struct mlx5_core_dev *mdev;
+};
+DEFINE_FREE(mlx5ctl, struct mlx5ctl_dev *, if (_T) fwctl_put(&_T->fwctl));
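+/* The cleanup class above pairs with __free(mlx5ctl) in mlx5ctl_probe() */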
+
+struct mlx5_ifc_mbox_in_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_out_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES = 0x4,
+};
+
+enum {
+ MLX5_CMD_OP_QUERY_DRIVER_VERSION = 0x10c,
+ MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
+ MLX5_CMD_OP_QUERY_RDB = 0x512,
+ MLX5_CMD_OP_QUERY_PSV = 0x602,
+ MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
+ MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
+ MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
+ MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
+ MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
+ MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
+ MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
+ MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
+ MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS = 0x821,
+ MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
+ MLX5_CMD_OP_QUERY_AFU = 0x971,
+ MLX5_CMD_OP_QUERY_CAPI_PEC = 0x981,
+ MLX5_CMD_OP_QUERY_UCTX = 0xa05,
+ MLX5_CMD_OP_QUERY_UMEM = 0xa09,
+ MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE = 0xb02,
+ MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO = 0xb03,
+ MLX5_CMD_OP_QUERY_REGEXP_PARAMS = 0xb05,
+ MLX5_CMD_OP_QUERY_REGEXP_REGISTER = 0xb07,
+ MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY = 0xb08,
+ MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS = 0xb0a,
+ MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
+ MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING = 0xb0f,
+ MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO = 0xb13,
+ MLX5_CMD_OP_QUERY_CRYPTO_STATE = 0xb14,
+ MLX5_CMD_OP_QUERY_VUID = 0xb22,
+ MLX5_CMD_OP_QUERY_DPA_PARTITION = 0xb28,
+ MLX5_CMD_OP_QUERY_DPA_PARTITIONS = 0xb2a,
+ MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT = 0xb2e,
+ MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO = 0xb2f,
+ MLX5_CMD_OP_QUERY_RSV_RESOURCES = 0x8000,
+ MLX5_CMD_OP_QUERY_MTT = 0x8001,
+ MLX5_CMD_OP_QUERY_SCHED_QUEUE = 0x8006,
+};
+
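+/*
+ * The FW UID is a u16; returning it widened to int keeps the usual
+ * negative-errno convention available for failures.
+ */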
+static int mlx5ctl_alloc_uid(struct mlx5ctl_dev *mcdev, u32 cap)
+{
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ void *uctx;
+ int ret;
+ u16 uid;
+
+ uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+
+ mlx5ctl_dbg(mcdev, "%s: caps 0x%x\n", __func__, cap);
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(uctx, uctx, cap, cap);
+
+ ret = mlx5_cmd_exec(mcdev->mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ uid = MLX5_GET(create_uctx_out, out, uid);
+ mlx5ctl_dbg(mcdev, "allocated uid %u with caps 0x%x\n", uid, cap);
+ return uid;
+}
+
+static void mlx5ctl_release_uid(struct mlx5ctl_dev *mcdev, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ struct mlx5_core_dev *mdev = mcdev->mdev;
+ int ret;
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ ret = mlx5_cmd_exec_in(mdev, destroy_uctx, in);
+ mlx5ctl_dbg(mcdev, "released uid %u %pe\n", uid, ERR_PTR(ret));
+}
+
+static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx)
+{
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ int uid;
+
+ /*
+ * New FW supports the TOOLS_RESOURCES uid security label
+ * which allows commands to manipulate the global device state.
+	 * Otherwise only basic existing RDMA devx privileges are allowed.
+ */
+ if (MLX5_CAP_GEN(mcdev->mdev, uctx_cap) &
+ MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES)
+ mfd->uctx_caps |= MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES;
+
+ uid = mlx5ctl_alloc_uid(mcdev, mfd->uctx_caps);
+ if (uid < 0)
+ return uid;
+
+ mfd->uctx_uid = uid;
+ return 0;
+}
+
+static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx)
+{
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+
+ mlx5ctl_release_uid(mcdev, mfd->uctx_uid);
+}
+
+static void *mlx5ctl_info(struct fwctl_uctx *uctx, size_t *length)
+{
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ struct fwctl_info_mlx5 *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->uid = mfd->uctx_uid;
+ info->uctx_caps = mfd->uctx_caps;
+ *length = sizeof(*info);
+ return info;
+}
+
+static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
+{
+ u16 opcode = MLX5_GET(mbox_in_hdr, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in_hdr, in, op_mod);
+
+ /*
+ * Currently the driver can't keep track of commands that allocate
+	 * objects in the FW; such commands are safe from a security
+	 * perspective, but nothing would free the FW memory when the FD is
+	 * closed. For now permit only query commands and set commands that
+	 * don't alter objects. The caps for each scope have also not been
+	 * defined yet, so filter commands manually for now.
+ */
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OPCODE_QUERY_VUID:
+ case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
+ /*
+	 * FW limits SET_HCA_CAP on the tools UID to the "other function"
+	 * mode only, which is used for function pre-configuration.
+ */
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+
+ case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
+ case MLX5_CMD_OP_FPGA_QUERY_QP:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_AFU:
+ case MLX5_CMD_OP_QUERY_BURST_SIZE:
+ case MLX5_CMD_OP_QUERY_CAPI_PEC:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_CRYPTO_STATE:
+ case MLX5_CMD_OP_QUERY_DC_CNAK_TRACE:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS:
+ case MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS:
+ case MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS:
+ case MLX5_CMD_OP_QUERY_DPA_PARTITION:
+ case MLX5_CMD_OP_QUERY_DPA_PARTITIONS:
+ case MLX5_CMD_OP_QUERY_DRIVER_VERSION:
+ case MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO:
+ case MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO:
+ case MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_MTT:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER:
+ case MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE:
+ case MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT:
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_QUERY_PSV:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+ case MLX5_CMD_OP_QUERY_RDB:
+ case MLX5_CMD_OP_QUERY_REGEXP_PARAMS:
+ case MLX5_CMD_OP_QUERY_REGEXP_REGISTER:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_RSV_RESOURCES:
+ case MLX5_CMD_OP_QUERY_SCHED_QUEUE:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SF_PARTITION:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_UCTX:
+ case MLX5_CMD_OP_QUERY_UMEM:
+ case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+ case MLX5_CMD_OP_QUERY_VHCA_STATE:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_WOL_ROL:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
+ case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
+ return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+
+ case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
+ return scope >= FWCTL_RPC_DEBUG_WRITE;
+
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_ACCESS_REGISTER_USER:
+ if (op_mod == 0) /* write */
+ return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+ return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+ default:
+ return false;
+ }
+}
+
+static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *rpc_in, size_t in_len, size_t *out_len)
+{
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ void *rpc_out;
+ int ret;
+
+ if (in_len < MLX5_ST_SZ_BYTES(mbox_in_hdr) ||
+ *out_len < MLX5_ST_SZ_BYTES(mbox_out_hdr))
+ return ERR_PTR(-EMSGSIZE);
+
+ mlx5ctl_dbg(mcdev, "[UID %d] cmdif: opcode 0x%x inlen %zu outlen %zu\n",
+ mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+ in_len, *out_len);
+
+ if (!mlx5ctl_validate_rpc(rpc_in, scope))
+ return ERR_PTR(-EBADMSG);
+
+ /*
+ * mlx5_cmd_do() copies the input message to its own buffer before
+ * executing it, so we can reuse the allocation for the output.
+ */
+ if (*out_len <= in_len) {
+ rpc_out = rpc_in;
+ } else {
+ rpc_out = kvzalloc(*out_len, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+ }
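+	/*
+	 * When rpc_out aliases rpc_in the fwctl core notices the shared
+	 * buffer in fwctl_cmd_rpc() and avoids a double free.
+	 */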
+
+ /* Enforce the user context for the command */
+ MLX5_SET(mbox_in_hdr, rpc_in, uid, mfd->uctx_uid);
+ ret = mlx5_cmd_do(mcdev->mdev, rpc_in, in_len, rpc_out, *out_len);
+
+ mlx5ctl_dbg(mcdev,
+ "[UID %d] cmdif: opcode 0x%x status 0x%x retval %pe\n",
+ mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+ MLX5_GET(mbox_out_hdr, rpc_out, status), ERR_PTR(ret));
+
+ /*
+	 * -EREMOTEIO means execution succeeded and the output buffer is
+	 * valid, but an error code was returned inside it. Everything else
+ * means the RPC did not make it to the device.
+ */
+ if (ret && ret != -EREMOTEIO) {
+ if (rpc_out != rpc_in)
+ kvfree(rpc_out);
+ return ERR_PTR(ret);
+ }
+ return rpc_out;
+}
+
+static const struct fwctl_ops mlx5ctl_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_MLX5,
+ .uctx_size = sizeof(struct mlx5ctl_uctx),
+ .open_uctx = mlx5ctl_open_uctx,
+ .close_uctx = mlx5ctl_close_uctx,
+ .info = mlx5ctl_info,
+ .fw_rpc = mlx5ctl_fw_rpc,
+};
+
+static int mlx5ctl_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5ctl_dev *mcdev __free(mlx5ctl) = fwctl_alloc_device(
+ &mdev->pdev->dev, &mlx5ctl_ops, struct mlx5ctl_dev, fwctl);
+ int ret;
+
+ if (!mcdev)
+ return -ENOMEM;
+
+ mcdev->mdev = mdev;
+
+ ret = fwctl_register(&mcdev->fwctl);
+ if (ret)
+ return ret;
+ auxiliary_set_drvdata(adev, no_free_ptr(mcdev));
+ return 0;
+}
+
+static void mlx5ctl_remove(struct auxiliary_device *adev)
+{
+ struct mlx5ctl_dev *mcdev = auxiliary_get_drvdata(adev);
+
+ fwctl_unregister(&mcdev->fwctl);
+ fwctl_put(&mcdev->fwctl);
+}
+
+static const struct auxiliary_device_id mlx5ctl_id_table[] = {
+ {.name = MLX5_ADEV_NAME ".fwctl",},
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, mlx5ctl_id_table);
+
+static struct auxiliary_driver mlx5ctl_driver = {
+ .name = "mlx5_fwctl",
+ .probe = mlx5ctl_probe,
+ .remove = mlx5ctl_remove,
+ .id_table = mlx5ctl_id_table,
+};
+
+module_auxiliary_driver(mlx5ctl_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("mlx5 ConnectX fwctl driver");
+MODULE_AUTHOR("Saeed Mahameed <saeedm@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/fwctl/pds/Makefile b/drivers/fwctl/pds/Makefile
new file mode 100644
index 000000000000..cc2317c07be1
--- /dev/null
+++ b/drivers/fwctl/pds/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_PDS) += pds_fwctl.o
+
+pds_fwctl-y += main.o
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
new file mode 100644
index 000000000000..1809853f6353
--- /dev/null
+++ b/drivers/fwctl/pds/main.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) Advanced Micro Devices, Inc */
+
+#include <linux/module.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/string.h>
+
+#include <uapi/fwctl/fwctl.h>
+#include <uapi/fwctl/pds.h>
+#include <linux/fwctl.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+struct pdsfc_uctx {
+ struct fwctl_uctx uctx;
+ u32 uctx_caps;
+};
+
+struct pdsfc_rpc_endpoint_info {
+ u32 endpoint;
+ dma_addr_t operations_pa;
+ struct pds_fwctl_query_data *operations;
+ struct mutex lock; /* lock for endpoint info management */
+};
+
+struct pdsfc_dev {
+ struct fwctl_device fwctl;
+ struct pds_auxiliary_dev *padev;
+ u32 caps;
+ struct pds_fwctl_ident ident;
+ dma_addr_t endpoints_pa;
+ struct pds_fwctl_query_data *endpoints;
+ struct pdsfc_rpc_endpoint_info *endpoint_info;
+};
+
+static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
+{
+ struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+ struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
+
+ pdsfc_uctx->uctx_caps = pdsfc->caps;
+
+ return 0;
+}
+
+static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
+{
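+	/* Nothing to undo: pdsfc_open_uctx() only latched the capability bits */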
+}
+
+static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
+{
+ struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
+ struct fwctl_info_pds *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->uctx_caps = pdsfc_uctx->uctx_caps;
+
+ return info;
+}
+
+static int pdsfc_identify(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ union pds_core_adminq_cmd cmd;
+ struct pds_fwctl_ident *ident;
+ dma_addr_t ident_pa;
+ int err;
+
+ ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
+ if (!ident) {
+		dev_err(dev, "Failed to allocate ident buffer\n");
+ return -ENOMEM;
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_ident = {
+ .opcode = PDS_FWCTL_CMD_IDENT,
+ .version = 0,
+ .len = cpu_to_le32(sizeof(*ident)),
+ .ident_pa = cpu_to_le64(ident_pa),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err)
+ dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
+ cmd.fwctl_ident.opcode, err);
+ else
+ pdsfc->ident = *ident;
+
+ dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);
+
+ return err;
+}
+
+static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_endpoints;
+ int i;
+
+ if (!pdsfc->endpoints)
+ return;
+
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
+ mutex_destroy(&pdsfc->endpoint_info[i].lock);
+ vfree(pdsfc->endpoint_info);
+ pdsfc->endpoint_info = NULL;
+ dma_free_coherent(dev->parent, PAGE_SIZE,
+ pdsfc->endpoints, pdsfc->endpoints_pa);
+ pdsfc->endpoints = NULL;
+ pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
+}
+
+static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_endpoints;
+ int i;
+
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; i < num_endpoints; i++) {
+ struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];
+
+ if (ei->operations) {
+ dma_free_coherent(dev->parent, PAGE_SIZE,
+ ei->operations, ei->operations_pa);
+ ei->operations = NULL;
+ ei->operations_pa = DMA_MAPPING_ERROR;
+ }
+ }
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
+ dma_addr_t *pa)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ struct pds_fwctl_query_data *data;
+ union pds_core_adminq_cmd cmd;
+ dma_addr_t data_pa;
+ int err;
+
+ data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+ if (!data) {
+		dev_err(dev, "Failed to allocate endpoint list\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_query = {
+ .opcode = PDS_FWCTL_CMD_QUERY,
+ .entity = PDS_FWCTL_RPC_ROOT,
+ .version = 0,
+ .query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+ .query_data_buf_pa = cpu_to_le64(data_pa),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+ cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+ dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+ return ERR_PTR(err);
+ }
+
+ *pa = data_pa;
+
+ return data;
+}
+
+static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
+{
+ struct pds_fwctl_query_data_endpoint *ep_entry;
+ u32 num_endpoints;
+ int i;
+
+ pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
+ if (IS_ERR(pdsfc->endpoints))
+ return PTR_ERR(pdsfc->endpoints);
+
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ pdsfc->endpoint_info = vcalloc(num_endpoints,
+ sizeof(*pdsfc->endpoint_info));
+ if (!pdsfc->endpoint_info) {
+ pdsfc_free_endpoints(pdsfc);
+ return -ENOMEM;
+ }
+
+ ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
+ for (i = 0; i < num_endpoints; i++) {
+ mutex_init(&pdsfc->endpoint_info[i].lock);
+ pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
+ }
+
+ return 0;
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
+ dma_addr_t *pa, u32 ep)
+{
+ struct pds_fwctl_query_data_operation *entries;
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ struct pds_fwctl_query_data *data;
+ union pds_core_adminq_cmd cmd;
+ dma_addr_t data_pa;
+ u32 num_entries;
+ int err;
+ int i;
+
+ /* Query the operations list for the given endpoint */
+ data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+ if (!data) {
+		dev_err(dev, "Failed to allocate operations list\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_query = {
+ .opcode = PDS_FWCTL_CMD_QUERY,
+ .entity = PDS_FWCTL_RPC_ENDPOINT,
+ .version = 0,
+ .query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+ .query_data_buf_pa = cpu_to_le64(data_pa),
+ .ep = cpu_to_le32(ep),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+ cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+ dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+ return ERR_PTR(err);
+ }
+
+ *pa = data_pa;
+
+ entries = (struct pds_fwctl_query_data_operation *)data->entries;
+ num_entries = le32_to_cpu(data->num_entries);
+ dev_dbg(dev, "num_entries %d\n", num_entries);
+ for (i = 0; i < num_entries; i++) {
+ /* Translate FW command attribute to fwctl scope */
+ switch (entries[i].scope) {
+ case PDSFC_FW_CMD_ATTR_READ:
+ case PDSFC_FW_CMD_ATTR_WRITE:
+ case PDSFC_FW_CMD_ATTR_SYNC:
+ entries[i].scope = FWCTL_RPC_CONFIGURATION;
+ break;
+ case PDSFC_FW_CMD_ATTR_DEBUG_READ:
+ entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
+ break;
+ case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
+ entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
+ break;
+ default:
+ entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
+ break;
+ }
+ dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
+ ep, le32_to_cpu(entries[i].id), entries[i].scope);
+ }
+
+ return data;
+}
+
+static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
+ struct fwctl_rpc_pds *rpc,
+ enum fwctl_rpc_scope scope)
+{
+ struct pds_fwctl_query_data_operation *op_entry;
+ struct pdsfc_rpc_endpoint_info *ep_info = NULL;
+ struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_entries;
+ int i;
+
+	/*
+	 * Validate the RPC in_len & out_len against the device's
+	 * ident.max_req_sz & max_resp_sz limits.
+	 */
+ if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
+ dev_dbg(dev, "Invalid request size %u, max %u\n",
+ rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
+ return -EINVAL;
+ }
+
+ if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
+ dev_dbg(dev, "Invalid response size %u, max %u\n",
+ rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
+ return -EINVAL;
+ }
+
+ num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; i < num_entries; i++) {
+ if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
+ ep_info = &pdsfc->endpoint_info[i];
+ break;
+ }
+ }
+ if (!ep_info) {
+ dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep);
+ return -EINVAL;
+ }
+
+ /* query and cache this endpoint's operations */
+ mutex_lock(&ep_info->lock);
+ if (!ep_info->operations) {
+ struct pds_fwctl_query_data *operations;
+
+ operations = pdsfc_get_operations(pdsfc,
+ &ep_info->operations_pa,
+ rpc->in.ep);
+ if (IS_ERR(operations)) {
+ mutex_unlock(&ep_info->lock);
+			return PTR_ERR(operations);
+ }
+ ep_info->operations = operations;
+ }
+ mutex_unlock(&ep_info->lock);
+
+ /* reject unsupported and/or out of scope commands */
+ op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
+ num_entries = le32_to_cpu(ep_info->operations->num_entries);
+ for (i = 0; i < num_entries; i++) {
+ if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
+ if (scope < op_entry[i].scope)
+ return -EPERM;
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep);
+
+ return -EINVAL;
+}
+
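+/*
+ * RPC flow: copy the request payload in from userspace, DMA-map the request
+ * and response buffers, issue the adminq command, then copy the response
+ * payload back. The fwctl_rpc_pds struct itself is returned (out == in), so
+ * the fwctl core detects the aliasing and does not free the buffer twice.
+ */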
+static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *in, size_t in_len, size_t *out_len)
+{
+ struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+ struct device *dev = &uctx->fwctl->dev;
+ union pds_core_adminq_comp comp = {0};
+ dma_addr_t out_payload_dma_addr = 0;
+ dma_addr_t in_payload_dma_addr = 0;
+ struct fwctl_rpc_pds *rpc = in;
+ union pds_core_adminq_cmd cmd;
+ void *out_payload = NULL;
+ void *in_payload = NULL;
+ void *out = NULL;
+ int err;
+
+ err = pdsfc_validate_rpc(pdsfc, rpc, scope);
+ if (err)
+ return ERR_PTR(err);
+
+ if (rpc->in.len > 0) {
+ in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
+ if (IS_ERR(in_payload)) {
+ dev_dbg(dev, "Failed to copy in_payload from user\n");
+ return in_payload;
+ }
+
+ in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
+ rpc->in.len, DMA_TO_DEVICE);
+ err = dma_mapping_error(dev->parent, in_payload_dma_addr);
+ if (err) {
+ dev_dbg(dev, "Failed to map in_payload\n");
+ goto err_in_payload;
+ }
+ }
+
+ if (rpc->out.len > 0) {
+ out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
+ if (!out_payload) {
+ dev_dbg(dev, "Failed to allocate out_payload\n");
+ err = -ENOMEM;
+ goto err_out_payload;
+ }
+
+ out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
+ rpc->out.len, DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev->parent, out_payload_dma_addr);
+ if (err) {
+ dev_dbg(dev, "Failed to map out_payload\n");
+ goto err_out_payload;
+ }
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_rpc = {
+ .opcode = PDS_FWCTL_CMD_RPC,
+ .flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
+ .ep = cpu_to_le32(rpc->in.ep),
+ .op = cpu_to_le32(rpc->in.op),
+ .req_pa = cpu_to_le64(in_payload_dma_addr),
+ .req_sz = cpu_to_le32(rpc->in.len),
+ .resp_pa = cpu_to_le64(out_payload_dma_addr),
+ .resp_sz = cpu_to_le32(rpc->out.len),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n",
+ __func__, rpc->in.ep, rpc->in.op,
+ cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems,
+ cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems,
+ err);
+ goto done;
+ }
+
+ dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);
+
+ if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) {
+ dev_dbg(dev, "Failed to copy out_payload to user\n");
+ out = ERR_PTR(-EFAULT);
+ goto done;
+ }
+
+ rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
+ *out_len = in_len;
+ out = in;
+
+done:
+ if (out_payload_dma_addr)
+ dma_unmap_single(dev->parent, out_payload_dma_addr,
+ rpc->out.len, DMA_FROM_DEVICE);
+err_out_payload:
+ kfree(out_payload);
+
+ if (in_payload_dma_addr)
+ dma_unmap_single(dev->parent, in_payload_dma_addr,
+ rpc->in.len, DMA_TO_DEVICE);
+err_in_payload:
+ kfree(in_payload);
+ if (err)
+ return ERR_PTR(err);
+
+ return out;
+}
+
+static const struct fwctl_ops pdsfc_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_PDS,
+ .uctx_size = sizeof(struct pdsfc_uctx),
+ .open_uctx = pdsfc_open_uctx,
+ .close_uctx = pdsfc_close_uctx,
+ .info = pdsfc_info,
+ .fw_rpc = pdsfc_fw_rpc,
+};
+
+static int pdsfc_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(adev, struct pds_auxiliary_dev, aux_dev);
+ struct device *dev = &adev->dev;
+ struct pdsfc_dev *pdsfc;
+ int err;
+
+ pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
+ struct pdsfc_dev, fwctl);
+ if (!pdsfc)
+ return -ENOMEM;
+ pdsfc->padev = padev;
+
+ err = pdsfc_identify(pdsfc);
+ if (err) {
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to identify device\n");
+ }
+
+ err = pdsfc_init_endpoints(pdsfc);
+ if (err) {
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to init endpoints\n");
+ }
+
+ pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;
+
+ err = fwctl_register(&pdsfc->fwctl);
+ if (err) {
+ pdsfc_free_endpoints(pdsfc);
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to register device\n");
+ }
+
+ auxiliary_set_drvdata(adev, pdsfc);
+
+ return 0;
+}
+
+static void pdsfc_remove(struct auxiliary_device *adev)
+{
+ struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);
+
+ fwctl_unregister(&pdsfc->fwctl);
+ pdsfc_free_operations(pdsfc);
+ pdsfc_free_endpoints(pdsfc);
+
+ fwctl_put(&pdsfc->fwctl);
+}
+
+static const struct auxiliary_device_id pdsfc_id_table[] = {
+ {.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);
+
+static struct auxiliary_driver pdsfc_driver = {
+ .name = "pds_fwctl",
+ .probe = pdsfc_probe,
+ .remove = pdsfc_remove,
+ .id_table = pdsfc_id_table,
+};
+
+module_auxiliary_driver(pdsfc_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("pds fwctl driver");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
+MODULE_AUTHOR("Brett Creeley <brett.creeley@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d93cd4f722b4..7ee3afbc2b05 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -3,6 +3,9 @@
# GPIO infrastructure and drivers
#
+config GPIOLIB_LEGACY
+ def_bool y
+
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -69,9 +72,16 @@ config GPIO_SYSFS
use the character device /dev/gpiochipN with the appropriate
ioctl() operations instead.
+config GPIO_SYSFS_LEGACY
+ bool "Enable legacy functionalities of the sysfs interface"
+ depends on GPIO_SYSFS
+ default y if GPIO_SYSFS
+ help
+ Say Y here if you want to enable the legacy, global GPIO
+ numberspace-based functionalities of the sysfs interface.
+
config GPIO_CDEV
- bool
- prompt "Character device (/dev/gpiochipN) support" if EXPERT
+ bool "Character device (/dev/gpiochipN) support" if EXPERT
default y
help
Say Y here to add the character device /dev/gpiochipN interface
@@ -149,9 +159,7 @@ config GPIO_74XX_MMIO
config GPIO_ALTERA
tristate "Altera GPIO"
- depends on OF_GPIO
select GPIOLIB_IRQCHIP
- select OF_GPIO_MM_GPIOCHIP
help
Say Y or M here to build support for the Altera PIO device.
@@ -204,6 +212,7 @@ config GPIO_RASPBERRYPI_EXP
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Turn on GPIO support for Broadcom "Kona" chips.
@@ -216,6 +225,18 @@ config GPIO_BCM_XGS_IPROC
help
Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+config GPIO_BLZP1600
+ tristate "Blaize BLZP1600 GPIO support"
+ default y if ARCH_BLAIZE
+ depends on ARCH_BLAIZE || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to add support for the Blaize BLZP1600 GPIO device.
+ The controller is based on the Verisilicon Microelectronics GPIO APB v0.2
+ IP block.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -243,7 +264,8 @@ config GPIO_CLPS711X
config GPIO_DAVINCI
tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
- depends on (ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)
+ depends on ((ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)) || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -281,7 +303,7 @@ config GPIO_EN7523
config GPIO_EP93XX
def_bool y
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
@@ -341,9 +363,9 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF_GPIO
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
@@ -371,8 +393,7 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on X86
- depends on LPC_ICH
+ depends on (X86 && LPC_ICH) || (COMPILE_TEST && HAS_IOPORT)
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -387,8 +408,7 @@ config GPIO_IMX_SCU
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
- depends on ARCH_IXP4XX
- depends on OF
+ depends on (ARCH_IXP4XX && OF) || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -416,6 +436,7 @@ config GPIO_LOONGSON_64BIT
depends on LOONGARCH || COMPILE_TEST
depends on OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO functionality of a number of
Loongson series of chips. The Loongson GPIO controller supports
@@ -428,6 +449,7 @@ config GPIO_LPC18XX
default y if ARCH_LPC18XX
depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST)
select IRQ_DOMAIN_HIERARCHY
+ select GPIOLIB_IRQCHIP
help
Select this option to enable GPIO driver for
NXP LPC18XX/43XX devices.
@@ -463,7 +485,6 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
- select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
@@ -471,7 +492,7 @@ config GPIO_MPC8XXX
FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -486,9 +507,9 @@ config GPIO_MT7621
Say yes here to support the Mediatek MT7621 SoC GPIO device.
config GPIO_MVEBU
- def_bool y
- depends on PLAT_ORION || ARCH_MVEBU
- depends on OF_GPIO
+ bool "Marvell Orion and EBU GPIO support" if COMPILE_TEST
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
+ default PLAT_ORION || ARCH_MVEBU
select GENERIC_IRQ_CHIP
select REGMAP_MMIO
@@ -533,9 +554,9 @@ config GPIO_OCTEON
family of SOCs.
config GPIO_OMAP
- tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
+ tristate "TI OMAP GPIO support"
+ depends on ARCH_OMAP || COMPILE_TEST
default y if ARCH_OMAP
- depends on ARM
select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
help
@@ -543,15 +564,22 @@ config GPIO_OMAP
config GPIO_PL061
tristate "PrimeCell PL061 GPIO support"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
help
Say yes here to support the PrimeCell PL061 GPIO device.
+config GPIO_POLARFIRE_SOC
+ bool "Microchip FPGA GPIO support"
+ select REGMAP_MMIO
+ help
+ Say yes here to support the GPIO controllers on Microchip FPGAs.
+
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the PXA GPIO device.
@@ -601,7 +629,7 @@ config GPIO_ROCKCHIP
config GPIO_RTD
tristate "Realtek DHC GPIO support"
- depends on ARCH_REALTEK
+ depends on ARCH_REALTEK || COMPILE_TEST
default y
select GPIOLIB_IRQCHIP
help
@@ -653,6 +681,15 @@ config GPIO_SNPS_CREG
where only several fields in register belong to GPIO lines and
each GPIO line owns a field with different length and on/off value.
+config GPIO_SPACEMIT_K1
+ tristate "SPACEMIT K1 GPIO support"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+	  Say yes here to support SpacemiT's K1 GPIO device.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -697,7 +734,8 @@ config GPIO_TANGIER
If built as a module its name will be gpio-tangier.
config GPIO_TB10X
- bool
+ bool "Abilis Systems TB10x GPIO controller"
+ depends on ARC_PLAT_TB10X || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
select OF_GPIO
@@ -714,13 +752,13 @@ config GPIO_TEGRA
config GPIO_TEGRA186
tristate "NVIDIA Tegra186 GPIO support"
- default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
- depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
+ default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
- Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
+ Say yes here to support GPIO pins on NVIDIA Tegra186, 194 and 234 SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
@@ -750,10 +788,11 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- bool "VF610 GPIO support"
+ tristate "VF610 GPIO support"
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
help
Say yes here to support i.MX or Vybrid vf610 GPIOs.
@@ -796,7 +835,6 @@ config GPIO_XGENE_SB
config GPIO_XILINX
tristate "Xilinx GPIO support"
select GPIOLIB_IRQCHIP
- depends on OF_GPIO
help
Say yes here to support the Xilinx FPGA GPIO device.
@@ -827,14 +865,14 @@ config GPIO_ZEVIO
config GPIO_ZYNQ
tristate "Xilinx Zynq GPIO support"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support Xilinx Zynq GPIO controller.
config GPIO_ZYNQMP_MODEPIN
tristate "ZynqMP ps-mode pin GPIO configuration driver"
- depends on ZYNQMP_FIRMWARE
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
default ZYNQMP_FIRMWARE
help
Say yes here to support the ZynqMP ps-mode pin GPIO configuration
@@ -846,7 +884,7 @@ config GPIO_ZYNQMP_MODEPIN
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select GPIO_GENERIC
help
Say Y or M here to support GPIO on Loongson1 SoCs.
@@ -863,7 +901,7 @@ config GPIO_AMD_FCH
config GPIO_MSC313
bool "MStar MSC313 GPIO support"
- depends on ARCH_MSTARV7
+ depends on ARCH_MSTARV7 || COMPILE_TEST
default ARCH_MSTARV7
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -1156,14 +1194,18 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202,
+ pcal6408, pcal9554b, tca9538
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ tca6416, pca6416, pcal6416, pcal9535, pcal9555a, max7318,
+ tca9539
- 24 bits: tca6424
+ 18 bits: tca6418
- 40 bits: pca9505, pca9698
+ 24 bits: tca6424, pcal6524
+
+ 40 bits: pca9505, pca9698, pca9506
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -1236,6 +1278,7 @@ config GPIO_ADP5520
config GPIO_ADP5585
tristate "GPIO Support for ADP5585"
depends on MFD_ADP5585
+ select GPIOLIB_IRQCHIP
help
This option enables support for the GPIO function found in the Analog
Devices ADP5585.
@@ -1287,6 +1330,16 @@ config GPIO_BD9571MWV
This driver can also be built as a module. If so, the module
will be called gpio-bd9571mwv.
+config GPIO_CGBC
+ tristate "Congatec Board Controller GPIO support"
+ depends on MFD_CGBC
+ help
+ Select this option to enable GPIO support for the Congatec Board
+ Controller.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-cgbc.
+
config GPIO_CROS_EC
tristate "ChromeOS EC GPIO support"
depends on CROS_EC
@@ -1352,7 +1405,7 @@ config GPIO_DLN2
config HTC_EGPIO
bool "HTC EGPIO support"
- depends on ARM
+ depends on ARM || COMPILE_TEST
help
This driver supports the CPLD egpio chip present on
several HTC phones. It provides basic support for input
@@ -1427,12 +1480,34 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MACSMC
+ tristate "Apple Mac SMC GPIO"
+ depends on MFD_MACSMC
+ help
+ Support for GPIOs controlled by the SMC microcontroller on Apple Mac
+ systems.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-macsmc.
+
config GPIO_MADERA
tristate "Cirrus Logic Madera class codecs"
depends on PINCTRL_MADERA
help
Support for GPIOs on Cirrus Logic Madera class codecs.
+config GPIO_MAX7360
+ tristate "MAX7360 GPIO support"
+ depends on MFD_MAX7360
+ select GPIO_REGMAP
+ select REGMAP_IRQ
+ help
+	  Allows using the MAX7360 I/O expander's PWM lines as GPIOs and its
+	  keypad COL lines as GPOs.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max7360.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -1450,8 +1525,33 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
+config GPIO_MAX77759
+ tristate "Maxim Integrated MAX77759 GPIO support"
+ depends on MFD_MAX77759
+ default MFD_MAX77759
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for MAX77759 PMIC from Maxim Integrated.
+ There are two GPIOs available on these chips in total, both of
+ which can also generate interrupts.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max77759.
+
+config GPIO_NCT6694
+ tristate "Nuvoton NCT6694 GPIO controller support"
+ depends on MFD_NCT6694
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
+ help
+ This driver supports 8 GPIO pins per bank that can all be interrupt
+ sources.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-nct6694.
+
config GPIO_PALMAS
- bool "TI PALMAS series PMICs GPIO"
+ tristate "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
help
Select this option to enable GPIO driver for the TI PALMAS
@@ -1487,7 +1587,7 @@ config GPIO_SL28CPLD
called gpio-sl28cpld.
config GPIO_STMPE
- bool "STMPE GPIOs"
+ tristate "STMPE GPIOs"
depends on MFD_STMPE
depends on OF_GPIO
select GPIOLIB_IRQCHIP
@@ -1507,12 +1607,13 @@ config GPIO_TC3589X
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE
+ select GPIOLIB_IRQCHIP
help
Add support for the GPIO IP in the timberdale FPGA.
config GPIO_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD GPIO driver"
- depends on MFD_TN48M_CPLD
+ depends on MFD_TN48M_CPLD || COMPILE_TEST
select GPIO_REGMAP
help
This enables support for the GPIOs found on the Delta
@@ -1658,7 +1759,7 @@ config GPIO_AMD8111
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
- depends on VIDEO_BT848=n
+ depends on VIDEO_BT848=n || COMPILE_TEST
help
The BT8xx frame grabber chip has 24 GPIO pins that can be abused
as a cheap PCI GPIO card.
@@ -1779,7 +1880,6 @@ menu "SPI GPIO expanders"
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
@@ -1844,12 +1944,32 @@ config GPIO_VIPERBOARD
River Tech's viperboard.h for detailed meaning
of the module parameters.
+config GPIO_MPSSE
+ tristate "FTDI MPSSE GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for FTDI's MPSSE interface. These can do input and
+ output. Each MPSSE provides 16 IO pins.
+
+config GPIO_USBIO
+ tristate "Intel USBIO GPIO support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+	  Select this option to enable the GPIO driver for the Intel
+	  USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio_usbio.
+
endmenu
menu "Virtual GPIO drivers"
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
+ select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
Say yes here to enable the GPIO Aggregator, which provides a way to
aggregate existing GPIO lines into a new virtual GPIO chip.
@@ -1892,6 +2012,7 @@ config GPIO_SIM
tristate "GPIO Simulator Module"
select IRQ_SIM
select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
This enables the GPIO simulator - a configfs-based GPIO testing
driver.
@@ -1920,6 +2041,7 @@ config GPIO_VIRTUSER
select DEBUG_FS
select CONFIGFS_FS
select IRQ_WORK
+ select DEV_SYNC_PROBE
help
Say Y here to enable the configurable, configfs-based virtual GPIO
consumer testing driver.
@@ -1930,3 +2052,6 @@ config GPIO_VIRTUSER
endmenu
endif
+
+config DEV_SYNC_PROBE
+ tristate
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1429e8c0229b..ec296fa14bfd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,11 +5,12 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o
-obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
+obj-$(CONFIG_GPIOLIB_LEGACY) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
+gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
# Device drivers. Generally keep list sorted alphabetically
@@ -19,6 +20,9 @@ obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
gpio-generic-$(CONFIG_GPIO_GENERIC) += gpio-mmio.o
+# Utilities for drivers that need synchronous fake device creation
+obj-$(CONFIG_DEV_SYNC_PROBE) += dev-sync-probe.o
+
obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
@@ -42,9 +46,11 @@ obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD71815) += gpio-bd71815.o
obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
+obj-$(CONFIG_GPIO_BLZP1600) += gpio-blzp1600.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
+obj-$(CONFIG_GPIO_CGBC) += gpio-cgbc.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
obj-$(CONFIG_GPIO_CROS_EC) += gpio-cros-ec.o
@@ -93,14 +99,17 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX7360) += gpio-max7360.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
+obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
@@ -114,11 +123,13 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MPSSE) += gpio-mpsse.o
obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NCT6694) += gpio-nct6694.o
obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
@@ -133,6 +144,7 @@ obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
+obj-$(CONFIG_GPIO_POLARFIRE_SOC) += gpio-mpfs.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
@@ -153,6 +165,7 @@ obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPACEMIT_K1) += gpio-spacemit-k1.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
@@ -181,6 +194,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
+obj-$(CONFIG_GPIO_USBIO) += gpio-usbio.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 189c3abe7e79..8ed74e05903a 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -1,6 +1,7 @@
This is a place for planning the ongoing long-term work in the GPIO
subsystem.
+===============================================================================
GPIO descriptors
@@ -43,11 +44,19 @@ Work items:
to a machine description such as device tree, ACPI or fwnode that
implicitly does not use global GPIO numbers.
+- Fix drivers to not read back struct gpio_chip::base. Some drivers do
+ that and would be broken by attempts to poison it or make it dynamic.
+ Example in AT91 pinctrl driver:
+ https://lore.kernel.org/all/1d00c056-3d61-4c22-bedd-3bae0bf1ddc4@pengutronix.de/
+ This particular driver is also DT-only, so with the above fixed, the
+ base can be made dynamic (set to -1) if CONFIG_GPIO_SYSFS is disabled.
+
- When this work is complete (will require some of the items in the
following ongoing work as well) we can delete the old global
numberspace accessors from <linux/gpio.h> and eventually delete
<linux/gpio.h> altogether.
+-------------------------------------------------------------------------------
Get rid of <linux/of_gpio.h>
@@ -61,8 +70,8 @@ Work items:
- Change all consumer drivers that #include <linux/of_gpio.h> to
#include <linux/gpio/consumer.h> and stop doing custom parsing of the
- GPIO lines from the device tree. This can be tricky and often ivolves
- changing boardfiles, etc.
+ GPIO lines from the device tree. This can be tricky and often involves
+ changing board files, etc.
- Pull semantics for legacy device tree (OF) GPIO lookups into
gpiolib-of.c: in some cases subsystems are doing custom flags and
@@ -75,6 +84,7 @@ Work items:
- Delete <linux/of_gpio.h> when all the above is complete and everything
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+-------------------------------------------------------------------------------
Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
@@ -85,15 +95,7 @@ Work items:
to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
-
-Get rid of <linux/gpio.h>
-
-This legacy header is a one stop shop for anything GPIO is closely tied
-to the global GPIO numberspace. The endgame of the above refactorings will
-be the removal of <linux/gpio.h> and from that point only the specialized
-headers under <linux/gpio/*.h> will be used. This requires all the above to
-be completed and is expected to take a long time.
-
+-------------------------------------------------------------------------------
Collect drivers
@@ -108,6 +110,7 @@ At the same time it makes sense to get rid of code duplication in existing or
new coming drivers. For example, gpio-ml-ioh should be incorporated into
gpio-pch.
+-------------------------------------------------------------------------------
Generic MMIO GPIO
@@ -128,6 +131,7 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
+-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -135,6 +139,7 @@ In the very similar way to Generic MMIO GPIO convert the users which can
take advantage of using regmap over direct IO accessors. Note, even in
MMIO case the regmap MMIO with gpio-regmap.c is preferable over gpio-mmio.c.
+-------------------------------------------------------------------------------
GPIOLIB irqchip
@@ -144,53 +149,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-
-Increase integration with pin control
-
-There are already ways to use pin control as back-end for GPIO and
-it may make sense to bring these subsystems closer. One reason for
-creating pin control as its own subsystem was that we could avoid any
-use of the global GPIO numbers. Once the above is complete, it may
-make sense to simply join the subsystems into one and make pin
-multiplexing, pin configuration, GPIO, etc selectable options in one
-and the same pin control and GPIO subsystem.
-
-
-Debugfs in place of sysfs
-
-The old sysfs code that enables simple uses of GPIOs from the
-command line is still popular despite the existance of the proper
-character device. The reason is that it is simple to use on
-root filesystems where you only have a minimal set of tools such
-as "cat", "echo" etc.
-
-The old sysfs still need to be strongly deprecated and removed
-as it relies on the global GPIO numberspace that assume a strict
-order of global GPIO numbers that do not change between boots
-and is independent of probe order.
-
-To solve this and provide an ABI that people can use for hacks
-and development, implement a debugfs interface to manipulate
-GPIO lines that can do everything that sysfs can do today: one
-directory per gpiochip and one file entry per line:
-
-/sys/kernel/debug/gpiochip/gpiochip0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio1
-/sys/kernel/debug/gpiochip/gpiochip0/gpio2
-/sys/kernel/debug/gpiochip/gpiochip0/gpio3
-...
-/sys/kernel/debug/gpiochip/gpiochip1
-/sys/kernel/debug/gpiochip/gpiochip1/gpio0
-/sys/kernel/debug/gpiochip/gpiochip1/gpio1
-...
-
-The exact files and design of the debugfs interface can be
-discussed but the idea is to provide a low-level access point
-for debugging and hacking and to expose all lines without the
-need of any exporting. Also provide ample ammunition to shoot
-oneself in the foot, because this is debugfs after all.
-
+-------------------------------------------------------------------------------
Moving over to immutable irq_chip structures
@@ -209,3 +168,46 @@ A small number of drivers have been converted (pl061, tegra186, msm,
amd, apple), and can be used as examples of how to proceed with this
conversion. Note that drivers using the generic irqchip framework
cannot be converted yet, but watch this space!
+
+-------------------------------------------------------------------------------
+
+Remove legacy sysfs features
+
+We have two parallel per-chip class devices and per-exported-line attribute
+groups in sysfs. One uses the obsolete global GPIO numberspace and the
+other relies on hardware offsets of pins within the chip. Remove the former
+once user-space has switched to using the latter.
+
+-------------------------------------------------------------------------------
+
+Remove GPIOD_FLAGS_BIT_NONEXCLUSIVE
+
+GPIOs in the Linux kernel are meant to be an exclusive resource. This means
+that the GPIO descriptors (the software representation of the hardware concept)
+are not reference counted and - in general - only one user at a time can
+request a GPIO line and control its settings. The consumer API is designed
+around full control of the line's state as evidenced by the fact that, for
+instance, gpiod_set_value() does indeed drive the line as requested, instead
+of bumping an enable counter of some sort.
+
+A problematic use-case for GPIOs is when two consumers want to use the same
+descriptor independently. An example of such a user is the regulator subsystem,
+which may instantiate several struct regulator_dev instances containing
+a struct device but using the same enable GPIO line.
+
+A workaround was introduced in the form of the GPIOD_FLAGS_BIT_NONEXCLUSIVE
+flag, but its implementation is problematic: it does not provide any
+synchronization of usage, nor does it introduce any enable count, meaning
+the non-exclusive users of the same descriptor will in fact "fight" for
+control over it. This flag should be removed and replaced with a better
+solution, possibly based on the new power sequencing subsystem.
+
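[Editorial note: to make the failure mode concrete, a minimal sketch, not part
of the patch; the "shared-en" lookup name and the two consumer devices are
hypothetical, but the flag usage follows existing regulator drivers.]

    /*
     * Illustrative only: both consumers request the same physical line
     * with the non-exclusive flag, so both get the same descriptor.
     */
    struct gpio_desc *a = gpiod_get(dev_a, "shared-en",
                    GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
    struct gpio_desc *b = gpiod_get(dev_b, "shared-en",
                    GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);

    gpiod_set_value_cansleep(a, 1); /* consumer A enables its supply */
    gpiod_set_value_cansleep(b, 0); /* B disables it for A too: no enable count */

Because nothing counts the enable requests, whichever consumer wrote last wins.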
+-------------------------------------------------------------------------------
+
+Remove devm_gpiod_unhinge()
+
+devm_gpiod_unhinge() is provided as a way to transfer the ownership of managed
+enable GPIOs to the regulator core. Rather than doing that, however, we
+should make it possible for the regulator subsystem to deal with GPIO
+resources whose lifetime it doesn't control: logically, a GPIO obtained
+by a caller should also be freed by it.
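
[Editorial note: for context, a minimal sketch, not part of the patch, of the
handoff pattern this helper enables today; the probe context and the "enable"
con_id are hypothetical.]

    struct gpio_desc *en;
    int ret;

    /* Managed request: devres would normally free this on driver detach. */
    en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
    if (IS_ERR(en))
        return PTR_ERR(en);

    /*
     * Strip the devres action so ownership can be handed to another
     * subsystem (the regulator core does this today). Afterwards the
     * GPIO is freed by someone other than the device that requested
     * it, which is exactly the asymmetry this entry wants to remove.
     */
    ret = devm_gpiod_unhinge(dev, en);
    if (ret)
        return ret;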
diff --git a/drivers/gpio/dev-sync-probe.c b/drivers/gpio/dev-sync-probe.c
new file mode 100644
index 000000000000..9ea733b863b2
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common code for drivers creating fake platform devices.
+ *
+ * Provides synchronous device creation: waits for probe completion and
+ * returns the probe success or error status to the device creator.
+ *
+ * Copyright (C) 2021 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2025 Koichiro Den <koichiro.den@canonical.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dev-sync-probe.h"
+
+static int dev_sync_probe_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct dev_sync_probe_data *pdata;
+ struct device *dev = data;
+
+ pdata = container_of(nb, struct dev_sync_probe_data, bus_notifier);
+ if (!device_match_name(dev, pdata->name))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER:
+ pdata->driver_bound = true;
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ pdata->driver_bound = false;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ complete(&pdata->probe_completion);
+ return NOTIFY_OK;
+}
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ init_completion(&data->probe_completion);
+ data->bus_notifier.notifier_call = dev_sync_probe_notifier_call;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_init);
+
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo)
+{
+ struct platform_device *pdev;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s.%d", pdevinfo->name, pdevinfo->id);
+ if (!name)
+ return -ENOMEM;
+
+ data->driver_bound = false;
+ data->name = name;
+ reinit_completion(&data->probe_completion);
+ bus_register_notifier(&platform_bus_type, &data->bus_notifier);
+
+ pdev = platform_device_register_full(pdevinfo);
+ if (IS_ERR(pdev)) {
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+ kfree(data->name);
+ return PTR_ERR(pdev);
+ }
+
+ wait_for_completion(&data->probe_completion);
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+
+ if (!data->driver_bound) {
+ platform_device_unregister(pdev);
+ kfree(data->name);
+ return -ENXIO;
+ }
+
+ data->pdev = pdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_register);
+
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data)
+{
+ platform_device_unregister(data->pdev);
+ kfree(data->name);
+ data->pdev = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_unregister);
+
+MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl>");
+MODULE_AUTHOR("Koichiro Den <koichiro.den@canonical.com>");
+MODULE_DESCRIPTION("Utilities for synchronous fake device creation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/dev-sync-probe.h b/drivers/gpio/dev-sync-probe.h
new file mode 100644
index 000000000000..4b3d52b70519
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef DEV_SYNC_PROBE_H
+#define DEV_SYNC_PROBE_H
+
+#include <linux/completion.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+struct dev_sync_probe_data {
+ struct platform_device *pdev;
+ const char *name;
+
+ /* Synchronize with probe */
+ struct notifier_block bus_notifier;
+ struct completion probe_completion;
+ bool driver_bound;
+};
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data);
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo);
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data);
+
+#endif /* DEV_SYNC_PROBE_H */
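[Editorial note: a minimal usage sketch of the helper above; the "foo" device
name and the wrapper functions are hypothetical, but the flow follows
dev-sync-probe.c: dev_sync_probe_register() blocks until the platform driver
either binds or fails to bind, returning -ENXIO in the latter case.]

    static struct dev_sync_probe_data probe_data;

    static int foo_create_device(int id)
    {
        struct platform_device_info pdevinfo = {
            .name = "foo",
            .id = id,
        };

        dev_sync_probe_init(&probe_data);
        /* Returns only once the probe completed (or failed to bind). */
        return dev_sync_probe_register(&probe_data, &pdevinfo);
    }

    static void foo_destroy_device(void)
    {
        dev_sync_probe_unregister(&probe_data);
    }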
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 4df9becaf349..cf5a50102d49 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -22,7 +22,7 @@
#include "gpio-i8255.h"
-MODULE_IMPORT_NS(I8255);
+MODULE_IMPORT_NS("I8255");
#define DIO48E_EXTENT 16
#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
@@ -339,4 +339,4 @@ module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(I8254);
+MODULE_IMPORT_NS("I8254");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index f03ccd0f534c..ffe7e1cb6b23 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -126,4 +126,4 @@ module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_IDIO_16);
+MODULE_IMPORT_NS("GPIO_IDIO_16");
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 753e7be039e4..c226524efeba 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
@@ -29,7 +30,7 @@ struct gen_74x164_chip {
* register at the end of the transfer. So, to have a logical
* numbering, store the bytes in reverse order.
*/
- u8 buffer[];
+ u8 buffer[] __counted_by(registers);
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
@@ -43,34 +44,31 @@ static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- int ret;
- mutex_lock(&chip->lock);
- ret = (chip->buffer[bank] >> pin) & 0x1;
- mutex_unlock(&chip->lock);
+ guard(mutex)(&chip->lock);
- return ret;
+ return !!(chip->buffer[bank] & BIT(pin));
}
-static void gen_74x164_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int gen_74x164_set_value(struct gpio_chip *gc,
+ unsigned int offset, int val)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
if (val)
- chip->buffer[bank] |= (1 << pin);
+ chip->buffer[bank] |= BIT(pin);
else
- chip->buffer[bank] &= ~(1 << pin);
+ chip->buffer[bank] &= ~BIT(pin);
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
-static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
unsigned long offset;
@@ -78,7 +76,8 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
size_t bank;
unsigned long bitmask;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) {
bank = chip->registers - 1 - offset / 8;
bitmask = bitmap_get_value8(bits, offset) & bankmask;
@@ -86,8 +85,7 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
chip->buffer[bank] &= ~bankmask;
chip->buffer[bank] |= bitmask;
}
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
static int gen_74x164_direction_output(struct gpio_chip *gc,
@@ -97,8 +95,22 @@ static int gen_74x164_direction_output(struct gpio_chip *gc,
return 0;
}
+static void gen_74x164_deactivate(void *data)
+{
+ struct gen_74x164_chip *chip = data;
+
+ gpiod_set_value_cansleep(chip->gpiod_oe, 0);
+}
+
+static int gen_74x164_activate(struct device *dev, struct gen_74x164_chip *chip)
+{
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+ return devm_add_action_or_reset(dev, gen_74x164_deactivate, chip);
+}
+
static int gen_74x164_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct gen_74x164_chip *chip;
u32 nregs;
int ret;
@@ -112,64 +124,44 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
- if (ret) {
- dev_err(&spi->dev, "Missing 'registers-number' property.\n");
- return -EINVAL;
- }
+ ret = device_property_read_u32(dev, "registers-number", &nregs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Missing 'registers-number' property.\n");
- chip = devm_kzalloc(&spi->dev, sizeof(*chip) + nregs, GFP_KERNEL);
+ chip = devm_kzalloc(dev, struct_size(chip, buffer, nregs), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable",
- GPIOD_OUT_LOW);
+ chip->registers = nregs;
+
+ chip->gpiod_oe = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- spi_set_drvdata(spi, chip);
-
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
chip->gpio_chip.set = gen_74x164_set_value;
chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
-
- chip->registers = nregs;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
-
chip->gpio_chip.can_sleep = true;
- chip->gpio_chip.parent = &spi->dev;
+ chip->gpio_chip.parent = dev;
chip->gpio_chip.owner = THIS_MODULE;
- mutex_init(&chip->lock);
+ ret = devm_mutex_init(dev, &chip->lock);
+ if (ret)
+ return ret;
ret = __gen_74x164_write_config(chip);
- if (ret) {
- dev_err(&spi->dev, "Failed writing: %d\n", ret);
- goto exit_destroy;
- }
-
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Config write failed\n");
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
- if (!ret)
- return 0;
-
-exit_destroy:
- mutex_destroy(&chip->lock);
-
- return ret;
-}
-
-static void gen_74x164_remove(struct spi_device *spi)
-{
- struct gen_74x164_chip *chip = spi_get_drvdata(spi);
+ ret = gen_74x164_activate(dev, chip);
+ if (ret)
+ return ret;
- gpiod_set_value_cansleep(chip->gpiod_oe, 0);
- gpiochip_remove(&chip->gpio_chip);
- mutex_destroy(&chip->lock);
+ return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip);
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
@@ -192,7 +184,6 @@ static struct spi_driver gen_74x164_driver = {
.of_match_table = gen_74x164_dt_ids,
},
.probe = gen_74x164_probe,
- .remove = gen_74x164_remove,
.id_table = gen_74x164_spi_ids,
};
module_spi_driver(gen_74x164_driver);
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index c7ac5a9ffb1f..bd2cc5f4f851 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,8 +19,8 @@
#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
- struct gpio_chip gc;
- unsigned flags;
+ struct gpio_generic_chip gen_gc;
+ unsigned int flags;
};
static const struct of_device_id mmio_74xx_gpio_ids[] = {
@@ -99,16 +100,15 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- if (priv->flags & MMIO_74XX_DIR_OUT) {
- gc->set(gc, gpio, val);
- return 0;
- }
+ if (priv->flags & MMIO_74XX_DIR_OUT)
+ return gpio_generic_chip_set(&priv->gen_gc, gpio, val);
return -ENOTSUPP;
}
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct mmio_74xx_gpio_priv *priv;
void __iomem *dat;
int err;
@@ -123,19 +123,21 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dat))
return PTR_ERR(dat);
- err = bgpio_init(&priv->gc, &pdev->dev,
- DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
- dat, NULL, NULL, NULL, NULL, 0);
+ config.dev = &pdev->dev;
+ config.sz = DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8);
+ config.dat = dat;
+
+ err = gpio_generic_chip_init(&priv->gen_gc, &config);
if (err)
return err;
- priv->gc.direction_input = mmio_74xx_dir_in;
- priv->gc.direction_output = mmio_74xx_dir_out;
- priv->gc.get_direction = mmio_74xx_get_direction;
- priv->gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
- priv->gc.owner = THIS_MODULE;
+ priv->gen_gc.gc.direction_input = mmio_74xx_dir_in;
+ priv->gen_gc.gc.direction_output = mmio_74xx_dir_out;
+ priv->gen_gc.gc.get_direction = mmio_74xx_get_direction;
+ priv->gen_gc.gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
+ priv->gen_gc.gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(&pdev->dev, &priv->gen_gc.gc, priv);
}
static struct platform_driver mmio_74xx_gpio_driver = {
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 6dafab0cf964..e5ac2d211013 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -3,11 +3,13 @@
* Copyright (C) 2011-2012 Avionic Design GmbH
*/
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -78,7 +80,7 @@ static int adnp_gpio_get(struct gpio_chip *chip, unsigned offset)
return (value & BIT(pos)) ? 1 : 0;
}
-static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
+static int __adnp_gpio_set(struct adnp *adnp, unsigned int offset, int value)
{
unsigned int reg = offset >> adnp->reg_shift;
unsigned int pos = offset & 7;
@@ -87,23 +89,23 @@ static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &val);
if (err < 0)
- return;
+ return err;
if (value)
val |= BIT(pos);
else
val &= ~BIT(pos);
- adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
+ return adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
}
-static void adnp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int adnp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct adnp *adnp = gpiochip_get_data(chip);
- mutex_lock(&adnp->i2c_lock);
- __adnp_gpio_set(adnp, offset, value);
- mutex_unlock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
+
+ return __adnp_gpio_set(adnp, offset, value);
}
static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -114,32 +116,26 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 value;
int err;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
+ return err;
value &= ~BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, value);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
-
- if (value & BIT(pos)) {
- err = -EPERM;
- goto out;
- }
+ return err;
- err = 0;
+ if (value & BIT(pos))
+ return -EPERM;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -151,33 +147,28 @@ static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int err;
u8 val;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
val |= BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, val);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
- if (!(val & BIT(pos))) {
- err = -EPERM;
- goto out;
- }
+ if (!(val & BIT(pos)))
+ return -EPERM;
__adnp_gpio_set(adnp, offset, value);
- err = 0;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
@@ -187,27 +178,26 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int err;
for (i = 0; i < num_regs; i++) {
- u8 ddr, plr, ier, isr;
+ u8 ddr = 0, plr = 0, ier = 0, isr = 0;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0)
- goto unlock;
-
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ return;
- mutex_unlock(&adnp->i2c_lock);
+ }
for (j = 0; j < 8; j++) {
unsigned int bit = (i << adnp->reg_shift) + j;
@@ -232,11 +222,6 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
direction, level, interrupt, pending);
}
}
-
- return;
-
-unlock:
- mutex_unlock(&adnp->i2c_lock);
}
static irqreturn_t adnp_irq(int irq, void *data)
@@ -248,32 +233,24 @@ static irqreturn_t adnp_irq(int irq, void *data)
for (i = 0; i < num_regs; i++) {
unsigned int base = i << adnp->reg_shift, bit;
- u8 changed, level, isr, ier;
+ u8 changed, level = 0, isr = 0, ier = 0;
unsigned long pending;
int err;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ continue;
}
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
-
- mutex_unlock(&adnp->i2c_lock);
-
/* determine pins that changed levels */
changed = level ^ adnp->irq_level[i];
@@ -365,12 +342,12 @@ static void adnp_irq_bus_unlock(struct irq_data *d)
struct adnp *adnp = gpiochip_get_data(gc);
unsigned int num_regs = 1 << adnp->reg_shift, i;
- mutex_lock(&adnp->i2c_lock);
-
- for (i = 0; i < num_regs; i++)
- adnp_write(adnp, GPIO_IER(adnp) + i, adnp->irq_enable[i]);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ for (i = 0; i < num_regs; i++)
+ adnp_write(adnp, GPIO_IER(adnp) + i,
+ adnp->irq_enable[i]);
+ }
- mutex_unlock(&adnp->i2c_lock);
mutex_unlock(&adnp->irq_lock);
}
@@ -506,7 +483,10 @@ static int adnp_i2c_probe(struct i2c_client *client)
if (!adnp)
return -ENOMEM;
- mutex_init(&adnp->i2c_lock);
+ err = devm_mutex_init(&client->dev, &adnp->i2c_lock);
+ if (err)
+ return err;
+
adnp->client = client;
err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index c55e821c63b6..6305c8b7dc05 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -40,16 +40,18 @@ static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
return !!(reg_val & dev->lut[off]);
}
-static void adp5520_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
+static int adp5520_gpio_set_value(struct gpio_chip *chip,
+ unsigned int off, int val)
{
struct adp5520_gpio *dev;
dev = gpiochip_get_data(chip);
if (val)
- adp5520_set_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_set_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
else
- adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
}
static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index 000d31f09671..0fd3cc26d017 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -4,67 +4,131 @@
*
* Copyright 2022 NXP
* Copyright 2024 Ideas on Board Oy
+ * Copyright 2025 Analog Devices, Inc.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/mfd/adp5585.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
-#define ADP5585_GPIO_MAX 11
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the
+ * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to
+ * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver
+ * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is
+ * marked as reserved in the device tree for variants that don't support it.
+ */
+#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0)
+#define ADP5585_BIT(n) ((n) >= 6 ? BIT((n) - 6) : BIT(n))
+
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 8/R7", numbered 0 to 7 by the
+ * driver, bank 1 covers pins "GPIO 9/C0" to "GPIO 16/C7", numbered 8 to
+ * 15, and bank 2 covers pins "GPIO 17/C8" to "GPIO 19/C10", numbered 16 to 18.
+ */
+#define ADP5589_BANK(n) ((n) >> 3)
+#define ADP5589_BIT(n) BIT((n) & 0x7)
+
+struct adp5585_gpio_chip {
+ int (*bank)(unsigned int off);
+ int (*bit)(unsigned int off);
+ unsigned int debounce_dis_a;
+ unsigned int rpull_cfg_a;
+ unsigned int gpo_data_a;
+ unsigned int gpo_out_a;
+ unsigned int gpio_dir_a;
+ unsigned int gpi_stat_a;
+ unsigned int gpi_int_lvl_a;
+ unsigned int gpi_ev_a;
+ unsigned int gpi_ev_min;
+ unsigned int gpi_ev_max;
+ bool has_bias_hole;
+};
struct adp5585_gpio_dev {
struct gpio_chip gpio_chip;
+ struct notifier_block nb;
+ const struct adp5585_gpio_chip *info;
struct regmap *regmap;
+ unsigned long irq_mask;
+ unsigned long irq_en;
+ unsigned long irq_active_high;
+ /* used for irqchip bus locking */
+ struct mutex bus_lock;
};
+static int adp5585_gpio_bank(unsigned int off)
+{
+ return ADP5585_BANK(off);
+}
+
+static int adp5585_gpio_bit(unsigned int off)
+{
+ return ADP5585_BIT(off);
+}
+
+static int adp5589_gpio_bank(unsigned int off)
+{
+ return ADP5589_BANK(off);
+}
+
+static int adp5589_gpio_bit(unsigned int off)
+{
+ return ADP5589_BIT(off);
+}
+
static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int val;
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off), &val);
- return val & bit ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+ return val & info->bit(off) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
- return regmap_clear_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_clear_bits(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off),
+ info->bit(off));
}
static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
int ret;
- ret = regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_DATA_OUT_A + bank, bit,
- val ? bit : 0);
+ ret = regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + bank,
+ bit, val ? bit : 0);
if (ret)
return ret;
- return regmap_set_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_set_bits(adp5585_gpio->regmap, info->gpio_dir_a + bank,
+ bit);
}
static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
unsigned int reg;
unsigned int val;
@@ -79,26 +143,28 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
* .direction_input(), .direction_output() or .set() operations racing
* with this.
*/
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
- reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A;
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + bank, &val);
+ reg = val & bit ? info->gpo_data_a : info->gpi_stat_a;
regmap_read(adp5585_gpio->regmap, reg + bank, &val);
return !!(val & bit);
}
-static void adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val)
+static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off,
+ int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
- regmap_update_bits(adp5585_gpio->regmap, ADP5585_GPO_DATA_OUT_A + bank,
- bit, val ? bit : 0);
+ return regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + info->bank(off),
+ bit, val ? bit : 0);
}
static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int bias)
{
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int bit, reg, mask, val;
/*
@@ -106,8 +172,10 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
* consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits
* after R5.
*/
- bit = off * 2 + (off > 5 ? 4 : 0);
- reg = ADP5585_RPULL_CONFIG_A + bit / 8;
+ bit = off * 2;
+ if (info->has_bias_hole)
+ bit += (off > 5 ? 4 : 0);
+ reg = info->rpull_cfg_a + bit / 8;
mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8);
val = bias << (bit % 8);
@@ -117,22 +185,22 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, enum pin_config_param drive)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_OUT_MODE_A + bank, bit,
+ info->gpo_out_a + info->bank(off), bit,
drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0);
}
static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int debounce)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_DEBOUNCE_DIS_A + bank, bit,
+ info->debounce_dis_a + info->bank(off), bit,
debounce ? 0 : bit);
}
@@ -170,11 +238,175 @@ static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off,
};
}
+static int adp5585_gpio_request(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+ const struct adp5585_regs *regs = adp5585->regs;
+ int ret;
+
+ ret = test_and_set_bit(off, adp5585->pin_usage);
+ if (ret)
+ return -EBUSY;
+
+ /* make sure it's configured for GPIO */
+ return regmap_clear_bits(adp5585_gpio->regmap,
+ regs->pin_cfg_a + info->bank(off),
+ info->bit(off));
+}
+
+static void adp5585_gpio_free(struct gpio_chip *chip, unsigned int off)
+{
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ clear_bit(off, adp5585->pin_usage);
+}
+
+static int adp5585_gpio_key_event(struct notifier_block *nb, unsigned long key,
+ void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = container_of(nb, struct adp5585_gpio_dev, nb);
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ unsigned long key_press = (unsigned long)data;
+ unsigned int irq, irq_type;
+ struct irq_data *irqd;
+ bool active_high;
+ unsigned int off;
+
+ /* make sure the event is for me */
+ if (key < adp5585_gpio->info->gpi_ev_min || key > adp5585_gpio->info->gpi_ev_max)
+ return NOTIFY_DONE;
+
+ off = key - adp5585_gpio->info->gpi_ev_min;
+ active_high = test_bit(off, &adp5585_gpio->irq_active_high);
+
+ irq = irq_find_mapping(adp5585_gpio->gpio_chip.irq.domain, off);
+ if (!irq)
+ return NOTIFY_BAD;
+
+ irqd = irq_get_irq_data(irq);
+ if (!irqd) {
+ dev_err(dev, "Could not get irq(%u) data\n", irq);
+ return NOTIFY_BAD;
+ }
+
+ dev_dbg_ratelimited(dev, "gpio-keys event(%u) press=%lu, a_high=%u\n",
+ off, key_press, active_high);
+
+ if (!active_high)
+ key_press = !key_press;
+
+ irq_type = irqd_get_trigger_type(irqd);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING && key_press) ||
+ (irq_type & IRQ_TYPE_EDGE_FALLING && !key_press))
+ handle_nested_irq(irq);
+
+ return NOTIFY_STOP;
+}
+
+static void adp5585_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ bool active_high = test_bit(hwirq, &adp5585_gpio->irq_active_high);
+ bool enabled = test_bit(hwirq, &adp5585_gpio->irq_en);
+ bool masked = test_bit(hwirq, &adp5585_gpio->irq_mask);
+ unsigned int bank = adp5585_gpio->info->bank(hwirq);
+ unsigned int bit = adp5585_gpio->info->bit(hwirq);
+
+ if (masked && !enabled)
+ goto out_unlock;
+ if (!masked && enabled)
+ goto out_unlock;
+
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_int_lvl_a + bank, bit,
+ active_high ? bit : 0);
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_ev_a + bank, bit,
+ masked ? 0 : bit);
+ assign_bit(hwirq, &adp5585_gpio->irq_en, !masked);
+
+out_unlock:
+ mutex_unlock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ __set_bit(hwirq, &adp5585_gpio->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void adp5585_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ __clear_bit(hwirq, &adp5585_gpio->irq_mask);
+}
+
+static int adp5585_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ assign_bit(hwirq, &adp5585_gpio->irq_active_high,
+ type == IRQ_TYPE_EDGE_RISING);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+ return 0;
+}
+
+static const struct irq_chip adp5585_irq_chip = {
+ .name = "adp5585",
+ .irq_mask = adp5585_irq_mask,
+ .irq_unmask = adp5585_irq_unmask,
+ .irq_bus_lock = adp5585_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5585_irq_bus_sync_unlock,
+ .irq_set_type = adp5585_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void adp5585_gpio_unreg_notifier(void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = data;
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ blocking_notifier_chain_unregister(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+}
+
static int adp5585_gpio_probe(struct platform_device *pdev)
{
struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct adp5585_gpio_dev *adp5585_gpio;
struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
struct gpio_chip *gc;
int ret;
@@ -184,6 +416,10 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
adp5585_gpio->regmap = adp5585->regmap;
+ adp5585_gpio->info = (const struct adp5585_gpio_chip *)id->driver_data;
+ if (!adp5585_gpio->info)
+ return -ENODEV;
+
device_set_of_node_from_dev(dev, dev->parent);
gc = &adp5585_gpio->gpio_chip;
@@ -194,13 +430,43 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->get = adp5585_gpio_get_value;
gc->set = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
+ gc->request = adp5585_gpio_request;
+ gc->free = adp5585_gpio_free;
gc->can_sleep = true;
gc->base = -1;
- gc->ngpio = ADP5585_GPIO_MAX;
+ gc->ngpio = adp5585->n_pins;
gc->label = pdev->name;
gc->owner = THIS_MODULE;
+ if (device_property_present(dev->parent, "interrupt-controller")) {
+ if (!adp5585->irq)
+ return dev_err_probe(dev, -EINVAL,
+ "Unable to serve as interrupt controller without IRQ\n");
+
+ girq = &adp5585_gpio->gpio_chip.irq;
+ gpio_irq_chip_set_chip(girq, &adp5585_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
+
+ adp5585_gpio->nb.notifier_call = adp5585_gpio_key_event;
+ ret = blocking_notifier_chain_register(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adp5585_gpio_unreg_notifier,
+ adp5585_gpio);
+ if (ret)
+ return ret;
+ }
+
+ /* everything masked by default */
+ adp5585_gpio->irq_mask = ~0UL;
+
+ ret = devm_mutex_init(dev, &adp5585_gpio->bus_lock);
+ if (ret)
+ return ret;
ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip,
adp5585_gpio);
if (ret)
@@ -209,8 +475,40 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
return 0;
}
+static const struct adp5585_gpio_chip adp5585_gpio_chip_info = {
+ .bank = adp5585_gpio_bank,
+ .bit = adp5585_gpio_bit,
+ .debounce_dis_a = ADP5585_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5585_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5585_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5585_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5585_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5585_GPI_STATUS_A,
+ .has_bias_hole = true,
+ .gpi_ev_min = ADP5585_GPI_EVENT_START,
+ .gpi_ev_max = ADP5585_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5585_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5585_GPI_EVENT_EN_A,
+};
+
+static const struct adp5585_gpio_chip adp5589_gpio_chip_info = {
+ .bank = adp5589_gpio_bank,
+ .bit = adp5589_gpio_bit,
+ .debounce_dis_a = ADP5589_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5589_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5589_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5589_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5589_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5589_GPI_STATUS_A,
+ .gpi_ev_min = ADP5589_GPI_EVENT_START,
+ .gpi_ev_max = ADP5589_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5589_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5589_GPI_EVENT_EN_A,
+};
+
static const struct platform_device_id adp5585_gpio_id_table[] = {
- { "adp5585-gpio" },
+ { "adp5585-gpio", (kernel_ulong_t)&adp5585_gpio_chip_info },
+ { "adp5589-gpio", (kernel_ulong_t)&adp5589_gpio_chip_info },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table);
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 38e0fff9afe7..37600faf4a4b 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -9,10 +9,14 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -25,25 +29,91 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/forwarder.h>
#include <linux/gpio/machine.h>
+#include "dev-sync-probe.h"
+
#define AGGREGATOR_MAX_GPIOS 512
+#define AGGREGATOR_LEGACY_PREFIX "_sysfs"
/*
* GPIO Aggregator sysfs interface
*/
struct gpio_aggregator {
+ struct dev_sync_probe_data probe_data;
+ struct config_group group;
struct gpiod_lookup_table *lookups;
- struct platform_device *pdev;
+ struct mutex lock;
+ int id;
+
+ /* List of gpio_aggregator_line. Always added in order */
+ struct list_head list_head;
+
+ /* used by legacy sysfs interface only */
+ bool init_via_sysfs;
char args[];
};
+struct gpio_aggregator_line {
+ struct config_group group;
+ struct gpio_aggregator *parent;
+ struct list_head entry;
+
+ /* Line index within the aggregator device */
+ unsigned int idx;
+
+ /* Custom name for the virtual line */
+ const char *name;
+ /* GPIO chip label or line name */
+ const char *key;
+ /* Can be negative to indicate lookup by line name */
+ int offset;
+
+ enum gpio_lookup_flags flags;
+};
+
+struct gpio_aggregator_pdev_meta {
+ bool init_via_sysfs;
+};
+
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
- int hwnum, unsigned int *n)
+static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
+{
+ int ret;
+
+ struct gpio_aggregator *new __free(kfree) = kzalloc(
+ sizeof(*new) + arg_size, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);
+
+ if (ret < 0)
+ return ret;
+
+ new->id = ret;
+ INIT_LIST_HEAD(&new->list_head);
+ mutex_init(&new->lock);
+ *aggr = no_free_ptr(new);
+ return 0;
+}
+
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
+{
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ idr_remove(&gpio_aggregator_idr, aggr->id);
+
+ mutex_destroy(&aggr->lock);
+ kfree(aggr);
+}
+
+static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
+ const char *key, int hwnum, unsigned int *n)
{
struct gpiod_lookup_table *lookups;
@@ -61,180 +131,101 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
return 0;
}
-static int aggr_parse(struct gpio_aggregator *aggr)
+static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
- char *args = skip_spaces(aggr->args);
- char *name, *offsets, *p;
- unsigned long *bitmap;
- unsigned int i, n = 0;
- int error = 0;
-
- bitmap = bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
- if (!bitmap)
- return -ENOMEM;
-
- args = next_arg(args, &name, &p);
- while (*args) {
- args = next_arg(args, &offsets, &p);
+ lockdep_assert_held(&aggr->lock);
- p = get_options(offsets, 0, &error);
- if (error == 0 || *p) {
- /* Named GPIO line */
- error = aggr_add_gpio(aggr, name, U16_MAX, &n);
- if (error)
- goto free_bitmap;
-
- name = offsets;
- continue;
- }
-
- /* GPIO chip + offset(s) */
- error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
- if (error) {
- pr_err("Cannot parse %s: %d\n", offsets, error);
- goto free_bitmap;
- }
-
- for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
- error = aggr_add_gpio(aggr, name, i, &n);
- if (error)
- goto free_bitmap;
- }
-
- args = next_arg(args, &name, &p);
- }
-
- if (!n) {
- pr_err("No GPIOs specified\n");
- error = -EINVAL;
- }
-
-free_bitmap:
- bitmap_free(bitmap);
- return error;
+ return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
}
-static ssize_t new_device_store(struct device_driver *driver, const char *buf,
- size_t count)
+/* Only aggregators created via legacy sysfs can be "activating". */
+static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
{
- struct gpio_aggregator *aggr;
- struct platform_device *pdev;
- int res, id;
+ lockdep_assert_held(&aggr->lock);
- /* kernfs guarantees string termination, so count + 1 is safe */
- aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr)
- return -ENOMEM;
-
- memcpy(aggr->args, buf, count + 1);
-
- aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
- GFP_KERNEL);
- if (!aggr->lookups) {
- res = -ENOMEM;
- goto free_ga;
- }
-
- mutex_lock(&gpio_aggregator_lock);
- id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
- mutex_unlock(&gpio_aggregator_lock);
+ return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
+}
- if (id < 0) {
- res = id;
- goto free_table;
- }
+static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
- if (!aggr->lookups->dev_id) {
- res = -ENOMEM;
- goto remove_idr;
- }
+ return list_count_nodes(&aggr->list_head);
+}
- res = aggr_parse(aggr);
- if (res)
- goto free_dev_id;
+static struct gpio_aggregator_line *
+gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
+ char *key, int offset)
+{
+ struct gpio_aggregator_line *line;
- gpiod_add_lookup_table(aggr->lookups);
+ line = kzalloc(sizeof(*line), GFP_KERNEL);
+ if (!line)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
- if (IS_ERR(pdev)) {
- res = PTR_ERR(pdev);
- goto remove_table;
+ if (key) {
+ line->key = kstrdup(key, GFP_KERNEL);
+ if (!line->key) {
+ kfree(line);
+ return ERR_PTR(-ENOMEM);
+ }
}
- aggr->pdev = pdev;
- return count;
-
-remove_table:
- gpiod_remove_lookup_table(aggr->lookups);
-free_dev_id:
- kfree(aggr->lookups->dev_id);
-remove_idr:
- mutex_lock(&gpio_aggregator_lock);
- idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
-free_table:
- kfree(aggr->lookups);
-free_ga:
- kfree(aggr);
- return res;
-}
+ line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ line->parent = parent;
+ line->idx = idx;
+ line->offset = offset;
+ INIT_LIST_HEAD(&line->entry);
-static DRIVER_ATTR_WO(new_device);
-
-static void gpio_aggregator_free(struct gpio_aggregator *aggr)
-{
- platform_device_unregister(aggr->pdev);
- gpiod_remove_lookup_table(aggr->lookups);
- kfree(aggr->lookups->dev_id);
- kfree(aggr->lookups);
- kfree(aggr);
+ return line;
}
-static ssize_t delete_device_store(struct device_driver *driver,
- const char *buf, size_t count)
+static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- struct gpio_aggregator *aggr;
- unsigned int id;
- int error;
+ struct gpio_aggregator_line *tmp;
- if (!str_has_prefix(buf, DRV_NAME "."))
- return -EINVAL;
-
- error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
- if (error)
- return error;
-
- mutex_lock(&gpio_aggregator_lock);
- aggr = idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
- if (!aggr)
- return -ENOENT;
+ lockdep_assert_held(&aggr->lock);
- gpio_aggregator_free(aggr);
- return count;
+ list_for_each_entry(tmp, &aggr->list_head, entry) {
+ if (tmp->idx > line->idx) {
+ list_add_tail(&line->entry, &tmp->entry);
+ return;
+ }
+ }
+ list_add_tail(&line->entry, &aggr->list_head);
}
-static DRIVER_ATTR_WO(delete_device);
-static struct attribute *gpio_aggregator_attrs[] = {
- &driver_attr_new_device.attr,
- &driver_attr_delete_device.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(gpio_aggregator);
-
-static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- gpio_aggregator_free(p);
- return 0;
+ lockdep_assert_held(&aggr->lock);
+
+ list_del(&line->entry);
}
-static void __exit gpio_aggregator_remove_all(void)
+static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
- mutex_lock(&gpio_aggregator_lock);
- idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
- idr_destroy(&gpio_aggregator_idr);
- mutex_unlock(&gpio_aggregator_lock);
+ struct gpio_aggregator_line *line, *tmp;
+
+ list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
+ configfs_unregister_group(&line->group);
+ /*
+ * Normally, we acquire aggr->lock within the configfs
+ * callback. However, in the legacy sysfs interface case,
+ * calling configfs_(un)register_group while holding
+ * aggr->lock could cause a deadlock. Fortunately, this is
+ * unnecessary because the new_device/delete_device path
+ * and the module unload path are mutually exclusive,
+ * thanks to an explicit try_module_get. That's why this
+ * minimal scoped_guard suffices.
+ */
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+ }
}
@@ -255,18 +246,34 @@ struct gpiochip_fwd {
spinlock_t slock; /* protects tmp[] if !can_sleep */
};
struct gpiochip_fwd_timing *delay_timings;
+ void *data;
+ unsigned long *valid_mask;
unsigned long tmp[]; /* values and descs for multiple ops */
};
-#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
-#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
+#define fwd_tmp_values(fwd) (&(fwd)->tmp[0])
+#define fwd_tmp_descs(fwd) ((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
+static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
+}
+
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ /*
+ * get_direction() is called during gpiochip registration, return
+ * -ENODEV if there is no GPIO desc for the line.
+ */
+ if (!test_bit(offset, fwd->valid_mask))
+ return -ENODEV;
+
return gpiod_get_direction(fwd->descs[offset]);
}
@@ -360,25 +367,30 @@ static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int valu
udelay(delay_us);
}
-static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ int ret;
if (chip->can_sleep)
- gpiod_set_value_cansleep(fwd->descs[offset], value);
+ ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
else
- gpiod_set_value(fwd->descs[offset], value);
+ ret = gpiod_set_value(fwd->descs[offset], value);
+ if (ret)
+ return ret;
if (fwd->delay_timings)
gpio_fwd_delay(chip, offset, value);
+
+ return ret;
}
-static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
- unsigned long *bits)
+static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_desc **descs = fwd_tmp_descs(fwd);
unsigned long *values = fwd_tmp_values(fwd);
- unsigned int i, j = 0;
+ unsigned int i, j = 0;
+ int ret;
for_each_set_bit(i, mask, fwd->chip.ngpio) {
__assign_bit(j, values, test_bit(i, bits));
@@ -386,26 +398,31 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
}
if (fwd->chip.can_sleep)
- gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
else
- gpiod_set_array_value(j, descs, NULL, values);
+ ret = gpiod_set_array_value(j, descs, NULL, values);
+
+ return ret;
}
-static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
unsigned long flags;
+ int ret;
if (chip->can_sleep) {
mutex_lock(&fwd->mlock);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
} else {
spin_lock_irqsave(&fwd->slock, flags);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
}
+
+ return ret;
}
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -454,10 +471,11 @@ static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
return line;
}
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
- fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
+ struct gpio_chip *chip = &fwd->chip;
+
+ fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
sizeof(*fwd->delay_timings),
GFP_KERNEL);
if (!fwd->delay_timings)
@@ -469,67 +487,235 @@ static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *c
return 0;
}
#else
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
return 0;
}
#endif /* !CONFIG_OF_GPIO */
/**
- * gpiochip_fwd_create() - Create a new GPIO forwarder
- * @dev: Parent device pointer
- * @ngpios: Number of GPIOs in the forwarder.
- * @descs: Array containing the GPIO descriptors to forward to.
- * This array must contain @ngpios entries, and must not be deallocated
- * before the forwarder has been destroyed again.
- * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * This function creates a new gpiochip, which forwards all GPIO operations to
- * the passed GPIO descriptors.
+ * Returns: The GPIO chip for the GPIO forwarder
+ */
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
+{
+ return &fwd->chip;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
- * code on failure.
+ * Returns: The driver-private data for the GPIO forwarder
*/
-static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
- unsigned int ngpios,
- struct gpio_desc *descs[],
- unsigned long features)
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
+{
+ return fwd->data;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line to request
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_request(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 for output, 1 for input, or an error code in case of error.
+ */
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_direction(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
+ * output
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_output(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_input(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
+ * which lines are to be read
+ * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
+ * contain the read values for the lines specified by mask
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
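The mask/bits convention above follows the usual gpiolib bitmap calling
convention. A minimal sketch of a caller, assuming a forwarder with at most 8
lines and a handle obtained from the owning driver (names hypothetical):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	static int read_lines_0_to_3(struct gpiochip_fwd *fwd)
	{
		DECLARE_BITMAP(mask, 8);
		DECLARE_BITMAP(bits, 8);
		int ret;

		bitmap_zero(mask, 8);
		bitmap_set(mask, 0, 4);	/* select lines 0..3 */

		ret = gpiochip_fwd_gpio_get_multiple(fwd, mask, bits);
		if (ret)
			return ret;

		/* bits now holds the logical values of the selected lines */
		return test_bit(0, bits);
	}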
+
+/**
+ * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
+ * defines which outputs are to be changed
+ * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
+ * defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
+ * the configuration.
+ */
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_config(gc, offset, config);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
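For illustration, the packed config argument can be built with the generic
pinconf helper from <linux/pinctrl/pinconf-generic.h>. A minimal sketch, with
an arbitrary 10 ms debounce period:

	#include <linux/pinctrl/pinconf-generic.h>

	static int fwd_set_debounce_10ms(struct gpiochip_fwd *fwd,
					 unsigned int offset)
	{
		/* Debounce period is encoded in microseconds. */
		unsigned long config =
			pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
						 10000);

		return gpiochip_fwd_gpio_set_config(fwd, offset, config);
	}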
+
+/**
+ * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The Linux IRQ corresponding to the passed line, or an error code in
+ * case of error.
+ */
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_to_irq(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
+
+/**
+ * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder
+ *
+ * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios)
{
- const char *label = dev_name(dev);
struct gpiochip_fwd *fwd;
struct gpio_chip *chip;
- unsigned int i;
- int error;
- fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
- GFP_KERNEL);
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
if (!fwd)
return ERR_PTR(-ENOMEM);
- chip = &fwd->chip;
-
- /*
- * If any of the GPIO lines are sleeping, then the entire forwarder
- * will be sleeping.
- * If any of the chips support .set_config(), then the forwarder will
- * support setting configs.
- */
- for (i = 0; i < ngpios; i++) {
- struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+ fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
+ if (!fwd->descs)
+ return ERR_PTR(-ENOMEM);
- dev_dbg(dev, "%u => gpio %d irq %d\n", i,
- desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
+ fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
+ if (!fwd->valid_mask)
+ return ERR_PTR(-ENOMEM);
- if (gpiod_cansleep(descs[i]))
- chip->can_sleep = true;
- if (parent && parent->set_config)
- chip->set_config = gpio_fwd_set_config;
- }
+ chip = &fwd->chip;
- chip->label = label;
+ chip->label = dev_name(dev);
chip->parent = dev;
chip->owner = THIS_MODULE;
+ chip->request = gpio_fwd_request;
chip->get_direction = gpio_fwd_get_direction;
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
@@ -540,26 +726,856 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
- fwd->descs = descs;
+
+ return fwd;
+}
+EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
+ * @fwd: GPIO forwarder
+ * @desc: GPIO descriptor to register
+ * @offset: offset for the GPIO in the forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
+ unsigned int offset)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ if (test_and_set_bit(offset, fwd->valid_mask))
+ return -EEXIST;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ */
+ if (gpiod_cansleep(desc))
+ chip->can_sleep = true;
+
+ fwd->descs[offset] = desc;
+
+ dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
+ desc_to_gpio(desc), gpiod_to_irq(desc));
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
+ * @fwd: GPIO forwarder
+ * @offset: offset of GPIO desc to remove
+ */
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ if (test_and_clear_bit(offset, fwd->valid_mask))
+ gpiod_put(fwd->descs[offset]);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_register - Register a GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @data: driver-private data associated with this forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ /*
+ * Some slots may not have a descriptor registered yet. They can be
+ * populated at runtime, so we must assume those lines can sleep.
+ */
+ if (!bitmap_full(fwd->valid_mask, chip->ngpio))
+ chip->can_sleep = true;
if (chip->can_sleep)
mutex_init(&fwd->mlock);
else
spin_lock_init(&fwd->slock);
+ fwd->data = data;
+
+ return devm_gpiochip_add_data(chip->parent, chip, fwd);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
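Taken together, the exported helpers give a consumer driver a three-step
bring-up: allocate, populate, register. A minimal sketch, assuming the
descriptors come from the consumer's own firmware node (function name
hypothetical):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int my_fwd_probe(struct device *dev)
	{
		struct gpiochip_fwd *fwd;
		struct gpio_desc *desc;
		unsigned int i;
		int ret;

		fwd = devm_gpiochip_fwd_alloc(dev, 4);
		if (IS_ERR(fwd))
			return PTR_ERR(fwd);

		for (i = 0; i < 4; i++) {
			desc = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
			if (IS_ERR(desc))
				return PTR_ERR(desc);

			ret = gpiochip_fwd_desc_add(fwd, desc, i);
			if (ret)
				return ret;
		}

		/* No driver-private data in this example. */
		return gpiochip_fwd_register(fwd, NULL);
	}

Note that gpiochip_fwd_register() also tolerates a partially populated
forwarder: slots still missing a descriptor force can_sleep, as the comment
inside the function explains.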
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and may be deallocated
+ * afterwards, as the forwarder keeps its own copy.
+ * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[],
+ unsigned long features)
+{
+ struct gpiochip_fwd *fwd;
+ unsigned int i;
+ int error;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
+ if (IS_ERR(fwd))
+ return fwd;
+
+ for (i = 0; i < ngpios; i++) {
+ error = gpiochip_fwd_desc_add(fwd, descs[i], i);
+ if (error)
+ return ERR_PTR(error);
+ }
+
if (features & FWD_FEATURE_DELAY) {
- error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
+ error = gpiochip_fwd_setup_delay_line(fwd);
if (error)
return ERR_PTR(error);
}
- error = devm_gpiochip_add_data(dev, chip, fwd);
+ error = gpiochip_fwd_register(fwd, NULL);
if (error)
return ERR_PTR(error);
return fwd;
}
+/*
+ * Configfs interface
+ */
+
+static struct gpio_aggregator *
+to_gpio_aggregator(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator, group);
+}
+
+static struct gpio_aggregator_line *
+to_gpio_aggregator_line(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator_line, group);
+}
+
+static struct fwnode_handle *
+gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
+{
+ struct property_entry properties[2];
+ struct gpio_aggregator_line *line;
+ size_t num_lines;
+ int n = 0;
+
+ memset(properties, 0, sizeof(properties));
+
+ num_lines = gpio_aggregator_count_lines(aggr);
+ if (num_lines == 0)
+ return NULL;
+
+ const char **line_names __free(kfree) = kcalloc(
+ num_lines, sizeof(*line_names), GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry)
+ line_names[n++] = line->name ?: "";
+
+ properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names",
+ line_names, num_lines);
+
+ return fwnode_create_software_node(properties, NULL);
+}
+
+static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
+{
+ struct platform_device_info pdevinfo;
+ struct gpio_aggregator_line *line;
+ struct fwnode_handle *swnode;
+ unsigned int n = 0;
+ int ret = 0;
+
+ if (gpio_aggregator_count_lines(aggr) == 0)
+ return -EINVAL;
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups)
+ return -ENOMEM;
+
+ swnode = gpio_aggregator_make_device_sw_node(aggr);
+ if (IS_ERR(swnode)) {
+ ret = PTR_ERR(swnode);
+ goto err_remove_lookups;
+ }
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = DRV_NAME;
+ pdevinfo.id = aggr->id;
+ pdevinfo.fwnode = swnode;
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ /*
+ * - Either GPIO chip label or line name must be configured
+ * (i.e. line->key must be non-NULL)
+ * - Line directories must be named with sequential numeric
+ *   suffixes starting from 0 (i.e. ./line0, ./line1, ...).
+ */
+ if (!line->key || line->idx != n) {
+ ret = -EINVAL;
+ goto err_remove_swnode;
+ }
+
+ if (line->offset < 0)
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ U16_MAX, &n);
+ else
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ line->offset, &n);
+ if (ret)
+ goto err_remove_swnode;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ ret = -ENOMEM;
+ goto err_remove_swnode;
+ }
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
+ if (ret)
+ goto err_remove_lookup_table;
+
+ return 0;
+
+err_remove_lookup_table:
+ kfree(aggr->lookups->dev_id);
+ gpiod_remove_lookup_table(aggr->lookups);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+err_remove_lookups:
+ kfree(aggr->lookups);
+
+ return ret;
+}
+
+static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
+{
+ dev_sync_probe_unregister(&aggr->probe_data);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+}
+
+static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
+ bool lock)
+{
+ struct configfs_subsystem *subsys = aggr->group.cg_subsys;
+ struct gpio_aggregator_line *line;
+
+ /*
+ * The device only needs to depend on leaf lines. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ if (lock)
+ configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item);
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+}
+
+static ssize_t
+gpio_aggregator_line_key_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->key ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_key_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *key __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ strim(key);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->key);
+ line->key = no_free_ptr(key);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, key);
+
+static ssize_t
+gpio_aggregator_line_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->name ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_name_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *name __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strim(name);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->name);
+ line->name = no_free_ptr(name);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, name);
+
+static ssize_t
+gpio_aggregator_line_offset_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%d\n", line->offset);
+}
+
+static ssize_t
+gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+ int offset, ret;
+
+ ret = kstrtoint(page, 0, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * When offset == -1, 'key' represents a line name to lookup.
+ * When 0 <= offset < 65535, 'key' represents the label of the chip with
+ * the 'offset' value representing the line within that chip.
+ *
+ * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
+ * the greatest offset we can accept is (U16_MAX - 1).
+ */
+ if (offset > (U16_MAX - 1) || offset < -1)
+ return -EINVAL;
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ line->offset = offset;
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, offset);
+
+static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
+ &gpio_aggregator_line_attr_key,
+ &gpio_aggregator_line_attr_name,
+ &gpio_aggregator_line_attr_offset,
+ NULL
+};
+
+static ssize_t
+gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&aggr->lock);
+
+ pdev = aggr->probe_data.pdev;
+ if (pdev)
+ return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
+
+ return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
+}
+CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
+
+static ssize_t
+gpio_aggregator_device_live_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%c\n",
+ gpio_aggregator_is_active(aggr) ? '1' : '0');
+}
+
+static ssize_t
+gpio_aggregator_device_live_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ int ret = 0;
+ bool live;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ if (live && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, true);
+
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ (live == gpio_aggregator_is_active(aggr)))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_aggregator_activate(aggr);
+ else
+ gpio_aggregator_deactivate(aggr);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, false);
+
+ module_put(THIS_MODULE);
+
+ return ret ?: count;
+}
+CONFIGFS_ATTR(gpio_aggregator_device_, live);
+
+static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
+ &gpio_aggregator_device_attr_dev_name,
+ &gpio_aggregator_device_attr_live,
+ NULL
+};
+
+static void
+gpio_aggregator_line_release(struct config_item *item)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+}
+
+static struct configfs_item_operations gpio_aggregator_line_item_ops = {
+ .release = gpio_aggregator_line_release,
+};
+
+static const struct config_item_type gpio_aggregator_line_type = {
+ .ct_item_ops = &gpio_aggregator_line_item_ops,
+ .ct_attrs = gpio_aggregator_line_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void gpio_aggregator_device_release(struct config_item *item)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ /*
+ * At this point, aggr is neither active nor activating,
+ * so calling gpio_aggregator_deactivate() is always unnecessary.
+ */
+ gpio_aggregator_free(aggr);
+}
+
+static struct configfs_item_operations gpio_aggregator_device_item_ops = {
+ .release = gpio_aggregator_device_release,
+};
+
+static struct config_group *
+gpio_aggregator_device_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
+ struct gpio_aggregator_line *line;
+ unsigned int idx;
+ int ret, nchar;
+
+ ret = sscanf(name, "line%u%n", &idx, &nchar);
+ if (ret != 1 || nchar != strlen(name))
+ return ERR_PTR(-EINVAL);
+
+ if (aggr->init_via_sysfs)
+ /*
+ * Aggregators created via legacy sysfs interface are exposed as
+ * default groups, which means rmdir(2) is prohibited for them.
+ * For simplicity, and to avoid confusion, we also prohibit
+ * mkdir(2).
+ */
+ return ERR_PTR(-EPERM);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_active(aggr))
+ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(line, &aggr->list_head, entry)
+ if (line->idx == idx)
+ return ERR_PTR(-EINVAL);
+
+ line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
+ if (IS_ERR(line))
+ return ERR_CAST(line);
+
+ config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
+
+ gpio_aggregator_line_add(aggr, line);
+
+ return &line->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_device_group_ops = {
+ .make_group = gpio_aggregator_device_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_device_type = {
+ .ct_group_ops = &gpio_aggregator_device_group_ops,
+ .ct_item_ops = &gpio_aggregator_device_item_ops,
+ .ct_attrs = gpio_aggregator_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_aggregator_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr;
+ int ret;
+
+ /*
+ * "_sysfs" prefix is reserved for auto-generated config group
+ * for devices create via legacy sysfs interface.
+ */
+ if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
+ sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* No argument space is needed. */
+ ret = gpio_aggregator_alloc(&aggr, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+ dev_sync_probe_init(&aggr->probe_data);
+
+ return &aggr->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_group_ops = {
+ .make_group = gpio_aggregator_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_type = {
+ .ct_group_ops = &gpio_aggregator_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_aggregator_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = DRV_NAME,
+ .ci_type = &gpio_aggregator_type,
+ },
+ },
+};
+
+/*
+ * Sysfs interface
+ */
+static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
+{
+ char *args = skip_spaces(aggr->args);
+ struct gpio_aggregator_line *line;
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ char *key, *offsets, *p;
+ unsigned int i, n = 0;
+ int error = 0;
+
+ unsigned long *bitmap __free(bitmap) =
+ bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
+ if (!bitmap)
+ return -ENOMEM;
+
+ args = next_arg(args, &key, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
+
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
+ /* Named GPIO line */
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, -1);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
+ if (error)
+ goto err;
+
+ key = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
+ if (error) {
+ pr_err("Cannot parse %s: %d\n", offsets, error);
+ goto err;
+ }
+
+ for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, i);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, i, &n);
+ if (error)
+ goto err;
+ }
+
+ args = next_arg(args, &key, &p);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ error = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpio_aggregator_free_lines(aggr);
+ return error;
+}
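For reference, the argument string parsed here follows the aggregator's
existing new_device convention: whitespace-separated <key> [<offsets>] pairs,
where a key followed by a parsable offset list names a GPIO chip by label,
and a key on its own names a line looked up globally. Two illustrative
(hypothetical) inputs:

	gpiochip0 0-3,5		lines 0, 1, 2, 3 and 5 of chip "gpiochip0"
	power-enable		a single line looked up by name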
+
+static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ res = gpio_aggregator_alloc(&aggr, count + 1);
+ if (res)
+ goto put_module;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->init_via_sysfs = true;
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto free_table;
+ }
+
+ scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+
+ /*
+ * This initialization is needed because a device created via sysfs
+ * may later be toggled through the configfs 'live' attribute.
+ */
+ dev_sync_probe_init(&aggr->probe_data);
+
+ /* Expose to configfs */
+ res = configfs_register_group(&gpio_aggregator_subsys.su_group,
+ &aggr->group);
+ if (res)
+ goto free_dev_id;
+
+ res = gpio_aggregator_parse(aggr);
+ if (res)
+ goto unregister_group;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->probe_data.pdev = pdev;
+ module_put(THIS_MODULE);
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+unregister_group:
+ configfs_unregister_group(&aggr->group);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ gpio_aggregator_free(aggr);
+put_module:
+ module_put(THIS_MODULE);
+ return res;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_new_device =
+ __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
+
+static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
+{
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ gpio_aggregator_deactivate(aggr);
+ }
+ gpio_aggregator_free_lines(aggr);
+ configfs_unregister_group(&aggr->group);
+ kfree(aggr);
+}
+
+static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_find(&gpio_aggregator_idr, id);
+ /*
+ * For simplicity, devices created via configfs cannot be deleted
+ * via sysfs.
+ */
+ if (aggr && aggr->init_via_sysfs)
+ idr_remove(&gpio_aggregator_idr, id);
+ else {
+ mutex_unlock(&gpio_aggregator_lock);
+ module_put(THIS_MODULE);
+ return -ENOENT;
+ }
+ mutex_unlock(&gpio_aggregator_lock);
+
+ gpio_aggregator_destroy(aggr);
+ module_put(THIS_MODULE);
+ return count;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
+ __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_gpio_aggregator_new_device.attr,
+ &driver_attr_gpio_aggregator_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
/*
* GPIO Aggregator platform device
@@ -567,7 +1583,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
static int gpio_aggregator_probe(struct platform_device *pdev)
{
+ struct gpio_aggregator_pdev_meta *meta;
struct device *dev = &pdev->dev;
+ bool init_via_sysfs = false;
struct gpio_desc **descs;
struct gpiochip_fwd *fwd;
unsigned long features;
@@ -581,10 +1599,28 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
if (!descs)
return -ENOMEM;
+ meta = dev_get_platdata(&pdev->dev);
+ if (meta && meta->init_via_sysfs)
+ init_via_sysfs = true;
+
for (i = 0; i < n; i++) {
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
- if (IS_ERR(descs[i]))
+ if (IS_ERR(descs[i])) {
+ /*
+ * Deferred probing is not suitable when the aggregator
+ * is created via configfs; users can simply retry later
+ * at their convenience. For device creation via sysfs,
+ * the error is propagated unmodified for backward
+ * compatibility. .prevent_deferred_probe is kept unset
+ * for the other cases.
+ */
+ if (!init_via_sysfs && !dev_of_node(dev) &&
+ descs[i] == ERR_PTR(-EPROBE_DEFER)) {
+ pr_warn("Deferred probe canceled for creation via configfs.\n");
+ return -ENODEV;
+ }
return PTR_ERR(descs[i]);
+ }
}
features = (uintptr_t)device_get_match_data(dev);
@@ -593,6 +1629,7 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
return PTR_ERR(fwd);
platform_set_drvdata(pdev, fwd);
+ devm_kfree(dev, descs);
return 0;
}
@@ -618,9 +1655,63 @@ static struct platform_driver gpio_aggregator_driver = {
},
};
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ /*
+ * There should be no aggregators created via configfs, as their
+ * presence would prevent module unloading.
+ */
+ gpio_aggregator_destroy(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ /*
+ * Configfs callbacks acquire gpio_aggregator_lock when accessing
+ * gpio_aggregator_idr, so to prevent lock inversion deadlock, we
+ * cannot protect idr_for_each invocation here with
+ * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses
+ * configfs groups. Fortunately, the new_device/delete_device path
+ * and the module unload path are mutually exclusive, thanks to an
+ * explicit try_module_get() inside those driver attr handlers.
+ * Also, by the time we get here, no configfs entries are present or
+ * being created. Therefore, there is no need to take
+ * gpio_aggregator_lock below.
+ */
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+}
+
static int __init gpio_aggregator_init(void)
{
- return platform_driver_register(&gpio_aggregator_driver);
+ int ret = 0;
+
+ config_group_init(&gpio_aggregator_subsys.su_group);
+ mutex_init(&gpio_aggregator_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_aggregator_subsys);
+ if (ret) {
+ pr_err("Failed to register the '%s' configfs subsystem: %d\n",
+ gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ return ret;
+ }
+
+ /*
+ * CAVEAT: This must occur after configfs registration. Otherwise,
+ * a race condition could arise: driver attribute groups might be
+ * exposed and accessed by users before configfs registration
+ * completes. new_device_store() does not expect a partially
+ * initialized configfs state.
+ */
+ ret = platform_driver_register(&gpio_aggregator_driver);
+ if (ret) {
+ pr_err("Failed to register the platform driver: %d\n", ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
+ }
+
+ return ret;
}
module_init(gpio_aggregator_init);
@@ -628,6 +1719,7 @@ static void __exit gpio_aggregator_exit(void)
{
gpio_aggregator_remove_all();
platform_driver_unregister(&gpio_aggregator_driver);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 11edf1fe6c90..4524c18a87e7 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -35,15 +35,15 @@ static int altr_a10sr_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset - ALTR_A10SR_LED_VALID_SHIFT));
}
-static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
- BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
- value ? BIT(ALTR_A10SR_LED_VALID_SHIFT + offset)
- : 0);
+ return regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
+ value ?
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset) : 0);
}
static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index c2edfbb231fc..9508d764cce4 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -4,11 +4,19 @@
* Based on gpio-mpc8xxx.c
*/
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
#define ALTERA_GPIO_MAX_NGPIO 32
#define ALTERA_GPIO_DATA 0x0
@@ -18,56 +26,52 @@
/**
* struct altera_gpio_chip
-* @mmchip : memory mapped chip structure.
+* @gc : GPIO chip structure.
+* @regs : memory mapped IO address for the controller registers.
* @gpio_lock : synchronization lock so that new irq/set/get requests
* will be blocked until the current one completes.
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
-* @mapped_irq : kernel mapped irq number.
*/
struct altera_gpio_chip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
- int mapped_irq;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
{
- struct altera_gpio_chip *altera_gc;
- struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
u32 intmask;
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- mm_gc = &altera_gc->mmchip;
- gpiochip_enable_irq(&mm_gc->gc, irqd_to_hwirq(d));
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
- intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ intmask = readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Set ALTERA_GPIO_IRQ_MASK bit to unmask */
intmask |= BIT(irqd_to_hwirq(d));
- writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ writel(intmask, altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
static void altera_gpio_irq_mask(struct irq_data *d)
{
- struct altera_gpio_chip *altera_gc;
- struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
u32 intmask;
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- mm_gc = &altera_gc->mmchip;
-
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
- intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ intmask = readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Clear ALTERA_GPIO_IRQ_MASK bit to mask */
intmask &= ~BIT(irqd_to_hwirq(d));
- writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ writel(intmask, altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
- gpiochip_disable_irq(&mm_gc->gc, irqd_to_hwirq(d));
+
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
/*
@@ -77,9 +81,8 @@ static void altera_gpio_irq_mask(struct irq_data *d)
static int altera_gpio_irq_set_type(struct irq_data *d,
unsigned int type)
{
- struct altera_gpio_chip *altera_gc;
-
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
if (type == IRQ_TYPE_NONE) {
irq_set_handler_locked(d, handle_bad_irq);
@@ -105,49 +108,41 @@ static unsigned int altera_gpio_irq_startup(struct irq_data *d)
static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct of_mm_gpio_chip *mm_gc;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
- mm_gc = to_of_mm_gpio_chip(gc);
-
- return !!(readl(mm_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
+ return !!(readl(altera_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
}
-static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int altera_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int data_reg;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
- data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ data_reg = readl(altera_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
- writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+
+ return 0;
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int gpio_ddr;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
/* Set pin as input, assumes software controlled IP */
- gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr = readl(altera_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr &= ~BIT(offset);
- writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(gpio_ddr, altera_gc->regs + ALTERA_GPIO_DIR);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
return 0;
}
@@ -155,53 +150,46 @@ static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
static int altera_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int data_reg, gpio_ddr;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
/* Sets the GPIO value */
- data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ data_reg = readl(altera_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
- writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
+ writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
/* Set pin as output, assumes software controlled IP */
- gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr = readl(altera_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr |= BIT(offset);
- writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(gpio_ddr, altera_gc->regs + ALTERA_GPIO_DIR);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
return 0;
}
static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
{
- struct altera_gpio_chip *altera_gc;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
+ struct irq_domain *irqdomain = gc->irq.domain;
struct irq_chip *chip;
- struct of_mm_gpio_chip *mm_gc;
- struct irq_domain *irqdomain;
unsigned long status;
int i;
- altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
- mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
while ((status =
- (readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
- readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
- writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
- for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+ (readl(altera_gc->regs + ALTERA_GPIO_EDGE_CAP) &
+ readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
+ writel(status, altera_gc->regs + ALTERA_GPIO_EDGE_CAP);
+ for_each_set_bit(i, &status, gc->ngpio)
generic_handle_domain_irq(irqdomain, i);
}
@@ -210,24 +198,21 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
- struct altera_gpio_chip *altera_gc;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
+ struct irq_domain *irqdomain = gc->irq.domain;
struct irq_chip *chip;
- struct of_mm_gpio_chip *mm_gc;
- struct irq_domain *irqdomain;
unsigned long status;
int i;
- altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
- mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
- status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
- status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ status = readl(altera_gc->regs + ALTERA_GPIO_DATA);
+ status &= readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
- for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+ for_each_set_bit(i, &status, gc->ngpio)
generic_handle_domain_irq(irqdomain, i);
chained_irq_exit(chip, desc);
@@ -246,10 +231,11 @@ static const struct irq_chip altera_gpio_irq_chip = {
static int altera_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
int reg, ret;
struct altera_gpio_chip *altera_gc;
struct gpio_irq_chip *girq;
+ int mapped_irq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
@@ -257,39 +243,47 @@ static int altera_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&altera_gc->gpio_lock);
- if (of_property_read_u32(node, "altr,ngpio", &reg))
+ if (device_property_read_u32(dev, "altr,ngpio", &reg))
/* By default assume maximum ngpio */
- altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ altera_gc->gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
else
- altera_gc->mmchip.gc.ngpio = reg;
+ altera_gc->gc.ngpio = reg;
- if (altera_gc->mmchip.gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
+ if (altera_gc->gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
dev_warn(&pdev->dev,
"ngpio is greater than %d, defaulting to %d\n",
ALTERA_GPIO_MAX_NGPIO, ALTERA_GPIO_MAX_NGPIO);
- altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ altera_gc->gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
}
- altera_gc->mmchip.gc.direction_input = altera_gpio_direction_input;
- altera_gc->mmchip.gc.direction_output = altera_gpio_direction_output;
- altera_gc->mmchip.gc.get = altera_gpio_get;
- altera_gc->mmchip.gc.set = altera_gpio_set;
- altera_gc->mmchip.gc.owner = THIS_MODULE;
- altera_gc->mmchip.gc.parent = &pdev->dev;
+ altera_gc->gc.direction_input = altera_gpio_direction_input;
+ altera_gc->gc.direction_output = altera_gpio_direction_output;
+ altera_gc->gc.get = altera_gpio_get;
+ altera_gc->gc.set = altera_gpio_set;
+ altera_gc->gc.owner = THIS_MODULE;
+ altera_gc->gc.parent = &pdev->dev;
+ altera_gc->gc.base = -1;
+
+ altera_gc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
+ if (!altera_gc->gc.label)
+ return -ENOMEM;
- altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
+ altera_gc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(altera_gc->regs))
+ return dev_err_probe(dev, PTR_ERR(altera_gc->regs), "failed to ioremap memory resource\n");
- if (altera_gc->mapped_irq < 0)
+ mapped_irq = platform_get_irq_optional(pdev, 0);
+ if (mapped_irq < 0)
goto skip_irq;
- if (of_property_read_u32(node, "altr,interrupt-type", &reg)) {
+ if (device_property_read_u32(dev, "altr,interrupt-type", &reg)) {
dev_err(&pdev->dev,
"altr,interrupt-type value not set in device tree\n");
return -EINVAL;
}
altera_gc->interrupt_trigger = reg;
- girq = &altera_gc->mmchip.gc.irq;
+ girq = &altera_gc->gc.irq;
gpio_irq_chip_set_chip(girq, &altera_gpio_irq_chip);
if (altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
@@ -303,27 +297,18 @@ static int altera_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- girq->parents[0] = altera_gc->mapped_irq;
+ girq->parents[0] = mapped_irq;
skip_irq:
- ret = of_mm_gpiochip_add_data(node, &altera_gc->mmchip, altera_gc);
+ ret = devm_gpiochip_add_data(dev, &altera_gc->gc, altera_gc);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
}
- platform_set_drvdata(pdev, altera_gc);
-
return 0;
}
-static void altera_gpio_remove(struct platform_device *pdev)
-{
- struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&altera_gc->mmchip);
-}
-
static const struct of_device_id altera_gpio_of_match[] = {
{ .compatible = "altr,pio-1.0", },
{},
@@ -336,7 +321,6 @@ static struct platform_driver altera_gpio_driver = {
.of_match_table = altera_gpio_of_match,
},
.probe = altera_gpio_probe,
- .remove_new = altera_gpio_remove,
};
static int __init altera_gpio_init(void)
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 2a21354ed6a0..e6c6c3ec7656 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -95,8 +95,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
-static void amd_fch_gpio_set(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int amd_fch_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
unsigned long flags;
struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc);
@@ -113,6 +112,8 @@ static void amd_fch_gpio_set(struct gpio_chip *gc,
writel_relaxed(mask, ptr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int amd_fch_gpio_get(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 3377667a28de..15fd5e210d74 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -94,7 +94,7 @@ static void amd_gpio_free(struct gpio_chip *chip, unsigned offset)
iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));
}
-static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int amd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct amd_gpio *agp = gpiochip_get_data(chip);
u8 temp;
@@ -107,6 +107,8 @@ static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&agp->lock, flags);
dev_dbg(&agp->pdev->dev, "Setting gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
+
+ return 0;
}
static int amd_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 0a2ea9db4682..8458a6949c65 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/spinlock.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -24,54 +25,50 @@
#define PT_SYNC_REG 0x28
struct pt_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
if (using_pins & BIT(offset)) {
dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
offset);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return -EINVAL;
}
writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
using_pins &= ~BIT(offset);
writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
}
static int pt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct pt_gpio_chip *pt_gpio;
int ret = 0;
@@ -91,22 +88,27 @@ static int pt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(pt_gpio->reg_base);
}
- ret = bgpio_init(&pt_gpio->gc, dev, 4,
- pt_gpio->reg_base + PT_INPUTDATA_REG,
- pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
- pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pt_gpio->reg_base + PT_INPUTDATA_REG,
+ .set = pt_gpio->reg_base + PT_OUTPUTDATA_REG,
+ .dirout = pt_gpio->reg_base + PT_DIRECTION_REG,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pt_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- pt_gpio->gc.owner = THIS_MODULE;
- pt_gpio->gc.request = pt_gpio_request;
- pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
+ pt_gpio->chip.gc.owner = THIS_MODULE;
+ pt_gpio->chip.gc.request = pt_gpio_request;
+ pt_gpio->chip.gc.free = pt_gpio_free;
+ pt_gpio->chip.gc.ngpio = (uintptr_t)device_get_match_data(dev);
- ret = gpiochip_add_data(&pt_gpio->gc, pt_gpio);
+ ret = devm_gpiochip_add_data(dev, &pt_gpio->chip.gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
return ret;
@@ -122,13 +124,6 @@ static int pt_gpio_probe(struct platform_device *pdev)
return ret;
}
-static void pt_gpio_remove(struct platform_device *pdev)
-{
- struct pt_gpio_chip *pt_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pt_gpio->gc);
-}
-
static const struct acpi_device_id pt_gpio_acpi_match[] = {
{ "AMDF030", PT_TOTAL_GPIO },
{ "AMDIF030", PT_TOTAL_GPIO },
@@ -143,7 +138,6 @@ static struct platform_driver pt_gpio_driver = {
.acpi_match_table = ACPI_PTR(pt_gpio_acpi_match),
},
.probe = pt_gpio_probe,
- .remove_new = pt_gpio_remove,
};
module_platform_driver(pt_gpio_driver);
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index c15fda99120a..a7e98d395d8e 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -39,7 +39,6 @@ static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
return ret;
if (change && persistent) {
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
@@ -82,7 +81,6 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
@@ -121,7 +119,8 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ARIZONA_GPN_DIR | ARIZONA_GPN_LVL, value);
}
-static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int arizona_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
@@ -129,8 +128,8 @@ static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = ARIZONA_GPN_LVL;
- regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
- ARIZONA_GPN_LVL, value);
+ return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_LVL, value);
}
static const struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 72755fee6478..7622f9e9f54a 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/hashtable.h>
@@ -170,17 +171,14 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
const struct aspeed_sgpio_bank *bank = to_bank(offset);
- unsigned long flags;
enum aspeed_sgpio_reg reg;
int rc = 0;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -211,16 +209,13 @@ static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
return 0;
}
-static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
- sgpio_set_value(gc, offset, val);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ return sgpio_set_value(gc, offset, val);
}
static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -231,15 +226,14 @@ static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
int rc;
/* No special action is required for setting the direction; we'll
* error-out in sgpio_set_value if this isn't an output GPIO */
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
rc = sgpio_set_value(gc, offset, val);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -269,7 +263,6 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *status_addr;
int offset;
u32 bit;
@@ -278,18 +271,15 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
status_addr = bank_reg(gpio, bank, reg_irq_status);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
iowrite32(bit, status_addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
u32 reg, bit;
void __iomem *addr;
int offset;
@@ -301,17 +291,15 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- reg = ioread32(addr);
- if (set)
- reg |= bit;
- else
- reg &= ~bit;
-
- iowrite32(reg, addr);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ reg = ioread32(addr);
+ if (set)
+ reg |= bit;
+ else
+ reg &= ~bit;
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ iowrite32(reg, addr);
+ }
/* Masking the IRQ */
if (!set)
@@ -339,7 +327,6 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
const struct aspeed_sgpio_bank *bank;
irq_flow_handler_t handler;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *addr;
int offset;
@@ -366,24 +353,22 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- addr = bank_reg(gpio, bank, reg_irq_type0);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type0;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type1);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type1;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type2);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type2;
- iowrite32(reg, addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ addr = bank_reg(gpio, bank, reg_irq_type0);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type0;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type1);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type1;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type2);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type2;
+ iowrite32(reg, addr);
+ }
irq_set_handler_locked(d, handler);
@@ -420,7 +405,7 @@ static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
int offset;
irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
- seq_printf(p, dev_name(gpio->dev));
+ seq_puts(p, dev_name(gpio->dev));
}
static const struct irq_chip aspeed_sgpio_irq_chip = {
@@ -487,13 +472,12 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u32 val;
reg = bank_reg(gpio, to_bank(offset), reg_tolerance);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
val = readl(reg);
@@ -504,8 +488,6 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
writel(val, reg);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index ea40ad43a79b..7953a9c4e36d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
#include <linux/gpio/driver.h>
@@ -30,6 +31,27 @@
#include <linux/gpio/consumer.h>
#include "gpiolib.h"
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
+#define GPIO_G7_IRQ_STS_BASE 0x100
+#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
+#define GPIO_G7_CTRL_REG_BASE 0x180
+#define GPIO_G7_CTRL_REG_OFFSET(x) (GPIO_G7_CTRL_REG_BASE + (x) * 0x4)
+#define GPIO_G7_CTRL_OUT_DATA BIT(0)
+#define GPIO_G7_CTRL_DIR BIT(1)
+#define GPIO_G7_CTRL_IRQ_EN BIT(2)
+#define GPIO_G7_CTRL_IRQ_TYPE0 BIT(3)
+#define GPIO_G7_CTRL_IRQ_TYPE1 BIT(4)
+#define GPIO_G7_CTRL_IRQ_TYPE2 BIT(5)
+#define GPIO_G7_CTRL_RST_TOLERANCE BIT(6)
+#define GPIO_G7_CTRL_DEBOUNCE_SEL1 BIT(7)
+#define GPIO_G7_CTRL_DEBOUNCE_SEL2 BIT(8)
+#define GPIO_G7_CTRL_INPUT_MASK BIT(9)
+#define GPIO_G7_CTRL_IRQ_STS BIT(12)
+#define GPIO_G7_CTRL_IN_DATA BIT(13)
+
struct aspeed_bank_props {
unsigned int bank;
u32 input;
@@ -39,6 +61,10 @@ struct aspeed_bank_props {
struct aspeed_gpio_config {
unsigned int nr_gpios;
const struct aspeed_bank_props *props;
+ const struct aspeed_gpio_llops *llops;
+ const int *debounce_timers_array;
+ int debounce_timers_num;
+ bool require_dcache;
};
/*
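
The field_get()/field_prep() macros added above are runtime-mask variants of FIELD_GET()/FIELD_PREP(): ffs(mask) - 1 derives the shift from the mask's lowest set bit, so the mask need not be a compile-time constant. A quick worked example against the G7 layout (illustrative only):

	u32 reg = 0;

	/* GPIO_G7_CTRL_DIR is BIT(1), so ffs(mask) - 1 yields a shift of 1 */
	reg = (reg & ~GPIO_G7_CTRL_DIR) | field_prep(GPIO_G7_CTRL_DIR, 1);
	/* reg == 0x2 */
	bool dir = field_get(GPIO_G7_CTRL_DIR, reg);	/* dir == 1 */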
@@ -77,7 +103,6 @@ struct aspeed_gpio_bank {
uint16_t debounce_regs;
uint16_t tolerance_regs;
uint16_t cmdsrc_regs;
- const char names[4][3];
};
/*
@@ -92,6 +117,22 @@ struct aspeed_gpio_bank {
*/
static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+static const int g7_debounce_timers[4] = { 0x00, 0x00, 0x04, 0x08 };
+
+/*
+ * The debounce timers array is used to configure the debounce timer
+ * settings. Here is how it works:
+ * Array value: the offset of the register that configures the debounce timer.
+ * Array index: corresponds to the pair of debounce setting registers.
+ * The array follows this pattern for the debounce setting registers:
+ * Array index 0: no debounce timer is selected;
+ * the array value is irrelevant (don't care).
+ * Array index 1: debounce setting #2 is 1 and debounce setting #1 is 0.
+ * Array value: offset for configuring debounce timer 0 (g4: 0x50, g7: 0x00)
+ * Array index 2: debounce setting #2 is 0 and debounce setting #1 is 1.
+ * Array value: offset for configuring debounce timer 1 (g4: 0x54, g7: 0x04)
+ * Array index 3: debounce setting #2 is 1 and debounce setting #1 is 1.
+ * Array value: offset for configuring debounce timer 2 (g4: 0x58, g7: 0x08)
+ */
static const struct aspeed_gpio_copro_ops *copro_ops;
static void *copro_data;
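
To make the index-to-register mapping concrete, here is how the configure_timer() helper later in this patch consumes a timer index (a worked example using the g4 offsets, not part of the patch):

	/* Select timer index 2 for a GPIO on a g4-generation controller */
	configure_timer(gpio, offset, 2);
	/*
	 * reg_debounce_sel1 bit <- !!(2 & BIT(1)) == 1
	 * reg_debounce_sel2 bit <- !!(2 & BIT(0)) == 0
	 * The debounce period itself is programmed at
	 * gpio->base + debounce_timers[2], i.e. base + 0x54.
	 */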
@@ -104,7 +145,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0040,
.tolerance_regs = 0x001c,
.cmdsrc_regs = 0x0060,
- .names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
@@ -113,7 +153,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0048,
.tolerance_regs = 0x003c,
.cmdsrc_regs = 0x0068,
- .names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
@@ -122,7 +161,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x00b0,
.tolerance_regs = 0x00ac,
.cmdsrc_regs = 0x0090,
- .names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
@@ -131,7 +169,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0100,
.tolerance_regs = 0x00fc,
.cmdsrc_regs = 0x00e0,
- .names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
@@ -140,7 +177,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0130,
.tolerance_regs = 0x012c,
.cmdsrc_regs = 0x0110,
- .names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
@@ -149,7 +185,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0160,
.tolerance_regs = 0x015c,
.cmdsrc_regs = 0x0140,
- .names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
@@ -158,7 +193,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0190,
.tolerance_regs = 0x018c,
.cmdsrc_regs = 0x0170,
- .names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01e8,
@@ -167,7 +201,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x01c0,
.tolerance_regs = 0x01bc,
.cmdsrc_regs = 0x01a0,
- .names = { "AC", "", "", "" },
},
};
@@ -187,6 +220,19 @@ enum aspeed_gpio_reg {
reg_cmdsrc1,
};
+struct aspeed_gpio_llops {
+ void (*reg_bit_set)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val);
+ bool (*reg_bit_get)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg);
+ int (*reg_bank_get)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg);
+ void (*privilege_ctrl)(struct aspeed_gpio *gpio, unsigned int offset, int owner);
+ void (*privilege_init)(struct aspeed_gpio *gpio);
+ bool (*copro_request)(struct aspeed_gpio *gpio, unsigned int offset);
+ void (*copro_release)(struct aspeed_gpio *gpio, unsigned int offset);
+};
+
#define GPIO_VAL_VALUE 0x00
#define GPIO_VAL_DIR 0x04
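
The llops table is the heart of this refactor: the three register accessors are mandatory (the probe function later in this patch rejects configs that lack them), while the privilege and coprocessor hooks are optional and left NULL on G7. The calling convention, sketched from the converted functions below:

	/* Mandatory op: always present, validated at probe time */
	gpio->config->llops->reg_bit_set(gpio, offset, reg_val, 1);

	/* Optional op: NULL-checked before each use */
	if (gpio->config->llops->privilege_ctrl)
		gpio->config->llops->privilege_ctrl(gpio, offset, GPIO_CMDSRC_ARM);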
@@ -207,9 +253,9 @@ enum aspeed_gpio_reg {
#define GPIO_CMDSRC_RESERVED 3
/* This will be resolved at compile time */
-static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- const enum aspeed_gpio_reg reg)
+static void __iomem *aspeed_gpio_g4_bank_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ const enum aspeed_gpio_reg reg)
{
switch (reg) {
case reg_val:
@@ -242,14 +288,43 @@ static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
BUG();
}
+static u32 aspeed_gpio_g7_reg_mask(const enum aspeed_gpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return GPIO_G7_CTRL_OUT_DATA;
+ case reg_dir:
+ return GPIO_G7_CTRL_DIR;
+ case reg_irq_enable:
+ return GPIO_G7_CTRL_IRQ_EN;
+ case reg_irq_type0:
+ return GPIO_G7_CTRL_IRQ_TYPE0;
+ case reg_irq_type1:
+ return GPIO_G7_CTRL_IRQ_TYPE1;
+ case reg_irq_type2:
+ return GPIO_G7_CTRL_IRQ_TYPE2;
+ case reg_tolerance:
+ return GPIO_G7_CTRL_RST_TOLERANCE;
+ case reg_debounce_sel1:
+ return GPIO_G7_CTRL_DEBOUNCE_SEL1;
+ case reg_debounce_sel2:
+ return GPIO_G7_CTRL_DEBOUNCE_SEL2;
+ case reg_rdata:
+ return GPIO_G7_CTRL_OUT_DATA;
+ case reg_irq_status:
+ return GPIO_G7_CTRL_IRQ_STS;
+ case reg_cmdsrc0:
+ case reg_cmdsrc1:
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_OFFSET(x) ((x) & 0x1f)
#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
-#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
-#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
-#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
-
static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
@@ -280,11 +355,11 @@ static inline const struct aspeed_bank_props *find_bank_props(
static inline bool have_gpio(struct aspeed_gpio *gpio, unsigned int offset)
{
const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned int group = GPIO_OFFSET(offset) / 8;
- return bank->names[group][0] != '\0' &&
- (!props || ((props->input | props->output) & GPIO_BIT(offset)));
+ if (offset >= gpio->chip.ngpio)
+ return false;
+
+ return (!props || ((props->input | props->output) & GPIO_BIT(offset)));
}
static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset)
@@ -304,153 +379,83 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
return !props || (props->output & GPIO_BIT(offset));
}
-static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- int bindex, int cmdsrc)
+static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc)
{
- void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0);
- void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1);
- u32 bit, reg;
-
- /*
- * Each register controls 4 banks, so take the bottom 2
- * bits of the bank index, and use them to select the
- * right control bit (0, 8, 16 or 24).
- */
- bit = BIT((bindex & 3) << 3);
-
- /* Source 1 first to avoid illegal 11 combination */
- reg = ioread32(c1);
- if (cmdsrc & 2)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, c1);
-
- /* Then Source 0 */
- reg = ioread32(c0);
- if (cmdsrc & 1)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, c0);
+ if (gpio->config->llops->privilege_ctrl)
+ gpio->config->llops->privilege_ctrl(gpio, offset, cmdsrc);
}
static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio,
unsigned int offset)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
-
- if (!copro_ops || !gpio->cf_copro_bankmap)
- return false;
- if (!gpio->cf_copro_bankmap[offset >> 3])
- return false;
- if (!copro_ops->request_access)
- return false;
-
- /* Pause the coprocessor */
- copro_ops->request_access(copro_data);
-
- /* Change command source back to ARM */
- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM);
-
- /* Update cache */
- gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata));
+ if (gpio->config->llops->copro_request)
+ return gpio->config->llops->copro_request(gpio, offset);
- return true;
+ return false;
}
static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio,
unsigned int offset)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
-
- if (!copro_ops || !gpio->cf_copro_bankmap)
- return;
- if (!gpio->cf_copro_bankmap[offset >> 3])
- return;
- if (!copro_ops->release_access)
- return;
-
- /* Change command source back to ColdFire */
- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3,
- GPIO_CMDSRC_COLDFIRE);
+ if (gpio->config->llops->copro_release)
+ gpio->config->llops->copro_release(gpio, offset);
+}
- /* Restart the coprocessor */
- copro_ops->release_access(copro_data);
+static bool aspeed_gpio_support_copro(struct aspeed_gpio *gpio)
+{
+ return gpio->config->llops->copro_request && gpio->config->llops->copro_release &&
+ gpio->config->llops->privilege_ctrl && gpio->config->llops->privilege_init;
}
static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset));
+ return gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr;
- u32 reg;
-
- addr = bank_reg(gpio, bank, reg_val);
- reg = gpio->dcache[GPIO_BANK(offset)];
- if (val)
- reg |= GPIO_BIT(offset);
- else
- reg &= ~GPIO_BIT(offset);
- gpio->dcache[GPIO_BANK(offset)] = reg;
-
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_val, val);
/* Flush write */
- ioread32(addr);
+ gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
-static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
- bool copro;
+ bool copro = false;
+
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
}
static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr = bank_reg(gpio, bank, reg_dir);
- unsigned long flags;
- bool copro;
- u32 reg;
+ bool copro = false;
if (!have_input(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- reg = ioread32(addr);
- reg &= ~GPIO_BIT(offset);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 0);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
@@ -458,27 +463,19 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr = bank_reg(gpio, bank, reg_dir);
- unsigned long flags;
- bool copro;
- u32 reg;
+ bool copro = false;
if (!have_output(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- reg = ioread32(addr);
- reg |= GPIO_BIT(offset);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -486,8 +483,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned long flags;
u32 val;
if (!have_input(gpio, offset))
@@ -496,19 +491,16 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
if (!have_output(gpio, offset))
return GPIO_LINE_DIRECTION_IN;
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ val = gpio->config->llops->reg_bit_get(gpio, offset, reg_dir);
return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
struct aspeed_gpio **gpio,
- const struct aspeed_gpio_bank **bank,
- u32 *bit, int *offset)
+ int *offset)
{
struct aspeed_gpio *internal;
@@ -521,71 +513,52 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
return -ENOTSUPP;
*gpio = internal;
- *bank = to_bank(*offset);
- *bit = GPIO_BIT(*offset);
return 0;
}
static void aspeed_gpio_irq_ack(struct irq_data *d)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
- unsigned long flags;
- void __iomem *status_addr;
int rc, offset;
- bool copro;
- u32 bit;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- status_addr = bank_reg(gpio, bank, reg_irq_status);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- iowrite32(bit, status_addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_status, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
- unsigned long flags;
- u32 reg, bit;
- void __iomem *addr;
int rc, offset;
- bool copro;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- addr = bank_reg(gpio, bank, reg_irq_enable);
-
/* Unmasking the IRQ */
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
- reg = ioread32(addr);
- if (set)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_enable, set);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
/* Masking the IRQ */
if (!set)
@@ -607,61 +580,49 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
u32 type0 = 0;
u32 type1 = 0;
u32 type2 = 0;
- u32 bit, reg;
- const struct aspeed_gpio_bank *bank;
irq_flow_handler_t handler;
struct aspeed_gpio *gpio;
- unsigned long flags;
- void __iomem *addr;
int rc, offset;
- bool copro;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return -EINVAL;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
- type2 |= bit;
+ type2 = 1;
fallthrough;
case IRQ_TYPE_EDGE_RISING:
- type0 |= bit;
+ type0 = 1;
fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
- type0 |= bit;
+ type0 = 1;
fallthrough;
case IRQ_TYPE_LEVEL_LOW:
- type1 |= bit;
+ type1 = 1;
handler = handle_level_irq;
break;
default:
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
- copro = aspeed_gpio_copro_request(gpio, offset);
-
- addr = bank_reg(gpio, bank, reg_irq_type0);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type0;
- iowrite32(reg, addr);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ copro = aspeed_gpio_copro_request(gpio, offset);
- addr = bank_reg(gpio, bank, reg_irq_type1);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type1;
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0,
+ type0);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1,
+ type1);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2,
+ type2);
- addr = bank_reg(gpio, bank, reg_irq_type2);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type2;
- iowrite32(reg, addr);
-
- if (copro)
- aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
+ }
irq_set_handler_locked(d, handler);
@@ -672,7 +633,6 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *ic = irq_desc_get_chip(desc);
- struct aspeed_gpio *data = gpiochip_get_data(gc);
unsigned int i, p, banks;
unsigned long reg;
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
@@ -681,9 +641,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
for (i = 0; i < banks; i++) {
- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
-
- reg = ioread32(bank_reg(data, bank, reg_irq_status));
+ reg = gpio->config->llops->reg_bank_get(gpio, i * 32, reg_irq_status);
for_each_set_bit(p, &reg, 32)
generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
@@ -721,28 +679,16 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- void __iomem *treg;
- bool copro;
- u32 val;
+ bool copro = false;
- treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- val = readl(treg);
-
- if (enable)
- val |= GPIO_BIT(offset);
- else
- val &= ~GPIO_BIT(offset);
-
- writel(val, treg);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_tolerance, enable);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -832,21 +778,11 @@ static inline bool timer_allocation_registered(struct aspeed_gpio *gpio,
static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
unsigned int timer)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- const u32 mask = GPIO_BIT(offset);
- void __iomem *addr;
- u32 val;
-
/* Note: Debounce timer isn't under control of the command
* source registers, so no need to sync with the coprocessor
*/
- addr = bank_reg(gpio, bank, reg_debounce_sel1);
- val = ioread32(addr);
- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
-
- addr = bank_reg(gpio, bank, reg_debounce_sel2);
- val = ioread32(addr);
- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel1, !!(timer & BIT(1)));
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel2, !!(timer & BIT(0)));
}
static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -854,7 +790,6 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
u32 requested_cycles;
- unsigned long flags;
int rc;
int i;
@@ -868,24 +803,24 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
return rc;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
if (timer_allocation_registered(gpio, offset)) {
rc = unregister_allocated_timer(gpio, offset);
if (rc < 0)
- goto out;
+ return rc;
}
/* Try to find a timer already configured for the debounce period */
- for (i = 1; i < ARRAY_SIZE(debounce_timers); i++) {
+ for (i = 1; i < gpio->config->debounce_timers_num; i++) {
u32 cycles;
- cycles = ioread32(gpio->base + debounce_timers[i]);
+ cycles = ioread32(gpio->base + gpio->config->debounce_timers_array[i]);
if (requested_cycles == cycles)
break;
}
- if (i == ARRAY_SIZE(debounce_timers)) {
+ if (i == gpio->config->debounce_timers_num) {
int j;
/*
@@ -899,8 +834,8 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
if (j == ARRAY_SIZE(gpio->timer_users)) {
dev_warn(chip->parent,
- "Debounce timers exhausted, cannot debounce for period %luus\n",
- usecs);
+ "Debounce timers exhausted, cannot debounce for period %luus\n",
+ usecs);
rc = -EPERM;
@@ -911,42 +846,34 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
* consistency.
*/
configure_timer(gpio, offset, 0);
- goto out;
+ return rc;
}
i = j;
- iowrite32(requested_cycles, gpio->base + debounce_timers[i]);
+ iowrite32(requested_cycles, gpio->base + gpio->config->debounce_timers_array[i]);
}
- if (WARN(i == 0, "Cannot register index of disabled timer\n")) {
- rc = -EINVAL;
- goto out;
- }
+ if (WARN(i == 0, "Cannot register index of disabled timer\n"))
+ return -EINVAL;
register_allocated_timer(gpio, offset, i);
configure_timer(gpio, offset, i);
-out:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
static int disable_debounce(struct gpio_chip *chip, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
int rc;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
rc = unregister_allocated_timer(gpio, offset);
if (!rc)
configure_timer(gpio, offset, 0);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -1017,7 +944,9 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned long flags;
+
+ if (!aspeed_gpio_support_copro(gpio))
+ return -EOPNOTSUPP;
if (!gpio->cf_copro_bankmap)
gpio->cf_copro_bankmap = kzalloc(gpio->chip.ngpio >> 3, GFP_KERNEL);
@@ -1027,18 +956,17 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0xff) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0xff)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]++;
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 1)
- aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_COLDFIRE);
if (vreg_offset)
@@ -1047,8 +975,6 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
*dreg_offset = bank->rdata_reg;
if (bit)
*bit = GPIO_OFFSET(offset);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
@@ -1062,8 +988,9 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned long flags;
+
+ if (!aspeed_gpio_support_copro(gpio))
+ return -EOPNOTSUPP;
if (!gpio->cf_copro_bankmap)
return -ENXIO;
@@ -1072,37 +999,33 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]--;
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 0)
- aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_ARM);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
static void aspeed_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
- u32 bit;
int rc, offset;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- seq_printf(p, dev_name(gpio->dev));
+ seq_puts(p, dev_name(gpio->dev));
}
static const struct irq_chip aspeed_gpio_irq_chip = {
@@ -1115,6 +1038,173 @@ static const struct irq_chip aspeed_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
+static void aspeed_g4_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+ u32 temp;
+
+ if (reg == reg_val)
+ temp = gpio->dcache[GPIO_BANK(offset)];
+ else
+ temp = ioread32(addr);
+
+ if (val)
+ temp |= GPIO_BIT(offset);
+ else
+ temp &= ~GPIO_BIT(offset);
+
+ if (reg == reg_val)
+ gpio->dcache[GPIO_BANK(offset)] = temp;
+ iowrite32(temp, addr);
+}
+
+static bool aspeed_g4_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+
+ return !!(ioread32(addr) & GPIO_BIT(offset));
+}
+
+static int aspeed_g4_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+
+ if (reg == reg_rdata || reg == reg_irq_status)
+ return ioread32(addr);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void aspeed_g4_privilege_ctrl(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc)
+{
+ /*
+ * The command source register is only valid in bits 0, 8, 16, and 24, so we use
+ * (offset & ~(0x7)) to ensure that reg_bit_set always targets a valid bit.
+ */
+ /* Source 1 first to avoid illegal 11 combination */
+ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc1, !!(cmdsrc & BIT(1)));
+ /* Then Source 0 */
+ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc0, !!(cmdsrc & BIT(0)));
+}
+
+static void aspeed_g4_privilege_init(struct aspeed_gpio *gpio)
+{
+ u32 i;
+
+ /* Switch all command sources to the ARM by default */
+ for (i = 0; i < DIV_ROUND_UP(gpio->chip.ngpio, 32); i++) {
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 0, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 8, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 16, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 24, GPIO_CMDSRC_ARM);
+ }
+}
+
+static bool aspeed_g4_copro_request(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return false;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return false;
+ if (!copro_ops->request_access)
+ return false;
+
+ /* Pause the coprocessor */
+ copro_ops->request_access(copro_data);
+
+ /* Change command source back to ARM */
+ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_ARM);
+
+ /* Update cache */
+ gpio->dcache[GPIO_BANK(offset)] = aspeed_g4_reg_bank_get(gpio, offset, reg_rdata);
+
+ return true;
+}
+
+static void aspeed_g4_copro_release(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return;
+ if (!copro_ops->release_access)
+ return;
+
+ /* Change command source back to ColdFire */
+ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_COLDFIRE);
+
+ /* Restart the coprocessor */
+ copro_ops->release_access(copro_data);
+}
+
+static const struct aspeed_gpio_llops aspeed_g4_llops = {
+ .reg_bit_set = aspeed_g4_reg_bit_set,
+ .reg_bit_get = aspeed_g4_reg_bit_get,
+ .reg_bank_get = aspeed_g4_reg_bank_get,
+ .privilege_ctrl = aspeed_g4_privilege_ctrl,
+ .privilege_init = aspeed_g4_privilege_init,
+ .copro_request = aspeed_g4_copro_request,
+ .copro_release = aspeed_g4_copro_release,
+};
+
+static void aspeed_g7_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val)
+{
+ u32 mask = aspeed_gpio_g7_reg_mask(reg);
+ void __iomem *addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset);
+ u32 write_val;
+
+ if (mask) {
+ write_val = (ioread32(addr) & ~(mask)) | field_prep(mask, val);
+ iowrite32(write_val, addr);
+ }
+}
+
+static bool aspeed_g7_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ u32 mask = aspeed_gpio_g7_reg_mask(reg);
+ void __iomem *addr;
+
+ addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset);
+ if (reg == reg_val)
+ mask = GPIO_G7_CTRL_IN_DATA;
+
+ if (mask)
+ return field_get(mask, ioread32(addr));
+ else
+ return 0;
+}
+
+static int aspeed_g7_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ void __iomem *addr;
+
+ if (reg == reg_irq_status) {
+ addr = gpio->base + GPIO_G7_IRQ_STS_OFFSET(offset >> 5);
+ return ioread32(addr);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct aspeed_gpio_llops aspeed_g7_llops = {
+ .reg_bit_set = aspeed_g7_reg_bit_set,
+ .reg_bit_get = aspeed_g7_reg_bit_get,
+ .reg_bank_get = aspeed_g7_reg_bank_get,
+ .privilege_ctrl = NULL,
+ .privilege_init = NULL,
+ .copro_request = NULL,
+ .copro_release = NULL,
+};
+
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
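
One subtlety in aspeed_g4_privilege_ctrl() above: the command source registers only implement bits 0, 8, 16 and 24 (one per 8-GPIO group), so the offset is rounded down with offset & ~0x7 before the bit helper is applied. A worked example (illustrative only):

	/* offset 13 falls in the second 8-GPIO group of bank 0 */
	aspeed_g4_privilege_ctrl(gpio, 13, GPIO_CMDSRC_COLDFIRE);
	/* 13 & ~0x7 == 8, so reg_bit_set() targets BIT(8) of cmdsrc1/cmdsrc0 */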
@@ -1131,7 +1221,14 @@ static const struct aspeed_bank_props ast2400_bank_props[] = {
static const struct aspeed_gpio_config ast2400_config =
/* 220 for simplicity, really 216 with two 4-GPIO holes, four at end */
- { .nr_gpios = 220, .props = ast2400_bank_props, };
+ {
+ .nr_gpios = 220,
+ .props = ast2400_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
static const struct aspeed_bank_props ast2500_bank_props[] = {
/* input output */
@@ -1143,7 +1240,14 @@ static const struct aspeed_bank_props ast2500_bank_props[] = {
static const struct aspeed_gpio_config ast2500_config =
/* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
- { .nr_gpios = 232, .props = ast2500_bank_props, };
+ {
+ .nr_gpios = 232,
+ .props = ast2500_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
static const struct aspeed_bank_props ast2600_bank_props[] = {
/* input output */
@@ -1159,17 +1263,48 @@ static const struct aspeed_gpio_config ast2600_config =
* We expect ngpio being set in the device tree and this is a fallback
* option.
*/
- { .nr_gpios = 208, .props = ast2600_bank_props, };
+ {
+ .nr_gpios = 208,
+ .props = ast2600_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
+
+static const struct aspeed_bank_props ast2700_bank_props[] = {
+ /* input output */
+ { 1, 0x0fffffff, 0x0fffffff }, /* E/F/G/H, 4-GPIO hole */
+ { 6, 0x00ffffff, 0x00ff0000 }, /* Y/Z/AA */
+ {},
+};
+
+static const struct aspeed_gpio_config ast2700_config =
+ /*
+ * ast2700 has two controllers, one with 212 GPIOs and one with 16 GPIOs.
+ * 216 for simplicity; the actual number is 212 (4-GPIO hole in GPIOH).
+ * We expect ngpio to be set in the device tree; this is a fallback
+ * option.
+ */
+ {
+ .nr_gpios = 216,
+ .props = ast2700_bank_props,
+ .llops = &aspeed_g7_llops,
+ .debounce_timers_array = g7_debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(g7_debounce_timers),
+ .require_dcache = false,
+ };
static const struct of_device_id aspeed_gpio_of_table[] = {
{ .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
{ .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
{ .compatible = "aspeed,ast2600-gpio", .data = &ast2600_config, },
+ { .compatible = "aspeed,ast2700-gpio", .data = &ast2700_config, },
{}
};
MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
-static int __init aspeed_gpio_probe(struct platform_device *pdev)
+static int aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct gpio_irq_chip *girq;
@@ -1202,6 +1337,10 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->config = gpio_id->data;
+ if (!gpio->config->llops->reg_bit_set || !gpio->config->llops->reg_bit_get ||
+ !gpio->config->llops->reg_bank_get)
+ return -EINVAL;
+
gpio->chip.parent = &pdev->dev;
err = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpio);
gpio->chip.ngpio = (u16) ngpio;
@@ -1218,27 +1357,23 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
- /* Allocate a cache of the output registers */
- banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
- gpio->dcache = devm_kcalloc(&pdev->dev,
- banks, sizeof(u32), GFP_KERNEL);
- if (!gpio->dcache)
- return -ENOMEM;
-
- /*
- * Populate it with initial values read from the HW and switch
- * all command sources to the ARM by default
- */
- for (i = 0; i < banks; i++) {
- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- void __iomem *addr = bank_reg(gpio, bank, reg_rdata);
- gpio->dcache[i] = ioread32(addr);
- aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
+ if (gpio->config->require_dcache) {
+ /* Allocate a cache of the output registers */
+ banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
+ gpio->dcache = devm_kcalloc(&pdev->dev, banks, sizeof(u32), GFP_KERNEL);
+ if (!gpio->dcache)
+ return -ENOMEM;
+ /*
+ * Populate it with initial values read from the HW
+ */
+ for (i = 0; i < banks; i++)
+ gpio->dcache[i] =
+ gpio->config->llops->reg_bank_get(gpio, (i << 5), reg_rdata);
}
+ if (gpio->config->llops->privilege_init)
+ gpio->config->llops->privilege_init(gpio);
+
/* Set up an irqchip */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -1270,13 +1405,14 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
}
static struct platform_driver aspeed_gpio_driver = {
+ .probe = aspeed_gpio_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_gpio_of_table,
},
};
-module_platform_driver_probe(aspeed_gpio_driver, aspeed_gpio_probe);
+module_platform_driver(aspeed_gpio_driver);
MODULE_DESCRIPTION("Aspeed GPIO Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index de4cc12e5e03..2ad9f6ac6636 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
@@ -28,17 +29,17 @@
#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
- raw_spinlock_t lock;
unsigned long both_edges;
};
static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
- return container_of(gc, struct ath79_gpio_ctrl, gc);
+ return container_of(gen_gc, struct ath79_gpio_ctrl, chip);
}
static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
@@ -70,48 +71,43 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ gpiochip_enable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
+
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_mask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip)
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+
+ gpiochip_disable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
}
static void ath79_gpio_irq_enable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_disable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int ath79_gpio_irq_set_type(struct irq_data *data,
@@ -120,7 +116,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
u32 type = 0, polarity = 0;
- unsigned long flags;
bool disabled;
switch (flow_type) {
@@ -142,7 +137,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return -EINVAL;
}
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
ctrl->both_edges |= mask;
@@ -167,8 +162,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
ath79_gpio_update_bits(
ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
return 0;
}
@@ -187,28 +180,27 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct ath79_gpio_ctrl *ctrl =
- container_of(gc, struct ath79_gpio_ctrl, gc);
- unsigned long flags, pending;
+ container_of(gen_gc, struct ath79_gpio_ctrl, chip);
+ unsigned long pending;
u32 both_edges, state;
int irq;
chained_irq_enter(irqchip, desc);
- raw_spin_lock_irqsave(&ctrl->lock, flags);
-
- pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- /* Update the polarity of the both edges irqs */
- both_edges = ctrl->both_edges & pending;
- if (both_edges) {
- state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
- both_edges, ~state);
+ /* Update the polarity of the both edges irqs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
}
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_domain_irq(gc->irq.domain, irq);
@@ -224,6 +216,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
@@ -252,22 +245,25 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- raw_spin_lock_init(&ctrl->lock);
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + AR71XX_GPIO_REG_IN,
- ctrl->base + AR71XX_GPIO_REG_SET,
- ctrl->base + AR71XX_GPIO_REG_CLEAR,
- oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
- oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + AR71XX_GPIO_REG_IN,
+ .set = ctrl->base + AR71XX_GPIO_REG_SET,
+ .clr = ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ .dirout = oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ .dirin = oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return err;
}
/* Optional interrupt setup */
if (device_property_read_bool(dev, "interrupt-controller")) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
girq->num_parents = 1;
@@ -280,7 +276,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver ath79_gpio_driver = {
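
The ath79 conversion embeds struct gpio_generic_chip (which itself embeds the struct gpio_chip) in the driver data, so retrieving the driver struct now goes through two steps, as in the hunks above. A sketch of the embedding chain (illustrative only):

	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
	struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
	struct ath79_gpio_ctrl *ctrl =
		container_of(gen_gc, struct ath79_gpio_ctrl, chip);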
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 5321ef98f442..208b71c59d58 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -69,6 +70,22 @@ struct bcm_kona_gpio {
struct bcm_kona_gpio_bank {
int id;
int irq;
+ /*
+ * Used to keep track of lock/unlock operations for each GPIO in the
+ * bank.
+ *
+ * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
+ * unlock count for all GPIOs is 0 by default. Each unlock increments
+ * the counter, and each lock decrements the counter.
+ *
+ * The lock function only locks the GPIO once its unlock counter is
+ * down to 0. This is necessary because the GPIO is unlocked in two
+ * places in this driver: once for requested GPIOs, and once for
+ * requested IRQs. Since it is possible for a GPIO to be requested
+ * as both a GPIO and an IRQ, we need to ensure that we don't lock it
+ * too early.
+ */
+ u8 gpio_unlock_count[GPIO_PER_BANK];
/* Used in the interrupt handler */
struct bcm_kona_gpio *kona_gpio;
};
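
The counter semantics described in the comment above are easiest to see with a single GPIO requested both as a GPIO and as an IRQ (an illustrative trace, not part of the patch):

	bcm_kona_gpio_unlock_gpio(kona_gpio, gpio); /* GPIO request: 0 -> 1, HW unlocked */
	bcm_kona_gpio_unlock_gpio(kona_gpio, gpio); /* IRQ request:  1 -> 2, no HW write */
	bcm_kona_gpio_lock_gpio(kona_gpio, gpio);   /* IRQ release:  2 -> 1, stays unlocked */
	bcm_kona_gpio_lock_gpio(kona_gpio, gpio);   /* GPIO free:    1 -> 0, HW locked again */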
@@ -84,32 +101,42 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ dev_err(kona_gpio->gpio_chip.parent,
+ "Unbalanced locks for GPIO %u\n", gpio);
+ return;
+ }
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val |= BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ if (--bank->gpio_unlock_count[bit] == 0) {
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val |= BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ }
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val &= ~BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val &= ~BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ }
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ ++bank->gpio_unlock_count[bit];
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@@ -122,22 +149,23 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
return val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
-static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int bcm_kona_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
/* this function only applies to output pin */
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
- goto out;
+ return 0;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -145,8 +173,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
val |= BIT(bit);
writel(val, reg_base + reg_offset);
-out:
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ return 0;
}
static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -156,11 +183,11 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
@@ -170,8 +197,6 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
/* read the GPIO bank status */
val = readl(reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
/* return the specified bit status */
return !!(val & BIT(bit));
}
@@ -196,19 +221,17 @@ static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -220,11 +243,11 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
@@ -236,8 +259,6 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
val |= BIT(bit);
writel(val, reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -257,7 +278,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val, res;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
@@ -280,7 +300,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
}
/* spin lock for read-modify-write of the GPIO register */
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_DBR_MASK;
@@ -295,8 +315,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -335,17 +353,15 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_STATUS(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_STATUS(bank_id));
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_mask(struct irq_data *d)
@@ -356,18 +372,16 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
gpiochip_disable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
@@ -378,18 +392,16 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
gpiochip_enable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -399,7 +411,6 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned gpio = d->hwirq;
u32 lvl_type;
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
@@ -425,15 +436,13 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_ITR_MASK;
val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -477,15 +486,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
- return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ /*
+ * We need to unlock the GPIO before any other operations are performed
+ * on the relevant GPIO configuration registers.
+ */
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
+
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+
+ /* Once we no longer use it, lock the GPIO again */
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
- gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
}
static struct irq_chip bcm_gpio_irq_chip = {
@@ -496,6 +516,7 @@ static struct irq_chip bcm_gpio_irq_chip = {
.irq_set_type = bcm_kona_gpio_irq_set_type,
.irq_request_resources = bcm_kona_gpio_irq_reqres,
.irq_release_resources = bcm_kona_gpio_irq_relres,
+ .flags = IRQCHIP_IMMUTABLE,
};
static struct of_device_id const bcm_kona_gpio_of_match[] = {
@@ -614,7 +635,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank->irq = platform_get_irq(pdev, i);
bank->kona_gpio = kona_gpio;
if (bank->irq < 0) {
- dev_err(dev, "Couldn't get IRQ for bank %d", i);
+ dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
ret = -ENOENT;
goto err_irq_domain;
}
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 08ff2857256f..afb18a5a9d79 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -37,21 +37,18 @@ static int bd71815gpo_get(struct gpio_chip *chip, unsigned int offset)
return (val >> offset) & 1;
}
-static void bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct bd71815_gpio *bd71815 = gpiochip_get_data(chip);
- int ret, bit;
+ int bit;
bit = BIT(offset);
if (value)
- ret = regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- else
- ret = regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
+ return regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- if (ret)
- dev_warn(bd71815->dev, "failed to toggle GPO\n");
+ return regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
}
static int bd71815_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index b2ccc320c7b5..e439dbfffc62 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -16,10 +16,9 @@ struct bd71828_gpio {
struct gpio_chip gpio;
};
-static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
u8 val = (value) ? BD71828_GPIO_OUT_HI : BD71828_GPIO_OUT_LO;
@@ -28,12 +27,10 @@ static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
* we are dealing with - then we are done
*/
if (offset == HALL_GPIO_OFFSET)
- return;
+ return 0;
- ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD71828_GPIO_OUT_MASK, val);
- if (ret)
- dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
+ BD71828_GPIO_OUT_MASK, val);
}
static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 9a4d55f703bb..7c95bb36511e 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -72,13 +72,13 @@ static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset);
}
-static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
+static int bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static const struct gpio_chip template_chip = {
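The bd71815, bd71828 and bd9571mwv hunks all make the same conversion: the gpio_chip set() callback now returns int, so regmap errors propagate to the caller instead of being printed with dev_warn()/dev_err() and then dropped. The resulting callback shape, sketched with a hypothetical my_gpio structure and MY_OUT_REG register:

    struct my_gpio {
            struct regmap *regmap;
    };

    static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
    {
            struct my_gpio *mg = gpiochip_get_data(gc);

            /* Return the regmap error directly; the core can report it. */
            return regmap_update_bits(mg->regmap, MY_OUT_REG,
                                      BIT(offset), value ? BIT(offset) : 0);
    }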
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
new file mode 100644
index 000000000000..0f8c826ba876
--- /dev/null
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 VeriSilicon Limited.
+ * Copyright (C) 2025 Blaize, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define GPIO_DIR_REG 0x00
+#define GPIO_CTRL_REG 0x04
+#define GPIO_SET_REG 0x08
+#define GPIO_CLR_REG 0x0C
+#define GPIO_ODATA_REG 0x10
+#define GPIO_IDATA_REG 0x14
+#define GPIO_IEN_REG 0x18
+#define GPIO_IS_REG 0x1C
+#define GPIO_IBE_REG 0x20
+#define GPIO_IEV_REG 0x24
+#define GPIO_RIS_REG 0x28
+#define GPIO_IM_REG 0x2C
+#define GPIO_MIS_REG 0x30
+#define GPIO_IC_REG 0x34
+#define GPIO_DB_REG 0x38
+#define GPIO_DFG_REG 0x3C
+
+#define DRIVER_NAME "blzp1600-gpio"
+
+struct blzp1600_gpio {
+ void __iomem *base;
+ struct gpio_generic_chip gen_gc;
+ int irq;
+};
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_data(struct irq_data *d)
+{
+ return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_desc(struct irq_desc *d)
+{
+ return gpiochip_get_data(irq_desc_get_handler_data(d));
+}
+
+static inline u32 blzp1600_gpio_read(struct blzp1600_gpio *chip, unsigned int offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void blzp1600_gpio_write(struct blzp1600_gpio *chip, unsigned int offset, u32 val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
+static inline void blzp1600_gpio_rmw(void __iomem *reg, u32 mask, bool set)
+{
+ u32 val = readl_relaxed(reg);
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel_relaxed(val, reg);
+}
+
+static void blzp1600_gpio_irq_mask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_unmask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
+}
+
+static void blzp1600_gpio_irq_ack(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ blzp1600_gpio_write(chip, GPIO_IC_REG, BIT(d->hwirq));
+}
+
+static void blzp1600_gpio_irq_enable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ gpiochip_enable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_disable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
+ gpiochip_disable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
+}
+
+static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+ u32 edge_level, single_both, fall_rise;
+ int mask = BIT(d->hwirq);
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
+ single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
+ fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ edge_level &= ~mask;
+ single_both |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge_level |= mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge_level |= mask;
+ fall_rise &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ blzp1600_gpio_write(chip, GPIO_IS_REG, edge_level);
+ blzp1600_gpio_write(chip, GPIO_IBE_REG, single_both);
+ blzp1600_gpio_write(chip, GPIO_IEV_REG, fall_rise);
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_chip blzp1600_gpio_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_ack = blzp1600_gpio_irq_ack,
+ .irq_mask = blzp1600_gpio_irq_mask,
+ .irq_unmask = blzp1600_gpio_irq_unmask,
+ .irq_set_type = blzp1600_gpio_irq_set_type,
+ .irq_enable = blzp1600_gpio_irq_enable,
+ .irq_disable = blzp1600_gpio_irq_disable,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
+{
+ struct blzp1600_gpio *gpio = get_blzp1600_gpio_from_irq_desc(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long irq_status;
+ int hwirq = 0;
+
+ chained_irq_enter(irqchip, desc);
+ irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
+ for_each_set_bit(hwirq, &irq_status, gpio->gen_gc.gc.ngpio)
+ generic_handle_domain_irq(gpio->gen_gc.gc.irq.domain, hwirq);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
+ unsigned int debounce)
+{
+ struct blzp1600_gpio *chip = gpiochip_get_data(gc);
+
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+ blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
+
+ return 0;
+}
+
+static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return blzp1600_gpio_set_debounce(gc, offset, debounce);
+}
+
+static int blzp1600_gpio_probe(struct platform_device *pdev)
+{
+ struct gpio_generic_chip_config config;
+ struct blzp1600_gpio *chip;
+ struct gpio_chip *gc;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = chip->base + GPIO_IDATA_REG,
+ .set = chip->base + GPIO_SET_REG,
+ .clr = chip->base + GPIO_CLR_REG,
+ .dirout = chip->base + GPIO_DIR_REG,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
+
+ /* configure the gpio chip */
+ gc = &chip->gen_gc.gc;
+ gc->set_config = blzp1600_gpio_set_config;
+
+ if (device_property_present(&pdev->dev, "interrupt-controller")) {
+ struct gpio_irq_chip *girq;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ girq = &gc->irq;
+ gpio_irq_chip_set_chip(girq, &blzp1600_gpio_irqchip);
+ girq->parent_handler = blzp1600_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ }
+
+ return devm_gpiochip_add_data(&pdev->dev, gc, chip);
+}
+
+static const struct of_device_id blzp1600_gpio_of_match[] = {
+ { .compatible = "blaize,blzp1600-gpio", },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, blzp1600_gpio_of_match);
+
+static struct platform_driver blzp1600_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = blzp1600_gpio_of_match,
+ },
+ .probe = blzp1600_gpio_probe,
+};
+
+module_platform_driver(blzp1600_gpio_driver);
+
+MODULE_AUTHOR("Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>");
+MODULE_DESCRIPTION("Blaize BLZP1600 GPIO driver");
+MODULE_LICENSE("GPL");
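The new blzp1600 driver dispatches its parent interrupt with the standard chained-handler idiom: bracket the handler with chained_irq_enter()/chained_irq_exit(), read the raw status register once, and translate each pending bit into a domain interrupt. A condensed restatement of blzp1600_gpio_irqhandler() with placeholder names (my_gpio, MY_RAW_STATUS):

    struct my_gpio {
            void __iomem *base;
            struct gpio_chip gc;
    };

    static void my_gpio_irqhandler(struct irq_desc *desc)
    {
            struct my_gpio *g = gpiochip_get_data(irq_desc_get_handler_data(desc));
            struct irq_chip *ic = irq_desc_get_chip(desc);
            unsigned long pending;
            int hwirq;

            chained_irq_enter(ic, desc);

            pending = readl_relaxed(g->base + MY_RAW_STATUS);
            for_each_set_bit(hwirq, &pending, g->gc.ngpio)
                    generic_handle_domain_irq(g->gc.irq.domain, hwirq);

            chained_irq_exit(ic, desc);
    }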
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 5762e517338e..f40c9472588b 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -3,12 +3,14 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
@@ -36,7 +38,7 @@ enum gio_reg_index {
struct brcmstb_gpio_bank {
struct list_head node;
int id;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
u32 wake_active;
@@ -71,19 +73,18 @@ __brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
void __iomem *reg_base = bank->parent_priv->reg_base;
- return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+ return gpio_generic_read_reg(&bank->chip, reg_base + GIO_STAT(bank->id)) &
+ gpio_generic_read_reg(&bank->chip, reg_base + GIO_MASK(bank->id));
}
static unsigned long
brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
unsigned long status;
- unsigned long flags;
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
status = __brcmstb_gpio_get_active_irqs(bank);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return status;
}
@@ -91,26 +92,26 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
struct brcmstb_gpio_bank *bank)
{
- return hwirq - bank->gc.offset;
+ return hwirq - bank->chip.gc.offset;
}
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- imask = gc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ imask = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id));
if (enable)
imask |= mask;
else
imask &= ~mask;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id), imask);
}
static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -149,7 +150,8 @@ static void brcmstb_gpio_irq_ack(struct irq_data *d)
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
- gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_STAT(bank->id), mask);
}
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -161,7 +163,6 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
- unsigned long flags;
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
@@ -193,23 +194,25 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
-
- iedge_config = bank->gc.read_reg(priv->reg_base +
- GIO_EC(bank->id)) & ~mask;
- iedge_insensitive = bank->gc.read_reg(priv->reg_base +
- GIO_EI(bank->id)) & ~mask;
- ilevel = bank->gc.read_reg(priv->reg_base +
- GIO_LEVEL(bank->id)) & ~mask;
-
- bank->gc.write_reg(priv->reg_base + GIO_EC(bank->id),
- iedge_config | edge_config);
- bank->gc.write_reg(priv->reg_base + GIO_EI(bank->id),
- iedge_insensitive | edge_insensitive);
- bank->gc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
- ilevel | level);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ iedge_config = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id)) & ~mask;
+ iedge_insensitive = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id)) & ~mask;
+ ilevel = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id)) & ~mask;
+
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id),
+ iedge_config | edge_config);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id),
+ iedge_insensitive | edge_insensitive);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id),
+ ilevel | level);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return 0;
}
@@ -224,7 +227,7 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
ret = disable_irq_wake(priv->parent_wake_irq);
if (ret)
dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
@@ -262,7 +265,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct irq_domain *domain = priv->irq_domain;
- int hwbase = bank->gc.offset;
+ int hwbase = bank->chip.gc.offset;
unsigned long status;
while ((status = brcmstb_gpio_get_active_irqs(bank))) {
@@ -302,7 +305,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
/* banks are in descending order */
list_for_each_entry_reverse(bank, &priv->bank_list, node) {
- i += bank->gc.ngpio;
+ i += bank->chip.gc.ngpio;
if (hwirq < i)
return bank;
}
@@ -331,7 +334,7 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
irq, (int)hwirq, bank->id);
- ret = irq_set_chip_data(irq, &bank->gc);
+ ret = irq_set_chip_data(irq, &bank->chip.gc);
if (ret < 0)
return ret;
irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
@@ -393,7 +396,7 @@ static void brcmstb_gpio_remove(struct platform_device *pdev)
* more important to actually perform all of the steps.
*/
list_for_each_entry(bank, &priv->bank_list, node)
- gpiochip_remove(&bank->gc);
+ gpiochip_remove(&bank->chip.gc);
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
@@ -411,7 +414,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- offset = gpiospec->args[0] - bank->gc.offset;
+ offset = gpiospec->args[0] - bank->chip.gc.offset;
if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
@@ -435,10 +438,8 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
struct device_node *np = dev->of_node;
int err;
- priv->irq_domain =
- irq_domain_add_linear(np, priv->num_gpios,
- &brcmstb_gpio_irq_domain_ops,
- priv);
+ priv->irq_domain = irq_domain_create_linear(dev_fwnode(dev), priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops, priv);
if (!priv->irq_domain) {
dev_err(dev, "Couldn't allocate IRQ domain\n");
return -ENXIO;
@@ -494,19 +495,17 @@ out_free_domain:
static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- bank->saved_regs[i] = gc->read_reg(priv->reg_base +
- GIO_BANK_OFF(bank->id, i));
+ bank->saved_regs[i] = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i));
}
static void brcmstb_gpio_quiesce(struct device *dev, bool save)
{
struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
struct brcmstb_gpio_bank *bank;
- struct gpio_chip *gc;
u32 imask;
/* disable non-wake interrupt */
@@ -514,8 +513,6 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
disable_irq(priv->parent_irq);
list_for_each_entry(bank, &priv->bank_list, node) {
- gc = &bank->gc;
-
if (save)
brcmstb_gpio_bank_save(priv, bank);
@@ -524,8 +521,9 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
imask = bank->wake_active;
else
imask = 0;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
- imask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id),
+ imask);
}
}
@@ -539,12 +537,12 @@ static void brcmstb_gpio_shutdown(struct platform_device *pdev)
static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
- bank->saved_regs[i]);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
}
static int brcmstb_gpio_suspend(struct device *dev)
@@ -586,6 +584,7 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *reg_base;
@@ -631,7 +630,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* else leave I/O in little endian mode.
*/
#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#endif
of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width) {
@@ -666,17 +665,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->width = bank_width;
}
+ gc = &bank->chip.gc;
+
/*
* Regs are 4 bytes wide, have data reg, no set/clear regs,
* and direction bits have 0 = output and 1 = input
*/
- gc = &bank->gc;
- err = bgpio_init(gc, dev, 4,
- reg_base + GIO_DATA(bank->id),
- NULL, NULL, NULL,
- reg_base + GIO_IODIR(bank->id), flags);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = reg_base + GIO_DATA(bank->id),
+ .dirin = reg_base + GIO_IODIR(bank->id),
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(&bank->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
goto fail;
}
@@ -701,7 +707,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* be retained from S5 cold boot
*/
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
- gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ gpio_generic_write_reg(&bank->chip,
+ reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
if (err) {
@@ -751,7 +758,7 @@ static struct platform_driver brcmstb_gpio_driver = {
.pm = &brcmstb_gpio_pm_ops,
},
.probe = brcmstb_gpio_probe,
- .remove_new = brcmstb_gpio_remove,
+ .remove = brcmstb_gpio_remove,
.shutdown = brcmstb_gpio_shutdown,
};
module_platform_driver(brcmstb_gpio_driver);
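Throughout the brcmstb conversion, direct calls through the gpio_chip function pointers (gc->read_reg()/gc->write_reg()) and the raw bgpio_lock give way to the struct gpio_generic_chip wrapper from <linux/gpio/generic.h>: the bank embeds the wrapper, register access goes through gpio_generic_read_reg()/gpio_generic_write_reg(), and locking uses guard(gpio_generic_lock_irqsave). A sketch under the same assumptions (helper names and signatures exactly as used in the hunks above; my_bank and MY_MASK_REG are placeholders):

    struct my_bank {
            struct gpio_generic_chip chip;  /* embeds struct gpio_chip gc */
            void __iomem *regs;
    };

    static u32 my_bank_read_mask(struct my_bank *bank)
    {
            guard(gpio_generic_lock_irqsave)(&bank->chip);

            return gpio_generic_read_reg(&bank->chip, bank->regs + MY_MASK_REG);
    }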
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7920cf256798..05401da03ca3 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -31,6 +31,7 @@
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -69,10 +70,9 @@ MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the
static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
data &= ~(1 << nr);
@@ -82,20 +82,17 @@ static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
outen &= ~(1 << nr);
bgwrite(outen, BT848_GPIO_OUT_EN);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
+
val = bgread(BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
return !!(val & (1 << nr));
}
@@ -104,10 +101,9 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
outen = bgread(BT848_GPIO_OUT_EN);
outen |= (1 << nr);
@@ -120,19 +116,15 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
-static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int bt8xxgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
if (val)
@@ -141,7 +133,7 @@ static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
+ return 0;
}
static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
@@ -236,18 +228,15 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
-
- spin_lock_irqsave(&bg->lock, flags);
- bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
- bg->saved_data = bgread(BT848_GPIO_DATA);
+ scoped_guard(spinlock_irqsave, &bg->lock) {
+ bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
+ bg->saved_data = bgread(BT848_GPIO_DATA);
- bgwrite(0, BT848_INT_MASK);
- bgwrite(~0x0, BT848_INT_STAT);
- bgwrite(0x0, BT848_GPIO_OUT_EN);
-
- spin_unlock_irqrestore(&bg->lock, flags);
+ bgwrite(0, BT848_INT_MASK);
+ bgwrite(~0x0, BT848_INT_STAT);
+ bgwrite(0x0, BT848_GPIO_OUT_EN);
+ }
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -259,7 +248,6 @@ static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
static int bt8xxgpio_resume(struct pci_dev *pdev)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
int err;
pci_set_power_state(pdev, PCI_D0);
@@ -268,7 +256,7 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
return err;
pci_restore_state(pdev);
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
bgwrite(0, BT848_INT_MASK);
bgwrite(0, BT848_GPIO_DMA_CTL);
@@ -277,8 +265,6 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
bgwrite(bg->saved_data & bg->saved_outen,
BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
#else
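Where only part of a function must run under the lock — as in bt8xxgpio_suspend(), which has to drop the spinlock before the sleeping PCI calls — the diff uses scoped_guard(), whose critical section is an explicit block rather than the remainder of the function. The shape of the pattern, with hypothetical register-save logic:

    struct my_dev {
            spinlock_t lock;
            void __iomem *base;
            u32 saved_ctl;
    };

    static int my_dev_suspend(struct my_dev *md)
    {
            scoped_guard(spinlock_irqsave, &md->lock) {
                    md->saved_ctl = readl(md->base + MY_CTL);
                    writel(0, md->base + MY_CTL);
            }       /* lock released here, before anything that may sleep */

            return 0;
    }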
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index 1b8ffd0ddab6..b75734ca22dd 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -8,9 +8,11 @@
* Boris Brezillon <boris.brezillon@free-electrons.com>
*/
-#include <linux/gpio/driver.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -30,7 +32,7 @@
#define CDNS_GPIO_IRQ_ANY_EDGE 0x2c
struct cdns_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *regs;
u32 bypass_orig;
};
@@ -38,29 +40,24 @@ struct cdns_gpio_chip {
static int cdns_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return 0;
}
static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) |
(BIT(offset) & cgpio->bypass_orig),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
-
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
}
static void cdns_gpio_irq_mask(struct irq_data *d)
@@ -85,13 +82,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
u32 int_value;
u32 int_type;
u32 mask = BIT(d->hwirq);
int ret = 0;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask;
int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask;
@@ -108,15 +104,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
} else if (type == IRQ_TYPE_LEVEL_LOW) {
int_type |= mask;
} else {
- ret = -EINVAL;
- goto err_irq_type;
+ return -EINVAL;
}
iowrite32(int_value, cgpio->regs + CDNS_GPIO_IRQ_VALUE);
iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE);
-err_irq_type:
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return ret;
}
@@ -150,6 +143,7 @@ static const struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct cdns_gpio_chip *cgpio;
int ret, irq;
u32 dir_prev;
@@ -176,32 +170,33 @@ static int cdns_gpio_probe(struct platform_device *pdev)
* gpiochip_lock_as_irq:
* tried to flag a GPIO set as output for IRQ
* Generic GPIO driver stores the direction value internally,
- * so it needs to be changed before bgpio_init() is called.
+ * so it needs to be changed before gpio_generic_chip_init() is called.
*/
dir_prev = ioread32(cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
iowrite32(GENMASK(num_gpios - 1, 0),
cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
- ret = bgpio_init(&cgpio->gc, &pdev->dev, 4,
- cgpio->regs + CDNS_GPIO_INPUT_VALUE,
- cgpio->regs + CDNS_GPIO_OUTPUT_VALUE,
- NULL,
- NULL,
- cgpio->regs + CDNS_GPIO_DIRECTION_MODE,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE;
+ config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE;
+ config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ ret = gpio_generic_chip_init(&cgpio->gen_gc, &config);
if (ret) {
dev_err(&pdev->dev, "Failed to register generic gpio, %d\n",
ret);
goto err_revert_dir;
}
- cgpio->gc.label = dev_name(&pdev->dev);
- cgpio->gc.ngpio = num_gpios;
- cgpio->gc.parent = &pdev->dev;
- cgpio->gc.base = -1;
- cgpio->gc.owner = THIS_MODULE;
- cgpio->gc.request = cdns_gpio_request;
- cgpio->gc.free = cdns_gpio_free;
+ cgpio->gen_gc.gc.label = dev_name(&pdev->dev);
+ cgpio->gen_gc.gc.ngpio = num_gpios;
+ cgpio->gen_gc.gc.parent = &pdev->dev;
+ cgpio->gen_gc.gc.base = -1;
+ cgpio->gen_gc.gc.owner = THIS_MODULE;
+ cgpio->gen_gc.gc.request = cdns_gpio_request;
+ cgpio->gen_gc.gc.free = cdns_gpio_free;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -218,7 +213,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (irq >= 0) {
struct gpio_irq_chip *girq;
- girq = &cgpio->gc.irq;
+ girq = &cgpio->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip);
girq->parent_handler = cdns_gpio_irq_handler;
girq->num_parents = 1;
@@ -234,7 +229,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gen_gc.gc, cgpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
goto err_revert_dir;
@@ -277,7 +272,7 @@ static struct platform_driver cdns_gpio_driver = {
.of_match_table = cdns_of_ids,
},
.probe = cdns_gpio_probe,
- .remove_new = cdns_gpio_remove,
+ .remove = cdns_gpio_remove,
};
module_platform_driver(cdns_gpio_driver);
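The cadence hunks show the probe-side half of the generic-chip conversion: the long positional bgpio_init() argument list becomes a designated-initializer struct gpio_generic_chip_config passed to gpio_generic_chip_init(), which makes explicit which register is data, set, clear or direction. A sketch of the idiom, assuming the config fields shown in the hunks above and placeholder register offsets (MY_IN, MY_OUT, MY_DIR):

    struct my_gpio {
            void __iomem *base;
            struct gpio_generic_chip gen_gc;
    };

    static int my_gpio_probe(struct platform_device *pdev)
    {
            struct gpio_generic_chip_config config;
            struct my_gpio *mg;
            int ret;

            mg = devm_kzalloc(&pdev->dev, sizeof(*mg), GFP_KERNEL);
            if (!mg)
                    return -ENOMEM;

            mg->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(mg->base))
                    return PTR_ERR(mg->base);

            config = (struct gpio_generic_chip_config) {
                    .dev = &pdev->dev,
                    .sz = 4,                        /* 4-byte registers */
                    .dat = mg->base + MY_IN,        /* placeholder offsets */
                    .set = mg->base + MY_OUT,
                    .dirout = mg->base + MY_DIR,
            };

            ret = gpio_generic_chip_init(&mg->gen_gc, &config);
            if (ret)
                    return ret;

            return devm_gpiochip_add_data(&pdev->dev, &mg->gen_gc.gc, mg);
    }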
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
new file mode 100644
index 000000000000..0efa1b61001a
--- /dev/null
+++ b/drivers/gpio/gpio-cgbc.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Congatec Board Controller GPIO driver
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/cgbc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define CGBC_GPIO_NGPIO 14
+
+#define CGBC_GPIO_CMD_GET 0x64
+#define CGBC_GPIO_CMD_SET 0x65
+#define CGBC_GPIO_CMD_DIR_GET 0x66
+#define CGBC_GPIO_CMD_DIR_SET 0x67
+
+struct cgbc_gpio_data {
+ struct gpio_chip chip;
+ struct cgbc_device_data *cgbc;
+ struct mutex lock;
+};
+
+static int cgbc_gpio_cmd(struct cgbc_device_data *cgbc,
+ u8 cmd0, u8 cmd1, u8 cmd2, u8 *value)
+{
+ u8 cmd[3] = {cmd0, cmd1, cmd2};
+
+ return cgbc_command(cgbc, cmd, sizeof(cmd), value, 1, NULL);
+}
+
+static int cgbc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ scoped_guard(mutex, &gpio->lock)
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
+
+ offset %= 8;
+
+ if (ret)
+ return ret;
+ else
+ return (int)(val & (u8)BIT(offset));
+}
+
+static int __cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ u8 val;
+ int ret;
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
+ if (ret)
+ return ret;
+
+ if (value)
+ val |= BIT(offset % 8);
+ else
+ val &= ~(BIT(offset % 8));
+
+ return cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
+}
+
+static int cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(mutex)(&gpio->lock);
+
+ return __cgbc_gpio_set(chip, offset, value);
+}
+
+static int cgbc_gpio_direction_set(struct gpio_chip *chip,
+ unsigned int offset, int direction)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_GET, (offset > 7) ? 1 : 0, 0, &val);
+ if (ret)
+ goto end;
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ val &= ~(BIT(offset % 8));
+ else
+ val |= BIT(offset % 8);
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_SET, (offset > 7) ? 1 : 0, val, &val);
+
+end:
+ return ret;
+}
+
+static int cgbc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(mutex)(&gpio->lock);
+ return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_IN);
+}
+
+static int cgbc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ int ret;
+
+ guard(mutex)(&gpio->lock);
+
+ ret = __cgbc_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
+ return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_OUT);
+}
+
+static int cgbc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ scoped_guard(mutex, &gpio->lock)
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_GET, (offset > 7) ? 1 : 0, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val & BIT(offset % 8))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int cgbc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cgbc_device_data *cgbc = dev_get_drvdata(dev->parent);
+ struct cgbc_gpio_data *gpio;
+ struct gpio_chip *chip;
+ int ret;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->cgbc = cgbc;
+
+ platform_set_drvdata(pdev, gpio);
+
+ chip = &gpio->chip;
+ chip->label = dev_name(&pdev->dev);
+ chip->owner = THIS_MODULE;
+ chip->parent = dev;
+ chip->base = -1;
+ chip->direction_input = cgbc_gpio_direction_input;
+ chip->direction_output = cgbc_gpio_direction_output;
+ chip->get_direction = cgbc_gpio_get_direction;
+ chip->get = cgbc_gpio_get;
+ chip->set = cgbc_gpio_set;
+ chip->ngpio = CGBC_GPIO_NGPIO;
+
+ ret = devm_mutex_init(dev, &gpio->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Could not register GPIO chip\n");
+
+ return 0;
+}
+
+static struct platform_driver cgbc_gpio_driver = {
+ .driver = {
+ .name = "cgbc-gpio",
+ },
+ .probe = cgbc_gpio_probe,
+};
+
+module_platform_driver(cgbc_gpio_driver);
+
+MODULE_DESCRIPTION("Congatec Board Controller GPIO Driver");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cgbc-gpio");
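The cgbc driver talks to its board controller over a sleeping command interface, so it serializes with a mutex rather than a spinlock: devm_mutex_init() ties mutex_destroy() to device teardown, and guard(mutex)/scoped_guard(mutex, ...) keep each read-modify-write command pair atomic. A minimal sketch, with my_bus_read() standing in for the actual bus transfer:

    struct my_dev {
            struct mutex lock;
    };

    static int my_read_locked(struct my_dev *md, u8 *val)
    {
            guard(mutex)(&md->lock);        /* dropped on every return path */

            return my_bus_read(md, val);    /* placeholder; may sleep */
    }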
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d69a24dd4828..24ff2347d599 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -8,13 +8,15 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
static int clps711x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip *gen_gc;
void __iomem *dat, *dir;
- struct gpio_chip *gc;
int err, id;
if (!np)
@@ -24,8 +26,8 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if ((id < 0) || (id > 4))
return -ENODEV;
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
dat = devm_platform_ioremap_resource(pdev, 0);
@@ -36,35 +38,37 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
+ config.dev = &pdev->dev;
+ config.sz = 1;
+ config.dat = dat;
+
switch (id) {
case 3:
/* PORTD is inverted logic for direction register */
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- NULL, dir, 0);
+ config.dirin = dir;
break;
default:
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- dir, NULL, 0);
+ config.dirout = dir;
break;
}
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
switch (id) {
case 4:
/* PORTE is 3 lines only */
- gc->ngpio = 3;
+ gen_gc->gc.ngpio = 3;
break;
default:
break;
}
- gc->base = -1;
- gc->owner = THIS_MODULE;
- platform_set_drvdata(pdev, gc);
+ gen_gc->gc.base = -1;
+ gen_gc->gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
static const struct of_device_id clps711x_gpio_ids[] = {
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 4968232f70f2..f8ea961fa1de 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -27,7 +27,7 @@ struct creg_gpio {
const struct creg_layout *layout;
};
-static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct creg_gpio *hcg = gpiochip_get_data(gc);
const struct creg_layout *layout = hcg->layout;
@@ -47,13 +47,13 @@ static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg |= (value << reg_shift);
writel(reg, hcg->regs);
spin_unlock_irqrestore(&hcg->lock, flags);
+
+ return 0;
}
static int creg_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- creg_gpio_set(gc, offset, val);
-
- return 0;
+ return creg_gpio_set(gc, offset, val);
}
static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 0c09bb54dc0c..435483826c6e 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -24,24 +24,21 @@
static const char cros_ec_gpio_prefix[] = "EC:";
/* Setting gpios is only supported when the system is unlocked */
-static void cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
struct ec_params_gpio_set params = {
.val = val,
};
- int ret;
ssize_t copied;
copied = strscpy(params.name, name, sizeof(params.name));
if (copied < 0)
- return;
+ return copied;
- ret = cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
- sizeof(params), NULL, 0);
- if (ret < 0)
- dev_err(gc->parent, "error setting gpio%d (%s) on EC: %d\n", gpio, name, ret);
+ return cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
+ sizeof(params), NULL, 0);
}
static int cros_ec_gpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 25db014494a4..0fb5c06d0886 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#define CRYSTALCOVE_GPIO_NUM 16
@@ -167,18 +168,18 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
if (value)
- regmap_update_bits(cg->regmap, reg, 1, 1);
- else
- regmap_update_bits(cg->regmap, reg, 1, 0);
+ return regmap_update_bits(cg->regmap, reg, 1, 1);
+
+ return regmap_update_bits(cg->regmap, reg, 1, 0);
}
static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
@@ -317,7 +318,7 @@ static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip
offset = gpio % 8;
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6da3a247614a..8affe4e9f90e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -232,12 +232,14 @@ static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
-static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int chip_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
if (val)
cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
else
cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
+
+ return 0;
}
static int chip_direction_input(struct gpio_chip *c, unsigned offset)
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6f3905f1b8f5..495f0ee58505 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -89,30 +89,20 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
}
}
-static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9052_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9052_gpio *gpio = gpiochip_get_data(gc);
- int ret;
- if (da9052_gpio_port_odd(offset)) {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_ODD_PORT_MODE,
- value << DA9052_GPIO_ODD_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio odd reg,%d",
- ret);
- } else {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_EVEN_PORT_MODE,
- value << DA9052_GPIO_EVEN_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio even reg,%d",
- ret);
- }
+ if (da9052_gpio_port_odd(offset))
+ return da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_ODD_PORT_MODE,
+ value << DA9052_GPIO_ODD_SHIFT);
+
+ return da9052_reg_update(gpio->da9052,
+ (offset >> 1) + DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_EVEN_PORT_MODE,
+ value << DA9052_GPIO_EVEN_SHIFT);
}
static int da9052_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 49446a030f10..a09bd6eb93cf 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -59,14 +59,12 @@ static int da9055_gpio_get(struct gpio_chip *gc, unsigned offset)
}
-static void da9055_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9055_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9055_gpio *gpio = gpiochip_get_data(gc);
- da9055_reg_update(gpio->da9055,
- DA9055_REG_GPIO_MODE0_2,
- 1 << offset,
- value << offset);
+ return da9055_reg_update(gpio->da9055, DA9055_REG_GPIO_MODE0_2,
+ 1 << offset, value << offset);
}
static int da9055_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -102,9 +100,7 @@ static int da9055_gpio_direction_output(struct gpio_chip *gc,
if (ret < 0)
return ret;
- da9055_gpio_set(gc, offset, value);
-
- return 0;
+ return da9055_gpio_set(gc, offset, value);
}
static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 76b58c70b257..538f27209ce7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -69,15 +68,6 @@ static inline u32 __gpio_mask(unsigned gpio)
return 1 << (gpio % 32);
}
-static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
-{
- struct davinci_gpio_regs __iomem *g;
-
- g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
-
- return g;
-}
-
static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -140,7 +130,7 @@ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
/*
* Assuming the pin is muxed as a gpio output, set its output value.
*/
-static void
+static int
davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct davinci_gpio_controller *d = gpiochip_get_data(chip);
@@ -151,6 +141,8 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
writel_relaxed(__gpio_mask(offset),
value ? &g->set_data : &g->clr_data);
+
+ return 0;
}
static int davinci_gpio_probe(struct platform_device *pdev)
@@ -159,14 +151,13 @@ static int davinci_gpio_probe(struct platform_device *pdev)
unsigned int ngpio, nbank, nirq, gpio_unbanked;
struct davinci_gpio_controller *chips;
struct device *dev = &pdev->dev;
- struct device_node *dn = dev_of_node(dev);
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
* bit index that's valid.
*/
- ret = of_property_read_u32(dn, "ti,ngpio", &ngpio);
+ ret = device_property_read_u32(dev, "ti,ngpio", &ngpio);
if (ret)
return dev_err_probe(dev, ret, "Failed to get the number of GPIOs\n");
if (ngpio == 0)
@@ -177,8 +168,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
* interrupts is equal to number of gpios else all are banked so
* number of interrupts is equal to number of banks(each with 16 gpios)
*/
- ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked",
- &gpio_unbanked);
+ ret = device_property_read_u32(dev, "ti,davinci-gpio-unbanked",
+ &gpio_unbanked);
if (ret)
return dev_err_probe(dev, ret, "Failed to get the unbanked GPIOs property\n");
@@ -255,19 +246,27 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_mask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
writel_relaxed(mask, &g->clr_rising);
+
+ gpiochip_disable_irq(&chips->chip, hwirq);
}
static void gpio_irq_unmask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
+ gpiochip_enable_irq(&chips->chip, hwirq);
+
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (!status)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
@@ -286,12 +285,13 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static struct irq_chip gpio_irqchip = {
+static const struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_unmask = gpio_irq_unmask,
.irq_mask = gpio_irq_mask,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -399,12 +399,11 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct davinci_gpio_controller *chips =
(struct davinci_gpio_controller *)d->host_data;
- struct davinci_gpio_regs __iomem *g = chips->regs[hw / 32];
irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
"davinci_gpio");
irq_set_irq_type(irq, IRQ_TYPE_NONE);
- irq_set_chip_data(irq, (__force void *)g);
+ irq_set_chip_data(irq, (__force void *)chips);
irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));
return 0;
@@ -479,9 +478,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
- &davinci_gpio_irq_ops,
- chips);
+ irq_domain = irq_domain_create_legacy(dev_fwnode(dev), ngpio, irq, 0,
+ &davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
return -ENODEV;
@@ -662,7 +660,7 @@ static struct platform_driver davinci_gpio_driver = {
.driver = {
.name = "davinci_gpio",
.pm = pm_sleep_ptr(&davinci_gpio_dev_pm_ops),
- .of_match_table = of_match_ptr(davinci_gpio_ids),
+ .of_match_table = davinci_gpio_ids,
},
};
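The davinci hunks fold two recurring themes of this series into one driver: the irq_chip becomes const with IRQCHIP_IMMUTABLE plus GPIOCHIP_IRQ_RESOURCE_HELPERS, which obliges mask/unmask to call gpiochip_disable_irq()/gpiochip_enable_irq() themselves, and the OF-only irq_domain_add_legacy() becomes the fwnode-based irq_domain_create_legacy(dev_fwnode(dev), ...). A sketch of the immutable-irqchip half, with my_hw_mask()/my_hw_unmask() as placeholders for the register writes:

    static void my_irq_mask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            my_hw_mask(gc, irqd_to_hwirq(d));       /* placeholder */
            gpiochip_disable_irq(gc, irqd_to_hwirq(d));
    }

    static void my_irq_unmask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            gpiochip_enable_irq(gc, irqd_to_hwirq(d));
            my_hw_unmask(gc, irqd_to_hwirq(d));     /* placeholder */
    }

    static const struct irq_chip my_irqchip = {
            .name           = "my-gpio",
            .irq_mask       = my_irq_mask,
            .irq_unmask     = my_irq_unmask,
            .flags          = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };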
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 7ead1f51128a..4670ffd7ea7f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -220,11 +220,12 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
return dln2_gpio_pin_get_out_val(dln2, offset);
}
-static void dln2_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int dln2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- dln2_gpio_pin_set_out_val(dln2, offset, value);
+ return dln2_gpio_pin_set_out_val(dln2, offset, value);
}
static int dln2_gpio_set_direction(struct gpio_chip *chip, unsigned offset,
@@ -512,7 +513,7 @@ static void dln2_gpio_remove(struct platform_device *pdev)
static struct platform_driver dln2_gpio_driver = {
.driver.name = "dln2-gpio",
.probe = dln2_gpio_probe,
- .remove_new = dln2_gpio_remove,
+ .remove = dln2_gpio_remove,
};
module_platform_driver(dln2_gpio_driver);
diff --git a/drivers/gpio/gpio-ds4520.c b/drivers/gpio/gpio-ds4520.c
index 1903deaef3e9..f52ecae382a4 100644
--- a/drivers/gpio/gpio-ds4520.c
+++ b/drivers/gpio/gpio-ds4520.c
@@ -25,7 +25,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
struct gpio_regmap_config config = { };
struct device *dev = &client->dev;
struct regmap *regmap;
- u32 ngpio;
u32 base;
int ret;
@@ -33,10 +32,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Missing 'reg' property.\n");
- ret = device_property_read_u32(dev, "ngpios", &ngpio);
- if (ret)
- return dev_err_probe(dev, ret, "Missing 'ngpios' property.\n");
-
regmap = devm_regmap_init_i2c(client, &ds4520_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
@@ -44,7 +39,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
config.regmap = regmap;
config.parent = dev;
- config.ngpio = ngpio;
config.reg_dat_base = base + DS4520_IO_STATUS0;
config.reg_set_base = base + DS4520_PULLUP0;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 798235791f70..b42ff46d292b 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -99,7 +100,7 @@ struct dwapb_gpio_port_irqchip {
};
struct dwapb_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct dwapb_gpio_port_irqchip *pirq;
struct dwapb_gpio *gpio;
#ifdef CONFIG_PM_SLEEP
@@ -107,8 +108,12 @@ struct dwapb_gpio_port {
#endif
unsigned int idx;
};
-#define to_dwapb_gpio(_gc) \
- (container_of(_gc, struct dwapb_gpio_port, gc)->gpio)
+
+static inline struct dwapb_gpio *to_dwapb_gpio(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc),
+ struct dwapb_gpio_port, chip)->gpio;
+}
struct dwapb_gpio {
struct device *dev;
@@ -148,19 +153,19 @@ static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset));
+ return gpio_generic_read_reg(chip, reg_base + gpio_reg_convert(gpio, offset));
}
static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
u32 val)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
+ gpio_generic_write_reg(chip, reg_base + gpio_reg_convert(gpio, offset), val);
}
static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs)
@@ -186,7 +191,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
if (!port)
return;
- gc = &port->gc;
+ gc = &port->chip.gc;
pol = dwapb_read(gpio, GPIO_INT_POLARITY);
/* Just read the current value right out of the data register */
@@ -201,13 +206,13 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
unsigned long irq_status;
irq_hw_number_t hwirq;
irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq);
+ int gpio_irq = irq_find_mapping(gen_gc->gc.irq.domain, hwirq);
u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
@@ -237,27 +242,27 @@ static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
static void dwapb_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
u32 val = BIT(irqd_to_hwirq(d));
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
dwapb_write(gpio, GPIO_PORTA_EOI, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
- dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ }
gpiochip_disable_irq(gc, hwirq);
}
@@ -265,59 +270,61 @@ static void dwapb_irq_mask(struct irq_data *d)
static void dwapb_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, hwirq);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t bit = irqd_to_hwirq(d);
- unsigned long level, polarity, flags;
+ unsigned long level, polarity;
+
+ guard(gpio_generic_lock_irqsave)(gen_gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
polarity = dwapb_read(gpio, GPIO_INT_POLARITY);
@@ -352,7 +359,6 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
if (type != IRQ_TYPE_EDGE_BOTH)
dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -393,11 +399,12 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = port->gpio;
- unsigned long flags, val_deb;
+ unsigned long val_deb;
unsigned long mask = BIT(offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
@@ -406,8 +413,6 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb &= ~mask;
dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
@@ -445,7 +450,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp)
{
struct dwapb_gpio_port_irqchip *pirq;
- struct gpio_chip *gc = &port->gc;
+ struct gpio_chip *gc = &port->chip.gc;
struct gpio_irq_chip *girq;
int err;
@@ -501,6 +506,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp,
unsigned int offs)
{
+ struct gpio_generic_chip_config config;
struct dwapb_gpio_port *port;
void __iomem *dat, *set, *dirout;
int err;
@@ -519,32 +525,39 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
+ config = (struct gpio_generic_chip_config) {
+ .dev = gpio->dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .dirout = dirout,
+ };
+
/* This registers 32 GPIO lines per port */
- err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, 0);
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
return err;
}
- port->gc.fwnode = pp->fwnode;
- port->gc.ngpio = pp->ngpio;
- port->gc.base = pp->gpio_base;
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
+ port->chip.gc.fwnode = pp->fwnode;
+ port->chip.gc.ngpio = pp->ngpio;
+ port->chip.gc.base = pp->gpio_base;
+ port->chip.gc.request = gpiochip_generic_request;
+ port->chip.gc.free = gpiochip_generic_free;
/* Only port A supports debounce */
if (pp->idx == 0)
- port->gc.set_config = dwapb_gpio_set_config;
+ port->chip.gc.set_config = dwapb_gpio_set_config;
else
- port->gc.set_config = gpiochip_generic_config;
+ port->chip.gc.set_config = gpiochip_generic_config;
/* Only port A can provide interrupts in all configurations of the IP */
if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
- err = devm_gpiochip_add_data(gpio->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(gpio->dev, &port->chip.gc, port);
if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
@@ -571,7 +584,6 @@ static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
{
- struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
@@ -592,7 +604,7 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
pdata->nports = nports;
i = 0;
- device_for_each_child_node(dev, fwnode) {
+ device_for_each_child_node_scoped(dev, fwnode) {
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -600,7 +612,6 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
pp->idx >= DWAPB_MAX_PORTS) {
dev_err(dev,
"missing/invalid port index for port%d\n", i);
- fwnode_handle_put(fwnode);
return ERR_PTR(-EINVAL);
}
@@ -694,6 +705,7 @@ static const struct acpi_device_id dwapb_acpi_match[] = {
{"HISI0181", GPIO_REG_OFFSET_V1},
{"APMC0D07", GPIO_REG_OFFSET_V1},
{"APMC0D81", GPIO_REG_OFFSET_V2},
+ {"FUJI200A", GPIO_REG_OFFSET_V1},
{ }
};
MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
@@ -751,38 +763,37 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
static int dwapb_gpio_suspend(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
int i;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- for (i = 0; i < gpio->nr_ports; i++) {
- unsigned int offset;
- unsigned int idx = gpio->ports[i].idx;
- struct dwapb_context *ctx = gpio->ports[i].ctx;
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ for (i = 0; i < gpio->nr_ports; i++) {
+ unsigned int offset;
+ unsigned int idx = gpio->ports[i].idx;
+ struct dwapb_context *ctx = gpio->ports[i].ctx;
- offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
- ctx->dir = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+ ctx->dir = dwapb_read(gpio, offset);
- offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
- ctx->data = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+ ctx->data = dwapb_read(gpio, offset);
- offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
- ctx->ext = dwapb_read(gpio, offset);
+ offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+ ctx->ext = dwapb_read(gpio, offset);
- /* Only port A can provide interrupts */
- if (idx == 0) {
- ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
- ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
- ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
- ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
- ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
-
- /* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ /* Only port A can provide interrupts */
+ if (idx == 0) {
+ ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
+ ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
+ ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
+ ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
+ ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
+
+ /* Mask out interrupts */
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ }
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
@@ -792,8 +803,8 @@ static int dwapb_gpio_suspend(struct device *dev)
static int dwapb_gpio_resume(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_chip *gc = &gpio->ports[0].chip.gc;
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
int i, err;
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
@@ -802,7 +813,8 @@ static int dwapb_gpio_resume(struct device *dev)
return err;
}
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
for (i = 0; i < gpio->nr_ports; i++) {
unsigned int offset;
unsigned int idx = gpio->ports[i].idx;
@@ -829,7 +841,6 @@ static int dwapb_gpio_resume(struct device *dev)
dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff);
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
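The dwapb hunks above all follow one shape: an open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair around register access becomes a scope-based guard from <linux/cleanup.h>, so every return path unlocks automatically. A minimal before/after sketch of the pattern; the driver name foo is hypothetical, and gpio_generic_lock_irqsave is the guard class used throughout these hunks:

#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/spinlock.h>

/* Before: manual pairing; every early return must unlock by hand. */
static void foo_ack_old(struct gpio_chip *gc)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
	/* ... write the ack register ... */
	raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}

/* After: the guard releases the lock when it goes out of scope. */
static void foo_ack_new(struct gpio_generic_chip *gen_gc)
{
	guard(gpio_generic_lock_irqsave)(gen_gc);
	/* ... write the ack register; any return path unlocks ... */
}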
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 2dd0e46c42ad..50fafeda8d7e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -10,8 +10,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
/* EIC registers definition */
@@ -203,9 +203,10 @@ static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* EICs are always input, nothing needs to be done here. */
+ return 0;
}
static int sprd_eic_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -617,7 +618,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
u16 num_banks = 0;
int ret, i;
- pdata = of_device_get_match_data(dev);
+ pdata = device_get_match_data(dev);
if (!pdata) {
dev_err(dev, "No matching driver data found.\n");
return -EINVAL;
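sprd_eic_set() above is one instance of a change that recurs throughout this diff (em_gio_set, f7188x_gpio_set, gnr_gpio_set, gw_pld_set8, egpio_set, ichx_gpio_set): the gpio_chip set() callback now returns int, so drivers can propagate register-write or bus errors instead of silently dropping them. A sketch of the new callback shape for a hypothetical regmap-backed chip; the register offset is made up:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct foo_gpio {
	struct regmap *map;
};

#define FOO_DATA_REG	0x00	/* hypothetical data register */

/* New-style setter: errors reach the GPIO core and the caller. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *priv = gpiochip_get_data(gc);

	return regmap_update_bits(priv->map, FOO_DATA_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}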
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
index 887c0fe99d39..95de52d2cc63 100644
--- a/drivers/gpio/gpio-elkhartlake.c
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -75,4 +75,4 @@ MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
MODULE_DESCRIPTION("Intel Elkhart Lake PSE GPIO driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(GPIO_TANGIER);
+MODULE_IMPORT_NS("GPIO_TANGIER");
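This one-liner reflects the kernel-wide switch of the symbol-namespace macros from bare identifiers to string literals; the same change recurs below for gpio-mm, i8255 and idio-16. A sketch with a hypothetical namespace:

/* Exporting module */
EXPORT_SYMBOL_NS_GPL(foo_helper, "FOO_NS");	/* was: EXPORT_SYMBOL_NS_GPL(foo_helper, FOO_NS) */

/* Consuming module */
MODULE_IMPORT_NS("FOO_NS");			/* was: MODULE_IMPORT_NS(FOO_NS) */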
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 6c862c572322..a214b0672726 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -204,13 +204,15 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg,
(BIT(shift + 16)) | (value << shift));
}
-static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int em_gio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* output is split into two registers */
if (offset < 16)
__em_gio_set(chip, GIO_OL, offset, value);
else
__em_gio_set(chip, GIO_OH, offset - 16, value);
+
+ return 0;
}
static int em_gio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -323,8 +325,8 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
- &em_gio_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(dev_fwnode(dev), ngpios, 0,
+ &em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
return -ENXIO;
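The gpio-em hunk above swaps the of_node-based irq_domain_add_simple() for the fwnode-based irq_domain_create_simple(), which serves DT and ACPI enumeration alike. A minimal sketch of the call; the helper name is hypothetical:

#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *foo_create_domain(struct device *dev,
					    unsigned int ngpios,
					    const struct irq_domain_ops *ops,
					    void *host_data)
{
	/* dev_fwnode() resolves for both OF- and ACPI-enumerated devices. */
	return irq_domain_create_simple(dev_fwnode(dev), ngpios, 0, ops,
					host_data);
}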
diff --git a/drivers/gpio/gpio-en7523.c b/drivers/gpio/gpio-en7523.c
index 69834db2c1cf..cf47afc578a9 100644
--- a/drivers/gpio/gpio-en7523.c
+++ b/drivers/gpio/gpio-en7523.c
@@ -4,6 +4,7 @@
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -13,28 +14,23 @@
/**
* struct airoha_gpio_ctrl - Airoha GPIO driver data
- * @gc: Associated gpio_chip instance.
+ * @gen_gc: Associated gpio_generic_chip instance.
* @data: The data register.
* @dir: [0] The direction register for the lower 16 pins.
* [1] The direction register for the higher 16 pins.
* @output: The output enable register.
*/
struct airoha_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *data;
void __iomem *dir[2];
void __iomem *output;
};
-static struct airoha_gpio_ctrl *gc_to_ctrl(struct gpio_chip *gc)
-{
- return container_of(gc, struct airoha_gpio_ctrl, gc);
-}
-
static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
int val, int out)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 output = ioread32(ctrl->output);
u32 mask = BIT((gpio % 16) * 2);
@@ -50,7 +46,7 @@ static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
iowrite32(dir, ctrl->dir[gpio / 16]);
if (out)
- gc->set(gc, gpio, val);
+ gpio_generic_chip_set(&ctrl->gen_gc, gpio, val);
iowrite32(output, ctrl->output);
@@ -70,7 +66,7 @@ static int airoha_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 mask = BIT((gpio % 16) * 2);
@@ -79,6 +75,7 @@ static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
static int airoha_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device *dev = &pdev->dev;
struct airoha_gpio_ctrl *ctrl;
int err;
@@ -103,18 +100,21 @@ static int airoha_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->output))
return PTR_ERR(ctrl->output);
- err = bgpio_init(&ctrl->gc, dev, 4, ctrl->data, NULL,
- NULL, NULL, NULL, 0);
+ config.dev = dev;
+ config.sz = 4;
+ config.dat = ctrl->data;
+
+ err = gpio_generic_chip_init(&ctrl->gen_gc, &config);
if (err)
return dev_err_probe(dev, err, "unable to init generic GPIO");
- ctrl->gc.ngpio = AIROHA_GPIO_MAX;
- ctrl->gc.owner = THIS_MODULE;
- ctrl->gc.direction_output = airoha_dir_out;
- ctrl->gc.direction_input = airoha_dir_in;
- ctrl->gc.get_direction = airoha_get_dir;
+ ctrl->gen_gc.gc.ngpio = AIROHA_GPIO_MAX;
+ ctrl->gen_gc.gc.owner = THIS_MODULE;
+ ctrl->gen_gc.gc.direction_output = airoha_dir_out;
+ ctrl->gen_gc.gc.direction_input = airoha_dir_in;
+ ctrl->gen_gc.gc.get_direction = airoha_get_dir;
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->gen_gc.gc, ctrl);
}
static const struct of_device_id airoha_gpio_of_match[] = {
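The en7523 conversion above shows the idiom used by most drivers in this series: the register layout that used to travel through bgpio_init()'s long positional argument list now goes into a struct gpio_generic_chip_config, followed by gpio_generic_chip_init(). A sketch assuming the config fields seen in these hunks (dev, sz, dat, set, clr, dirout, dirin, flags); the offsets are hypothetical:

#include <linux/gpio/generic.h>

static int foo_init_generic(struct device *dev, void __iomem *base,
			    struct gpio_generic_chip *chip)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,		/* 32-bit registers */
		.dat = base + 0x00,	/* hypothetical offsets */
		.set = base + 0x04,
		.dirout = base + 0x08,
	};

	/* Replaces: bgpio_init(&chip->gc, dev, 4, dat, set, NULL, dirout, NULL, 0); */
	return gpio_generic_chip_init(chip, &config);
}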
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index ab798c848215..1f56e44ffc9a 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -9,16 +9,17 @@
* linux/arch/arm/mach-ep93xx/core.c
*/
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
struct ep93xx_gpio_irq_chip {
void __iomem *base;
@@ -31,11 +32,14 @@ struct ep93xx_gpio_irq_chip {
struct ep93xx_gpio_chip {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct ep93xx_gpio_irq_chip *eic;
};
-#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+static struct ep93xx_gpio_chip *to_ep93xx_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc), struct ep93xx_gpio_chip, chip);
+}
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
@@ -249,7 +253,7 @@ static void ep93xx_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip gpio_eic_irq_chip = {
@@ -267,7 +271,7 @@ static const struct irq_chip gpio_eic_irq_chip = {
static int ep93xx_setup_irqs(struct platform_device *pdev,
struct ep93xx_gpio_chip *egc)
{
- struct gpio_chip *gc = &egc->gc;
+ struct gpio_chip *gc = &egc->chip.gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq = &gc->irq;
int ret, irq, i;
@@ -327,6 +331,7 @@ static int ep93xx_setup_irqs(struct platform_device *pdev,
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct ep93xx_gpio_chip *egc;
struct gpio_chip *gc;
void __iomem *data;
@@ -345,8 +350,16 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
- gc = &egc->gc;
- ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ gc = &egc->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 1,
+ .dat = data,
+ .dirout = dir,
+ };
+
+ ret = gpio_generic_chip_init(&egc->chip, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 5170fe7599cd..9053662f1817 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -93,17 +93,19 @@ static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
-static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int exar_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ unsigned int bit_value = value ? BIT(bit) : 0;
- if (value)
- regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
- else
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ /*
+ * regmap_write_bits() forces the value to be written even when an external
+ * pull up/down might otherwise indicate the value was already set.
+ */
+ return regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -112,11 +114,13 @@ static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ int ret;
- exar_set_value(chip, offset, value);
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ ret = exar_set_value(chip, offset, value);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
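The exar change above hinges on the difference between regmap_update_bits(), which reads back first and skips the bus write when the value already matches, and regmap_write_bits(), which always writes. With an external pull resistor the read can already report the requested level even though the output latch was never programmed, hence the forced write. A small illustration; register and bit are placeholders:

#include <linux/bits.h>
#include <linux/regmap.h>

static int foo_force_level(struct regmap *map, unsigned int reg,
			   unsigned int bit, int value)
{
	/*
	 * regmap_update_bits() would read 'reg', see the bit already at
	 * the requested level (e.g. due to a pull-up) and elide the
	 * write; regmap_write_bits() writes unconditionally.
	 */
	return regmap_write_bits(map, reg, BIT(bit), value ? BIT(bit) : 0);
}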
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 3875fd940ccb..4d5b927ad70f 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -159,7 +159,8 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
@@ -391,7 +392,8 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
return 0;
}
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int err;
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
@@ -400,7 +402,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
- return;
+ return err;
+
superio_select(sio->addr, sio->device);
data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
@@ -411,6 +414,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
+
+ return 0;
}
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 97d345b59352..11e6907c3b54 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -10,12 +10,14 @@
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*/
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
/* GPIO registers definition */
#define GPIO_DATA_OUT 0x00
@@ -40,13 +42,13 @@
/**
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
+ * @chip: generic GPIO chip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
struct clk *clk;
};
@@ -233,6 +235,7 @@ static const struct irq_chip ftgpio_irq_chip = {
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ftgpio_gpio *g;
struct gpio_irq_chip *girq;
@@ -253,50 +256,46 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- g->clk = devm_clk_get(dev, NULL);
- if (!IS_ERR(g->clk)) {
- ret = clk_prepare_enable(g->clk);
- if (ret)
- return ret;
- } else if (PTR_ERR(g->clk) == -EPROBE_DEFER) {
+ g->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(g->clk) && PTR_ERR(g->clk) == -EPROBE_DEFER)
/*
* Percolate deferrals; for anything else,
* just live without the clocking.
*/
return PTR_ERR(g->clk);
- }
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + GPIO_DATA_IN,
- g->base + GPIO_DATA_SET,
- g->base + GPIO_DATA_CLR,
- g->base + GPIO_DIR,
- NULL,
- 0);
- if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
- goto dis_clk;
- }
- g->gc.label = dev_name(dev);
- g->gc.base = -1;
- g->gc.parent = dev;
- g->gc.owner = THIS_MODULE;
- /* ngpio is set by bgpio_init() */
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + GPIO_DATA_IN,
+ .set = g->base + GPIO_DATA_SET,
+ .clr = g->base + GPIO_DATA_CLR,
+ .dirout = g->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&g->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
+
+ g->chip.gc.label = dev_name(dev);
+ g->chip.gc.base = -1;
+ g->chip.gc.parent = dev;
+ g->chip.gc.owner = THIS_MODULE;
+ /* ngpio is set by gpio_generic_chip_init() */
/* We need a silicon clock to do debounce */
if (!IS_ERR(g->clk))
- g->gc.set_config = ftgpio_gpio_set_config;
+ g->chip.gc.set_config = ftgpio_gpio_set_config;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents) {
- ret = -ENOMEM;
- goto dis_clk;
- }
+ if (!girq->parents)
+ return -ENOMEM;
+
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parents[0] = irq;
@@ -309,26 +308,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
- if (ret)
- goto dis_clk;
-
- platform_set_drvdata(pdev, g);
- dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
-
- return 0;
-
-dis_clk:
- clk_disable_unprepare(g->clk);
-
- return ret;
-}
-
-static void ftgpio_gpio_remove(struct platform_device *pdev)
-{
- struct ftgpio_gpio *g = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(g->clk);
+ return devm_gpiochip_add_data(dev, &g->chip.gc, g);
}
static const struct of_device_id ftgpio_gpio_of_match[] = {
@@ -350,6 +330,5 @@ static struct platform_driver ftgpio_gpio_driver = {
.of_match_table = ftgpio_gpio_of_match,
},
.probe = ftgpio_gpio_probe,
- .remove_new = ftgpio_gpio_remove,
};
builtin_platform_driver(ftgpio_gpio_driver);
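The ftgpio010 rework above is a textbook full-devm conversion: devm_clk_get_enabled() ties clock prepare/enable to device lifetime, devm_gpiochip_add_data() takes over unregistration, and both the remove() callback and the dis_clk error label disappear. A condensed sketch of the resulting probe shape; names are hypothetical:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	/* Optional clock: percolate deferrals, otherwise live without it. */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
		return PTR_ERR(clk);

	/* ... chip setup, then devm_gpiochip_add_data() ... */

	/* No remove() needed: devm unwinds everything in reverse order. */
	return 0;
}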
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 5dc49648d8e3..66bdff36eb61 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -16,6 +16,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -51,24 +52,36 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
struct gpio_chip *gc;
void __iomem *regs;
int ret;
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
- ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
- NULL, NULL, regs + GEF_GPIO_DIRECT,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GEF_GPIO_IN,
+ .set = regs + GEF_GPIO_OUT,
+ .dirin = regs + GEF_GPIO_DIRECT,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ ret = gpio_generic_chip_init(chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "bgpio_init failed\n");
+ return dev_err_probe(dev, ret,
+ "failed to initialize the generic GPIO chip\n");
+
+ gc = &chip->gc;
/* Setup pointers to chip functions */
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 43d823a56e59..fb7c510bf2fa 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -18,7 +18,7 @@
#include "gpio-i8255.h"
-MODULE_IMPORT_NS(I8255);
+MODULE_IMPORT_NS("I8255");
#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f2e911a3d2ca..121bf29a27f5 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -32,12 +32,14 @@
#define GNR_PINS_PER_REG 32
#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)
-#define GNR_CFG_BAR 0x00
+#define GNR_CFG_PADBAR 0x00
#define GNR_CFG_LOCK_OFFSET 0x04
-#define GNR_GPI_STATUS_OFFSET 0x20
+#define GNR_GPI_STATUS_OFFSET 0x14
#define GNR_GPI_ENABLE_OFFSET 0x24
-#define GNR_CFG_DW_RX_MASK GENMASK(25, 22)
+#define GNR_CFG_DW_HOSTSW_MODE BIT(27)
+#define GNR_CFG_DW_RX_MASK GENMASK(23, 22)
+#define GNR_CFG_DW_INTSEL_MASK GENMASK(21, 14)
#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
@@ -50,6 +52,7 @@
* struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
* @gc: GPIO controller interface
* @reg_base: base address of the GPIO registers
+ * @pad_base: base address of the vGPIO pad configuration registers
* @ro_bitmap: bitmap of read-only pins
* @lock: guard the registers
* @pad_backup: backup of the register state for suspend
@@ -57,6 +60,7 @@
struct gnr_gpio {
struct gpio_chip gc;
void __iomem *reg_base;
+ void __iomem *pad_base;
DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
raw_spinlock_t lock;
u32 pad_backup[];
@@ -65,7 +69,7 @@ struct gnr_gpio {
static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
unsigned int gpio)
{
- return priv->reg_base + gpio * sizeof(u32);
+ return priv->pad_base + gpio * sizeof(u32);
}
static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
@@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
return 0;
}
+static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ u32 dw;
+
+ dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
+ if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
+ dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
const struct gnr_gpio *priv = gpiochip_get_data(gc);
@@ -98,7 +116,7 @@ static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(dw & GNR_CFG_DW_RXSTATE);
}
-static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
u32 clear = 0;
u32 set = 0;
@@ -108,7 +126,7 @@ static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
else
clear = GNR_CFG_DW_TXSTATE;
- gnr_gpio_configure_line(gc, gpio, clear, set);
+ return gnr_gpio_configure_line(gc, gpio, clear, set);
}
static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
@@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in
static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
+ .request = gnr_gpio_request,
.get = gnr_gpio_get,
.set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
@@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d)
guard(raw_spinlock_irqsave)(&priv->lock);
reg = readl(addr);
- reg &= ~BIT(bit_idx);
+ reg |= BIT(bit_idx);
writel(reg, addr);
}
@@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d)
static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t pin = irqd_to_hwirq(d);
- u32 mask = GNR_CFG_DW_RX_MASK;
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg;
u32 set;
+ /* Allow interrupts only if the Interrupt Select field is non-zero */
+ reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
+ if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
+ dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
+ return -EPERM;
+ }
+
/* Falling edge and level low triggers not supported by the GPIO controller */
switch (type) {
case IRQ_TYPE_NONE:
@@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- return gnr_gpio_configure_line(gc, pin, mask, set);
+ return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
}
static const struct irq_chip gnr_gpio_irq_chip = {
+ .name = "gpio-graniterapids",
.irq_ack = gnr_gpio_irq_ack,
.irq_mask = gnr_gpio_irq_mask,
.irq_unmask = gnr_gpio_irq_unmask,
@@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev)
struct gnr_gpio *priv;
void __iomem *regs;
int irq, ret;
+ u32 offset;
priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
if (!priv)
@@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ priv->reg_base = regs;
+ offset = readl(priv->reg_base + GNR_CFG_PADBAR);
+ priv->pad_base = priv->reg_base + offset;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to request interrupt\n");
- priv->reg_base = regs + readl(regs + GNR_CFG_BAR);
-
gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
priv->ro_bitmap);
@@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
girq = &priv->gc.irq;
gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
- girq->chip->name = dev_name(dev);
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;
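The graniterapids fix above stops treating the pad-configuration area as starting at a fixed offset: the GNR_CFG_PADBAR register holds the pad area's offset from the register base, and keeping reg_base and pad_base separate lets the lock and GPI status/enable registers still be addressed from the base. A sketch of the discovery step, mirroring the hunk with shortened names:

#include <linux/io.h>

#define FOO_CFG_PADBAR	0x00	/* register holding the pad-area offset */

struct foo_gpio {
	void __iomem *reg_base;
	void __iomem *pad_base;
};

static void foo_map_pad_area(struct foo_gpio *priv, void __iomem *regs)
{
	u32 offset;

	priv->reg_base = regs;
	/* The hardware reports where the per-pad config DWORDs live. */
	offset = readl(priv->reg_base + FOO_CFG_PADBAR);
	priv->pad_base = priv->reg_base + offset;
}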
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 017c7170eb57..0c0f97fa14fc 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -16,20 +16,22 @@
* Contributors: Andreas Larsson <andreas@gaisler.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
+#include <linux/bitops.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#define GRGPIO_MAX_NGPIO 32
@@ -58,7 +60,7 @@ struct grgpio_lirq {
};
struct grgpio_priv {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct device *dev;
@@ -90,13 +92,12 @@ struct grgpio_priv {
static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
- struct gpio_chip *gc = &priv->gc;
-
if (val)
priv->imask |= BIT(offset);
else
priv->imask &= ~BIT(offset);
- gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
+
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IMASK, priv->imask);
}
static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -117,7 +118,6 @@ static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
- unsigned long flags;
u32 mask = BIT(d->hwirq);
u32 ipol;
u32 iedge;
@@ -145,15 +145,13 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
- ipol = priv->gc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
- iedge = priv->gc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- priv->gc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
- priv->gc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
+ ipol = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IPOL) & ~mask;
+ iedge = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IEDGE) & ~mask;
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IPOL, ipol | pol);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IEDGE, iedge | edge);
return 0;
}
@@ -162,44 +160,42 @@ static void grgpio_irq_mask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &priv->chip)
+ grgpio_set_imask(priv, offset, 0);
- grgpio_set_imask(priv, offset, 0);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
- grgpio_set_imask(priv, offset, 1);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ grgpio_set_imask(priv, offset, 1);
}
-static struct irq_chip grgpio_irq_chip = {
+static const struct irq_chip grgpio_irq_chip = {
.name = "grgpio",
.irq_mask = grgpio_irq_mask,
.irq_unmask = grgpio_irq_unmask,
.irq_set_type = grgpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
{
struct grgpio_priv *priv = dev;
- int ngpio = priv->gc.ngpio;
- unsigned long flags;
+ int ngpio = priv->chip.gc.ngpio;
int i;
int match = 0;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
/*
* For each gpio line, call its interrupt handler if its underlying
@@ -215,8 +211,6 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev)
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
if (!match)
dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
@@ -247,13 +241,18 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
irq, offset);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Request underlying irq if not already requested */
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ /*
+ * FIXME: This is not how locking works at all, you can't just
+ * release the lock for a moment to do something that can't
+ * sleep...
+ */
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
@@ -262,11 +261,11 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
uirq->uirq);
return ret;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
}
uirq->refcnt++;
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
/* Setup irq */
irq_set_chip_data(irq, priv);
@@ -284,13 +283,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
struct grgpio_lirq *lirq;
struct grgpio_uirq *uirq;
unsigned long flags;
- int ngpio = priv->gc.ngpio;
+ int ngpio = priv->chip.gc.ngpio;
int i;
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Free underlying irq if last user unmapped */
index = -1;
@@ -309,13 +308,20 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
free_irq(uirq->uirq, priv);
return;
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
+}
+
+static void grgpio_irq_domain_remove(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
}
static const struct irq_domain_ops grgpio_irq_domain_ops = {
@@ -328,6 +334,8 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct gpio_generic_chip_config config;
+ struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
struct grgpio_priv *priv;
@@ -337,7 +345,7 @@ static int grgpio_probe(struct platform_device *ofdev)
int size;
int i;
- priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -345,29 +353,39 @@ static int grgpio_probe(struct platform_device *ofdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- gc = &priv->gc;
- err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
- regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GRGPIO_DATA,
+ .set = regs + GRGPIO_OUTPUT,
+ .dirout = regs + GRGPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ gc = &priv->chip.gc;
+ err = gpio_generic_chip_init(&priv->chip, &config);
if (err) {
- dev_err(&ofdev->dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return err;
}
priv->regs = regs;
- priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
- priv->dev = &ofdev->dev;
+ priv->imask = gpio_generic_read_reg(&priv->chip, regs + GRGPIO_IMASK);
+ priv->dev = dev;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
- gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
gc->base = -1;
err = of_property_read_u32(np, "nbits", &prop);
if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
gc->ngpio = GRGPIO_MAX_NGPIO;
- dev_dbg(&ofdev->dev,
- "No or invalid nbits property: assume %d\n", gc->ngpio);
+ dev_dbg(dev, "No or invalid nbits property: assume %d\n",
+ gc->ngpio);
} else {
gc->ngpio = prop;
}
@@ -379,20 +397,24 @@ static int grgpio_probe(struct platform_device *ofdev)
irqmap = (s32 *)of_get_property(np, "irqmap", &size);
if (irqmap) {
if (size < gc->ngpio) {
- dev_err(&ofdev->dev,
+ dev_err(dev,
"irqmap shorter than ngpio (%d < %d)\n",
size, gc->ngpio);
return -EINVAL;
}
- priv->domain = irq_domain_add_linear(np, gc->ngpio,
- &grgpio_irq_domain_ops,
- priv);
+ priv->domain = irq_domain_create_linear(dev_fwnode(&ofdev->dev), gc->ngpio,
+ &grgpio_irq_domain_ops, priv);
if (!priv->domain) {
- dev_err(&ofdev->dev, "Could not add irq domain\n");
+ dev_err(dev, "Could not add irq domain\n");
return -EINVAL;
}
+ err = devm_add_action_or_reset(dev, grgpio_irq_domain_remove,
+ priv->domain);
+ if (err)
+ return err;
+
for (i = 0; i < gc->ngpio; i++) {
struct grgpio_lirq *lirq;
int ret;
@@ -415,32 +437,18 @@ static int grgpio_probe(struct platform_device *ofdev)
}
}
- platform_set_drvdata(ofdev, priv);
-
- err = gpiochip_add_data(gc, priv);
+ err = devm_gpiochip_add_data(dev, gc, priv);
if (err) {
- dev_err(&ofdev->dev, "Could not add gpiochip\n");
- if (priv->domain)
- irq_domain_remove(priv->domain);
+ dev_err(dev, "Could not add gpiochip\n");
return err;
}
- dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
- priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
+ dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+ priv->regs, gc->base, gc->ngpio, str_on_off(priv->domain));
return 0;
}
-static void grgpio_remove(struct platform_device *ofdev)
-{
- struct grgpio_priv *priv = platform_get_drvdata(ofdev);
-
- gpiochip_remove(&priv->gc);
-
- if (priv->domain)
- irq_domain_remove(priv->domain);
-}
-
static const struct of_device_id grgpio_match[] = {
{.name = "GAISLER_GPIO"},
{.name = "01_01a"},
@@ -455,7 +463,6 @@ static struct platform_driver grgpio_driver = {
.of_match_table = grgpio_match,
},
.probe = grgpio_probe,
- .remove_new = grgpio_remove,
};
module_platform_driver(grgpio_driver);
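grgpio above couples its manually created IRQ domain to device lifetime via devm_add_action_or_reset(), which registers the teardown action on success and runs it immediately on allocation failure; together with devm_gpiochip_add_data() this makes the remove() callback redundant. A sketch of the idiom with hypothetical names:

#include <linux/device.h>
#include <linux/irqdomain.h>

static void foo_irq_domain_remove(void *data)
{
	irq_domain_remove(data);
}

static int foo_register_domain_cleanup(struct device *dev,
				       struct irq_domain *domain)
{
	/*
	 * On success the action runs at driver detach; on failure it
	 * runs right here, so callers see a single clean error path.
	 */
	return devm_add_action_or_reset(dev, foo_irq_domain_remove, domain);
}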
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 7e29a2d8de1a..2e5d97b7363f 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -62,9 +62,9 @@ static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value)
return i2c_smbus_write_byte(gw->client, gw->out);
}
-static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value)
+static int gw_pld_set8(struct gpio_chip *gc, unsigned int offset, int value)
{
- gw_pld_output8(gc, offset, value);
+ return gw_pld_output8(gc, offset, value);
}
static int gw_pld_probe(struct i2c_client *client)
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
index ef5cc654a24e..d26298c8351b 100644
--- a/drivers/gpio/gpio-hisi.c
+++ b/drivers/gpio/gpio-hisi.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 HiSilicon Limited. */
+
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -33,7 +35,7 @@
#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
struct hisi_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *reg_base;
unsigned int line_num;
@@ -43,8 +45,8 @@ struct hisi_gpio {
static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
unsigned int off)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
return readl(reg);
@@ -53,8 +55,8 @@ static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
unsigned int off, u32 val)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
writel(val, reg);
@@ -180,14 +182,14 @@ static void hisi_gpio_irq_disable(struct irq_data *d)
static void hisi_gpio_irq_handler(struct irq_desc *desc)
{
struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
- unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip.gc,
HISI_GPIO_INTSTATUS_WX);
struct irq_chip *irq_c = irq_desc_get_chip(desc);
int hwirq;
chained_irq_enter(irq_c, desc);
for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
- generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+ generic_handle_domain_irq(hisi_gpio->chip.gc.irq.domain,
hwirq);
chained_irq_exit(irq_c, desc);
}
@@ -206,7 +208,7 @@ static const struct irq_chip hisi_gpio_irq_chip = {
static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
{
- struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_chip *chip = &hisi_gpio->chip.gc;
struct gpio_irq_chip *girq_chip = &chip->irq;
gpio_irq_chip_set_chip(girq_chip, &hisi_gpio_irq_chip);
@@ -264,6 +266,7 @@ static void hisi_gpio_get_pdata(struct device *dev,
static int hisi_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct hisi_gpio *hisi_gpio;
int port_num;
@@ -289,27 +292,32 @@ static int hisi_gpio_probe(struct platform_device *pdev)
hisi_gpio->dev = dev;
- ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
- hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = hisi_gpio->dev,
+ .sz = 4,
+ .dat = hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ .set = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ .clr = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ .dirout = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ .dirin = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&hisi_gpio->chip, &config);
if (ret) {
dev_err(dev, "failed to init, ret = %d\n", ret);
return ret;
}
- hisi_gpio->chip.set_config = hisi_gpio_set_config;
- hisi_gpio->chip.ngpio = hisi_gpio->line_num;
- hisi_gpio->chip.bgpio_dir_unreadable = 1;
- hisi_gpio->chip.base = -1;
+ hisi_gpio->chip.gc.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.gc.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.gc.base = -1;
if (hisi_gpio->irq > 0)
hisi_gpio_init_irq(hisi_gpio);
- ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip.gc, hisi_gpio);
if (ret) {
dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
return ret;
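With struct gpio_chip now embedded inside struct gpio_generic_chip, the hisi accessors above recover their private data in two hops: to_gpio_generic_chip() first, then container_of() to the driver structure. A sketch under the same layout assumptions, with a hypothetical driver struct:

#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

struct foo_gpio {
	struct gpio_generic_chip chip;	/* embeds struct gpio_chip gc */
	void __iomem *reg_base;
};

static struct foo_gpio *to_foo_gpio(struct gpio_chip *gc)
{
	/* gc -> enclosing gpio_generic_chip -> enclosing foo_gpio */
	return container_of(to_gpio_generic_chip(gc), struct foo_gpio, chip);
}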
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 1bcfc1835dae..043ce5ef3b07 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -6,6 +6,7 @@
// Nintendo Wii (Hollywood) GPIO driver
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -48,7 +49,7 @@
#define HW_GPIO_OWNER 0x3c
struct hlwd_gpio {
- struct gpio_chip gpioc;
+ struct gpio_generic_chip gpioc;
struct device *dev;
void __iomem *regs;
int irq;
@@ -61,45 +62,44 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long flags;
unsigned long pending;
int hwirq;
u32 emulated_pending;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
- pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
+ pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- /* Treat interrupts due to edge trigger emulation separately */
- emulated_pending = hlwd->edge_emulation & pending;
- pending &= ~emulated_pending;
- if (emulated_pending) {
- u32 level, rising, falling;
+ /* Treat interrupts due to edge trigger emulation separately */
+ emulated_pending = hlwd->edge_emulation & pending;
+ pending &= ~emulated_pending;
+ if (emulated_pending) {
+ u32 level, rising, falling;
- level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
- rising = level & emulated_pending;
- falling = ~level & emulated_pending;
+ level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
+ rising = level & emulated_pending;
+ falling = ~level & emulated_pending;
- /* Invert the levels */
- iowrite32be(level ^ emulated_pending,
- hlwd->regs + HW_GPIOB_INTLVL);
+ /* Invert the levels */
+ iowrite32be(level ^ emulated_pending,
+ hlwd->regs + HW_GPIOB_INTLVL);
- /* Ack all emulated-edge interrupts */
- iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
+ /* Ack all emulated-edge interrupts */
+ iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
- /* Signal interrupts only on the correct edge */
- rising &= hlwd->rising_edge;
- falling &= hlwd->falling_edge;
+ /* Signal interrupts only on the correct edge */
+ rising &= hlwd->rising_edge;
+ falling &= hlwd->falling_edge;
- /* Mark emulated interrupts as pending */
- pending |= rising | falling;
+ /* Mark emulated interrupts as pending */
+ pending |= rising | falling;
+ }
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
chained_irq_enter(chip, desc);
for_each_set_bit(hwirq, &pending, 32)
- generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
+ generic_handle_domain_irq(hlwd->gpioc.gc.irq.domain, hwirq);
chained_irq_exit(chip, desc);
}
@@ -116,30 +116,29 @@ static void hlwd_gpio_irq_mask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- mask &= ~BIT(data->hwirq);
- iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
- gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ mask &= ~BIT(data->hwirq);
+ iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
+ }
+ gpiochip_disable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ gpiochip_enable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
+
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
}
static void hlwd_gpio_irq_enable(struct irq_data *data)
@@ -173,10 +172,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 level;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
hlwd->edge_emulation &= ~BIT(data->hwirq);
@@ -197,11 +195,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type);
break;
default:
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return -EINVAL;
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return 0;
}
@@ -210,7 +206,7 @@ static void hlwd_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- seq_printf(p, dev_name(hlwd->dev));
+ seq_puts(p, dev_name(hlwd->dev));
}
static const struct irq_chip hlwd_gpio_irq_chip = {
@@ -225,6 +221,7 @@ static const struct irq_chip hlwd_gpio_irq_chip = {
static int hlwd_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct hlwd_gpio *hlwd;
u32 ngpios;
int res;
@@ -244,25 +241,31 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
* systems where the AHBPROT memory firewall hasn't been configured to
* permit PPC access to HW_GPIO_*.
*
- * Note that this has to happen before bgpio_init reads the
- * HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the wrong
- * values.
+ * Note that this has to happen before gpio_generic_chip_init() reads
+ * the HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the
+ * wrong values.
*/
iowrite32be(0xffffffff, hlwd->regs + HW_GPIO_OWNER);
- res = bgpio_init(&hlwd->gpioc, &pdev->dev, 4,
- hlwd->regs + HW_GPIOB_IN, hlwd->regs + HW_GPIOB_OUT,
- NULL, hlwd->regs + HW_GPIOB_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = hlwd->regs + HW_GPIOB_IN,
+ .set = hlwd->regs + HW_GPIOB_OUT,
+ .dirout = hlwd->regs + HW_GPIOB_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ res = gpio_generic_chip_init(&hlwd->gpioc, &config);
if (res < 0) {
- dev_warn(&pdev->dev, "bgpio_init failed: %d\n", res);
+ dev_warn(&pdev->dev, "failed to initialize generic GPIO chip: %d\n", res);
return res;
}
res = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
if (res)
ngpios = 32;
- hlwd->gpioc.ngpio = ngpios;
+ hlwd->gpioc.gc.ngpio = ngpios;
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
@@ -282,7 +285,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
return hlwd->irq;
}
- girq = &hlwd->gpioc.irq;
+ girq = &hlwd->gpioc.gc.irq;
gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
@@ -296,7 +299,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
+ return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc.gc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
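The hlwd handler above is a good example of scoped_guard(): only the register reads that compute 'pending' need the lock, while the chained-IRQ dispatch must run unlocked, so the guard covers a block rather than the whole function. A trimmed sketch of that structure; names are hypothetical:

#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static unsigned long foo_collect_pending(struct gpio_generic_chip *chip,
					 void __iomem *flag,
					 void __iomem *mask)
{
	unsigned long pending;

	/* Lock held only for this block; dispatch happens unlocked. */
	scoped_guard(gpio_generic_lock_irqsave, chip) {
		pending = ioread32be(flag);
		pending &= ioread32be(mask);
	}

	return pending;
}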
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index a40bd56673fe..2eaed83214d8 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -170,7 +170,7 @@ static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
* Output pins
*/
-static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int egpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned long flag;
struct egpio_chip *egpio;
@@ -198,6 +198,8 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
egpio->cached_values &= ~(1 << offset);
egpio_writew((egpio->cached_values >> shift) & ei->reg_mask, ei, reg);
spin_unlock_irqrestore(&ei->lock, flag);
+
+ return 0;
}
static int egpio_direction_output(struct gpio_chip *chip,
@@ -206,12 +208,10 @@ static int egpio_direction_output(struct gpio_chip *chip,
struct egpio_chip *egpio;
egpio = gpiochip_get_data(chip);
- if (test_bit(offset, &egpio->is_out)) {
- egpio_set(chip, offset, value);
- return 0;
- } else {
- return -EINVAL;
- }
+ if (test_bit(offset, &egpio->is_out))
+ return egpio_set(chip, offset, value);
+
+ return -EINVAL;
}
static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
index 64ab80fc4a1e..953018bfa2b1 100644
--- a/drivers/gpio/gpio-i8255.c
+++ b/drivers/gpio/gpio-i8255.c
@@ -134,7 +134,7 @@ int devm_i8255_regmap_register(struct device *const dev,
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
-EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, I8255);
+EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, "I8255");
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 0be9285efebc..1802c9116ffe 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -175,12 +175,16 @@ static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned int nr,
int val)
{
+ int ret;
+
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ ret = ichx_write_bit(GPIO_LVL, nr, val, 0);
+ if (ret)
+ return ret;
/*
* Try setting pin as an output and verify it worked since many pins
@@ -252,9 +256,9 @@ static int ich6_gpio_request(struct gpio_chip *chip, unsigned int nr)
return ichx_gpio_request(chip, nr);
}
-static void ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
+static int ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ return ichx_write_bit(GPIO_LVL, nr, val, 0);
}
static void ichx_gpiolib_setup(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 53b1eb876a12..0103be977c66 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -3,6 +3,9 @@
* GPIO library for the ACCES IDIO-16 family
* Copyright (C) 2022 William Breathitt Gray
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -14,8 +17,6 @@
#include "gpio-idio-16.h"
-#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16
-
#define IDIO_16_DAT_BASE 0x0
#define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
#define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1)
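
gpio-idio-16.c applies the same rework to DEFAULT_SYMBOL_NAMESPACE: the value becomes a string and, per the convention used by this series, moves to the very top of the file so the definition is in place before any EXPORT_SYMBOL*() use in the translation unit:

	/* Defined first so all exports below land in this namespace. */
	#define DEFAULT_SYMBOL_NAMESPACE	"GPIO_IDIO_16"

	#include <linux/export.h>
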
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 00f547d26254..56f1f1e57b69 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -18,7 +19,7 @@
#define IDT_GPIO_ISTAT 0x0C
struct idt_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *pic;
void __iomem *gpio;
u32 mask_cache;
@@ -37,7 +38,7 @@ static void idt_gpio_dispatch(struct irq_desc *desc)
pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND);
pending &= ~ctrl->mask_cache;
for_each_set_bit(bit, &pending, gc->ngpio) {
- virq = irq_linear_revmap(gc->irq.domain, bit);
+ virq = irq_find_mapping(gc->irq.domain, bit);
if (virq)
generic_handle_irq(virq);
}
@@ -50,14 +51,13 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned int sense = flow_type & IRQ_TYPE_SENSE_MASK;
- unsigned long flags;
u32 ilevel;
/* hardware only supports level triggered */
if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH))
return -EINVAL;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL);
if (sense & IRQ_TYPE_LEVEL_HIGH)
@@ -68,7 +68,6 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL);
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -84,14 +83,11 @@ static void idt_gpio_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- ctrl->mask_cache |= BIT(d->hwirq);
- writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ ctrl->mask_cache |= BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+ }
gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
@@ -100,15 +96,13 @@ static void idt_gpio_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ctrl->mask_cache &= ~BIT(d->hwirq);
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
@@ -134,6 +128,7 @@ static const struct irq_chip idt_gpio_irqchip = {
static int idt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
@@ -150,18 +145,24 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->gpio))
return PTR_ERR(ctrl->gpio);
- ctrl->gc.parent = dev;
+ ctrl->chip.gc.parent = dev;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = ctrl->gpio + IDT_GPIO_DATA,
+ .dirout = ctrl->gpio + IDT_GPIO_DIR,
+ };
- ret = bgpio_init(&ctrl->gc, &pdev->dev, 4, ctrl->gpio + IDT_GPIO_DATA,
- NULL, NULL, ctrl->gpio + IDT_GPIO_DIR, NULL, 0);
+ ret = gpio_generic_chip_init(&ctrl->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (!ret)
- ctrl->gc.ngpio = ngpios;
+ ctrl->chip.gc.ngpio = ngpios;
if (device_property_read_bool(dev, "interrupt-controller")) {
ctrl->pic = devm_platform_ioremap_resource_byname(pdev, "pic");
@@ -172,7 +173,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (parent_irq < 0)
return parent_irq;
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &idt_gpio_irqchip);
girq->init_hw = idt_gpio_irq_init_hw;
girq->parent_handler = idt_gpio_dispatch;
@@ -188,7 +189,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(&pdev->dev, &ctrl->chip.gc, ctrl);
}
static const struct of_device_id idt_gpio_of_match[] = {
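
The idt3243x conversion illustrates the bgpio_init() to gpio_generic_chip_init() migration used throughout this series: the driver embeds a struct gpio_generic_chip, fills a gpio_generic_chip_config instead of passing a long positional argument list, and takes the chip lock through the guard()/scoped_guard() helpers. A condensed sketch of the pattern, assuming the <linux/gpio/generic.h> API as used above, with foo_* names hypothetical:

	#include <linux/err.h>
	#include <linux/gpio/generic.h>
	#include <linux/platform_device.h>

	struct foo_gpio {
		struct gpio_generic_chip chip;	/* embeds struct gpio_chip gc */
		void __iomem *base;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct gpio_generic_chip_config config;
		struct foo_gpio *g;
		int ret;

		g = devm_kzalloc(&pdev->dev, sizeof(*g), GFP_KERNEL);
		if (!g)
			return -ENOMEM;

		g->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(g->base))
			return PTR_ERR(g->base);

		config = (struct gpio_generic_chip_config) {
			.dev = &pdev->dev,
			.sz = 4,	/* register width in bytes */
			.dat = g->base + 0x0,	/* data register, hypothetical offsets */
			.dirout = g->base + 0x4,	/* direction register */
		};

		ret = gpio_generic_chip_init(&g->chip, &config);
		if (ret)
			return ret;

		return devm_gpiochip_add_data(&pdev->dev, &g->chip.gc, g);
	}
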
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 13baf465aedf..0a75afecf9f8 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -6,8 +6,10 @@
* to control the PIN resources on SCU domain.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/firmware/imx/svc/rm.h>
@@ -37,16 +39,11 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
int level;
int err;
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- /* to read PIN state via scu api */
- err = imx_sc_misc_get_control(priv->handle,
- scu_rsrc_arr[offset], 0, &level);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+ /* to read PIN state via scu api */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ }
if (err) {
dev_err(priv->dev, "SCU get failed: %d\n", err);
return err;
@@ -55,31 +52,26 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
return level;
}
-static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct scu_gpio_priv *priv = gpiochip_get_data(chip);
int err;
- if (offset >= chip->ngpio)
- return;
-
- mutex_lock(&priv->lock);
-
- /* to set PIN output level via scu api */
- err = imx_sc_misc_set_control(priv->handle,
- scu_rsrc_arr[offset], 0, value);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+ /* to set PIN output level via scu api */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ }
if (err)
dev_err(priv->dev, "SCU set (%d) failed: %d\n",
scu_rsrc_arr[offset], err);
+
+ return err;
}
static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
return GPIO_LINE_DIRECTION_OUT;
}
@@ -99,7 +91,10 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
return ret;
priv->dev = dev;
- mutex_init(&priv->lock);
+
+ ret = devm_mutex_init(&pdev->dev, &priv->lock);
+ if (ret)
+ return ret;
gc = &priv->chip;
gc->base = -1;
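
Two idioms from the imx-scu hunks recur throughout the series: scoped_guard() from <linux/cleanup.h> replaces mutex_lock()/mutex_unlock() pairs so the lock cannot leak on an early return, and devm_mutex_init() ties the mutex to the device lifetime. The dropped offset >= chip->ngpio checks were redundant, as gpiolib validates line offsets before invoking driver callbacks. A minimal sketch of the locking shape (foo_* names hypothetical):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct foo_priv {
		struct mutex lock;
	};

	/* Hypothetical firmware accessor that can fail. */
	int foo_hw_read(struct foo_priv *priv, unsigned int offset, int *level);

	static int foo_read_level(struct foo_priv *priv, unsigned int offset)
	{
		int level, err;

		scoped_guard(mutex, &priv->lock) {
			/* The mutex is released when the block is left. */
			err = foo_hw_read(priv, offset, &level);
		}
		if (err)
			return err;

		return level;
	}
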
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index f332341fd4c8..5d677bcfccf2 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -213,8 +213,7 @@ exit:
return rc;
}
-static void it87_gpio_set(struct gpio_chip *chip,
- unsigned gpio_num, int val)
+static int it87_gpio_set(struct gpio_chip *chip, unsigned int gpio_num, int val)
{
u8 mask, curr_vals;
u16 reg;
@@ -228,6 +227,8 @@ static void it87_gpio_set(struct gpio_chip *chip,
outb(curr_vals | mask, reg);
else
outb(curr_vals & ~mask, reg);
+
+ return 0;
}
static int it87_gpio_direction_out(struct gpio_chip *chip,
@@ -249,7 +250,9 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
- it87_gpio_set(chip, gpio_num, val);
+ rc = it87_gpio_set(chip, gpio_num, val);
+ if (rc)
+ goto exit;
superio_exit();
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 28a8a6a8f05f..f34d87869c8b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -53,14 +54,14 @@
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
+ * @chip: generic GPIO chip for this instance
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level triggered
*/
struct ixp4xx_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *base;
unsigned long long irq_edge;
@@ -100,7 +101,6 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ixp4xx_gpio *g = gpiochip_get_data(gc);
int line = d->hwirq;
- unsigned long flags;
u32 int_style;
u32 int_reg;
u32 val;
@@ -144,26 +144,24 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int_reg = IXP4XX_REG_GPIT1;
}
- raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags);
-
- /* Clear the style for the appropriate pin */
- val = __raw_readl(g->base + int_reg);
- val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
-
- __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+ scoped_guard(gpio_generic_lock_irqsave, &g->chip) {
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- /* Set the new style */
- val = __raw_readl(g->base + int_reg);
- val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
- /* Force-configure this line as an input */
- val = __raw_readl(g->base + IXP4XX_REG_GPOE);
- val |= BIT(d->hwirq);
- __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ }
/* This parent only accept level high (asserted) */
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
@@ -206,6 +204,7 @@ static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
static int ixp4xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
unsigned long flags;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -290,35 +289,38 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
* for big endian.
*/
#if defined(CONFIG_CPU_BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#else
flags = 0;
#endif
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + IXP4XX_REG_GPIN,
+ .set = g->base + IXP4XX_REG_GPOUT,
+ .dirin = g->base + IXP4XX_REG_GPOE,
+ .flags = flags,
+ };
+
/* Populate and register gpio chip */
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + IXP4XX_REG_GPIN,
- g->base + IXP4XX_REG_GPOUT,
- NULL,
- NULL,
- g->base + IXP4XX_REG_GPOE,
- flags);
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- g->gc.ngpio = 16;
- g->gc.label = "IXP4XX_GPIO_CHIP";
+ g->chip.gc.ngpio = 16;
+ g->chip.gc.label = "IXP4XX_GPIO_CHIP";
/*
* TODO: when we have migrated to device tree and all GPIOs
* are fetched using phandles, set this to -1 to get rid of
* the fixed gpiochip base.
*/
- g->gc.base = 0;
- g->gc.parent = &pdev->dev;
- g->gc.owner = THIS_MODULE;
+ g->chip.gc.base = 0;
+ g->chip.gc.parent = &pdev->dev;
+ g->chip.gc.owner = THIS_MODULE;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -326,7 +328,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ ret = devm_gpiochip_add_data(dev, &g->chip.gc, g);
if (ret) {
dev_err(dev, "failed to add SoC gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index cdf50e4ea165..b0c4a3346e7d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -76,7 +76,7 @@ static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
return !!ret;
}
-static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+static int ttl_set_value(struct gpio_chip *gpio, unsigned int offset, int value)
{
struct ttl_module *mod = dev_get_drvdata(gpio->parent);
void __iomem *port;
@@ -103,6 +103,8 @@ static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
iowrite16be(*shadow, port);
spin_unlock(&mod->lock);
+
+ return 0;
}
static void ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 4ea15f08e0f4..923aad3ab4d4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -63,7 +63,8 @@ static int kempld_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!kempld_gpio_get_bit(pld, KEMPLD_GPIO_LVL_NUM(offset), offset);
}
-static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int kempld_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
@@ -71,6 +72,8 @@ static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
kempld_get_mutex(pld);
kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value);
kempld_release_mutex(pld);
+
+ return 0;
}
static int kempld_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index d7c3b20c8482..c64aaa896766 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -38,12 +38,14 @@
* in the corresponding device tree properties.
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/delay.h>
#include "gpiolib.h"
@@ -71,46 +73,46 @@ static int gpio_latch_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static void gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
- void (*set)(struct gpio_desc *desc, int value),
- unsigned int offset, bool val)
+static int gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
+ int (*set)(struct gpio_desc *desc, int value),
+ unsigned int offset, bool val)
{
- int latch = offset / priv->n_latched_gpios;
- int i;
+ int latch = offset / priv->n_latched_gpios, i, ret;
assign_bit(offset, priv->shadow, val);
- for (i = 0; i < priv->n_latched_gpios; i++)
- set(priv->latched_gpios->desc[i],
- test_bit(latch * priv->n_latched_gpios + i, priv->shadow));
+ for (i = 0; i < priv->n_latched_gpios; i++) {
+ ret = set(priv->latched_gpios->desc[i],
+ test_bit(latch * priv->n_latched_gpios + i,
+ priv->shadow));
+ if (ret)
+ return ret;
+ }
ndelay(priv->setup_duration_ns);
set(priv->clk_gpios->desc[latch], 1);
ndelay(priv->clock_duration_ns);
set(priv->clk_gpios->desc[latch], 0);
+
+ return 0;
}
-static void gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->spinlock, flags);
- gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
+ guard(spinlock_irqsave)(&priv->spinlock);
- spin_unlock_irqrestore(&priv->spinlock, flags);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
}
-static void gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- mutex_lock(&priv->mutex);
-
- gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
+ guard(mutex)(&priv->mutex);
- mutex_unlock(&priv->mutex);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
}
static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_latches)
@@ -138,26 +140,26 @@ static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_la
static int gpio_latch_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gpio_latch_priv *priv;
unsigned int n_latches;
- struct device_node *np = pdev->dev.of_node;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->clk_gpios = devm_gpiod_get_array(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ priv->clk_gpios = devm_gpiod_get_array(dev, "clk", GPIOD_OUT_LOW);
if (IS_ERR(priv->clk_gpios))
return PTR_ERR(priv->clk_gpios);
- priv->latched_gpios = devm_gpiod_get_array(&pdev->dev, "latched", GPIOD_OUT_LOW);
+ priv->latched_gpios = devm_gpiod_get_array(dev, "latched", GPIOD_OUT_LOW);
if (IS_ERR(priv->latched_gpios))
return PTR_ERR(priv->latched_gpios);
n_latches = priv->clk_gpios->ndescs;
priv->n_latched_gpios = priv->latched_gpios->ndescs;
- priv->shadow = devm_bitmap_zalloc(&pdev->dev, n_latches * priv->n_latched_gpios,
+ priv->shadow = devm_bitmap_zalloc(dev, n_latches * priv->n_latched_gpios,
GFP_KERNEL);
if (!priv->shadow)
return -ENOMEM;
@@ -172,16 +174,18 @@ static int gpio_latch_probe(struct platform_device *pdev)
spin_lock_init(&priv->spinlock);
}
- of_property_read_u32(np, "setup-duration-ns", &priv->setup_duration_ns);
+ device_property_read_u32(dev, "setup-duration-ns",
+ &priv->setup_duration_ns);
if (priv->setup_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "setup-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "setup-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->setup_duration_ns = DURATION_NS_MAX;
}
- of_property_read_u32(np, "clock-duration-ns", &priv->clock_duration_ns);
+ device_property_read_u32(dev, "clock-duration-ns",
+ &priv->clock_duration_ns);
if (priv->clock_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "clock-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "clock-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->clock_duration_ns = DURATION_NS_MAX;
}
@@ -190,11 +194,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
priv->gc.ngpio = n_latches * priv->n_latched_gpios;
priv->gc.owner = THIS_MODULE;
priv->gc.base = -1;
- priv->gc.parent = &pdev->dev;
+ priv->gc.parent = dev;
platform_set_drvdata(pdev, priv);
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(dev, &priv->gc, priv);
}
static const struct of_device_id gpio_latch_ids[] = {
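
The gpio-latch conversion shows why guard() composes well with the int-returning setters: the unlock runs in the guard's destructor on every return path, so the locked helper's error code can be returned directly from under the lock. Sketch (foo_* names hypothetical):

	static int foo_set(struct gpio_chip *gc, unsigned int offset, int val)
	{
		struct foo_priv *priv = gpiochip_get_data(gc);

		guard(spinlock_irqsave)(&priv->spinlock);

		/*
		 * The spinlock is released after the return value has been
		 * computed, so no unlock-before-return dance is needed.
		 */
		return foo_set_unlocked(priv, offset, val);
	}
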
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index dfec9fbfc7a9..3b4f8830c741 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
int ret;
mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
packet->item[0].index = gpio_id;
packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
- packet->num = 1;
ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
struct_size(packet, item, packet->num), NULL, 0);
@@ -144,8 +144,8 @@ static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
return ljca_gpio_read(ljca_gpio, offset);
}
-static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
int ret;
@@ -155,6 +155,8 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
dev_err(chip->parent,
"set value failed offset: %u val: %d ret: %d\n",
offset, val, ret);
+
+ return ret;
}
static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -183,7 +185,10 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip,
if (ret)
return ret;
- ljca_gpio_set_value(chip, offset, val);
+ ret = ljca_gpio_set_value(chip, offset, val);
+ if (ret)
+ return ret;
+
set_bit(offset, ljca_gpio->output_enabled);
return 0;
@@ -420,8 +425,14 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
if (!ljca_gpio->connect_mode)
return -ENOMEM;
- mutex_init(&ljca_gpio->irq_lock);
- mutex_init(&ljca_gpio->trans_lock);
+ ret = devm_mutex_init(&auxdev->dev, &ljca_gpio->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(&auxdev->dev, &ljca_gpio->trans_lock);
+ if (ret)
+ return ret;
+
ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
@@ -453,11 +464,8 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio);
- if (ret) {
+ if (ret)
ljca_unregister_event_cb(ljca);
- mutex_destroy(&ljca_gpio->irq_lock);
- mutex_destroy(&ljca_gpio->trans_lock);
- }
return ret;
}
@@ -469,8 +477,6 @@ static void ljca_gpio_remove(struct auxiliary_device *auxdev)
gpiochip_remove(&ljca_gpio->gc);
ljca_unregister_event_cb(ljca_gpio->ljca);
cancel_work_sync(&ljca_gpio->work);
- mutex_destroy(&ljca_gpio->irq_lock);
- mutex_destroy(&ljca_gpio->trans_lock);
}
static const struct auxiliary_device_id ljca_gpio_id_table[] = {
@@ -491,4 +497,4 @@ MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LJCA);
+MODULE_IMPORT_NS("LJCA");
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 05d62011f335..cb9dbcc290ad 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -61,23 +61,22 @@ static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(value & bit);
}
-static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int logicvc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
unsigned int reg, bit;
logicvc_gpio_offset(logicvc, offset, &reg, &bit);
- regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+ return regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
}
static int logicvc_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* Pins are always configured as output, so just set the value. */
- logicvc_gpio_set(chip, offset, value);
-
- return 0;
+ return logicvc_gpio_set(chip, offset, value);
}
static struct regmap_config logicvc_gpio_regmap_config = {
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 6749d4dd6d64..02f181cb219e 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -7,12 +7,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
+#include <linux/reset.h>
#include <asm/types.h>
enum loongson_gpio_mode {
@@ -27,11 +31,18 @@ struct loongson_gpio_chip_data {
unsigned int out_offset;
unsigned int in_offset;
unsigned int inten_offset;
+ unsigned int intpol_offset;
+ unsigned int intedge_offset;
+ unsigned int intclr_offset;
+ unsigned int intsts_offset;
+ unsigned int intdual_offset;
+ unsigned int intr_num;
+ irq_flow_handler_t irq_handler;
+ const struct irq_chip *girqchip;
};
struct loongson_gpio_chip {
- struct gpio_chip chip;
- struct fwnode_handle *fwnode;
+ struct gpio_generic_chip chip;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -39,7 +50,8 @@ struct loongson_gpio_chip {
static inline struct loongson_gpio_chip *to_loongson_gpio_chip(struct gpio_chip *chip)
{
- return container_of(chip, struct loongson_gpio_chip, chip);
+ return container_of(to_gpio_generic_chip(chip),
+ struct loongson_gpio_chip, chip);
}
static inline void loongson_commit_direction(struct loongson_gpio_chip *lgpio, unsigned int pin,
@@ -106,7 +118,7 @@ static int loongson_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
return GPIO_LINE_DIRECTION_OUT;
}
-static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
unsigned long flags;
struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
@@ -114,6 +126,8 @@ static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int valu
spin_lock_irqsave(&lgpio->lock, flags);
loongson_commit_level(lgpio, pin, value);
spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
}
static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -134,42 +148,184 @@ static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
return platform_get_irq(pdev, offset);
}
-static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgpio,
+static void loongson_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->intclr_offset + hwirq);
+}
+
+static void loongson_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x0, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static void loongson_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static int loongson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+ u8 pol = 0, edge = 0, dual = 0;
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ edge = 1;
+ dual = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ writeb(pol, lgpio->reg_base + lgpio->chip_data->intpol_offset + hwirq);
+ writeb(edge, lgpio->reg_base + lgpio->chip_data->intedge_offset + hwirq);
+ writeb(dual, lgpio->reg_base + lgpio->chip_data->intdual_offset + hwirq);
+
+ return 0;
+}
+
+static void loongson_gpio_ls2k0300_irq_handler(struct irq_desc *desc)
+{
+ struct loongson_gpio_chip *lgpio = irq_desc_get_handler_data(desc);
+ struct irq_chip *girqchip = irq_desc_get_chip(desc);
+ int i;
+
+ chained_irq_enter(girqchip, desc);
+
+ for (i = 0; i < lgpio->chip.gc.ngpio; i++) {
+ /*
+ * For the GPIO controller of LS2K0300, interrupt status bits
+ * may be wrongly set even if the corresponding interrupt is
+ * disabled. Thus interrupt enable bits are checked along with
+ * status bits to detect interrupts reliably.
+ */
+ if (readb(lgpio->reg_base + lgpio->chip_data->intsts_offset + i) &&
+ readb(lgpio->reg_base + lgpio->chip_data->inten_offset + i))
+ generic_handle_domain_irq(lgpio->chip.gc.irq.domain, i);
+ }
+
+ chained_irq_exit(girqchip, desc);
+}
+
+static const struct irq_chip loongson_gpio_ls2k0300_irqchip = {
+ .irq_ack = loongson_gpio_irq_ack,
+ .irq_mask = loongson_gpio_irq_mask,
+ .irq_unmask = loongson_gpio_irq_unmask,
+ .irq_set_type = loongson_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int loongson_gpio_init_irqchip(struct platform_device *pdev,
+ struct loongson_gpio_chip *lgpio)
+{
+ const struct loongson_gpio_chip_data *data = lgpio->chip_data;
+ struct gpio_chip *chip = &lgpio->chip.gc;
+ int i;
+
+ chip->irq.default_type = IRQ_TYPE_NONE;
+ chip->irq.handler = handle_bad_irq;
+ chip->irq.parent_handler = data->irq_handler;
+ chip->irq.parent_handler_data = lgpio;
+ gpio_irq_chip_set_chip(&chip->irq, data->girqchip);
+
+ chip->irq.num_parents = data->intr_num;
+ chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num,
+ sizeof(*chip->irq.parents), GFP_KERNEL);
+ if (!chip->irq.parents)
+ return -ENOMEM;
+
+ for (i = 0; i < data->intr_num; i++) {
+ int ret;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get IRQ %d\n", i);
+ chip->irq.parents[i] = ret;
+ }
+
+ for (i = 0; i < data->intr_num; i++) {
+ writeb(0x0, lgpio->reg_base + data->inten_offset + i);
+ writeb(0x1, lgpio->reg_base + data->intclr_offset + i);
+ }
+
+ return 0;
+}
+
+static int loongson_gpio_init(struct platform_device *pdev, struct loongson_gpio_chip *lgpio,
void __iomem *reg_base)
{
+ struct gpio_generic_chip_config config;
int ret;
- u32 ngpios;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
- ret = bgpio_init(&lgpio->chip, dev, 8,
- lgpio->reg_base + lgpio->chip_data->in_offset,
- lgpio->reg_base + lgpio->chip_data->out_offset,
- NULL, NULL,
- lgpio->reg_base + lgpio->chip_data->conf_offset,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 8,
+ .dat = lgpio->reg_base + lgpio->chip_data->in_offset,
+ .set = lgpio->reg_base + lgpio->chip_data->out_offset,
+ .dirin = lgpio->reg_base + lgpio->chip_data->conf_offset,
+ };
+
+ ret = gpio_generic_chip_init(&lgpio->chip, &config);
if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
+ dev_err(&pdev->dev, "unable to init generic GPIO\n");
return ret;
}
} else {
- lgpio->chip.direction_input = loongson_gpio_direction_input;
- lgpio->chip.get = loongson_gpio_get;
- lgpio->chip.get_direction = loongson_gpio_get_direction;
- lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set = loongson_gpio_set;
- lgpio->chip.parent = dev;
- device_property_read_u32(dev, "ngpios", &ngpios);
- lgpio->chip.ngpio = ngpios;
+ lgpio->chip.gc.direction_input = loongson_gpio_direction_input;
+ lgpio->chip.gc.get = loongson_gpio_get;
+ lgpio->chip.gc.get_direction = loongson_gpio_get_direction;
+ lgpio->chip.gc.direction_output = loongson_gpio_direction_output;
+ lgpio->chip.gc.set = loongson_gpio_set;
+ lgpio->chip.gc.parent = &pdev->dev;
spin_lock_init(&lgpio->lock);
}
- lgpio->chip.label = lgpio->chip_data->label;
- lgpio->chip.can_sleep = false;
- if (lgpio->chip_data->inten_offset)
- lgpio->chip.to_irq = loongson_gpio_to_irq;
+ lgpio->chip.gc.label = lgpio->chip_data->label;
+ lgpio->chip.gc.can_sleep = false;
+ if (lgpio->chip_data->girqchip) {
+ ret = loongson_gpio_init_irqchip(pdev, lgpio);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize irqchip\n");
+ } else if (lgpio->chip_data->inten_offset) {
+ lgpio->chip.gc.to_irq = loongson_gpio_to_irq;
+ }
- return devm_gpiochip_add_data(dev, &lgpio->chip, lgpio);
+ return devm_gpiochip_add_data(&pdev->dev, &lgpio->chip.gc, lgpio);
}
static int loongson_gpio_probe(struct platform_device *pdev)
@@ -177,6 +333,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
void __iomem *reg_base;
struct loongson_gpio_chip *lgpio;
struct device *dev = &pdev->dev;
+ struct reset_control *rst;
lgpio = devm_kzalloc(dev, sizeof(*lgpio), GFP_KERNEL);
if (!lgpio)
@@ -188,7 +345,11 @@ static int loongson_gpio_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- return loongson_gpio_init(dev, lgpio, reg_base);
+ rst = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst), "failed to get reset control\n");
+
+ return loongson_gpio_init(pdev, lgpio, reg_base);
}
static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
@@ -200,6 +361,23 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
.inten_offset = 0x30,
};
+static const struct loongson_gpio_chip_data loongson_gpio_ls2k0300_data = {
+ .label = "ls2k0300_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
+ .intpol_offset = 0xc00,
+ .intedge_offset = 0xd00,
+ .intclr_offset = 0xe00,
+ .intsts_offset = 0xf00,
+ .intdual_offset = 0xf80,
+ .intr_num = 7,
+ .irq_handler = loongson_gpio_ls2k0300_irq_handler,
+ .girqchip = &loongson_gpio_ls2k0300_irqchip,
+};
+
static const struct loongson_gpio_chip_data loongson_gpio_ls2k0500_data0 = {
.label = "ls2k0500_gpio",
.mode = BIT_CTRL_MODE,
@@ -224,6 +402,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data0 = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
@@ -232,14 +411,15 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
.conf_offset = 0x0,
.in_offset = 0x20,
.out_offset = 0x10,
+ .inten_offset = 0x30,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
.label = "ls2k2000_gpio",
.mode = BIT_CTRL_MODE,
- .conf_offset = 0x84,
- .in_offset = 0x88,
- .out_offset = 0x80,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
@@ -248,6 +428,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
@@ -256,6 +437,36 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
.conf_offset = 0x800,
.in_offset = 0xa00,
.out_offset = 0x900,
+ .inten_offset = 0xb00,
+};
+
+/* LS7A2000 chipset GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
+ .label = "ls7a2000_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
+};
+
+/* LS7A2000 ACPI GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
+ .label = "ls7a2000_gpio",
+ .mode = BIT_CTRL_MODE,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
+};
+
+/* Loongson-3A6000 node GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = {
+ .label = "ls3a6000_gpio",
+ .mode = BIT_CTRL_MODE,
+ .conf_offset = 0x0,
+ .in_offset = 0xc,
+ .out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct of_device_id loongson_gpio_of_match[] = {
@@ -264,6 +475,10 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.data = &loongson_gpio_ls2k_data,
},
{
+ .compatible = "loongson,ls2k0300-gpio",
+ .data = &loongson_gpio_ls2k0300_data,
+ },
+ {
.compatible = "loongson,ls2k0500-gpio0",
.data = &loongson_gpio_ls2k0500_data0,
},
@@ -291,6 +506,18 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.compatible = "loongson,ls7a-gpio",
.data = &loongson_gpio_ls7a_data,
},
+ {
+ .compatible = "loongson,ls7a2000-gpio1",
+ .data = &loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .compatible = "loongson,ls7a2000-gpio2",
+ .data = &loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .compatible = "loongson,ls3a6000-gpio",
+ .data = &loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, loongson_gpio_of_match);
@@ -316,6 +543,18 @@ static const struct acpi_device_id loongson_gpio_acpi_match[] = {
.id = "LOON000C",
.driver_data = (kernel_ulong_t)&loongson_gpio_ls2k2000_data2,
},
+ {
+ .id = "LOON000D",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .id = "LOON000E",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .id = "LOON000F",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(acpi, loongson_gpio_acpi_match);
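
The new LS2K0300 support wires a complete chained irqchip into the generic GPIO chip. The flow is the standard chained-handler skeleton, with the quirk noted in the in-driver comment: status bits must be qualified with the enable bits because the hardware can flag disabled lines. Reduced to its shape (foo_* names hypothetical):

	static void foo_gpio_irq_handler(struct irq_desc *desc)
	{
		struct foo_gpio *priv = irq_desc_get_handler_data(desc);
		struct irq_chip *parent = irq_desc_get_chip(desc);
		unsigned int i;

		chained_irq_enter(parent, desc);	/* mask/ack at the parent */

		for (i = 0; i < priv->chip.gc.ngpio; i++)
			if (foo_irq_pending_and_enabled(priv, i))	/* hypothetical */
				generic_handle_domain_irq(priv->chip.gc.irq.domain, i);

		chained_irq_exit(parent, desc);		/* eoi/unmask at the parent */
	}
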
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index a42145873cc9..f3e0559f969d 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -48,8 +48,8 @@ static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & BIT(gpio + LOONGSON_GPIO_IN_OFFSET));
}
-static void loongson_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int loongson_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
u32 val;
@@ -61,6 +61,8 @@ static void loongson_gpio_set_value(struct gpio_chip *chip,
val &= ~BIT(gpio);
LOONGSON_GPIODATA = val;
spin_unlock(&gpio_lock);
+
+ return 0;
}
static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 6ca3b969db4d..9750a7a17508 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -5,10 +5,11 @@
* Copyright (C) 2015-2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -17,19 +18,18 @@
#define GPIO_OUTPUT 0x30
struct ls1x_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -37,16 +37,16 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int ls1x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ls1x_gpio_chip *ls1x_gc;
int ret;
@@ -59,29 +59,35 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ls1x_gc->reg_base))
return PTR_ERR(ls1x_gc->reg_base);
- ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA,
- ls1x_gc->reg_base + GPIO_OUTPUT, NULL,
- NULL, ls1x_gc->reg_base + GPIO_DIR, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ls1x_gc->reg_base + GPIO_DATA,
+ .set = ls1x_gc->reg_base + GPIO_OUTPUT,
+ .dirin = ls1x_gc->reg_base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&ls1x_gc->chip, &config);
if (ret)
goto err;
- ls1x_gc->gc.owner = THIS_MODULE;
- ls1x_gc->gc.request = ls1x_gpio_request;
- ls1x_gc->gc.free = ls1x_gpio_free;
+ ls1x_gc->chip.gc.owner = THIS_MODULE;
+ ls1x_gc->chip.gc.request = ls1x_gpio_request;
+ ls1x_gc->chip.gc.free = ls1x_gpio_free;
/*
* Clear ngpio to let gpiolib get the correct number
* by reading ngpios property
*/
- ls1x_gc->gc.ngpio = 0;
+ ls1x_gc->chip.gc.ngpio = 0;
- ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc);
+ ret = devm_gpiochip_add_data(dev, &ls1x_gc->chip.gc, ls1x_gc);
if (ret)
goto err;
platform_set_drvdata(pdev, ls1x_gc);
dev_info(dev, "GPIO controller registered with %d pins\n",
- ls1x_gc->gc.ngpio);
+ ls1x_gc->chip.gc.ngpio);
return 0;
err:
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8e58242f5123..e8e00daff7df 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -147,7 +147,8 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -157,15 +158,19 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
else
data = LP3943_GPIO_OUT_LOW;
- lp3943_gpio_set_mode(lp3943_gpio, offset, data);
+ return lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = lp3943_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
- lp3943_gpio_set(chip, offset, value);
lp3943_gpio->input_mask &= ~BIT(offset);
return 0;
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 5c79ba1f229c..5376708a81bf 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -58,14 +58,14 @@ static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset * BITS_PER_GPO);
}
-static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp873x_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
- BIT(offset * BITS_PER_GPO),
- value ? BIT(offset * BITS_PER_GPO) : 0);
+ return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
}
static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index d3ce027de081..0f337c1283b2 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -30,13 +30,13 @@ static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
@@ -69,8 +69,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret;
- lp87565_gpio_set(chip, offset, value);
+ ret = lp87565_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index e7c0ef6e54fa..37a2342eb2e6 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -42,6 +42,7 @@ struct lpc18xx_gpio_pin_ic {
void __iomem *base;
struct irq_domain *domain;
struct raw_spinlock lock;
+ struct gpio_chip *gpio;
};
struct lpc18xx_gpio_chip {
@@ -74,6 +75,7 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock(&ic->lock);
@@ -88,12 +90,17 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
raw_spin_unlock(&ic->lock);
irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(ic->gpio, hwirq);
}
static void lpc18xx_gpio_pin_ic_unmask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(ic->gpio, hwirq);
raw_spin_lock(&ic->lock);
@@ -149,13 +156,14 @@ static int lpc18xx_gpio_pin_ic_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip lpc18xx_gpio_pin_ic = {
+static const struct irq_chip lpc18xx_gpio_pin_ic = {
.name = "LPC18xx GPIO pin",
.irq_mask = lpc18xx_gpio_pin_ic_mask,
.irq_unmask = lpc18xx_gpio_pin_ic_unmask,
.irq_eoi = lpc18xx_gpio_pin_ic_eoi,
.irq_set_type = lpc18xx_gpio_pin_ic_set_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int lpc18xx_gpio_pin_ic_domain_alloc(struct irq_domain *domain,
@@ -240,17 +248,16 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
- ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
- NR_LPC18XX_GPIO_PIN_IC_IRQS,
- dev->of_node,
- &lpc18xx_gpio_pin_ic_domain_ops,
- ic);
+ ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
+ dev_fwnode(dev), &lpc18xx_gpio_pin_ic_domain_ops,
+ ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
goto free_iomap;
}
+ ic->gpio = &gc->gpio;
gc->pin_ic = ic;
return 0;
@@ -263,10 +270,14 @@ free_ic:
return ret;
}
-static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int lpc18xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
+
writeb(value ? 1 : 0, gc->base + offset);
+
+ return 0;
}
static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -388,7 +399,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_gpio_match);
static struct platform_driver lpc18xx_gpio_driver = {
.probe = lpc18xx_gpio_probe,
- .remove_new = lpc18xx_gpio_remove,
+ .remove = lpc18xx_gpio_remove,
.driver = {
.name = "lpc18xx-gpio",
.of_match_table = lpc18xx_gpio_match,
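
The lpc18xx change is a standard immutable-irqchip conversion: the struct irq_chip becomes const and gains IRQCHIP_IMMUTABLE plus GPIOCHIP_IRQ_RESOURCE_HELPERS, and since gpiolib no longer patches the chip's callbacks, mask/unmask must maintain the GPIO usage accounting themselves. That is why the pin IC grows a back-pointer to the gpio_chip and calls gpiochip_disable_irq()/gpiochip_enable_irq(). The canonical shape (foo_hw_* ops hypothetical):

	static void foo_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		foo_hw_mask(gc, irqd_to_hwirq(d));
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));
		foo_hw_unmask(gc, irqd_to_hwirq(d));
	}

	static const struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};
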
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c097e310c9e8..37fc54fc7385 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -340,28 +340,34 @@ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
return 0;
}
-static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p012(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p3(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
+
+ return 0;
}
static int lpc32xx_gpo_get_value(struct gpio_chip *chip, unsigned pin)
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
new file mode 100644
index 000000000000..30ef258e7655
--- /dev/null
+++ b/drivers/gpio/gpio-macsmc.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC GPIO driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver implements basic SMC PMU GPIO support that can read inputs
+ * and write outputs. Mode changes and IRQ config are not yet implemented.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+
+#define MAX_GPIO 64
+
+/*
+ * Commands 0-6 are, presumably, the intended API.
+ * Command 0xff lets you get/set the pin configuration in detail directly,
+ * but the bit meanings seem not to be stable between devices/PMU hardware
+ * versions.
+ *
+ * We're going to try to make do with the low commands for now.
+ * We don't implement pin mode changes at this time.
+ */
+
+#define CMD_ACTION (0 << 24)
+#define CMD_OUTPUT (1 << 24)
+#define CMD_INPUT (2 << 24)
+#define CMD_PINMODE (3 << 24)
+#define CMD_IRQ_ENABLE (4 << 24)
+#define CMD_IRQ_ACK (5 << 24)
+#define CMD_IRQ_MODE (6 << 24)
+#define CMD_CONFIG (0xff << 24)
+
+#define MODE_INPUT 0
+#define MODE_OUTPUT 1
+#define MODE_VALUE_0 0
+#define MODE_VALUE_1 2
+
+#define IRQ_MODE_HIGH 0
+#define IRQ_MODE_LOW 1
+#define IRQ_MODE_RISING 2
+#define IRQ_MODE_FALLING 3
+#define IRQ_MODE_BOTH 4
+
+#define CONFIG_MASK GENMASK(23, 16)
+#define CONFIG_VAL GENMASK(7, 0)
+
+#define CONFIG_OUTMODE GENMASK(7, 6)
+#define CONFIG_IRQMODE GENMASK(5, 3)
+#define CONFIG_PULLDOWN BIT(2)
+#define CONFIG_PULLUP BIT(1)
+#define CONFIG_OUTVAL BIT(0)
+
+/*
+ * Output modes seem to differ depending on the PMU in use... ?
+ * j274 / M1 (Sera PMU):
+ * 0 = input
+ * 1 = output
+ * 2 = open drain
+ * 3 = disable
+ * j314 / M1Pro (Maverick PMU):
+ * 0 = input
+ * 1 = open drain
+ * 2 = output
+ * 3 = ?
+ */
+
+struct macsmc_gpio {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct gpio_chip gc;
+
+ int first_index;
+};
+
+static int macsmc_gpio_nr(smc_key key)
+{
+ int low = hex_to_bin(key & 0xff);
+ int high = hex_to_bin((key >> 8) & 0xff);
+
+ if (low < 0 || high < 0)
+ return -1;
+
+ return low | (high << 4);
+}
+
+static int macsmc_gpio_key(unsigned int offset)
+{
+ return _SMC_KEY("gP\0\0") | hex_asc_hi(offset) << 8 | hex_asc_lo(offset);
+}
+
+static int macsmc_gpio_find_first_gpio_index(struct macsmc_gpio *smcgp)
+{
+ struct apple_smc *smc = smcgp->smc;
+ smc_key key = macsmc_gpio_key(0);
+ smc_key first_key, last_key;
+ int start, count, ret;
+
+ /* Return early if the key is out of bounds */
+ ret = apple_smc_get_key_by_index(smc, 0, &first_key);
+ if (ret)
+ return ret;
+ if (key <= first_key)
+ return -ENODEV;
+
+ ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &last_key);
+ if (ret)
+ return ret;
+ if (key > last_key)
+ return -ENODEV;
+
+ /* Binary search to find index of first SMC key bigger or equal to key */
+ start = 0;
+ count = smc->key_count;
+ while (count > 1) {
+ smc_key pkey;
+ int pivot = start + ((count - 1) >> 1);
+
+ ret = apple_smc_get_key_by_index(smc, pivot, &pkey);
+ if (ret < 0)
+ return ret;
+
+ if (pkey == key)
+ return pivot;
+
+ pivot++;
+
+ if (pkey < key) {
+ count -= pivot - start;
+ start = pivot;
+ } else {
+ count = pivot - start;
+ }
+ }
+
+ return start;
+}
+
+static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 val;
+ int ret;
+
+ /* First try reading the explicit pin mode register */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val);
+ if (!ret)
+ return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode.
+ * Fall back to reading IRQ mode, which will only succeed for inputs.
+ */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+ return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 cmd, val;
+ int ret;
+
+ ret = macsmc_gpio_get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret == GPIO_LINE_DIRECTION_OUT)
+ cmd = CMD_OUTPUT;
+ else
+ cmd = CMD_INPUT;
+
+ ret = apple_smc_rw_u32(smcgp->smc, key, cmd, &val);
+ if (ret < 0)
+ return ret;
+
+ return val ? 1 : 0;
+}
+
+static int macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ int ret;
+
+ ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value);
+ if (ret < 0)
+ dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n",
+ &key, value);
+
+ return ret;
+}
+
+static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask, unsigned int ngpios)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ int count;
+ int i;
+
+ count = min(smcgp->smc->key_count, MAX_GPIO);
+
+ bitmap_zero(valid_mask, ngpios);
+
+ for (i = 0; i < count; i++) {
+ int ret, gpio_nr;
+ smc_key key;
+
+ ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > SMC_KEY(gPff))
+ break;
+
+ gpio_nr = macsmc_gpio_nr(key);
+ if (gpio_nr < 0 || gpio_nr >= MAX_GPIO) {
+ dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key);
+ continue;
+ }
+
+ set_bit(gpio_nr, valid_mask);
+ }
+
+ return 0;
+}
+
+static int macsmc_gpio_probe(struct platform_device *pdev)
+{
+ struct macsmc_gpio *smcgp;
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ smc_key key;
+ int ret;
+
+ smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL);
+ if (!smcgp)
+ return -ENOMEM;
+
+ smcgp->dev = &pdev->dev;
+ smcgp->smc = smc;
+
+ smcgp->first_index = macsmc_gpio_find_first_gpio_index(smcgp);
+ if (smcgp->first_index < 0)
+ return smcgp->first_index;
+
+ ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > macsmc_gpio_key(MAX_GPIO - 1))
+ return -ENODEV;
+
+ dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key);
+
+ smcgp->gc.label = "macsmc-pmu-gpio";
+ smcgp->gc.owner = THIS_MODULE;
+ smcgp->gc.get = macsmc_gpio_get;
+ smcgp->gc.set = macsmc_gpio_set;
+ smcgp->gc.get_direction = macsmc_gpio_get_direction;
+ smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
+ smcgp->gc.can_sleep = true;
+ smcgp->gc.ngpio = MAX_GPIO;
+ smcgp->gc.base = -1;
+ smcgp->gc.parent = &pdev->dev;
+
+ return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp);
+}
+
+static const struct of_device_id macsmc_gpio_of_table[] = {
+ { .compatible = "apple,smc-gpio", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, macsmc_gpio_of_table);
+
+static struct platform_driver macsmc_gpio_driver = {
+ .driver = {
+ .name = "macsmc-gpio",
+ .of_match_table = macsmc_gpio_of_table,
+ },
+ .probe = macsmc_gpio_probe,
+};
+module_platform_driver(macsmc_gpio_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 8f38303fcbc4..551faf9655b2 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -87,23 +87,17 @@ static int madera_gpio_direction_out(struct gpio_chip *chip,
MADERA_GP1_LVL_MASK, reg_val);
}
-static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
- int ret;
-
- ret = regmap_update_bits(madera->regmap,
- MADERA_GPIO1_CTRL_1 + reg_offset,
- MADERA_GP1_LVL_MASK, reg_val);
- /* set() doesn't return an error so log a warning */
- if (ret)
- dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
- MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
}
static const struct gpio_chip madera_gpio_chip = {
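The madera hunk above is the template for most conversions in this series:
gpio_chip's .set callback now returns int, so bus errors propagate to the
caller instead of being logged and dropped. A minimal sketch of the converted
shape for a regmap-backed chip; the EX_* register and bit names are
hypothetical:

	#include <linux/gpio/driver.h>
	#include <linux/regmap.h>

	#define EX_GPIO_CTRL(n)	(0x10 + (n))	/* hypothetical register */
	#define EX_GPIO_LVL	BIT(0)		/* hypothetical level bit */

	static int example_gpio_set(struct gpio_chip *gc, unsigned int offset,
				    int value)
	{
		struct regmap *map = gpiochip_get_data(gc);

		/* Return the bus error instead of swallowing it. */
		return regmap_update_bits(map, EX_GPIO_CTRL(offset),
					  EX_GPIO_LVL,
					  value ? EX_GPIO_LVL : 0);
	}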
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index bbacc714632b..6e6504ab740a 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -103,19 +103,6 @@ static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
return 0;
}
-static int max3191x_direction_output(struct gpio_chip *gpio,
- unsigned int offset, int value)
-{
- return -EINVAL;
-}
-
-static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
-{ }
-
-static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
- unsigned long *bits)
-{ }
-
static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
{
return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
@@ -309,23 +296,21 @@ static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
return 0;
}
-static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
- struct gpio_desc **desc,
- struct gpio_array *info,
+static void max3191x_gpiod_multi_set_single_value(struct gpio_descs *descs,
int value)
{
unsigned long *values;
- values = bitmap_alloc(ndescs, GFP_KERNEL);
+ values = bitmap_alloc(descs->ndescs, GFP_KERNEL);
if (!values)
return;
if (value)
- bitmap_fill(values, ndescs);
+ bitmap_fill(values, descs->ndescs);
else
- bitmap_zero(values, ndescs);
+ bitmap_zero(values, descs->ndescs);
- gpiod_set_array_value_cansleep(ndescs, desc, info, values);
+ gpiod_multi_set_value_cansleep(descs, values);
bitmap_free(values);
}
@@ -396,10 +381,8 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->mode = device_property_read_bool(dev, "maxim,modesel-8bit")
? STATUS_BYTE_DISABLED : STATUS_BYTE_ENABLED;
if (max3191x->modesel_pins)
- gpiod_set_array_single_value_cansleep(
- max3191x->modesel_pins->ndescs,
- max3191x->modesel_pins->desc,
- max3191x->modesel_pins->info, max3191x->mode);
+ max3191x_gpiod_multi_set_single_value(max3191x->modesel_pins,
+ max3191x->mode);
max3191x->ignore_uv = device_property_read_bool(dev,
"maxim,ignore-undervoltage");
@@ -425,9 +408,6 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->gpio.get_direction = max3191x_get_direction;
max3191x->gpio.direction_input = max3191x_direction_input;
- max3191x->gpio.direction_output = max3191x_direction_output;
- max3191x->gpio.set = max3191x_set;
- max3191x->gpio.set_multiple = max3191x_set_multiple;
max3191x->gpio.get = max3191x_get;
max3191x->gpio.get_multiple = max3191x_get_multiple;
max3191x->gpio.set_config = max3191x_set_config;
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 701795b9d329..84c7c2dca822 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -143,18 +143,21 @@ static int max7301_get(struct gpio_chip *chip, unsigned offset)
return level;
}
-static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
+static int max7301_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max7301 *ts = gpiochip_get_data(chip);
+ int ret;
/* First 4 pins are unused in the controller */
offset += 4;
mutex_lock(&ts->lock);
- __max7301_set(ts, offset, value);
+ ret = __max7301_set(ts, offset, value);
mutex_unlock(&ts->lock);
+
+ return ret;
}
int __max730x_probe(struct max7301 *ts)
@@ -165,7 +168,10 @@ int __max730x_probe(struct max7301 *ts)
pdata = dev_get_platdata(dev);
- mutex_init(&ts->lock);
+ ret = devm_mutex_init(ts->dev, &ts->lock);
+ if (ret)
+ return ret;
+
dev_set_drvdata(dev, ts);
/* Power up the chip and disable IRQ output */
@@ -206,17 +212,11 @@ int __max730x_probe(struct max7301 *ts)
int offset = (i - 1) * 4 + j;
ret = max7301_direction_input(&ts->chip, offset);
if (ret)
- goto exit_destroy;
+ return ret;
}
}
- ret = gpiochip_add_data(&ts->chip, ts);
- if (!ret)
- return ret;
-
-exit_destroy:
- mutex_destroy(&ts->lock);
- return ret;
+ return devm_gpiochip_add_data(ts->dev, &ts->chip, ts);
}
EXPORT_SYMBOL_GPL(__max730x_probe);
@@ -226,8 +226,6 @@ void __max730x_remove(struct device *dev)
/* Power down the chip and disable IRQ output */
ts->write(dev, 0x04, 0x00);
- gpiochip_remove(&ts->chip);
- mutex_destroy(&ts->lock);
}
EXPORT_SYMBOL_GPL(__max730x_remove);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 49d362907bc7..a61d670ceeda 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -225,16 +225,19 @@ out:
mutex_unlock(&chip->lock);
}
-static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int max732x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
unsigned base = off & ~0x7;
uint8_t mask = 1u << (off & 0x7);
max732x_gpio_set_mask(gc, base, mask, val << (off & 0x7));
+
+ return 0;
}
-static void max732x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int max732x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
unsigned mask_lo = mask[0] & 0xff;
unsigned mask_hi = (mask[0] >> 8) & 0xff;
@@ -243,6 +246,8 @@ static void max732x_gpio_set_multiple(struct gpio_chip *gc,
max732x_gpio_set_mask(gc, 0, mask_lo, bits[0] & 0xff);
if (mask_hi)
max732x_gpio_set_mask(gc, 8, mask_hi, (bits[0] >> 8) & 0xff);
+
+ return 0;
}
static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
diff --git a/drivers/gpio/gpio-max7360.c b/drivers/gpio/gpio-max7360.c
new file mode 100644
index 000000000000..db92a43776a9
--- /dev/null
+++ b/drivers/gpio/gpio-max7360.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Kamel BOUHARA <kamel.bouhara@bootlin.com>
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MAX7360_GPIO_PORT 1
+#define MAX7360_GPIO_COL 2
+
+struct max7360_gpio_plat_data {
+ unsigned int function;
+};
+
+static const struct max7360_gpio_plat_data max7360_gpio_port_plat = { .function = MAX7360_GPIO_PORT };
+static const struct max7360_gpio_plat_data max7360_gpio_col_plat = { .function = MAX7360_GPIO_COL };
+
+static int max7360_get_available_gpos(struct device *dev, unsigned int *available_gpios)
+{
+ u32 columns;
+ int ret;
+
+ ret = device_property_read_u32(dev->parent, "keypad,num-columns", &columns);
+ if (ret) {
+ dev_err(dev, "Failed to read columns count\n");
+ return ret;
+ }
+
+ *available_gpios = min(MAX7360_MAX_GPO, MAX7360_MAX_KEY_COLS - columns);
+
+ return 0;
+}
+
+static int max7360_gpo_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ unsigned int available_gpios;
+ int ret;
+
+ ret = max7360_get_available_gpos(gc->parent, &available_gpios);
+ if (ret)
+ return ret;
+
+ bitmap_clear(valid_mask, 0, MAX7360_MAX_KEY_COLS - available_gpios);
+
+ return 0;
+}
+
+static int max7360_set_gpos_count(struct device *dev, struct regmap *regmap)
+{
+	/*
+	 * The MAX7360 COL0 to COL7 pins can be used as keypad columns, as
+	 * general-purpose outputs, or as a mix of both.
+	 * By default all pins are used for the keypad; update the
+	 * configuration here so that some of them can be used as GPIOs.
+	 */
+ unsigned int available_gpios;
+ unsigned int val;
+ int ret;
+
+ ret = max7360_get_available_gpos(dev, &available_gpios);
+ if (ret)
+ return ret;
+
+	/*
+	 * Configure which pins will be used for the keypad.
+	 * MAX7360_REG_DEBOUNCE holds both the keypad debounce timings and
+	 * the split between GPOs and keypad columns. Only the latter is
+	 * modified here.
+	 */
+ val = FIELD_PREP(MAX7360_PORTS, available_gpios);
+ ret = regmap_write_bits(regmap, MAX7360_REG_DEBOUNCE, MAX7360_PORTS, val);
+ if (ret)
+ dev_err(dev, "Failed to write max7360 columns/gpos configuration");
+
+ return ret;
+}
+
+static int max7360_gpio_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ if (base == MAX7360_REG_PWMBASE) {
+ /*
+ * GPIO output is using PWM duty cycle registers: one register
+ * per line, with value being either 0 or 255.
+ */
+ *reg = base + offset;
+ *mask = GENMASK(7, 0);
+ } else {
+ *reg = base;
+ *mask = BIT(offset);
+ }
+
+ return 0;
+}
+
+static const struct regmap_irq max7360_regmap_irqs[MAX7360_MAX_GPIO] = {
+ REGMAP_IRQ_REG(0, 0, BIT(0)),
+ REGMAP_IRQ_REG(1, 0, BIT(1)),
+ REGMAP_IRQ_REG(2, 0, BIT(2)),
+ REGMAP_IRQ_REG(3, 0, BIT(3)),
+ REGMAP_IRQ_REG(4, 0, BIT(4)),
+ REGMAP_IRQ_REG(5, 0, BIT(5)),
+ REGMAP_IRQ_REG(6, 0, BIT(6)),
+ REGMAP_IRQ_REG(7, 0, BIT(7)),
+};
+
+static int max7360_handle_mask_sync(const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
+{
+ struct regmap *regmap = irq_drv_data;
+ int ret;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_assign_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_MASK, mask_buf & BIT(i));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max7360_gpio_probe(struct platform_device *pdev)
+{
+ const struct max7360_gpio_plat_data *plat_data;
+ struct gpio_regmap_config gpio_config = { };
+ struct regmap_irq_chip *irq_chip;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ unsigned int outconf;
+ int ret;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "could not get parent regmap\n");
+
+ plat_data = device_get_match_data(dev);
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ if (device_property_read_bool(dev, "interrupt-controller")) {
+ /*
+ * Port GPIOs with interrupt-controller property: add IRQ
+ * controller.
+ */
+ gpio_config.regmap_irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ gpio_config.regmap_irq_line =
+ fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (gpio_config.regmap_irq_line < 0)
+ return dev_err_probe(dev, gpio_config.regmap_irq_line,
+ "Failed to get IRQ\n");
+
+ /* Create custom IRQ configuration. */
+			irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
+			if (!irq_chip)
+				return -ENOMEM;
+
+			gpio_config.regmap_irq_chip = irq_chip;
+
+ irq_chip->name = dev_name(dev);
+ irq_chip->status_base = MAX7360_REG_GPIOIN;
+ irq_chip->status_is_level = true;
+ irq_chip->num_regs = 1;
+ irq_chip->num_irqs = MAX7360_MAX_GPIO;
+ irq_chip->irqs = max7360_regmap_irqs;
+ irq_chip->handle_mask_sync = max7360_handle_mask_sync;
+ irq_chip->irq_drv_data = regmap;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_write_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_EDGES,
+ MAX7360_PORT_CFG_INTERRUPT_EDGES);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable interrupts\n");
+ }
+ }
+
+ /*
+ * Port GPIOs: set output mode configuration (constant-current or not).
+ * This property is optional.
+ */
+ ret = device_property_read_u32(dev, "maxim,constant-current-disable", &outconf);
+ if (!ret) {
+ ret = regmap_write(regmap, MAX7360_REG_GPIOOUTM, outconf);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set constant-current configuration\n");
+ }
+ }
+
+ /* Add gpio device. */
+ gpio_config.parent = dev;
+ gpio_config.regmap = regmap;
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ gpio_config.ngpio = MAX7360_MAX_GPIO;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOIN);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PWMBASE);
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOCTRL);
+ gpio_config.ngpio_per_reg = MAX7360_MAX_GPIO;
+ gpio_config.reg_mask_xlate = max7360_gpio_reg_mask_xlate;
+ } else {
+ ret = max7360_set_gpos_count(dev, regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set GPOS pin count\n");
+
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PORTS);
+ gpio_config.ngpio = MAX7360_MAX_KEY_COLS;
+ gpio_config.init_valid_mask = max7360_gpo_init_valid_mask;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+}
+
+static const struct of_device_id max7360_gpio_of_match[] = {
+ {
+ .compatible = "maxim,max7360-gpo",
+ .data = &max7360_gpio_col_plat
+ }, {
+ .compatible = "maxim,max7360-gpio",
+ .data = &max7360_gpio_port_plat
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, max7360_gpio_of_match);
+
+static struct platform_driver max7360_gpio_driver = {
+ .driver = {
+ .name = "max7360-gpio",
+ .of_match_table = max7360_gpio_of_match,
+ },
+ .probe = max7360_gpio_probe,
+};
+module_platform_driver(max7360_gpio_driver);
+
+MODULE_DESCRIPTION("MAX7360 GPIO driver");
+MODULE_AUTHOR("Kamel BOUHARA <kamel.bouhara@bootlin.com>");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 8c2a5609161f..02eca400b307 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -223,20 +223,17 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
return ret;
}
-static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
- int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
- ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
- MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
- if (ret < 0)
- dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
}
static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index 3075f2513c6f..4540da4c1418 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -62,18 +62,16 @@ static int max77650_gpio_direction_output(struct gpio_chip *gc,
MAX77650_REG_CNFG_GPIO, mask, regval);
}
-static void max77650_gpio_set_value(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
- int rv, regval;
+ int regval;
regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
- rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
- MAX77650_GPIO_OUTVAL_MASK, regval);
- if (rv)
- dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+ return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
}
static int max77650_gpio_get_value(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
new file mode 100644
index 000000000000..5e48eb03e7b3
--- /dev/null
+++ b/drivers/gpio/gpio-max77759.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google Inc
+// Copyright 2025 Linaro Ltd.
+//
+// GPIO driver for Maxim MAX77759
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/lockdep.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#define MAX77759_N_GPIOS ARRAY_SIZE(max77759_gpio_line_names)
+static const char * const max77759_gpio_line_names[] = { "GPIO5", "GPIO6" };
+
+struct max77759_gpio_chip {
+ struct regmap *map;
+ struct max77759 *max77759;
+ struct gpio_chip gc;
+ struct mutex maxq_lock; /* protect MaxQ r/m/w operations */
+
+ struct mutex irq_lock; /* protect irq bus */
+ int irq_mask;
+ int irq_mask_changed;
+ int irq_trig;
+ int irq_trig_changed;
+};
+
+#define MAX77759_GPIOx_TRIGGER(offs, val) (((val) & 1) << (offs))
+#define MAX77759_GPIOx_TRIGGER_MASK(offs) MAX77759_GPIOx_TRIGGER(offs, ~0)
+enum max77759_trigger_gpio_type {
+ MAX77759_GPIO_TRIGGER_RISING = 0,
+ MAX77759_GPIO_TRIGGER_FALLING = 1
+};
+
+#define MAX77759_GPIOx_DIR(offs, dir) (((dir) & 1) << (2 + (3 * (offs))))
+#define MAX77759_GPIOx_DIR_MASK(offs) MAX77759_GPIOx_DIR(offs, ~0)
+enum max77759_control_gpio_dir {
+ MAX77759_GPIO_DIR_IN = 0,
+ MAX77759_GPIO_DIR_OUT = 1
+};
+
+#define MAX77759_GPIOx_OUTVAL(offs, val) (((val) & 1) << (3 + (3 * (offs))))
+#define MAX77759_GPIOx_OUTVAL_MASK(offs) MAX77759_GPIOx_OUTVAL(offs, ~0)
+
+#define MAX77759_GPIOx_INVAL_MASK(offs) (BIT(4) << (3 * (offs)))
+
+static int max77759_gpio_maxq_gpio_trigger_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_trigger_write(struct max77759_gpio_chip *chip,
+ u8 trigger)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE;
+ cmd->cmd[1] = trigger;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int max77759_gpio_maxq_gpio_control_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_control_write(struct max77759_gpio_chip *chip,
+ u8 ctrl)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE;
+ cmd->cmd[1] = ctrl;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int
+max77759_gpio_direction_from_control(int ctrl, unsigned int offset)
+{
+ enum max77759_control_gpio_dir dir;
+
+ dir = !!(ctrl & MAX77759_GPIOx_DIR_MASK(offset));
+ return ((dir == MAX77759_GPIO_DIR_OUT)
+ ? GPIO_LINE_DIRECTION_OUT
+ : GPIO_LINE_DIRECTION_IN);
+}
+
+static int max77759_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ return max77759_gpio_direction_from_control(ctrl, offset);
+}
+
+static int max77759_gpio_direction_helper(struct gpio_chip *gc,
+ unsigned int offset,
+ enum max77759_control_gpio_dir dir,
+ int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, dir);
+
+ if (dir == MAX77759_GPIO_DIR_OUT) {
+ new_ctrl &= ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+ }
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static int max77759_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_IN, -1);
+}
+
+static int max77759_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_OUT, value);
+}
+
+static int max77759_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, mask;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ /*
+ * The input status bit doesn't reflect the pin state when the GPIO is
+ * configured as an output. Check the direction, and inspect the input
+ * or output bit accordingly.
+ */
+ mask = ((max77759_gpio_direction_from_control(ctrl, offset)
+ == GPIO_LINE_DIRECTION_IN)
+ ? MAX77759_GPIOx_INVAL_MASK(offset)
+ : MAX77759_GPIOx_OUTVAL_MASK(offset));
+
+ return !!(ctrl & mask);
+}
+
+static int max77759_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static void max77759_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void max77759_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 0);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+}
+
+static int max77759_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_trig &= ~MAX77759_GPIOx_TRIGGER_MASK(hwirq);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_RISING);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_FALLING);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ chip->irq_trig_changed |= MAX77759_GPIOx_TRIGGER(hwirq, 1);
+
+ return 0;
+}
+
+static void max77759_gpio_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static int max77759_gpio_bus_sync_unlock_helper(struct gpio_chip *gc,
+ struct max77759_gpio_chip *chip)
+ __must_hold(&chip->maxq_lock)
+{
+ int ctrl, trigger, new_trigger, new_ctrl;
+ unsigned long irq_trig_changed;
+ int offset;
+ int ret;
+
+ lockdep_assert_held(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ trigger = max77759_gpio_maxq_gpio_trigger_read(chip);
+ if (ctrl < 0 || trigger < 0) {
+ dev_err(gc->parent, "failed to read current state: %d / %d\n",
+ ctrl, trigger);
+ return (ctrl < 0) ? ctrl : trigger;
+ }
+
+ new_trigger = trigger & ~chip->irq_trig_changed;
+ new_trigger |= (chip->irq_trig & chip->irq_trig_changed);
+
+ /* change GPIO direction if required */
+ new_ctrl = ctrl;
+ irq_trig_changed = chip->irq_trig_changed;
+ for_each_set_bit(offset, &irq_trig_changed, MAX77759_N_GPIOS) {
+ new_ctrl &= ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, MAX77759_GPIO_DIR_IN);
+ }
+
+ if (new_trigger != trigger) {
+ ret = max77759_gpio_maxq_gpio_trigger_write(chip, new_trigger);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new trigger: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (new_ctrl != ctrl) {
+ ret = max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new control: %d\n", ret);
+ return ret;
+ }
+ }
+
+ chip->irq_trig_changed = 0;
+
+ return 0;
+}
+
+static void max77759_gpio_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ scoped_guard(mutex, &chip->maxq_lock) {
+ ret = max77759_gpio_bus_sync_unlock_helper(gc, chip);
+ if (ret)
+ goto out_unlock;
+ }
+
+ ret = regmap_update_bits(chip->map,
+ MAX77759_MAXQ_REG_UIC_INT1_M,
+ chip->irq_mask_changed, chip->irq_mask);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to update UIC_INT1 irq mask: %d\n", ret);
+ goto out_unlock;
+ }
+
+ chip->irq_mask_changed = 0;
+
+out_unlock:
+ mutex_unlock(&chip->irq_lock);
+}
+
+static void max77759_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ seq_puts(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip max77759_gpio_irq_chip = {
+ .irq_mask = max77759_gpio_irq_mask,
+ .irq_unmask = max77759_gpio_irq_unmask,
+ .irq_set_type = max77759_gpio_set_irq_type,
+ .irq_bus_lock = max77759_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77759_gpio_bus_sync_unlock,
+ .irq_print_chip = max77759_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static irqreturn_t max77759_gpio_irqhandler(int irq, void *data)
+{
+ struct max77759_gpio_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ bool handled = false;
+
+ /* iterate until no interrupt is pending */
+ while (true) {
+ unsigned int uic_int1;
+ int ret;
+ unsigned long pending;
+ int offset;
+
+ ret = regmap_read(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ &uic_int1);
+ if (ret < 0) {
+ dev_err_ratelimited(gc->parent,
+ "failed to read IRQ status: %d\n",
+ ret);
+			/*
+			 * If nothing was handled so far, this was not
+			 * our interrupt: return IRQ_NONE. Otherwise
+			 * return IRQ_HANDLED.
+			 */
+ return IRQ_RETVAL(handled);
+ }
+
+ pending = uic_int1;
+ pending &= (MAX77759_MAXQ_REG_UIC_INT1_GPIO6I
+ | MAX77759_MAXQ_REG_UIC_INT1_GPIO5I);
+ if (!pending)
+ break;
+
+ for_each_set_bit(offset, &pending, MAX77759_N_GPIOS) {
+			/*
+			 * ACK the interrupt by writing 1 to bit 'offset';
+			 * all other bits must be written as 0. This must
+			 * be done unconditionally, hence regmap_set_bits()
+			 * is inappropriate here.
+			 */
+ regmap_write(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ BIT(offset));
+
+ handle_nested_irq(irq_find_mapping(gc->irq.domain,
+ offset));
+
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int max77759_gpio_probe(struct platform_device *pdev)
+{
+ struct max77759_gpio_chip *chip;
+ int irq;
+ struct gpio_irq_chip *girq;
+ int ret;
+ unsigned long irq_flags;
+ struct irq_data *irqd;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(pdev->dev.parent, "maxq");
+ if (!chip->map)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Missing regmap\n");
+
+ irq = platform_get_irq_byname(pdev, "GPI");
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
+
+ chip->max77759 = dev_get_drvdata(pdev->dev.parent);
+ ret = devm_mutex_init(&pdev->dev, &chip->maxq_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(&pdev->dev, &chip->irq_lock);
+ if (ret)
+ return ret;
+
+ chip->gc.base = -1;
+ chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.parent = &pdev->dev;
+ chip->gc.can_sleep = true;
+
+ chip->gc.names = max77759_gpio_line_names;
+ chip->gc.ngpio = MAX77759_N_GPIOS;
+ chip->gc.get_direction = max77759_gpio_get_direction;
+ chip->gc.direction_input = max77759_gpio_direction_input;
+ chip->gc.direction_output = max77759_gpio_direction_output;
+ chip->gc.get = max77759_gpio_get_value;
+ chip->gc.set = max77759_gpio_set_value;
+
+ girq = &chip->gc.irq;
+ gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add GPIO chip\n");
+
+ irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ irqd = irq_get_irq_data(irq);
+ if (irqd)
+ irq_flags |= irqd_get_trigger_type(irqd);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ max77759_gpio_irqhandler, irq_flags,
+ dev_name(&pdev->dev), chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request IRQ\n");
+
+	return 0;
+}
+
+static const struct of_device_id max77759_gpio_of_id[] = {
+ { .compatible = "maxim,max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_gpio_of_id);
+
+static const struct platform_device_id max77759_gpio_platform_id[] = {
+ { "max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77759_gpio_platform_id);
+
+static struct platform_driver max77759_gpio_driver = {
+ .driver = {
+ .name = "max77759-gpio",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = max77759_gpio_of_id,
+ },
+ .probe = max77759_gpio_probe,
+ .id_table = max77759_gpio_platform_id,
+};
+
+module_platform_driver(max77759_gpio_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX77759");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index ccbb63c21d6f..581a71872eab 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -119,7 +119,7 @@ static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio)
return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio));
}
-static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
+static int mb86s70_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc);
unsigned long flags;
@@ -135,6 +135,8 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
writel(val, gchip->base + PDR(gpio));
spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
}
static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
@@ -145,8 +147,6 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
irq = platform_get_irq(to_platform_device(gc->parent), index);
if (irq < 0)
return irq;
- if (irq == 0)
- break;
if (irq_get_irq_data(irq)->hwirq == offset)
return irq;
}
@@ -227,7 +227,7 @@ static struct platform_driver mb86s70_gpio_driver = {
.acpi_match_table = ACPI_PTR(mb86s70_gpio_acpi_ids),
},
.probe = mb86s70_gpio_probe,
- .remove_new = mb86s70_gpio_remove,
+ .remove = mb86s70_gpio_remove,
};
module_platform_driver(mb86s70_gpio_driver);
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 5fb357d7b78a..9a40e9579e95 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -57,15 +57,18 @@ static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
}
-static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mc33880_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mc33880 *mc = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&mc->lock);
- __mc33880_set(mc, offset, value);
+ ret = __mc33880_set(mc, offset, value);
mutex_unlock(&mc->lock);
+
+ return ret;
}
static int mc33880_probe(struct spi_device *spi)
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a035a9bcb57c..da2bf9381cc4 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -12,6 +12,7 @@
#include <linux/mcb.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#define MEN_Z127_CTRL 0x00
#define MEN_Z127_PSR 0x04
@@ -30,7 +31,7 @@
(db <= MEN_Z127_DB_MAX_US))
struct men_z127_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
struct resource *mem;
};
@@ -64,7 +65,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
debounce /= 50;
}
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
@@ -79,8 +80,6 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
- raw_spin_unlock(&gc->bgpio_lock);
-
return 0;
}
@@ -91,7 +90,8 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
+
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
@@ -101,7 +101,6 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
- raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
@@ -127,9 +126,17 @@ static int men_z127_set_config(struct gpio_chip *gc, unsigned offset,
return -ENOTSUPP;
}
+static void men_z127_release_mem(void *data)
+{
+ struct resource *res = data;
+
+ mcb_release_mem(res);
+}
+
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
+ struct gpio_generic_chip_config config;
struct men_z127_gpio *men_z127_gpio;
struct device *dev = &mdev->dev;
int ret;
@@ -140,55 +147,42 @@ static int men_z127_probe(struct mcb_device *mdev,
return -ENOMEM;
men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
- if (IS_ERR(men_z127_gpio->mem)) {
- dev_err(dev, "failed to request device memory");
- return PTR_ERR(men_z127_gpio->mem);
- }
+ if (IS_ERR(men_z127_gpio->mem))
+ return dev_err_probe(dev, PTR_ERR(men_z127_gpio->mem),
+ "failed to request device memory");
- men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start,
- resource_size(men_z127_gpio->mem));
- if (men_z127_gpio->reg_base == NULL) {
- ret = -ENXIO;
- goto err_release;
- }
-
- mcb_set_drvdata(mdev, men_z127_gpio);
-
- ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
- men_z127_gpio->reg_base + MEN_Z127_PSR,
- men_z127_gpio->reg_base + MEN_Z127_CTRL,
- NULL,
- men_z127_gpio->reg_base + MEN_Z127_GPIODR,
- NULL, 0);
+ ret = devm_add_action_or_reset(dev, men_z127_release_mem,
+ men_z127_gpio->mem);
if (ret)
- goto err_unmap;
+ return ret;
- men_z127_gpio->gc.set_config = men_z127_set_config;
+ men_z127_gpio->reg_base = devm_ioremap(dev, men_z127_gpio->mem->start,
+ resource_size(men_z127_gpio->mem));
+ if (men_z127_gpio->reg_base == NULL)
+ return -ENXIO;
- ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
- if (ret) {
- dev_err(dev, "failed to register MEN 16Z127 GPIO controller");
- goto err_unmap;
- }
+ mcb_set_drvdata(mdev, men_z127_gpio);
- dev_info(dev, "MEN 16Z127 GPIO driver registered");
+ config = (struct gpio_generic_chip_config) {
+ .dev = &mdev->dev,
+ .sz = 4,
+ .dat = men_z127_gpio->reg_base + MEN_Z127_PSR,
+ .set = men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ .dirout = men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ };
- return 0;
+ ret = gpio_generic_chip_init(&men_z127_gpio->chip, &config);
+ if (ret)
+ return ret;
-err_unmap:
- iounmap(men_z127_gpio->reg_base);
-err_release:
- mcb_release_mem(men_z127_gpio->mem);
- return ret;
-}
+ men_z127_gpio->chip.gc.set_config = men_z127_set_config;
-static void men_z127_remove(struct mcb_device *mdev)
-{
- struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
+ ret = devm_gpiochip_add_data(dev, &men_z127_gpio->chip.gc, men_z127_gpio);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register MEN 16Z127 GPIO controller");
- gpiochip_remove(&men_z127_gpio->gc);
- iounmap(men_z127_gpio->reg_base);
- mcb_release_mem(men_z127_gpio->mem);
+ return 0;
}
static const struct mcb_device_id men_z127_ids[] = {
@@ -202,7 +196,6 @@ static struct mcb_driver men_z127_driver = {
.name = "z127-gpio",
},
.probe = men_z127_probe,
- .remove = men_z127_remove,
.id_table = men_z127_ids,
};
module_mcb_driver(men_z127_driver);
@@ -211,4 +204,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
-MODULE_IMPORT_NS(MCB);
+MODULE_IMPORT_NS("MCB");
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 421d7e3a6c66..4335a5d8e4f6 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -78,24 +78,25 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
if (retval)
return retval;
- retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev));
- if (retval)
- return dev_err_probe(dev, retval, "I/O memory mapping error\n");
-
- base = pcim_iomap_table(pdev)[1];
+ base = pcim_iomap_region(pdev, 1, pci_name(pdev));
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "I/O memory mapping error\n");
irq_base = readl(base + 0 * sizeof(u32));
gpio_base = readl(base + 1 * sizeof(u32));
/* Release the IO mapping, since we already get the info from BAR1 */
- pcim_iounmap_regions(pdev, BIT(1));
+ pcim_iounmap_region(pdev, 1);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
- priv->reg_base = pcim_iomap_table(pdev)[0];
+ priv->reg_base = pcim_iomap_region(pdev, 0, pci_name(pdev));
+ if (IS_ERR(priv->reg_base))
+ return dev_err_probe(dev, PTR_ERR(priv->reg_base),
+ "I/O memory mapping error\n");
priv->pin_info.pin_ranges = mrfld_gpio_ranges;
priv->pin_info.nranges = ARRAY_SIZE(mrfld_gpio_ranges);
@@ -141,4 +142,4 @@ module_pci_driver(mrfld_gpio_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_DESCRIPTION("Intel Merrifield SoC GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_TANGIER);
+MODULE_IMPORT_NS("GPIO_TANGIER");
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 48e3768a830e..f6af81bf2b13 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -89,7 +89,7 @@ struct ioh_gpio {
static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
-static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+static int ioh_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct ioh_gpio *chip = gpiochip_get_data(gpio);
@@ -104,6 +104,8 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
index 1fa9973f55b9..a18fedbc463e 100644
--- a/drivers/gpio/gpio-mlxbf.c
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,7 +38,7 @@ struct mlxbf_gpio_context_save_regs {
/* Device state structure. */
struct mlxbf_gpio_state {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* Memory Address */
void __iomem *base;
@@ -49,6 +50,7 @@ struct mlxbf_gpio_state {
static int mlxbf_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf_gpio_state *gs;
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
@@ -62,21 +64,24 @@ static int mlxbf_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gs->base))
return PTR_ERR(gs->base);
- gc = &gs->gc;
- ret = bgpio_init(gc, dev, 8,
- gs->base + MLXBF_GPIO_PIN_STATE,
- NULL,
- NULL,
- gs->base + MLXBF_GPIO_PIN_DIR_O,
- gs->base + MLXBF_GPIO_PIN_DIR_I,
- 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 8,
+ .dat = gs->base + MLXBF_GPIO_PIN_STATE,
+ .dirout = gs->base + MLXBF_GPIO_PIN_DIR_O,
+ .dirin = gs->base + MLXBF_GPIO_PIN_DIR_I,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
return -ENODEV;
gc->owner = THIS_MODULE;
gc->ngpio = MLXBF_GPIO_NR;
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
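The mlxbf conversion shows the mechanical pattern applied to every bgpio user
in this series: the positional bgpio_init() arguments become named fields of
gpio_generic_chip_config, so registers a chip does not have are simply
omitted. A minimal sketch; the REG_* offsets are hypothetical:

	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,			/* register width in bytes */
		.dat = base + REG_DATA_IN,	/* hypothetical offsets */
		.set = base + REG_DATA_SET,
		/* .clr, .dirout, .dirin left NULL, as with bgpio_init() */
	};

	ret = gpio_generic_chip_init(&gs->chip, &config);
	if (ret)
		return ret;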
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6abe01bc39c3..abffce3894fc 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -6,8 +6,10 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -65,7 +67,7 @@ struct mlxbf2_gpio_context_save_regs {
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO blocks address */
void __iomem *gpio_io;
@@ -132,7 +134,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
u32 arm_gpio_lock_val;
mutex_lock(yu_arm_gpio_lock_param.lock);
- raw_spin_lock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_lock(&gs->chip);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
@@ -140,7 +142,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
@@ -154,11 +156,11 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* Release the YU arm_gpio_lock after changing the direction mode.
*/
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
- __releases(&gs->gc.bgpio_lock)
+ __releases(&gs->chip.lock)
__releases(yu_arm_gpio_lock_param.lock)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
}
@@ -235,11 +237,10 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, irqd_to_hwirq(irqd));
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
@@ -247,7 +248,6 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
@@ -255,21 +255,21 @@ static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ }
+
gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
}
static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf2_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -288,7 +288,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
bool fall = false;
bool rise = false;
u32 val;
@@ -308,7 +307,8 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
if (fall) {
val = readl(gs->gpio_io + YU_GPIO_CAUSE_FALL_EN);
val |= BIT(offset);
@@ -320,7 +320,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_RISE_EN);
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
return 0;
}
@@ -331,7 +330,7 @@ static void mlxbf2_gpio_irq_print_chip(struct irq_data *irqd,
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
- seq_printf(p, dev_name(gs->dev));
+ seq_puts(p, dev_name(gs->dev));
}
static const struct irq_chip mlxbf2_gpio_irq_chip = {
@@ -347,6 +346,7 @@ static const struct irq_chip mlxbf2_gpio_irq_chip = {
static int
mlxbf2_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf2_gpio_context *gs;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
@@ -369,37 +369,34 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gs->gpio_io);
ret = mlxbf2_gpio_get_lock_res(pdev);
- if (ret) {
- dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get yu_arm_gpio_lock resource\n");
if (device_property_read_u32(dev, "npins", &npins))
npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
- gc = &gs->gc;
+ gc = &gs->chip.gc;
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + YU_GPIO_DATAIN,
- gs->gpio_io + YU_GPIO_DATASET,
- gs->gpio_io + YU_GPIO_DATACLEAR,
- NULL,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + YU_GPIO_DATAIN,
+ .set = gs->gpio_io + YU_GPIO_DATASET,
+ .clr = gs->gpio_io + YU_GPIO_DATACLEAR,
+ };
- if (ret) {
- dev_err(dev, "bgpio_init failed\n");
- return ret;
- }
+ ret = gpio_generic_chip_init(&gs->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize the generic GPIO chip\n");
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
girq->handler = handle_simple_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -414,19 +411,15 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
*/
ret = devm_request_irq(dev, irq, mlxbf2_gpio_irq_handler,
IRQF_SHARED, name, gs);
- if (ret) {
- dev_err(dev, "failed to request IRQ");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
- if (ret) {
- dev_err(dev, "Failed adding memory mapped gpiochip\n");
- return ret;
- }
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
return 0;
}
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 10ea71273c89..4770578269ba 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -42,7 +43,7 @@
#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
struct mlxbf3_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO block address */
void __iomem *gpio_set_io;
@@ -58,18 +59,17 @@ static void mlxbf3_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, offset);
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
@@ -77,16 +77,15 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+ }
gpiochip_disable_irq(gc, offset);
}
@@ -94,7 +93,7 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
static irqreturn_t mlxbf3_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf3_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -113,37 +112,33 @@ mlxbf3_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
-
- switch (type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_BOTH:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_RISING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- break;
- default:
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
- return -EINVAL;
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
-
irq_set_handler_locked(irqd, handle_edge_irq);
return 0;
@@ -186,6 +181,7 @@ static int mlxbf3_gpio_add_pin_ranges(struct gpio_chip *chip)
static int mlxbf3_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
@@ -211,25 +207,32 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gs->gpio_clr_io = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(gs->gpio_clr_io))
return PTR_ERR(gs->gpio_clr_io);
- gc = &gs->gc;
-
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
- gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
- gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
+ .set = gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
+ .clr = gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
+ .dirout = gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
+ .dirin = gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
+ return dev_err_probe(dev, ret,
+ "%s: failed to initialize the generic GPIO chip",
+ __func__);
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
@@ -250,7 +253,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, gc, gs);
if (ret)
dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index e855c68c981b..8f1405733d98 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -55,9 +55,9 @@ static void ltq_mm_apply(struct ltq_mm *chip)
* @gpio: GPIO signal number.
* @val: Value to be written to specified signal.
*
- * Set the shadow value and call ltq_mm_apply.
+ * Set the shadow value and call ltq_mm_apply. Always returns 0.
*/
-static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+static int ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct ltq_mm *chip = gpiochip_get_data(gc);
@@ -66,6 +66,8 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
else
chip->shadow &= ~(1 << offset);
ltq_mm_apply(chip);
+
+ return 0;
}
/**
@@ -78,9 +80,7 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
*/
static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
{
- ltq_mm_set(gc, offset, value);
-
- return 0;
+ return ltq_mm_set(gc, offset, value);
}
/**
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
- .remove_new = ltq_mm_remove,
+ .remove = ltq_mm_remove,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index d89e78f0ead3..7d6dd36cf1ae 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -49,6 +49,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -56,6 +57,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib.h"
@@ -123,20 +125,23 @@ static unsigned long bgpio_read32be(void __iomem *reg)
static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- if (gc->be_bits)
- return BIT(gc->bgpio_bits - 1 - line);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (chip->be_bits)
+ return BIT(chip->bits - 1 - line);
return BIT(line);
}
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long pinmask = bgpio_line2mask(gc, gpio);
- bool dir = !!(gc->bgpio_dir & pinmask);
+ bool dir = !!(chip->sdir & pinmask);
if (dir)
- return !!(gc->read_reg(gc->reg_set) & pinmask);
- else
- return !!(gc->read_reg(gc->reg_dat) & pinmask);
+ return !!(chip->read_reg(chip->reg_set) & pinmask);
+
+ return !!(chip->read_reg(chip->reg_dat) & pinmask);
}
/*
@@ -146,26 +151,28 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
- unsigned long get_mask = 0;
- unsigned long set_mask = 0;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long get_mask = 0, set_mask = 0;
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ set_mask = *mask & chip->sdir;
+ get_mask = *mask & ~chip->sdir;
if (set_mask)
- *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ *bits |= chip->read_reg(chip->reg_set) & set_mask;
if (get_mask)
- *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+ *bits |= chip->read_reg(chip->reg_dat) & get_mask;
return 0;
}
static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ return !!(chip->read_reg(chip->reg_dat) & bgpio_line2mask(gc, gpio));
}
/*
@@ -174,9 +181,11 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- *bits |= gc->read_reg(gc->reg_dat) & *mask;
+ *bits |= chip->read_reg(chip->reg_dat) & *mask;
return 0;
}
@@ -186,6 +195,7 @@ static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long readmask = 0;
unsigned long val;
int bit;
@@ -198,7 +208,7 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
readmask |= bgpio_line2mask(gc, bit);
/* Read the register */
- val = gc->read_reg(gc->reg_dat) & readmask;
+ val = chip->read_reg(chip->reg_dat) & readmask;
/*
* Mirror the result into the "bits" result, this will give line 0
@@ -210,53 +220,62 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
return 0;
}
-static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ return 0;
}
-static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
+
- gc->write_reg(gc->reg_dat, gc->bgpio_data);
+ chip->write_reg(chip->reg_dat, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
}
-static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long mask = bgpio_line2mask(gc, gpio);
if (val)
- gc->write_reg(gc->reg_set, mask);
+ chip->write_reg(chip->reg_set, mask);
else
- gc->write_reg(gc->reg_clr, mask);
+ chip->write_reg(chip->reg_clr, mask);
+
+ return 0;
}
-static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = bgpio_line2mask(gc, gpio), flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_set, gc->bgpio_data);
+ chip->write_reg(chip->reg_set, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
}
static void bgpio_multiple_get_masks(struct gpio_chip *gc,
@@ -264,12 +283,13 @@ static void bgpio_multiple_get_masks(struct gpio_chip *gc,
unsigned long *set_mask,
unsigned long *clear_mask)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
int i;
*set_mask = 0;
*clear_mask = 0;
- for_each_set_bit(i, mask, gc->bgpio_bits) {
+ for_each_set_bit(i, mask, chip->bits) {
if (test_bit(i, bits))
*set_mask |= bgpio_line2mask(gc, i);
else
@@ -282,50 +302,79 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
unsigned long *bits,
void __iomem *reg)
{
- unsigned long flags;
- unsigned long set_mask, clear_mask;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long flags, set_mask, clear_mask;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
- gc->bgpio_data |= set_mask;
- gc->bgpio_data &= ~clear_mask;
+ chip->sdata |= set_mask;
+ chip->sdata &= ~clear_mask;
- gc->write_reg(reg, gc->bgpio_data);
+ chip->write_reg(reg, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
}
-static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ bgpio_set_multiple_single_reg(gc, mask, bits, chip->reg_dat);
+
+ return 0;
}
-static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ bgpio_set_multiple_single_reg(gc, mask, bits, chip->reg_set);
+
+ return 0;
}
-static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits)
+static int bgpio_set_multiple_with_clear(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
if (set_mask)
- gc->write_reg(gc->reg_set, set_mask);
+ chip->write_reg(chip->reg_set, set_mask);
if (clear_mask)
- gc->write_reg(gc->reg_clr, clear_mask);
+ chip->write_reg(chip->reg_clr, clear_mask);
+
+ return 0;
+}
+
+static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
+{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (!chip->pinctrl)
+ return 0;
+
+ if (dir_out)
+ return pinctrl_gpio_direction_output(gc, gpio);
+ else
+ return pinctrl_gpio_direction_input(gc, gpio);
+}
+
+static int bgpio_dir_in_err(struct gpio_chip *gc, unsigned int gpio)
+{
+ return -EINVAL;
}
static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
@@ -339,44 +388,47 @@ static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ chip->sdir &= ~bgpio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable) {
- if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ if (chip->dir_unreadable) {
+ if (chip->sdir & bgpio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_out) {
- if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ if (chip->reg_dir_out) {
+ if (chip->read_reg(chip->reg_dir_out) & bgpio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_in)
- if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ if (chip->reg_dir_in)
+ if (!(chip->read_reg(chip->reg_dir_in) & bgpio_line2mask(gc, gpio)))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -384,18 +436,19 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ chip->sdir |= bgpio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
}
static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
@@ -403,7 +456,7 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
{
bgpio_dir_out(gc, gpio, val);
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
@@ -411,35 +464,34 @@ static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_setup_accessors(struct device *dev,
- struct gpio_chip *gc,
+ struct gpio_generic_chip *chip,
bool byte_be)
{
-
- switch (gc->bgpio_bits) {
+ switch (chip->bits) {
case 8:
- gc->read_reg = bgpio_read8;
- gc->write_reg = bgpio_write8;
+ chip->read_reg = bgpio_read8;
+ chip->write_reg = bgpio_write8;
break;
case 16:
if (byte_be) {
- gc->read_reg = bgpio_read16be;
- gc->write_reg = bgpio_write16be;
+ chip->read_reg = bgpio_read16be;
+ chip->write_reg = bgpio_write16be;
} else {
- gc->read_reg = bgpio_read16;
- gc->write_reg = bgpio_write16;
+ chip->read_reg = bgpio_read16;
+ chip->write_reg = bgpio_write16;
}
break;
case 32:
if (byte_be) {
- gc->read_reg = bgpio_read32be;
- gc->write_reg = bgpio_write32be;
+ chip->read_reg = bgpio_read32be;
+ chip->write_reg = bgpio_write32be;
} else {
- gc->read_reg = bgpio_read32;
- gc->write_reg = bgpio_write32;
+ chip->read_reg = bgpio_read32;
+ chip->write_reg = bgpio_write32;
}
break;
#if BITS_PER_LONG >= 64
@@ -449,13 +501,13 @@ static int bgpio_setup_accessors(struct device *dev,
"64 bit big endian byte order unsupported\n");
return -EINVAL;
} else {
- gc->read_reg = bgpio_read64;
- gc->write_reg = bgpio_write64;
+ chip->read_reg = bgpio_read64;
+ chip->write_reg = bgpio_write64;
}
break;
#endif /* BITS_PER_LONG >= 64 */
default:
- dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
+ dev_err(dev, "unsupported data width %u bits\n", chip->bits);
return -EINVAL;
}
@@ -484,27 +536,25 @@ static int bgpio_setup_accessors(struct device *dev,
* - an input direction register (named "dirin") where a 1 bit indicates
* the GPIO is an input.
*/
-static int bgpio_setup_io(struct gpio_chip *gc,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- unsigned long flags)
+static int bgpio_setup_io(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
- gc->reg_dat = dat;
- if (!gc->reg_dat)
+ chip->reg_dat = cfg->dat;
+ if (!chip->reg_dat)
return -EINVAL;
- if (set && clr) {
- gc->reg_set = set;
- gc->reg_clr = clr;
+ if (cfg->set && cfg->clr) {
+ chip->reg_set = cfg->set;
+ chip->reg_clr = cfg->clr;
gc->set = bgpio_set_with_clear;
gc->set_multiple = bgpio_set_multiple_with_clear;
- } else if (set && !clr) {
- gc->reg_set = set;
+ } else if (cfg->set && !cfg->clr) {
+ chip->reg_set = cfg->set;
gc->set = bgpio_set_set;
gc->set_multiple = bgpio_set_multiple_set;
- } else if (flags & BGPIOF_NO_OUTPUT) {
+ } else if (cfg->flags & GPIO_GENERIC_NO_OUTPUT) {
gc->set = bgpio_set_none;
gc->set_multiple = NULL;
} else {
@@ -512,10 +562,10 @@ static int bgpio_setup_io(struct gpio_chip *gc,
gc->set_multiple = bgpio_set_multiple;
}
- if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
+ if (!(cfg->flags & GPIO_GENERIC_UNREADABLE_REG_SET) &&
+ (cfg->flags & GPIO_GENERIC_READ_OUTPUT_REG_SET)) {
gc->get = bgpio_get_set;
- if (!gc->be_bits)
+ if (!chip->be_bits)
gc->get_multiple = bgpio_get_set_multiple;
/*
* We deliberately avoid assigning the ->get_multiple() call
@@ -526,7 +576,7 @@ static int bgpio_setup_io(struct gpio_chip *gc,
*/
} else {
gc->get = bgpio_get;
- if (gc->be_bits)
+ if (chip->be_bits)
gc->get_multiple = bgpio_get_multiple_be;
else
gc->get_multiple = bgpio_get_multiple;
@@ -535,133 +585,130 @@ static int bgpio_setup_io(struct gpio_chip *gc,
return 0;
}
-static int bgpio_setup_direction(struct gpio_chip *gc,
- void __iomem *dirout,
- void __iomem *dirin,
- unsigned long flags)
+static int bgpio_setup_direction(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
- if (dirout || dirin) {
- gc->reg_dir_out = dirout;
- gc->reg_dir_in = dirin;
- if (flags & BGPIOF_NO_SET_ON_INPUT)
+ struct gpio_chip *gc = &chip->gc;
+
+ if (cfg->dirout || cfg->dirin) {
+ chip->reg_dir_out = cfg->dirout;
+ chip->reg_dir_in = cfg->dirin;
+ if (cfg->flags & GPIO_GENERIC_NO_SET_ON_INPUT)
gc->direction_output = bgpio_dir_out_dir_first;
else
gc->direction_output = bgpio_dir_out_val_first;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
} else {
- if (flags & BGPIOF_NO_OUTPUT)
+ if (cfg->flags & GPIO_GENERIC_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
else
gc->direction_output = bgpio_simple_dir_out;
- gc->direction_input = bgpio_simple_dir_in;
+
+ if (cfg->flags & GPIO_GENERIC_NO_INPUT)
+ gc->direction_input = bgpio_dir_in_err;
+ else
+ gc->direction_input = bgpio_simple_dir_in;
}
return 0;
}
-static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+static int bgpio_request(struct gpio_chip *gc, unsigned int gpio_pin)
{
- if (gpio_pin < chip->ngpio)
- return 0;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
- return -EINVAL;
+ if (gpio_pin >= gc->ngpio)
+ return -EINVAL;
+
+ if (chip->pinctrl)
+ return gpiochip_generic_request(gc, gpio_pin);
+
+ return 0;
}
/**
- * bgpio_init() - Initialize generic GPIO accessor functions
- * @gc: the GPIO chip to set up
- * @dev: the parent device of the new GPIO chip (compulsory)
- * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
- * @dat: MMIO address for the register to READ the value of the GPIO lines, it
- * is expected that a 1 in the corresponding bit in this register means the
- * line is asserted
- * @set: MMIO address for the register to SET the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * high.
- * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * low. It is allowed to leave this address as NULL, in that case the SET register
- * will be assumed to also clear the GPIO lines, by actively writing the line
- * with 0.
- * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * output line. Conversely, setting the line to 0 will turn that line into
- * an input.
- * @dirin: MMIO address for the register to set this line as INPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * input line. Conversely, setting the line to 0 will turn that line into
- * an output.
- * @flags: Different flags that will affect the behaviour of the device, such as
- * endianness etc.
+ * gpio_generic_chip_init() - Initialize a generic GPIO chip.
+ * @chip: Generic GPIO chip to set up.
+ * @cfg: Generic GPIO chip configuration.
+ *
+ * Returns 0 on success, negative error number on failure.
*/
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags)
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
+ unsigned long flags = cfg->flags;
+ struct device *dev = cfg->dev;
int ret;
- if (!is_power_of_2(sz))
+ if (!is_power_of_2(cfg->sz))
return -EINVAL;
- gc->bgpio_bits = sz * 8;
- if (gc->bgpio_bits > BITS_PER_LONG)
+ chip->bits = cfg->sz * 8;
+ if (chip->bits > BITS_PER_LONG)
return -EINVAL;
- raw_spin_lock_init(&gc->bgpio_lock);
+ raw_spin_lock_init(&chip->lock);
gc->parent = dev;
gc->label = dev_name(dev);
gc->base = -1;
gc->request = bgpio_request;
- gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
+ chip->be_bits = !!(flags & GPIO_GENERIC_BIG_ENDIAN);
ret = gpiochip_get_ngpios(gc, dev);
if (ret)
- gc->ngpio = gc->bgpio_bits;
+ gc->ngpio = chip->bits;
- ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ ret = bgpio_setup_io(chip, cfg);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = bgpio_setup_accessors(dev, chip,
+ flags & GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
- ret = bgpio_setup_direction(gc, dirout, dirin, flags);
+ ret = bgpio_setup_direction(chip, cfg);
if (ret)
return ret;
- gc->bgpio_data = gc->read_reg(gc->reg_dat);
+ if (flags & GPIO_GENERIC_PINCTRL_BACKEND) {
+ chip->pinctrl = true;
+ /* Currently this callback is only used for pincontrol */
+ gc->free = gpiochip_generic_free;
+ }
+
+ chip->sdata = chip->read_reg(chip->reg_dat);
if (gc->set == bgpio_set_set &&
- !(flags & BGPIOF_UNREADABLE_REG_SET))
- gc->bgpio_data = gc->read_reg(gc->reg_set);
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_SET))
+ chip->sdata = chip->read_reg(chip->reg_set);
- if (flags & BGPIOF_UNREADABLE_REG_DIR)
- gc->bgpio_dir_unreadable = true;
+ if (flags & GPIO_GENERIC_UNREADABLE_REG_DIR)
+ chip->dir_unreadable = true;
/*
* Inspect hardware to find initial direction setting.
*/
- if ((gc->reg_dir_out || gc->reg_dir_in) &&
- !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
- if (gc->reg_dir_out)
- gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
- else if (gc->reg_dir_in)
- gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ if ((chip->reg_dir_out || chip->reg_dir_in) &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_DIR)) {
+ if (chip->reg_dir_out)
+ chip->sdir = chip->read_reg(chip->reg_dir_out);
+ else if (chip->reg_dir_in)
+ chip->sdir = ~chip->read_reg(chip->reg_dir_in);
/*
* If we have two direction registers, synchronise
* input setting to output setting, the library
* can not handle a line being input and output at
* the same time.
*/
- if (gc->reg_dir_out && gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (chip->reg_dir_out && chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
}
return ret;
}
-EXPORT_SYMBOL_GPL(bgpio_init);
+EXPORT_SYMBOL_GPL(gpio_generic_chip_init);
#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
@@ -687,34 +734,15 @@ static const struct of_device_id bgpio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
{ .compatible = "ni,169445-nand-gpio" },
+ { .compatible = "intel,ixp4xx-expansion-bus-mmio-gpio" },
{ }
};
MODULE_DEVICE_TABLE(of, bgpio_of_match);
-static struct bgpio_pdata *bgpio_parse_fw(struct device *dev, unsigned long *flags)
-{
- struct bgpio_pdata *pdata;
-
- if (!dev_fwnode(dev))
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->base = -1;
-
- if (device_is_big_endian(dev))
- *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
-
- if (device_property_read_bool(dev, "no-output"))
- *flags |= BGPIOF_NO_OUTPUT;
-
- return pdata;
-}
-
static int bgpio_pdev_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
+ struct gpio_generic_chip *gen_gc;
struct device *dev = &pdev->dev;
struct resource *r;
void __iomem *dat;
@@ -724,18 +752,9 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirin;
unsigned long sz;
unsigned long flags = 0;
+ unsigned int base;
int err;
- struct gpio_chip *gc;
- struct bgpio_pdata *pdata;
-
- pdata = bgpio_parse_fw(dev, &flags);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
-
- if (!pdata) {
- pdata = dev_get_platdata(dev);
- flags = pdev->id_entry->driver_data;
- }
+ const char *label;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (!r)
@@ -763,34 +782,52 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
if (IS_ERR(dirin))
return PTR_ERR(dirin);
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
- err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
+ if (device_is_big_endian(dev))
+ flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
+
+ if (device_property_read_bool(dev, "no-output"))
+ flags |= GPIO_GENERIC_NO_OUTPUT;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = sz,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
- if (pdata) {
- if (pdata->label)
- gc->label = pdata->label;
- gc->base = pdata->base;
- if (pdata->ngpio > 0)
- gc->ngpio = pdata->ngpio;
- }
+ err = device_property_read_string(dev, "label", &label);
+ if (!err)
+ gen_gc->gc.label = label;
+
+ /*
+ * This property *must not* be used in device-tree sources; it is only
+ * meant to be passed to the driver from board files and the MFD core.
+ */
+ err = device_property_read_u32(dev, "gpio-mmio,base", &base);
+ if (!err && base <= INT_MAX)
+ gen_gc->gc.base = base;
- platform_set_drvdata(pdev, gc);
+ platform_set_drvdata(pdev, &gen_gc->gc);
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
static const struct platform_device_id bgpio_id_table[] = {
{
.name = "basic-mmio-gpio",
.driver_data = 0,
- }, {
- .name = "basic-mmio-gpio-be",
- .driver_data = BGPIOF_BIG_ENDIAN,
},
{ }
};
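To summarise the API change this file implements: bgpio_init()'s nine positional arguments become a designated-initializer config struct passed to gpio_generic_chip_init(). A minimal consumer sketch, assuming a driver-private struct with an embedded struct gpio_generic_chip (priv, base and the REG_* offsets are hypothetical):

	struct gpio_generic_chip_config config = {
		.dev	= dev,
		.sz	= 4,			/* 32-bit registers */
		.dat	= base + REG_DAT,
		.set	= base + REG_SET,
		.clr	= base + REG_CLR,	/* may be NULL: SET also clears */
		.dirout	= base + REG_DIR,
		.flags	= GPIO_GENERIC_BIG_ENDIAN,
	};

	ret = gpio_generic_chip_init(&priv->chip, &config);
	if (ret)
		return dev_err_probe(dev, ret,
				     "generic GPIO chip init failed\n");

The designated initializers make omitted registers explicitly NULL and keep the call sites readable, which is what the mlxbf3 and mpc8xxx conversions in this series rely on.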
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d39c6618bade..a7d69f3835c1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -122,7 +122,7 @@ static void __gpio_mockup_set(struct gpio_mockup_chip *chip,
chip->lines[offset].value = !!value;
}
-static void gpio_mockup_set(struct gpio_chip *gc,
+static int gpio_mockup_set(struct gpio_chip *gc,
unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -130,10 +130,12 @@ static void gpio_mockup_set(struct gpio_chip *gc,
guard(mutex)(&chip->lock);
__gpio_mockup_set(chip, offset, value);
+
+ return 0;
}
-static void gpio_mockup_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
unsigned int bit;
@@ -142,6 +144,8 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
for_each_set_bit(bit, mask, gc->ngpio)
__gpio_mockup_set(chip, bit, test_bit(bit, bits));
+
+ return 0;
}
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
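The mockup hunks lean on the scope-based guard() helper from <linux/cleanup.h>. A condensed sketch of its effect (illustrative of the semantics, not the actual macro expansion):

	guard(mutex)(&chip->lock);	/* mutex_lock() here ... */
	__gpio_mockup_set(chip, offset, value);
	return 0;			/* ... mutex_unlock() runs automatically
					 * when the guard leaves scope */

Because unlock is automatic, the converted callbacks can gain early "return ret;" paths without leaking the lock.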
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 61f9efd6c64f..4eb9f1a2779b 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -52,15 +52,15 @@ static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
return !!(ret & BIT(offset));
}
-static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
int state;
state = moxtet_device_written(chip->dev);
if (state < 0)
- return;
+ return state;
offset -= MOXTET_GPIO_INPUTS;
@@ -69,7 +69,7 @@ static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
else
state &= ~BIT(offset);
- moxtet_device_write(chip->dev, state);
+ return moxtet_device_write(chip->dev, state);
}
static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -104,13 +104,11 @@ static int moxtet_gpio_direction_output(struct gpio_chip *gc,
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
if (chip->desc->out_mask & BIT(offset))
- moxtet_gpio_set_value(gc, offset, val);
+ return moxtet_gpio_set_value(gc, offset, val);
else if (chip->desc->in_mask & BIT(offset))
return -ENOTSUPP;
- else
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
static int moxtet_gpio_probe(struct device *dev)
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index a199dce3394a..00f209157fd0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -19,7 +19,8 @@
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int shadow_dvo;
unsigned int shadow_gpioe;
unsigned int shadow_ddr;
@@ -43,8 +44,8 @@ struct mpc52xx_gpiochip {
*/
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
@@ -57,9 +58,8 @@ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (7 - gpio);
@@ -69,7 +69,7 @@ __mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_8(&regs->wkup_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -81,13 +81,14 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -108,9 +109,8 @@ static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -134,30 +134,41 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
struct gpio_chip *gc;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 8;
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
gc->set = mpc52xx_wkup_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
chip->shadow_ddr = in_8(&regs->wkup_ddr);
chip->shadow_dvo = in_8(&regs->wkup_dvo);
@@ -165,13 +176,6 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
return 0;
}
-static void mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-}
-
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
@@ -183,7 +187,6 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove_new = mpc52xx_gpiochip_remove,
};
/*
@@ -205,8 +208,8 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
*/
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
@@ -217,9 +220,8 @@ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (31 - gpio);
@@ -228,7 +230,7 @@ __mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_be32(&regs->simple_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -240,13 +242,14 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -267,9 +270,8 @@ static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -294,30 +296,41 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 32;
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
gc->set = mpc52xx_simple_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
chip->shadow_ddr = in_be32(&regs->simple_ddr);
chip->shadow_dvo = in_be32(&regs->simple_dvo);
@@ -336,7 +349,6 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove_new = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
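Both mpc5200 conversions above follow the same recipe for dropping struct of_mm_gpio_chip; condensed from the hunks, with the same names and error handling:

	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
	if (!gc->label)
		return -ENOMEM;

	chip->regs = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	/* devres unwinds the chip on unbind: no .remove() callback remains */
	return devm_gpiochip_add_data(dev, gc, chip);

This is why mpc52xx_gpiochip_remove() and both .remove_new assignments can be deleted outright.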
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 685ec31db409..bfe828734ee1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,13 +9,13 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -35,7 +35,7 @@
#define GPIO_IBE 0x18
struct mpc8xxx_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
raw_spinlock_t lock;
@@ -67,9 +67,11 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
- val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
- out_shadow = gc->bgpio_data & out_mask;
+ out_mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
+ val = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->chip.sdata & out_mask;
return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
@@ -109,12 +111,13 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long mask;
int i;
- mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
- & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
+ mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR);
for_each_set_bit(i, &mask, 32)
generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
@@ -124,13 +127,18 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
+ gpiochip_enable_irq(gc, hwirq);
+
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -139,47 +147,53 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IER,
mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -194,7 +208,6 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
@@ -212,7 +225,9 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -220,14 +235,18 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -245,6 +264,8 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_ack = mpc8xxx_irq_ack,
/* this might get overwritten in mpc8xxx_probe() */
.irq_set_type = mpc8xxx_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
@@ -286,6 +307,7 @@ static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
};
static const struct of_device_id mpc8xxx_gpio_ids[] = {
+ { .compatible = "fsl,mpc8314-gpio", },
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
{ .compatible = "fsl,mpc8610-gpio", },
@@ -300,14 +322,15 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
static int mpc8xxx_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct mpc8xxx_gpio_chip *mpc8xxx_gc;
- struct gpio_chip *gc;
const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct gpio_generic_chip_config config;
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc;
+ struct device *dev = &pdev->dev;
struct fwnode_handle *fwnode;
+ struct gpio_chip *gc;
int ret;
- mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
+ mpc8xxx_gc = devm_kzalloc(dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
if (!mpc8xxx_gc)
return -ENOMEM;
@@ -319,33 +342,31 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (IS_ERR(mpc8xxx_gc->regs))
return PTR_ERR(mpc8xxx_gc->regs);
- gc = &mpc8xxx_gc->gc;
- gc->parent = &pdev->dev;
-
- if (device_property_read_bool(&pdev->dev, "little-endian")) {
- ret = bgpio_init(gc, &pdev->dev, 4,
- mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL,
- mpc8xxx_gc->regs + GPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN);
- if (ret)
- return ret;
- dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
+ gc = &mpc8xxx_gc->chip.gc;
+ gc->parent = dev;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = mpc8xxx_gc->regs + GPIO_DAT,
+ .dirout = mpc8xxx_gc->regs + GPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN
+ };
+
+ if (device_property_read_bool(dev, "little-endian")) {
+ dev_dbg(dev, "GPIO registers are LITTLE endian\n");
} else {
- ret = bgpio_init(gc, &pdev->dev, 4,
- mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL,
- mpc8xxx_gc->regs + GPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN
- | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
- if (ret)
- return ret;
- dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
+ config.flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
+ dev_dbg(dev, "GPIO registers are BIG endian\n");
}
+ ret = gpio_generic_chip_init(&mpc8xxx_gc->chip, &config);
+ if (ret)
+ return ret;
+
mpc8xxx_gc->direction_output = gc->direction_output;
- devtype = device_get_match_data(&pdev->dev);
+ devtype = device_get_match_data(dev);
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
@@ -370,20 +391,24 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate
* the port value to the GPIO Data Register.
*/
- fwnode = dev_fwnode(&pdev->dev);
- if (of_device_is_compatible(np, "fsl,qoriq-gpio") ||
- of_device_is_compatible(np, "fsl,ls1028a-gpio") ||
- of_device_is_compatible(np, "fsl,ls1088a-gpio") ||
+ fwnode = dev_fwnode(dev);
+ if (device_is_compatible(dev, "fsl,qoriq-gpio") ||
+ device_is_compatible(dev, "fsl,ls1028a-gpio") ||
+ device_is_compatible(dev, "fsl,ls1088a-gpio") ||
is_acpi_node(fwnode)) {
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
/* Also, latch state of GPIOs configured as output by bootloader. */
- gc->bgpio_data = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) &
- gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ mpc8xxx_gc->chip.sdata =
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
}
- ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
+ ret = devm_gpiochip_add_data(dev, gc, mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"GPIO chip registration failed with status %d\n", ret);
return ret;
}
@@ -401,21 +426,24 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return 0;
/* ack and mask all irqs */
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR, 0);
- ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
+ ret = devm_request_irq(dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev,
- "failed to devm_request_irq(%d), ret = %d\n",
+ dev_err(dev, "failed to devm_request_irq(%d), ret = %d\n",
mpc8xxx_gc->irqn, ret);
goto err;
}
- device_init_wakeup(&pdev->dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
err:
@@ -466,7 +494,7 @@ MODULE_DEVICE_TABLE(acpi, gpio_acpi_ids);
static struct platform_driver mpc8xxx_plat_driver = {
.probe = mpc8xxx_probe,
- .remove_new = mpc8xxx_remove,
+ .remove = mpc8xxx_remove,
.driver = {
.name = "gpio-mpc8xxx",
.of_match_table = mpc8xxx_gpio_ids,
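The mpc8xxx conversion replaces direct gc->read_reg()/gc->write_reg() calls with accessors that take the generic chip. Stripped down to a single read-modify-write, the pattern looks like this (REG_X and hwirq are hypothetical placeholders; the real hunks use GPIO_IMR/GPIO_ICR and irqd_to_hwirq(d)):

	unsigned long val;

	val = gpio_generic_read_reg(&mpc8xxx_gc->chip,
				    mpc8xxx_gc->regs + REG_X);
	gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + REG_X,
			       val | mpc_pin2mask(hwirq));

Going through the helpers keeps the width and endianness handling inside the generic chip rather than in per-driver function pointers.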
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
new file mode 100644
index 000000000000..9468795b9634
--- /dev/null
+++ b/drivers/gpio/gpio-mpfs.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip PolarFire SoC (MPFS) GPIO controller driver
+ *
+ * Copyright (c) 2018-2024 Microchip Technology Inc. and its subsidiaries
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define MPFS_GPIO_CTRL(i) (0x4 * (i))
+#define MPFS_MAX_NUM_GPIO 32
+#define MPFS_GPIO_EN_INT 3
+#define MPFS_GPIO_EN_OUT_BUF BIT(2)
+#define MPFS_GPIO_EN_IN BIT(1)
+#define MPFS_GPIO_EN_OUT BIT(0)
+#define MPFS_GPIO_DIR_MASK GENMASK(2, 0)
+
+#define MPFS_GPIO_TYPE_INT_EDGE_BOTH 0x80
+#define MPFS_GPIO_TYPE_INT_EDGE_NEG 0x60
+#define MPFS_GPIO_TYPE_INT_EDGE_POS 0x40
+#define MPFS_GPIO_TYPE_INT_LEVEL_LOW 0x20
+#define MPFS_GPIO_TYPE_INT_LEVEL_HIGH 0x00
+#define MPFS_GPIO_TYPE_INT_MASK GENMASK(7, 5)
+#define MPFS_IRQ_REG 0x80
+
+#define MPFS_INP_REG 0x84
+#define COREGPIO_INP_REG 0x90
+#define MPFS_OUTP_REG 0x88
+#define COREGPIO_OUTP_REG 0xA0
+
+struct mpfs_gpio_reg_offsets {
+ u8 inp;
+ u8 outp;
+};
+
+struct mpfs_gpio_chip {
+ struct regmap *regs;
+ const struct mpfs_gpio_reg_offsets *offsets;
+ struct gpio_chip gc;
+};
+
+static const struct regmap_config mpfs_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int mpfs_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
+
+ return 0;
+}
+
+static int mpfs_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio_index, int value)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
+ regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
+ value << gpio_index);
+
+ return 0;
+}
+
+static int mpfs_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+ unsigned int gpio_cfg;
+
+ regmap_read(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index), &gpio_cfg);
+ if (gpio_cfg & MPFS_GPIO_EN_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mpfs_gpio_get(struct gpio_chip *gc, unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ if (mpfs_gpio_get_direction(gc, gpio_index) == GPIO_LINE_DIRECTION_OUT)
+ return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index));
+ else
+ return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->inp, BIT(gpio_index));
+}
+
+static int mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+ int ret;
+
+ mpfs_gpio_get(gc, gpio_index);
+
+ ret = regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp,
+ BIT(gpio_index), value << gpio_index);
+
+ mpfs_gpio_get(gc, gpio_index);
+
+ return ret;
+}
+
+static int mpfs_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_gpio_chip *mpfs_gpio;
+ struct clk *clk;
+ void __iomem *base;
+ int ngpios;
+
+ mpfs_gpio = devm_kzalloc(dev, sizeof(*mpfs_gpio), GFP_KERNEL);
+ if (!mpfs_gpio)
+ return -ENOMEM;
+
+ mpfs_gpio->offsets = device_get_match_data(&pdev->dev);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "failed to ioremap memory resource\n");
+
+ mpfs_gpio->regs = devm_regmap_init_mmio(dev, base, &mpfs_gpio_regmap_config);
+ if (IS_ERR(mpfs_gpio->regs))
+ return dev_err_probe(dev, PTR_ERR(mpfs_gpio->regs),
+ "failed to initialise regmap\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get and enable clock\n");
+
+ ngpios = MPFS_MAX_NUM_GPIO;
+ device_property_read_u32(dev, "ngpios", &ngpios);
+ if (ngpios > MPFS_MAX_NUM_GPIO)
+ ngpios = MPFS_MAX_NUM_GPIO;
+
+ mpfs_gpio->gc.direction_input = mpfs_gpio_direction_input;
+ mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
+ mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
+ mpfs_gpio->gc.get = mpfs_gpio_get;
+ mpfs_gpio->gc.set = mpfs_gpio_set;
+ mpfs_gpio->gc.base = -1;
+ mpfs_gpio->gc.ngpio = ngpios;
+ mpfs_gpio->gc.label = dev_name(dev);
+ mpfs_gpio->gc.parent = dev;
+ mpfs_gpio->gc.owner = THIS_MODULE;
+
+ return devm_gpiochip_add_data(dev, &mpfs_gpio->gc, mpfs_gpio);
+}
+
+static const struct mpfs_gpio_reg_offsets mpfs_reg_offsets = {
+ .inp = MPFS_INP_REG,
+ .outp = MPFS_OUTP_REG,
+};
+
+static const struct mpfs_gpio_reg_offsets coregpio_reg_offsets = {
+ .inp = COREGPIO_INP_REG,
+ .outp = COREGPIO_OUTP_REG,
+};
+
+static const struct of_device_id mpfs_gpio_of_ids[] = {
+ {
+ .compatible = "microchip,mpfs-gpio",
+ .data = &mpfs_reg_offsets,
+ }, {
+ .compatible = "microchip,coregpio-rtl-v3",
+ .data = &coregpio_reg_offsets,
+ },
+ { /* end of list */ }
+};
+
+static struct platform_driver mpfs_gpio_driver = {
+ .probe = mpfs_gpio_probe,
+ .driver = {
+ .name = "microchip,mpfs-gpio",
+ .of_match_table = mpfs_gpio_of_ids,
+ },
+};
+builtin_platform_driver(mpfs_gpio_driver);
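The new mpfs driver goes through a regmap rather than raw readl()/writel(). The essential setup, condensed from the probe and callbacks above (error handling elided for brevity):

	static const struct regmap_config cfg = {
		.reg_bits   = 32,
		.reg_stride = 4,
		.val_bits   = 32,
	};

	regs = devm_regmap_init_mmio(dev, base, &cfg);

	/* per-line direction control: one CTRL register per GPIO */
	regmap_update_bits(regs, MPFS_GPIO_CTRL(i),
			   MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);

The two supported register layouts (MPFS vs. CoreGPIO) differ only in the input/output data offsets, which is why they are carried as match data and dereferenced through mpfs_gpio->offsets in the accessors.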
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
new file mode 100644
index 000000000000..9f42bb30b4ec
--- /dev/null
+++ b/drivers/gpio/gpio-mpsse.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FTDI MPSSE GPIO support
+ *
+ * Based on code by Anatolij Gustschin
+ *
+ * Copyright (C) 2024 Mary Strodl <mstrodl@csh.rit.edu>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+
+struct mpsse_priv {
+ struct gpio_chip gpio;
+ struct usb_device *udev; /* USB device encompassing all MPSSEs */
+ struct usb_interface *intf; /* USB interface for this MPSSE */
+ u8 intf_id; /* USB interface number for this MPSSE */
+ struct work_struct irq_work; /* polling work thread */
+ struct mutex irq_mutex; /* lock over irq_data */
+ atomic_t irq_type[16]; /* pin -> edge detection type */
+ atomic_t irq_enabled;
+ int id;
+
+ u8 gpio_outputs[2]; /* Output states for GPIOs [L, H] */
+ u8 gpio_dir[2]; /* Directions for GPIOs [L, H] */
+
+ u8 *bulk_in_buf; /* Extra recv buffer to grab status bytes */
+
+ struct usb_endpoint_descriptor *bulk_in;
+ struct usb_endpoint_descriptor *bulk_out;
+
+ struct mutex io_mutex; /* sync I/O with disconnect */
+};
+
+struct bulk_desc {
+ bool tx; /* direction of bulk transfer */
+ u8 *data; /* input (tx) or output (rx) */
+ int len; /* Length of `data` if tx, or length of */
+ /* Data to read if rx */
+ int len_actual; /* Length successfully transferred */
+ int timeout;
+};
+
+static const struct usb_device_id gpio_mpsse_table[] = {
+ { USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gpio_mpsse_table);
+
+static DEFINE_IDA(gpio_mpsse_ida);
+
+/* MPSSE commands */
+#define SET_BITS_CMD 0x80
+#define GET_BITS_CMD 0x81
+
+#define SET_BITMODE_REQUEST 0x0B
+#define MODE_MPSSE (2 << 8)
+#define MODE_RESET 0
+
+/* Arbitrarily decided. This could probably be much less */
+#define MPSSE_WRITE_TIMEOUT 5000
+#define MPSSE_READ_TIMEOUT 5000
+
+/* 1 millisecond, also pretty arbitrary */
+#define MPSSE_POLL_INTERVAL 1000
+
+static int mpsse_bulk_xfer(struct usb_interface *intf, struct bulk_desc *desc)
+{
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+ struct usb_device *udev = priv->udev;
+ unsigned int pipe;
+ int ret;
+
+ if (desc->tx)
+ pipe = usb_sndbulkpipe(udev, priv->bulk_out->bEndpointAddress);
+ else
+ pipe = usb_rcvbulkpipe(udev, priv->bulk_in->bEndpointAddress);
+
+ ret = usb_bulk_msg(udev, pipe, desc->data, desc->len,
+ &desc->len_actual, desc->timeout);
+ if (ret)
+ dev_dbg(&udev->dev, "mpsse: bulk transfer failed: %d\n", ret);
+
+ return ret;
+}
+
+static int mpsse_write(struct usb_interface *intf,
+ u8 *buf, size_t len)
+{
+ int ret;
+ struct bulk_desc desc;
+
+ desc.len_actual = 0;
+ desc.tx = true;
+ desc.data = buf;
+ desc.len = len;
+ desc.timeout = MPSSE_WRITE_TIMEOUT;
+
+ ret = mpsse_bulk_xfer(intf, &desc);
+
+ return ret;
+}
+
+static int mpsse_read(struct usb_interface *intf, u8 *buf, size_t len)
+{
+ int ret;
+ struct bulk_desc desc;
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+
+ desc.len_actual = 0;
+ desc.tx = false;
+ desc.data = priv->bulk_in_buf;
+ /* Device sends 2 additional status bytes, read len + 2 */
+ desc.len = min_t(size_t, len + 2, usb_endpoint_maxp(priv->bulk_in));
+ desc.timeout = MPSSE_READ_TIMEOUT;
+
+ ret = mpsse_bulk_xfer(intf, &desc);
+ if (ret)
+ return ret;
+
+ /* Did we get enough data? */
+ if (desc.len_actual < desc.len)
+ return -EIO;
+
+ memcpy(buf, desc.data + 2, desc.len_actual - 2);
+
+ return ret;
+}
+
+static int gpio_mpsse_set_bank(struct mpsse_priv *priv, u8 bank)
+{
+ int ret;
+ u8 tx_buf[3] = {
+ SET_BITS_CMD | (bank << 1),
+ priv->gpio_outputs[bank],
+ priv->gpio_dir[bank],
+ };
+
+ ret = mpsse_write(priv->intf, tx_buf, 3);
+
+ return ret;
+}
+
+static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
+{
+ int ret;
+ u8 buf = GET_BITS_CMD | (bank << 1);
+
+ ret = mpsse_write(priv->intf, &buf, 1);
+ if (ret)
+ return ret;
+
+ ret = mpsse_read(priv->intf, &buf, 1);
+ if (ret)
+ return ret;
+
+ return buf;
+}
+
+static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long i, bank, bank_mask, bank_bits;
+ int ret;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
+ bank = i / 8;
+
+ if (bank_mask) {
+ bank_bits = bitmap_get_value8(bits, i);
+ /* Zero out pins we want to change */
+ priv->gpio_outputs[bank] &= ~bank_mask;
+ /* Set pins we care about */
+ priv->gpio_outputs[bank] |= bank_bits & bank_mask;
+
+ ret = gpio_mpsse_set_bank(priv, bank);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long i, bank, bank_mask;
+ int ret;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
+ bank = i / 8;
+
+ if (bank_mask) {
+ ret = gpio_mpsse_get_bank(priv, bank);
+ if (ret < 0)
+ return ret;
+
+ bitmap_set_value8(bits, ret & bank_mask, i);
+ }
+ }
+
+ return 0;
+}
+
+static int gpio_mpsse_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int err;
+ unsigned long mask = 0, bits = 0;
+
+ __set_bit(offset, &mask);
+ err = gpio_mpsse_get_multiple(chip, &mask, &bits);
+ if (err)
+ return err;
+
+ /* The masked result is some nonzero bit, not necessarily 1, so normalize */
+ if (bits)
+ return 1;
+ else
+ return 0;
+}
+
+static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long mask = 0, bits = 0;
+
+ __set_bit(offset, &mask);
+ if (value)
+ __set_bit(offset, &bits);
+
+ return gpio_mpsse_set_multiple(chip, &mask, &bits);
+}
+
+static int gpio_mpsse_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+
+ scoped_guard(mutex, &priv->io_mutex)
+ priv->gpio_dir[bank] |= BIT(bank_offset);
+
+ return gpio_mpsse_gpio_set(chip, offset, value);
+}
+
+static int gpio_mpsse_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+
+ guard(mutex)(&priv->io_mutex);
+ priv->gpio_dir[bank] &= ~BIT(bank_offset);
+ gpio_mpsse_set_bank(priv, bank);
+
+ return 0;
+}
+
+static int gpio_mpsse_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ int ret;
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ /* MPSSE direction bits are inverted vs. the common 1-means-input convention */
+ if (priv->gpio_dir[bank] & BIT(bank_offset))
+ ret = GPIO_LINE_DIRECTION_OUT;
+ else
+ ret = GPIO_LINE_DIRECTION_IN;
+
+ return ret;
+}
+
+static void gpio_mpsse_poll(struct work_struct *work)
+{
+ unsigned long pin_mask, pin_states, flags;
+ int irq_enabled, offset, err, value, fire_irq,
+ irq, old_value[16], irq_type[16];
+ struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
+ irq_work);
+
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset)
+ old_value[offset] = -1;
+
+ while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+ usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
+ /* The guard drops irq_mutex at the end of each loop iteration */
+ guard(mutex)(&priv->irq_mutex);
+
+ pin_mask = 0;
+ pin_states = 0;
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset) {
+ irq_type[offset] = atomic_read(&priv->irq_type[offset]);
+ if (irq_type[offset] != IRQ_TYPE_NONE &&
+ irq_enabled & BIT(offset))
+ pin_mask |= BIT(offset);
+ else
+ old_value[offset] = -1;
+ }
+
+ err = gpio_mpsse_get_multiple(&priv->gpio, &pin_mask,
+ &pin_states);
+ if (err) {
+ dev_err_ratelimited(&priv->intf->dev,
+ "Error polling!\n");
+ continue;
+ }
+
+ /* Check each value */
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset) {
+ if (old_value[offset] == -1)
+ continue;
+
+ fire_irq = 0;
+ value = pin_states & BIT(offset);
+
+ switch (irq_type[offset]) {
+ case IRQ_TYPE_EDGE_RISING:
+ fire_irq = value > old_value[offset];
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ fire_irq = value < old_value[offset];
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ fire_irq = value != old_value[offset];
+ break;
+ }
+ if (!fire_irq)
+ continue;
+
+ irq = irq_find_mapping(priv->gpio.irq.domain,
+ offset);
+ local_irq_save(flags);
+ generic_handle_irq(irq);
+ local_irq_restore(flags);
+ }
+
+ /* Sync back values so we can refer to them next tick */
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset)
+ if (irq_type[offset] != IRQ_TYPE_NONE &&
+ irq_enabled & BIT(offset))
+ old_value[offset] = pin_states & BIT(offset);
+ }
+}
+
+static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ int offset;
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ offset = irqd->hwirq;
+ atomic_set(&priv->irq_type[offset], type & IRQ_TYPE_EDGE_BOTH);
+
+ return 0;
+}
+
+static void gpio_mpsse_irq_disable(struct irq_data *irqd)
+{
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
+ gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+}
+
+static void gpio_mpsse_irq_enable(struct irq_data *irqd)
+{
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
+ /* If this is the first IRQ to be enabled, start the polling worker */
+ if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
+ INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
+ schedule_work(&priv->irq_work);
+ }
+}
+
+static const struct irq_chip gpio_mpsse_irq_chip = {
+ .name = "gpio-mpsse-irq",
+ .irq_enable = gpio_mpsse_irq_enable,
+ .irq_disable = gpio_mpsse_irq_disable,
+ .irq_set_type = gpio_mpsse_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void gpio_mpsse_ida_remove(void *data)
+{
+ struct mpsse_priv *priv = data;
+
+ ida_free(&gpio_mpsse_ida, priv->id);
+}
+
+static int gpio_mpsse_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct mpsse_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &interface->dev;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->udev = usb_get_dev(interface_to_usbdev(interface));
+ priv->intf = interface;
+ priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ priv->id = ida_alloc(&gpio_mpsse_ida, GFP_KERNEL);
+ if (priv->id < 0)
+ return priv->id;
+
+ err = devm_add_action_or_reset(dev, gpio_mpsse_ida_remove, priv);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev, &priv->io_mutex);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev, &priv->irq_mutex);
+ if (err)
+ return err;
+
+ priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
+ "gpio-mpsse.%d.%d",
+ priv->id, priv->intf_id);
+ if (!priv->gpio.label)
+ return -ENOMEM;
+
+ priv->gpio.owner = THIS_MODULE;
+ priv->gpio.parent = interface->usb_dev;
+ priv->gpio.get_direction = gpio_mpsse_get_direction;
+ priv->gpio.direction_input = gpio_mpsse_direction_input;
+ priv->gpio.direction_output = gpio_mpsse_direction_output;
+ priv->gpio.get = gpio_mpsse_gpio_get;
+ priv->gpio.set = gpio_mpsse_gpio_set;
+ priv->gpio.get_multiple = gpio_mpsse_get_multiple;
+ priv->gpio.set_multiple = gpio_mpsse_set_multiple;
+ priv->gpio.base = -1;
+ priv->gpio.ngpio = 16;
+ priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
+ priv->gpio.can_sleep = 1;
+
+ err = usb_find_common_endpoints(interface->cur_altsetting,
+ &priv->bulk_in, &priv->bulk_out,
+ NULL, NULL);
+ if (err)
+ return err;
+
+ priv->bulk_in_buf = devm_kmalloc(dev, usb_endpoint_maxp(priv->bulk_in),
+ GFP_KERNEL);
+ if (!priv->bulk_in_buf)
+ return -ENOMEM;
+
+ usb_set_intfdata(interface, priv);
+
+ /* Reset mode, needed to correctly enter MPSSE mode */
+ err = usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ SET_BITMODE_REQUEST,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ MODE_RESET, priv->intf_id + 1, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err)
+ return err;
+
+ /* Enter MPSSE mode */
+ err = usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ SET_BITMODE_REQUEST,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ MODE_MPSSE, priv->intf_id + 1, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err)
+ return err;
+
+ gpio_irq_chip_set_chip(&priv->gpio.irq, &gpio_mpsse_irq_chip);
+
+ priv->gpio.irq.parent_handler = NULL;
+ priv->gpio.irq.num_parents = 0;
+ priv->gpio.irq.parents = NULL;
+ priv->gpio.irq.default_type = IRQ_TYPE_NONE;
+ priv->gpio.irq.handler = handle_simple_irq;
+
+ err = devm_gpiochip_add_data(dev, &priv->gpio, priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void gpio_mpsse_disconnect(struct usb_interface *intf)
+{
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+
+ priv->intf = NULL;
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(priv->udev);
+}
+
+static struct usb_driver gpio_mpsse_driver = {
+ .name = "gpio-mpsse",
+ .probe = gpio_mpsse_probe,
+ .disconnect = gpio_mpsse_disconnect,
+ .id_table = gpio_mpsse_table,
+};
+
+module_usb_driver(gpio_mpsse_driver);
+
+MODULE_AUTHOR("Mary Strodl <mstrodl@csh.rit.edu>");
+MODULE_DESCRIPTION("MPSSE GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 6db9e469e0dc..b0cccd856840 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -486,7 +486,7 @@ struct msc313_gpio {
u8 *saved;
};
-static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct msc313_gpio *gpio = gpiochip_get_data(chip);
u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
@@ -497,6 +497,8 @@ static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
gpioreg &= ~MSC313_GPIO_OUT;
writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
}
static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset)
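
The msc313 hunk above is one instance of a series-wide conversion: the gpio_chip .set() callback now returns int so bus errors can reach the GPIO core. A minimal sketch of the pattern, using a hypothetical driver (the my_gpio names are placeholders):

static int my_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct my_gpio *priv = gpiochip_get_data(chip);

	/* Propagate I/O errors instead of silently dropping them. */
	return my_gpio_write_bit(priv, offset, value);
}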
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 93facbebb80e..91230be51587 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -6,11 +6,11 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#define MTK_BANK_CNT 3
#define MTK_BANK_WIDTH 32
@@ -30,8 +30,7 @@
struct mtk_gc {
struct irq_chip irq_chip;
- struct gpio_chip chip;
- spinlock_t lock;
+ struct gpio_generic_chip chip;
int bank;
u32 rising;
u32 falling;
@@ -59,27 +58,29 @@ struct mtk {
static inline struct mtk_gc *
to_mediatek_gpio(struct gpio_chip *chip)
{
- return container_of(chip, struct mtk_gc, chip);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(chip);
+
+ return container_of(gen_gc, struct mtk_gc, chip);
}
static inline void
mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- gc->write_reg(mtk->base + offset, val);
+ gpio_generic_write_reg(&rg->chip, mtk->base + offset, val);
}
static inline u32
mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- return gc->read_reg(mtk->base + offset);
+ return gpio_generic_read_reg(&rg->chip, mtk->base + offset);
}
static irqreturn_t
@@ -108,12 +109,12 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
gpiochip_enable_irq(gc, d->hwirq);
- spin_lock_irqsave(&rg->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&rg->chip);
+
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
@@ -122,7 +123,6 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
- spin_unlock_irqrestore(&rg->lock, flags);
}
static void
@@ -131,19 +131,18 @@ mediatek_gpio_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
- low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
- spin_unlock_irqrestore(&rg->lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &rg->chip) {
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ }
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -220,6 +219,7 @@ static const struct irq_chip mt7621_irq_chip = {
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
+ struct gpio_generic_chip_config config;
struct mtk *mtk = dev_get_drvdata(dev);
struct mtk_gc *rg;
void __iomem *dat, *set, *ctrl, *diro;
@@ -228,7 +228,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg = &mtk->gc_map[bank];
memset(rg, 0, sizeof(*rg));
- spin_lock_init(&rg->lock);
rg->bank = bank;
dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
@@ -236,21 +235,30 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = ctrl,
+ .dirout = diro,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT,
+ };
+
+ ret = gpio_generic_chip_init(&rg->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return ret;
}
- rg->chip.of_gpio_n_cells = 2;
- rg->chip.of_xlate = mediatek_gpio_xlate;
- rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ rg->chip.gc.of_gpio_n_cells = 2;
+ rg->chip.gc.of_xlate = mediatek_gpio_xlate;
+ rg->chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
dev_name(dev), bank);
- if (!rg->chip.label)
+ if (!rg->chip.gc.label)
return -ENOMEM;
- rg->chip.offset = bank * MTK_BANK_WIDTH;
+ rg->chip.gc.offset = bank * MTK_BANK_WIDTH;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -261,7 +269,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
- rg->chip.label, &rg->chip);
+ rg->chip.gc.label, &rg->chip.gc);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n",
@@ -269,7 +277,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return ret;
}
- girq = &rg->chip.irq;
+ girq = &rg->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -279,17 +287,17 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ ret = devm_gpiochip_add_data(dev, &rg->chip.gc, mtk);
if (ret < 0) {
dev_err(dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
+ rg->chip.gc.ngpio, ret);
return ret;
}
/* set polarity to low for all gpios */
mtk_gpio_w32(rg, GPIO_REG_POL, 0);
- dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+ dev_info(dev, "registering %d gpios\n", rg->chip.gc.ngpio);
return 0;
}
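
The mt7621 conversion replaces bgpio_init() and its long positional argument list with the struct-based gpio_generic_chip_init(). A condensed sketch of the before/after shape, using the register pointers already computed by the probe path above:

/* Before: */
ret = bgpio_init(&rg->chip, dev, 4, dat, set, clr, dirout, NULL,
		 BGPIOF_NO_SET_ON_INPUT);

/* After: named configuration, one field per register bank: */
struct gpio_generic_chip_config config = {
	.dev	= dev,
	.sz	= 4,			/* 32-bit registers */
	.dat	= dat,			/* input data */
	.set	= set,			/* output set */
	.clr	= clr,			/* output clear */
	.dirout	= dirout,		/* direction, 1 = output */
	.flags	= GPIO_GENERIC_NO_SET_ON_INPUT,
};

ret = gpio_generic_chip_init(&rg->chip, &config);

The generic chip also supplies the lock, which is why the driver's private spinlock and its init call are deleted above.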
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 8cfd3a89c018..ac799fced950 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -49,6 +49,7 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* GPIO unit register offsets.
@@ -297,12 +298,12 @@ static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
/*
* Functions implementing the gpio_chip methods
*/
-static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
- BIT(pin), value ? BIT(pin) : 0);
+ return regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ BIT(pin), value ? BIT(pin) : 0);
}
static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
@@ -407,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
struct mvebu_gpio_chip *mvchip = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
@@ -419,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
@@ -432,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_mask(struct irq_data *d)
@@ -446,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
@@ -459,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
/*****************************************************************************
@@ -606,7 +602,6 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
@@ -794,8 +789,8 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 set;
if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
+ int ret = device_property_read_u32(dev, "marvell,pwm-offset",
+ &offset);
if (ret < 0)
return 0;
} else {
@@ -903,18 +898,18 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
msk = BIT(i);
is_out = !(io_conf & msk);
- seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s)", i, label);
if (is_out) {
seq_printf(s, " out %s %s\n",
- out & msk ? "hi" : "lo",
+ str_hi_lo(out & msk),
blink & msk ? "(blink )" : "");
continue;
}
seq_printf(s, " in %s (act %s) - IRQ",
- (data_in ^ in_pol) & msk ? "hi" : "lo",
- in_pol & msk ? "lo" : "hi");
+ str_hi_lo((data_in ^ in_pol) & msk),
+ str_lo_hi(in_pol & msk));
if (!((edg_msk | lvl_msk) & msk)) {
seq_puts(s, " disabled\n");
continue;
@@ -1106,7 +1101,7 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
if (IS_ERR(mvchip->regs))
return PTR_ERR(mvchip->regs);
- if (of_property_read_u32(pdev->dev.of_node, "offset", &mvchip->offset))
+ if (device_property_read_u32(&pdev->dev, "offset", &mvchip->offset))
return -EINVAL;
return 0;
@@ -1147,7 +1142,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mvchip);
- if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+ if (device_property_read_u32(&pdev->dev, "ngpios", &ngpios)) {
dev_err(&pdev->dev, "Missing ngpios OF property\n");
return -ENODEV;
}
@@ -1240,8 +1235,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!have_irqs)
return 0;
- mvchip->domain =
- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ mvchip->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), ngpios,
+ &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
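
The mvebu irq hunks all apply one transformation: paired irq_gc_lock()/irq_gc_unlock() calls become a scope-bound guard from <linux/cleanup.h>. Sketch (write_mask() stands in for the driver's edge/level helpers):

/* Before: explicit lock/unlock around the critical section. */
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
write_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);

/* After: the raw spinlock is dropped automatically at end of scope,
 * so an early return cannot leak the lock. */
guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
write_mask(mvchip, ct->mask_cache_priv);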
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4cb455b2bdee..52060b3ec745 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -7,6 +7,7 @@
// Authors: Daniel Mack, Juergen Beisert.
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -22,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/bug.h>
@@ -64,7 +66,7 @@ struct mxc_gpio_port {
int irq_high;
void (*mx_irq_handler)(struct irq_desc *desc);
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct device *dev;
u32 both_edges;
struct mxc_gpio_reg_saved gpio_saved_reg;
@@ -161,7 +163,6 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
- unsigned long flags;
u32 bit, val;
u32 gpio_idx = d->hwirq;
int edge;
@@ -179,7 +180,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
if (GPIO_EDGE_SEL >= 0) {
edge = GPIO_INT_BOTH_EDGES;
} else {
- val = port->gc.get(&port->gc, gpio_idx);
+ val = port->gen_gc.gc.get(&port->gen_gc.gc, gpio_idx);
if (val) {
edge = GPIO_INT_LOW_LEV;
pr_debug("mxc: set GPIO %d to low trigger\n", gpio_idx);
@@ -200,41 +201,38 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &port->gen_gc) {
+ if (GPIO_EDGE_SEL >= 0) {
+ val = readl(port->base + GPIO_EDGE_SEL);
+ if (edge == GPIO_INT_BOTH_EDGES)
+ writel(val | (1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ else
+ writel(val & ~(1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ }
- if (GPIO_EDGE_SEL >= 0) {
- val = readl(port->base + GPIO_EDGE_SEL);
- if (edge == GPIO_INT_BOTH_EDGES)
- writel(val | (1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- else
- writel(val & ~(1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- }
+ if (edge != GPIO_INT_BOTH_EDGES) {
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
+ bit = gpio_idx & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ }
- if (edge != GPIO_INT_BOTH_EDGES) {
- reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
- bit = gpio_idx & 0xf;
- val = readl(reg) & ~(0x3 << (bit << 1));
- writel(val | (edge << (bit << 1)), reg);
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
+ port->pad_type[gpio_idx] = type;
}
- writel(1 << gpio_idx, port->base + GPIO_ISR);
- port->pad_type[gpio_idx] = type;
-
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
-
- return port->gc.direction_input(&port->gc, gpio_idx);
+ return port->gen_gc.gc.direction_input(&port->gen_gc.gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
- unsigned long flags;
u32 bit, val;
int edge;
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&port->gen_gc);
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
@@ -250,12 +248,9 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
- goto unlock;
+ return;
}
writel(val | (edge << (bit << 1)), reg);
-
-unlock:
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
/* handle 32 interrupts in one status register */
@@ -420,6 +415,7 @@ static void mxc_update_irq_chained_handler(struct mxc_gpio_port *port, bool enab
static int mxc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
int irq_count;
@@ -479,21 +475,31 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->mx_irq_handler = mx3_gpio_irq_handler;
mxc_update_irq_chained_handler(port, true);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + GPIO_PSR,
- port->base + GPIO_DR, NULL,
- port->base + GPIO_GDIR, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = port->base + GPIO_PSR;
+ config.set = port->base + GPIO_DR;
+ config.dirout = port->base + GPIO_GDIR;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ err = gpio_generic_chip_init(&port->gen_gc, &config);
if (err)
goto out_bgio;
- port->gc.request = mxc_gpio_request;
- port->gc.free = mxc_gpio_free;
- port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
- pdev->id * 32;
-
- err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
+ port->gen_gc.gc.request = mxc_gpio_request;
+ port->gen_gc.gc.free = mxc_gpio_free;
+ port->gen_gc.gc.to_irq = mxc_gpio_to_irq;
+ /*
+ * The driver is DT-only, so a fixed base only needs to be maintained for
+ * legacy userspace using the sysfs interface.
+ */
+ if (IS_ENABLED(CONFIG_GPIO_SYSFS))
+ port->gen_gc.gc.base = of_alias_get_id(np, "gpio") * 32;
+ else /* silence boot time warning */
+ port->gen_gc.gc.base = -1;
+
+ err = devm_gpiochip_add_data(&pdev->dev, &port->gen_gc.gc, port);
if (err)
goto out_bgio;
@@ -503,8 +509,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_bgio;
@@ -567,7 +573,8 @@ static bool mxc_gpio_generic_config(struct mxc_gpio_port *port,
if (of_device_is_compatible(np, "fsl,imx8dxl-gpio") ||
of_device_is_compatible(np, "fsl,imx8qxp-gpio") ||
of_device_is_compatible(np, "fsl,imx8qm-gpio"))
- return (gpiochip_generic_config(&port->gc, offset, conf) == 0);
+ return (gpiochip_generic_config(&port->gen_gc.gc,
+ offset, conf) == 0);
return false;
}
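
The mxc conversion shows both guard flavours. guard() pins the lock until the enclosing scope (usually the whole function) ends; scoped_guard() confines it to an explicit block, which gpio_set_irq_type() needs because it must call direction_input() unlocked afterwards. Sketch:

/* Held until function return: */
guard(gpio_generic_lock_irqsave)(&port->gen_gc);
/* ...critical section... */

/* Held only inside the braces: */
scoped_guard(gpio_generic_lock_irqsave, &port->gen_gc) {
	/* ...critical section... */
}
/* ...runs unlocked... */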
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 024ad077e98d..5635694bf9f4 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -7,17 +7,18 @@
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/module.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
@@ -48,7 +49,7 @@ struct mxs_gpio_port {
int id;
int irq;
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
@@ -258,6 +259,7 @@ MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
static int mxs_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
@@ -303,8 +305,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_iounmap;
@@ -319,19 +321,24 @@ static int mxs_gpio_probe(struct platform_device *pdev)
irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
port);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port),
- port->base + PINCTRL_DOUT(port) + MXS_SET,
- port->base + PINCTRL_DOUT(port) + MXS_CLR,
- port->base + PINCTRL_DOE(port), NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = port->base + PINCTRL_DIN(port),
+ .set = port->base + PINCTRL_DOUT(port) + MXS_SET,
+ .clr = port->base + PINCTRL_DOUT(port) + MXS_CLR,
+ .dirout = port->base + PINCTRL_DOE(port),
+ };
+
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err)
goto out_irqdomain_remove;
- port->gc.to_irq = mxs_gpio_to_irq;
- port->gc.get_direction = mxs_gpio_get_direction;
- port->gc.base = port->id * 32;
+ port->chip.gc.to_irq = mxs_gpio_to_irq;
+ port->chip.gc.get_direction = mxs_gpio_get_direction;
+ port->chip.gc.base = port->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = gpiochip_add_data(&port->chip.gc, port);
if (err)
goto out_irqdomain_remove;
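
mxs, like mxc and mvebu above, also migrates from the of_node based irq_domain_add_*() helpers to the fwnode based irq_domain_create_*() equivalents; the shape of the change is always:

/* Before (device-tree node): */
domain = irq_domain_add_legacy(np, 32, irq_base, 0,
			       &irq_domain_simple_ops, NULL);

/* After (firmware-node handle, works for DT and ACPI alike): */
domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
				  &irq_domain_simple_ops, NULL);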
diff --git a/drivers/gpio/gpio-nct6694.c b/drivers/gpio/gpio-nct6694.c
new file mode 100644
index 000000000000..a8607f0d9915
--- /dev/null
+++ b/drivers/gpio/gpio-nct6694.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 GPIO controller driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * USB command module type for the NCT6694 GPIO controller, used in every
+ * command header sent to the device over its USB interface.
+ */
+#define NCT6694_GPIO_MOD 0xFF
+
+#define NCT6694_GPIO_VER 0x90
+#define NCT6694_GPIO_VALID 0x110
+#define NCT6694_GPI_DATA 0x120
+#define NCT6694_GPO_DIR 0x170
+#define NCT6694_GPO_TYPE 0x180
+#define NCT6694_GPO_DATA 0x190
+
+#define NCT6694_GPI_STS 0x130
+#define NCT6694_GPI_CLR 0x140
+#define NCT6694_GPI_FALLING 0x150
+#define NCT6694_GPI_RISING 0x160
+
+#define NCT6694_NR_GPIO 8
+
+struct nct6694_gpio_data {
+ struct nct6694 *nct6694;
+ struct gpio_chip gpio;
+ struct mutex lock;
+ /* Protect irq operation */
+ struct mutex irq_lock;
+
+ unsigned char reg_val;
+ unsigned char irq_trig_falling;
+ unsigned char irq_trig_rising;
+
+ /* Current gpio group */
+ unsigned char group;
+ int irq;
+};
+
+static int nct6694_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Set direction to output */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val |= BIT(offset);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Then set output level */
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_get_value(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (BIT(offset) & data->reg_val) {
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+ }
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_set_value(struct gpio_chip *gpio, unsigned int offset,
+ int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DATA + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_TYPE + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ data->reg_val |= BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ data->reg_val &= ~BIT(offset);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_init_valid_mask(struct gpio_chip *gpio,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPIO_VALID + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ *valid_mask = data->reg_val;
+
+ return ret;
+}
+
+static irqreturn_t nct6694_irq_handler(int irq, void *priv)
+{
+ struct nct6694_gpio_data *data = priv;
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_STS + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ unsigned char status;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret)
+ return IRQ_NONE;
+
+ status = data->reg_val;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ data->reg_val = BIT(bit);
+ handle_nested_irq(irq_find_mapping(data->gpio.irq.domain, bit));
+ status &= ~BIT(bit);
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_CLR + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int nct6694_get_irq_trig(struct nct6694_gpio_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+ if (ret)
+ return ret;
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ return nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+}
+
+static void nct6694_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_disable_irq(gpio, hwirq);
+}
+
+static void nct6694_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gpio, hwirq);
+}
+
+static int nct6694_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ guard(mutex)(&data->lock);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ data->irq_trig_rising |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ data->irq_trig_rising |= BIT(hwirq);
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void nct6694_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+
+ mutex_lock(&data->irq_lock);
+}
+
+static void nct6694_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+
+ scoped_guard(mutex, &data->lock) {
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+ }
+
+ mutex_unlock(&data->irq_lock);
+}
+
+static const struct irq_chip nct6694_irq_chip = {
+ .name = "gpio-nct6694",
+ .irq_mask = nct6694_irq_mask,
+ .irq_unmask = nct6694_irq_unmask,
+ .irq_set_type = nct6694_irq_set_type,
+ .irq_bus_lock = nct6694_irq_bus_lock,
+ .irq_bus_sync_unlock = nct6694_irq_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void nct6694_irq_dispose_mapping(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+
+ irq_dispose_mapping(data->irq);
+}
+
+static void nct6694_gpio_ida_free(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->gpio_ida, data->group);
+}
+
+static int nct6694_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_gpio_data *data;
+ struct gpio_irq_chip *girq;
+ int ret, i;
+ char **names;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->gpio_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->group = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_gpio_ida_free, data);
+ if (ret)
+ return ret;
+
+ names = devm_kcalloc(dev, NCT6694_NR_GPIO, sizeof(char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < NCT6694_NR_GPIO; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL, "GPIO%X%d",
+ data->group, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ data->irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_GPIO0 + data->group);
+ if (!data->irq)
+ return -EINVAL;
+
+ ret = devm_add_action_or_reset(dev, nct6694_irq_dispose_mapping, data);
+ if (ret)
+ return ret;
+
+ data->gpio.names = (const char * const*)names;
+ data->gpio.label = pdev->name;
+ data->gpio.direction_input = nct6694_direction_input;
+ data->gpio.get = nct6694_get_value;
+ data->gpio.direction_output = nct6694_direction_output;
+ data->gpio.set = nct6694_set_value;
+ data->gpio.get_direction = nct6694_get_direction;
+ data->gpio.set_config = nct6694_set_config;
+ data->gpio.init_valid_mask = nct6694_init_valid_mask;
+ data->gpio.base = -1;
+ data->gpio.can_sleep = false;
+ data->gpio.owner = THIS_MODULE;
+ data->gpio.ngpio = NCT6694_NR_GPIO;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &data->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_get_irq_trig(data);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to get irq trigger type\n");
+ return ret;
+ }
+
+ girq = &data->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &nct6694_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL, nct6694_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "gpio-nct6694", data);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request irq\n");
+ return ret;
+ }
+
+ return devm_gpiochip_add_data(dev, &data->gpio, data);
+}
+
+static struct platform_driver nct6694_gpio_driver = {
+ .driver = {
+ .name = "nct6694-gpio",
+ },
+ .probe = nct6694_gpio_probe,
+};
+
+module_platform_driver(nct6694_gpio_driver);
+
+MODULE_DESCRIPTION("USB-GPIO controller driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-gpio");
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 836f1cc760c2..97c5cd33279d 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -20,6 +20,7 @@
*/
#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -30,6 +31,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/gpio/gpio-nomadik.h>
@@ -346,8 +348,8 @@ static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset)
return value;
}
-static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
@@ -356,6 +358,8 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
__nmk_gpio_set_output(nmk_chip, offset, val);
clk_disable(nmk_chip->clk);
+
+ return 0;
}
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset,
@@ -393,10 +397,12 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
}
void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
- struct gpio_chip *chip, unsigned int offset,
- unsigned int gpio)
+ struct gpio_chip *chip, unsigned int offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ struct gpio_desc *desc;
+#endif
int mode;
bool is_out;
bool data_out;
@@ -422,15 +428,15 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
mode = nmk_gpio_get_mode(nmk_chip, offset);
#ifdef CONFIG_PINCTRL_NOMADIK
- if (mode == NMK_GPIO_ALT_C && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+ if (mode == NMK_GPIO_ALT_C && pctldev) {
+ desc = gpio_device_get_desc(chip->gpiodev, offset);
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, desc_to_gpio(desc));
+ }
#endif
if (is_out) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- data_out ? "hi" : "lo",
+ offset, label ?: "(none)", str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
@@ -442,9 +448,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
};
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
+ offset, label ?: "(none)", pulls[pullidx],
(mode < 0) ? "unknown" : modes[mode]);
val = nmk_gpio_get_input(chip, offset);
@@ -476,10 +480,10 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- unsigned int i, gpio = chip->base;
+ unsigned int i;
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ for (i = 0; i < chip->ngpio; i++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i);
seq_puts(s, "\n");
}
}
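
The nomadik debug output (like mvebu's above) now uses the <linux/string_choices.h> helpers instead of open-coded ternaries. Conceptually these amount to no more than the following; the exact upstream definitions may differ slightly:

#define str_hi_lo(v)	((v) ? "hi" : "lo")
#define str_lo_hi(v)	str_hi_lo(!(v))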
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 260570614543..83c77a2c0623 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,9 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- gc->set(gc, offset, val);
-
- return 0;
+ return gc->set(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -226,7 +224,7 @@ static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_IN;
}
-static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct npcm_sgpio *gpio = gpiochip_get_data(gc);
const struct npcm_sgpio_bank *bank = offset_to_bank(offset);
@@ -242,6 +240,8 @@ static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg &= ~BIT(GPIO_BIT(offset));
iowrite8(reg, addr);
+
+ return 0;
}
static int npcm_sgpio_get(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index afb0e8a791e5..777e20c608dc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -47,12 +47,15 @@ static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int octeon_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct octeon_gpio *gpio = gpiochip_get_data(chip);
u64 mask = 1ull << offset;
u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR);
cvmx_write_csr(reg, mask);
+
+ return 0;
}
static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 76d5d87e9681..a268c76bdca6 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -715,7 +715,7 @@ static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- seq_printf(p, dev_name(bank->dev));
+ seq_puts(p, dev_name(bank->dev));
}
static const struct irq_chip omap_gpio_irq_chip = {
@@ -953,7 +953,7 @@ static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
return ret;
}
-static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int omap_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -962,10 +962,12 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
-static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
void __iomem *reg = bank->base + bank->regs->dataout;
@@ -977,6 +979,8 @@ static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
writel_relaxed(l, reg);
bank->context.dataout = l;
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
/*---------------------------------------------------------------------*/
@@ -1557,7 +1561,7 @@ static const struct dev_pm_ops gpio_pm_ops = {
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
- .remove_new = omap_gpio_remove,
+ .remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 28dba7048509..e377f6dd4ccf 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -54,12 +54,11 @@ static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & BIT(offset));
}
-static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int palmas_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct palmas_gpio *pg = gpiochip_get_data(gc);
struct palmas *palmas = pg->palmas;
- int ret;
unsigned int reg;
int gpio16 = (offset/8);
@@ -71,9 +70,7 @@ static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
reg = (value) ?
PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT;
- ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
- if (ret < 0)
- dev_err(gc->parent, "Reg 0x%02x write failed, %d\n", reg, ret);
+ return palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
}
static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -89,7 +86,9 @@ static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
/* Set the initial value */
- palmas_gpio_set(gc, offset, value);
+ ret = palmas_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg,
BIT(offset), BIT(offset));
@@ -140,6 +139,7 @@ static const struct of_device_id of_palmas_gpio_match[] = {
{ .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
{ },
};
+MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
static int palmas_gpio_probe(struct platform_device *pdev)
{
@@ -197,3 +197,13 @@ static int __init palmas_gpio_init(void)
return platform_driver_register(&palmas_gpio_driver);
}
subsys_initcall(palmas_gpio_init);
+
+static void __exit palmas_gpio_exit(void)
+{
+ platform_driver_unregister(&palmas_gpio_driver);
+}
+module_exit(palmas_gpio_exit);
+
+MODULE_DESCRIPTION("TI PALMAS series GPIO driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 3f2d33ee20cc..b46927f55038 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -38,6 +38,10 @@
#define PCA953X_INVERT 0x02
#define PCA953X_DIRECTION 0x03
+#define TCA6418_INPUT 0x14
+#define TCA6418_OUTPUT 0x17
+#define TCA6418_DIRECTION 0x23
+
#define REG_ADDR_MASK GENMASK(5, 0)
#define REG_ADDR_EXT BIT(6)
#define REG_ADDR_AI BIT(7)
@@ -76,7 +80,8 @@
#define PCA953X_TYPE BIT(12)
#define PCA957X_TYPE BIT(13)
#define PCAL653X_TYPE BIT(14)
-#define PCA_TYPE_MASK GENMASK(15, 12)
+#define TCA6418_TYPE BIT(16)
+#define PCA_TYPE_MASK GENMASK(16, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
@@ -115,6 +120,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
+ { "tca6418", 18 | TCA6418_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ "tca9538", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
@@ -204,6 +210,13 @@ static const struct pca953x_reg_config pca957x_regs = {
.invert = PCA957X_INVRT,
};
+static const struct pca953x_reg_config tca6418_regs = {
+ .direction = TCA6418_DIRECTION,
+ .output = TCA6418_OUTPUT,
+ .input = TCA6418_INPUT,
+ .invert = 0xFF, /* Does not apply */
+};
+
struct pca953x_chip {
unsigned gpio_start;
struct mutex i2c_lock;
@@ -215,6 +228,8 @@ struct pca953x_chip {
DECLARE_BITMAP(irq_stat, MAX_LINE);
DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_high, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_low, MAX_LINE);
#endif
atomic_t wakeup_path;
@@ -235,6 +250,22 @@ static int pca953x_bank_shift(struct pca953x_chip *chip)
return fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
}
+/*
+ * Helper function to get the correct bit mask for a given offset and chip type.
+ * The TCA6418's input, output, and direction banks have a peculiar bit order:
+ * the first byte uses reversed bit order, while the second byte uses standard order.
+ */
+static inline u8 pca953x_get_bit_mask(struct pca953x_chip *chip, unsigned int offset)
+{
+ unsigned int bit_pos_in_bank = offset % BANK_SZ;
+ int msb = BANK_SZ - 1;
+
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE && offset <= msb)
+ return BIT(msb - bit_pos_in_bank);
+
+ return BIT(bit_pos_in_bank);
+}
+
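/* Editor's annotation (not patch content): the mapping implemented by
 * pca953x_get_bit_mask() above works out to:
 *   offset 0..7   (bank 0, reversed): 0 -> BIT(7), 3 -> BIT(4), 7 -> BIT(0)
 *   offset 8..15  (bank 1, normal):   8 -> BIT(0), 15 -> BIT(7)
 *   offset 16..17 (bank 2, normal):  16 -> BIT(0), 17 -> BIT(1)
 */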
#define PCA953x_BANK_INPUT BIT(0)
#define PCA953x_BANK_OUTPUT BIT(1)
#define PCA953x_BANK_POLARITY BIT(2)
@@ -351,18 +382,43 @@ static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
return true;
}
+/* TCA6418 breaks the PCA953x register order rule */
+static bool tca6418_check_register(struct pca953x_chip *chip, unsigned int reg,
+ u32 access_type_mask)
+{
+ /* Input registers are read-only (BIT(0) = readable) */
+ if (reg >= TCA6418_INPUT && reg < (TCA6418_INPUT + NBANK(chip)))
+ return (access_type_mask & BIT(0));
+
+ /* Output registers are readable and writeable (BIT(0) | BIT(1)) */
+ if (reg >= TCA6418_OUTPUT && reg < (TCA6418_OUTPUT + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+ /* Direction registers are readable and writeable (BIT(0) | BIT(1)) */
+ if (reg >= TCA6418_DIRECTION && reg < (TCA6418_DIRECTION + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+ return false;
+}
+
static bool pca953x_readable_register(struct device *dev, unsigned int reg)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT |
PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG |
PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(0) to indicate read access */
+ return tca6418_check_register(chip, reg, BIT(0));
+ default:
bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL) {
@@ -379,12 +435,18 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY |
PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(1) for write access */
+ return tca6418_check_register(chip, reg, BIT(1));
+ default:
bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL)
@@ -399,10 +461,17 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT;
- else
+ break;
+ case TCA6418_TYPE:
+ /* BIT(2) for volatile access */
+ return tca6418_check_register(chip, reg, BIT(2));
+ default:
bank = PCA953x_BANK_INPUT;
+ break;
+ }
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IRQ_STAT;
@@ -487,6 +556,16 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
return pinctrl + addr + (off / BANK_SZ);
}
+static u8 tca6418_recalc_addr(struct pca953x_chip *chip, int reg_base, int offset)
+{
+ /*
+ * reg_base will be TCA6418_INPUT, TCA6418_OUTPUT, or TCA6418_DIRECTION
+ * offset is the global GPIO line offset (0-17)
+ * BANK_SZ is 8 for TCA6418 (8 bits per register bank)
+ */
+ return reg_base + (offset / BANK_SZ);
+}
+
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = chip->recalc_addr(chip, reg, 0);
@@ -527,11 +606,14 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- return regmap_write_bits(chip->regmap, dirreg, bit, bit);
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -540,25 +622,31 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
int ret;
guard(mutex)(&chip->i2c_lock);
/* set output level */
- ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ ret = regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
if (ret)
return ret;
- /* then direction */
- return regmap_write_bits(chip->regmap, dirreg, bit, 0);
+ /*
+ * Then set the direction. Note that the direction bit polarity is
+ * inverted on the TCA6418: 1 = output, 0 = input.
+ */
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 inreg = chip->recalc_addr(chip, chip->regs->input, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -570,22 +658,23 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
return !!(reg_val & bit);
}
-static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ return regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -594,7 +683,14 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
if (ret < 0)
return ret;
- if (reg_val & bit)
+ /* Direction bit polarity is inverted on the TCA6418 (1 = output) */
+ if (reg_val & bit) {
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+ }
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -616,8 +712,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
DECLARE_BITMAP(reg_val, MAX_LINE);
@@ -627,11 +723,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
ret = pca953x_read_regs(chip, chip->regs->output, reg_val);
if (ret)
- return;
+ return ret;
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
- pca953x_write_regs(chip, chip->regs->output, reg_val);
+ return pca953x_write_regs(chip, chip->regs->output, reg_val);
}
static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
@@ -655,9 +751,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Configure pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_PULL_UP)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, bit);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, 0);
else
ret = 0;
if (ret)
@@ -665,9 +761,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Disable/Enable pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_DISABLE)
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, 0);
else
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, bit);
}
static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -773,6 +869,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_low, gc->ngpio);
bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
@@ -790,13 +888,15 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
struct device *dev = &chip->client->dev;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ if (!(type & IRQ_TYPE_SENSE_MASK)) {
dev_err(dev, "irq %d: unsupported type %d\n", d->irq, type);
return -EINVAL;
}
assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_level_low, type & IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_level_high, type & IRQ_TYPE_LEVEL_HIGH);
return 0;
}
@@ -809,13 +909,15 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_raise);
clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_level_low);
+ clear_bit(hwirq, chip->irq_trig_level_high);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip pca953x_irq_chip = {
@@ -839,27 +941,9 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
+ DECLARE_BITMAP(edges, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
@@ -875,13 +959,26 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
- if (bitmap_empty(trigger, gc->ngpio))
- return false;
+ if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
+ bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
+ if (bitmap_empty(trigger, gc->ngpio))
+ return false;
+ }
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
- bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
+ bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
+ bitmap_and(pending, edges, trigger, gc->ngpio);
+
+ bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, cur_stat, gc->ngpio);
+
+ bitmap_complement(cur_stat, new_stat, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, reg_direction, gc->ngpio);
+ bitmap_and(old_stat, cur_stat, chip->irq_trig_level_low, gc->ngpio);
+ bitmap_and(old_stat, old_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, old_stat, gc->ngpio);
return !bitmap_empty(pending, gc->ngpio);
}
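
For reference, the level-trigger handling added to pca953x_irq_pending() boils down to a few mask operations: level-high lines whose input is currently high, and level-low input lines whose input is currently low, are OR-ed into the pending set alongside the edge hits, filtered by the unmasked IRQ lines. A minimal host-side sketch, with plain unsigned masks standing in for the kernel bitmap API and all values purely illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int new_stat  = 0x05;	/* current input levels           */
	unsigned int inputs    = 0x0f;	/* lines configured as inputs     */
	unsigned int trig_high = 0x01;	/* IRQ_TYPE_LEVEL_HIGH requested  */
	unsigned int trig_low  = 0x02;	/* IRQ_TYPE_LEVEL_LOW requested   */
	unsigned int irq_mask  = 0x03;	/* unmasked IRQ lines             */
	unsigned int pending   = 0;	/* edge hits would already be set */

	/* level-high: line whose input is currently high */
	pending |= new_stat & trig_high & irq_mask;
	/* level-low: input line whose input is currently low */
	pending |= ~new_stat & inputs & trig_low & irq_mask;

	printf("pending = 0x%02x\n", pending);	/* -> 0x03 */
	return 0;
}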
@@ -970,7 +1067,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
IRQF_ONESHOT | IRQF_SHARED, dev_name(dev),
chip);
if (ret)
- return dev_err_probe(dev, client->irq, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
return 0;
}
@@ -1088,7 +1185,8 @@ static int pca953x_probe(struct i2c_client *client)
*/
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio))
- return PTR_ERR(reset_gpio);
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset gpio\n");
}
chip->client = client;
@@ -1112,12 +1210,22 @@ static int pca953x_probe(struct i2c_client *client)
regmap_config = &pca953x_i2c_regmap;
}
- if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCAL653X_TYPE:
chip->recalc_addr = pcal6534_recalc_addr;
chip->check_reg = pcal6534_check_register;
- } else {
+ break;
+ case TCA6418_TYPE:
+ chip->recalc_addr = tca6418_recalc_addr;
+ /*
+ * chip->check_reg is deliberately left unset here: the regmap
+ * readable/writeable/volatile callbacks dispatch to
+ * tca6418_check_register() based on PCA_CHIP_TYPE.
+ */
+ break;
+ default:
chip->recalc_addr = pca953x_recalc_addr;
chip->check_reg = pca953x_check_register;
+ break;
}
chip->regmap = devm_regmap_init_i2c(client, regmap_config);
@@ -1146,15 +1254,22 @@ static int pca953x_probe(struct i2c_client *client)
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
- /* initialize cached registers from their original values.
+ /*
+ * initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
chip->regs = &pca957x_regs;
ret = device_pca957x_init(chip);
- } else {
+ break;
+ case TCA6418_TYPE:
+ chip->regs = &tca6418_regs;
+ ret = 0;
+ break;
+ default:
chip->regs = &pca953x_regs;
ret = device_pca95xx_init(chip);
+ break;
}
if (ret)
return ret;
@@ -1221,6 +1336,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
guard(mutex)(&chip->i2c_lock);
+ if (chip->client->irq > 0)
+ enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@@ -1233,6 +1350,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
+
+ /* Disable the IRQ to prevent spurious triggering while the regmap is in cache-only mode */
+ if (chip->client->irq > 0)
+ disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
@@ -1314,6 +1435,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
+ { .compatible = "ti,tca6418", .data = (void *)(18 | TCA6418_TYPE | PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
{ .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), },
@@ -1344,7 +1466,9 @@ static int __init pca953x_init(void)
{
return i2c_add_driver(&pca953x_driver);
}
-/* register after i2c postcore initcall and before
+
+/*
+ * register after i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(pca953x_init);
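
A host-side sanity check of the TCA6418 bit-order mapping implemented by pca953x_get_bit_mask() above: lines 0-7 land on bits 7-0 of the first bank, while lines 8-17 map straight through within their bank. BANK_SZ mirrors the driver's value; the program itself is illustrative only.

#include <stdio.h>

#define BANK_SZ 8
#define BIT(n)  (1u << (n))

static unsigned int tca6418_bit_mask(unsigned int offset)
{
	unsigned int bit_pos_in_bank = offset % BANK_SZ;

	if (offset < BANK_SZ)			/* first bank is bit-reversed */
		return BIT(BANK_SZ - 1 - bit_pos_in_bank);

	return BIT(bit_pos_in_bank);		/* remaining banks are straight */
}

int main(void)
{
	/* line 0 -> 0x80, line 7 -> 0x01, line 8 -> 0x01, line 17 -> 0x02 */
	for (unsigned int off = 0; off < 18; off++)
		printf("line %2u: bank %u, mask 0x%02x\n",
		       off, off / BANK_SZ, tca6418_bit_mask(off));
	return 0;
}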
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index d37ba4049368..c5a1287079a0 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -88,7 +88,7 @@ static int pca9570_get(struct gpio_chip *chip, unsigned offset)
return !!(buffer & BIT(offset));
}
-static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pca9570_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct pca9570 *gpio = gpiochip_get_data(chip);
u8 buffer;
@@ -110,6 +110,7 @@ static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
out:
mutex_unlock(&gpio->lock);
+ return ret;
}
static int pca9570_probe(struct i2c_client *client)
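
The pca9570 change above is one instance of a tree-wide conversion in this series: gpio_chip::set and ::set_multiple now return int so bus errors propagate to the caller instead of being dropped. A minimal sketch of the new callback shape; the foo_* names and the register offset are hypothetical.

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct foo_gpio {
	struct regmap *map;
};

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	/* Propagate the I2C/SPI write error instead of silently dropping it */
	return regmap_update_bits(foo->map, 0x01 /* output reg */,
				  BIT(offset), value ? BIT(offset) : 0);
}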
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 7c57eaeb0afe..3b9de8c3d924 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -5,6 +5,8 @@
* Copyright (C) 2007 David Brownell
*/
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -169,21 +171,24 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value
return status;
}
-static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- pcf857x_output(chip, offset, value);
+ return pcf857x_output(chip, offset, value);
}
-static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct pcf857x *gpio = gpiochip_get_data(chip);
+ int status;
mutex_lock(&gpio->lock);
gpio->out &= ~*mask;
gpio->out |= *bits & *mask;
- gpio->write(gpio->client, gpio->out);
+ status = gpio->write(gpio->client, gpio->out);
mutex_unlock(&gpio->lock);
+
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -272,12 +277,11 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
+ struct gpio_desc *reset_gpio;
struct pcf857x *gpio;
unsigned int n_latch = 0;
int status;
- device_property_read_u32(&client->dev, "lines-initial-states", &n_latch);
-
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -297,6 +301,30 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(reset_gpio),
+ "failed to get reset GPIO\n");
+
+ if (reset_gpio) {
+ /* Reset is already asserted: devm_gpiod_get_optional() with GPIOD_OUT_HIGH */
+ fsleep(4); /* tw(rst) > 4 us */
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ fsleep(100); /* trst > 100 us */
+
+ /*
+ * Performing a reset means "The PCA9670 registers and I2C-bus
+ * state machine will be held in their default state until the
+ * RESET input is once again HIGH".
+ *
+ * This is the same as writing 1 for all pins, which is the same
+ * as n_latch=0, the default value of the variable.
+ */
+ } else {
+ device_property_read_u32(&client->dev, "lines-initial-states",
+ &n_latch);
+ }
+
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
* DAC instead of pin change IRQs; and its inputs can be the
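
The reset handling added above follows a common pulse-then-wait pattern: assert at request time, hold for the minimum pulse width, deassert, then wait for the part to leave reset. A sketch of the same sequence in isolation; the timing macros are hypothetical names for the 4 us / 100 us figures in the hunk above.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

#define T_RESET_PULSE_US	4	/* tw(rst) */
#define T_RESET_RECOVERY_US	100	/* trst */

static int foo_hw_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* GPIOD_OUT_HIGH asserts the (active-low) reset line immediately */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (!reset)
		return 0;	/* no reset line wired up */

	fsleep(T_RESET_PULSE_US);
	gpiod_set_value_cansleep(reset, 0);	/* release reset */
	fsleep(T_RESET_RECOVERY_US);

	return 0;
}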
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 63f25c72eac2..9925687e05fb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -99,7 +99,7 @@ struct pch_gpio {
spinlock_t spinlock;
};
-static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
+static int pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct pch_gpio *chip = gpiochip_get_data(gpio);
@@ -114,6 +114,8 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int pch_gpio_get(struct gpio_chip *gpio, unsigned int nr)
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 44c0a21b1d1d..476cea1b5ed7 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -70,24 +70,17 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *const dev = &pdev->dev;
int err;
const size_t pci_bar_index = 2;
- const char *const name = pci_name(pdev);
struct idio_16_regmap_config config = {};
void __iomem *regs;
struct regmap *map;
err = pcim_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device (%d)\n", err);
- return err;
- }
-
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
- if (err) {
- dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
- regs = pcim_iomap_table(pdev)[pci_bar_index];
+ regs = pcim_iomap_region(pdev, pci_bar_index, pci_name(pdev));
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs), "Unable to map PCI I/O addresses\n");
map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
if (IS_ERR(map))
@@ -119,4 +112,4 @@ module_pci_driver(idio_16_driver);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES PCI-IDIO-16 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_IDIO_16);
+MODULE_IMPORT_NS("GPIO_IDIO_16");
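
Both ACCES drivers in this series move from pcim_iomap_regions() plus pcim_iomap_table() to pcim_iomap_region(), which returns the mapping (or an ERR_PTR) directly and pairs naturally with dev_err_probe(). A minimal sketch with a hypothetical probe:

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "Failed to enable PCI device\n");

	regs = pcim_iomap_region(pdev, 2, pci_name(pdev));
	if (IS_ERR(regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(regs),
				     "Unable to map PCI I/O addresses\n");

	/* ... set up regmap / gpiochip on top of regs ... */
	return 0;
}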
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 7f7f95ad4343..80c0ba0afa67 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -305,19 +305,16 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct regmap_irq_chip_data *chip_data;
err = pcim_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device (%d)\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
- err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
- if (err) {
- dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
- return err;
- }
+ pex8311_regs = pcim_iomap_region(pdev, pci_plx_bar_index, "pex8311");
+ if (IS_ERR(pex8311_regs))
+ return dev_err_probe(dev, PTR_ERR(pex8311_regs), "Unable to map PEX 8311 I/O addresses\n");
- pex8311_regs = pcim_iomap_table(pdev)[pci_plx_bar_index];
- idio_24_regs = pcim_iomap_table(pdev)[pci_bar_index];
+ idio_24_regs = pcim_iomap_region(pdev, pci_bar_index, name);
+ if (IS_ERR(idio_24_regs))
+ return dev_err_probe(dev, PTR_ERR(idio_24_regs), "Unable to map PCIe-IDIO-24 I/O addresses\n");
intcsr_map = devm_regmap_init_mmio(dev, pex8311_regs, &pex8311_intcsr_regmap_config);
if (IS_ERR(intcsr_map))
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index e3013e778e15..7ec6a46ed600 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -67,13 +67,6 @@ static int pisosr_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static int pisosr_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- /* This device is input only */
- return -EINVAL;
-}
-
static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pisosr_gpio *gpio = gpiochip_get_data(chip);
@@ -108,7 +101,6 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = pisosr_gpio_get_direction,
.direction_input = pisosr_gpio_direction_input,
- .direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
.get_multiple = pisosr_gpio_get_multiple,
.base = -1,
@@ -116,11 +108,6 @@ static const struct gpio_chip template_chip = {
.can_sleep = true,
};
-static void pisosr_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int pisosr_gpio_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -147,8 +134,7 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
"Unable to allocate load GPIO\n");
- mutex_init(&gpio->lock);
- ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
+ ret = devm_mutex_init(dev, &gpio->lock);
if (ret)
return ret;
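
devm_mutex_init() replaces the hand-rolled devm_add_action_or_reset() wrapper deleted above: it initializes the mutex and registers mutex_destroy() as a devres action in one call. A sketch, assuming a hypothetical foo driver:

#include <linux/device.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
};

static int foo_probe_locks(struct device *dev, struct foo *foo)
{
	/* Destroyed automatically when the device is unbound */
	return devm_mutex_init(dev, &foo->lock);
}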
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index a211a02d4b4a..02e4ffcf5a6f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -115,11 +115,13 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
return !!readb(pl061->base + (BIT(offset + 2)));
}
-static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
+static int pl061_set_value(struct gpio_chip *gc, unsigned int offset, int value)
{
struct pl061 *pl061 = gpiochip_get_data(gc);
writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+
+ return 0;
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
@@ -291,7 +293,7 @@ static void pl061_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip pl061_irq_chip = {
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index d9b228bea42e..cb015fb5c946 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -109,12 +109,6 @@ static int sprd_pmic_eic_direction_input(struct gpio_chip *chip,
return 0;
}
-static void sprd_pmic_eic_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- /* EICs are always input, nothing need to do here. */
-}
-
static int sprd_pmic_eic_set_debounce(struct gpio_chip *chip,
unsigned int offset,
unsigned int debounce)
@@ -351,7 +345,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.request = sprd_pmic_eic_request;
pmic_eic->chip.free = sprd_pmic_eic_free;
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
- pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
pmic_eic->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 91cea97255fa..fa22f3faa163 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -315,12 +315,14 @@ static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(gplr & GPIO_bit(offset));
}
-static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pxa_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
void __iomem *base = gpio_bank_base(chip, offset);
writel_relaxed(GPIO_bit(offset),
base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+
+ return 0;
}
#ifdef CONFIG_OF_GPIO
@@ -636,9 +638,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
- pxa_last_gpio + 1, irq_base,
- 0, &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), pxa_last_gpio + 1,
+ irq_base, 0, &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
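
Several drivers in this series swap irq_domain_add_*() for the fwnode-based irq_domain_create_*() variants, which work for both DT and ACPI via dev_fwnode(). A minimal sketch with hypothetical names:

#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *foo_create_domain(struct device *dev,
					    unsigned int nr_irqs,
					    const struct irq_domain_ops *ops,
					    void *host_data)
{
	return irq_domain_create_linear(dev_fwnode(dev), nr_irqs, ops,
					host_data);
}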
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index 9d1b95e429f1..40413e06b69c 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -175,7 +175,7 @@ static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off)
return !!get.state;
}
-static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+static int rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
{
struct rpi_exp_gpio *gpio;
struct gpio_get_set_state set;
@@ -188,10 +188,14 @@ static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
ret = rpi_firmware_property(gpio->fw, RPI_FIRMWARE_SET_GPIO_STATE,
&set, sizeof(set));
- if (ret || set.gpio != 0)
+ if (ret || set.gpio != 0) {
dev_err(gc->parent,
"Failed to set GPIO %u state (%d %x)\n", off, ret,
set.gpio);
+ return ret ? ret : -EIO;
+ }
+
+ return 0;
}
static int rpi_exp_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index c34dcadaee36..5a69e4534591 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -35,14 +35,20 @@ static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct rc5t583_gpio *rc5t583_gpio = gpiochip_get_data(gc);
struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
if (val)
- rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
else
- rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
+
+ return ret;
}
static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
@@ -66,7 +72,10 @@ static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset,
struct device *parent = rc5t583_gpio->rc5t583->dev;
int ret;
- rc5t583_gpio_set(gc, offset, value);
+ ret = rc5t583_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
+
ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
if (ret < 0)
return ret;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 6159fda38d5d..86777e097fd8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -331,23 +331,17 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- u32 bankmask, outputs, m, val = 0;
+ u32 outputs, m, val = 0;
unsigned long flags;
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
-
- if (!bankmask)
- return 0;
-
if (p->info.has_always_in) {
bits[0] = gpio_rcar_read(p, INDT) & bankmask;
return 0;
}
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,42 +350,40 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
}
-static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
+static int gpio_rcar_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
-static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- u32 val, bankmask;
-
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
+ u32 val;
- if (!bankmask)
- return;
-
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +460,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
- *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
+ if (ret) {
+ *npins = RCAR_MAX_GPIO_PER_BANK;
+ } else {
+ *npins = args.args[2];
+ of_node_put(args.np);
+ }
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -482,10 +479,13 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
{
u32 mask = GENMASK(p->gpio_chip.ngpio - 1, 0);
+ const unsigned long *valid_mask;
+
+ valid_mask = gpiochip_query_valid_mask(&p->gpio_chip);
/* Select "Input Enable" in INEN */
- if (p->gpio_chip.valid_mask)
- mask &= p->gpio_chip.valid_mask[0];
+ if (valid_mask)
+ mask &= valid_mask[0];
if (mask)
gpio_rcar_write(p, INEN, gpio_rcar_read(p, INEN) | mask);
}
@@ -505,7 +505,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
@@ -592,7 +592,6 @@ static void gpio_rcar_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int gpio_rcar_suspend(struct device *dev)
{
struct gpio_rcar_priv *p = dev_get_drvdata(dev);
@@ -651,16 +650,16 @@ static int gpio_rcar_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP*/
-static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, gpio_rcar_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend,
+ gpio_rcar_resume);
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
- .remove_new = gpio_rcar_remove,
+ .remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
- .pm = &gpio_rcar_pm_ops,
+ .pm = pm_sleep_ptr(&gpio_rcar_pm_ops),
.of_match_table = gpio_rcar_of_table,
}
};
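
The PM conversion in the rcar hunk above, shown in isolation: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() keep the suspend/resume callbacks visible to the compiler while letting it discard them when CONFIG_PM_SLEEP is off, so the #ifdef guards can go. The foo_* names are hypothetical.

#include <linux/pm.h>
#include <linux/platform_device.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP is disabled; ops dead-stripped */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};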
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index cb2f63eee2aa..7bbc6f0ce4c8 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -35,7 +36,7 @@
#define RDA_GPIO_BANK_NR 32
struct rda_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
void __iomem *base;
spinlock_t lock;
int irq;
@@ -208,6 +209,7 @@ static const struct irq_chip rda_gpio_irq_chip = {
static int rda_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct rda_gpio *rda_gpio;
@@ -235,24 +237,29 @@ static int rda_gpio_probe(struct platform_device *pdev)
spin_lock_init(&rda_gpio->lock);
- ret = bgpio_init(&rda_gpio->chip, dev, 4,
- rda_gpio->base + RDA_GPIO_VAL,
- rda_gpio->base + RDA_GPIO_SET,
- rda_gpio->base + RDA_GPIO_CLR,
- rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
- rda_gpio->base + RDA_GPIO_OEN_SET_IN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = rda_gpio->base + RDA_GPIO_VAL,
+ .set = rda_gpio->base + RDA_GPIO_SET,
+ .clr = rda_gpio->base + RDA_GPIO_CLR,
+ .dirout = rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ .dirin = rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&rda_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- rda_gpio->chip.label = dev_name(dev);
- rda_gpio->chip.ngpio = ngpios;
- rda_gpio->chip.base = -1;
+ rda_gpio->chip.gc.label = dev_name(dev);
+ rda_gpio->chip.gc.ngpio = ngpios;
+ rda_gpio->chip.gc.base = -1;
if (rda_gpio->irq >= 0) {
- girq = &rda_gpio->chip.irq;
+ girq = &rda_gpio->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip);
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -269,7 +276,7 @@ static int rda_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rda_gpio);
- return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip.gc, rda_gpio);
}
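
The bgpio_init() to gpio_generic_chip_init() conversion in miniature, assuming the <linux/gpio/generic.h> API introduced by this series: the former positional arguments become named fields of a config struct, and the gpio_chip is reached through the embedded .gc member. The register offsets here are hypothetical.

#include <linux/device.h>
#include <linux/gpio/generic.h>

static int foo_init_generic_chip(struct device *dev, void __iomem *base,
				 struct gpio_generic_chip *chip)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,			/* 32-bit registers */
		.dat = base + 0x00,		/* value */
		.set = base + 0x04,		/* set */
		.clr = base + 0x08,		/* clear */
		.dirout = base + 0x0c,		/* direction: out */
		.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
	};

	return gpio_generic_chip_init(chip, &config);
}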
static const struct of_device_id rda_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index ec7fb9220a47..ba62b81aa8ae 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -64,8 +64,8 @@ static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
}
/* set GPIO pin to value */
-static void rdc_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int rdc_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct rdc321x_gpio *gpch;
@@ -73,6 +73,8 @@ static void rdc_gpio_set_value(struct gpio_chip *chip,
spin_lock(&gpch->lock);
rdc_gpio_set_value_impl(chip, gpio, value);
spin_unlock(&gpch->lock);
+
+ return 0;
}
static int rdc_gpio_config(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index d6418f89d3f6..de527f4fc6c2 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/gpio/driver.h>
#include <linux/cpumask.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
@@ -41,7 +42,7 @@
/**
* realtek_gpio_ctrl - Realtek Otto GPIO driver data
*
- * @gc: Associated gpio_chip instance
+ * @chip: Associated gpio_generic_chip instance
* @base: Base address of the register block for a GPIO bank
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
@@ -64,7 +65,7 @@
* IMR on changes.
*/
struct realtek_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
@@ -101,7 +102,7 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- return container_of(gc, struct realtek_gpio_ctrl, gc);
+ return container_of(to_gpio_generic_chip(gc), struct realtek_gpio_ctrl, chip);
}
/*
@@ -194,7 +195,7 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
unsigned int line = irqd_to_hwirq(data);
unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, line);
+ gpiochip_enable_irq(&ctrl->chip.gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
@@ -213,7 +214,7 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, line);
+ gpiochip_disable_irq(&ctrl->chip.gc, line);
}
static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -356,8 +357,9 @@ MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
static int realtek_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
- unsigned long bgpio_flags;
+ unsigned long gen_gc_flags;
unsigned int dev_flags;
struct gpio_irq_chip *girq;
struct realtek_gpio_ctrl *ctrl;
@@ -388,32 +390,37 @@ static int realtek_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&ctrl->lock);
if (dev_flags & GPIO_PORTS_REVERSED) {
- bgpio_flags = 0;
+ gen_gc_flags = 0;
ctrl->bank_read = realtek_gpio_bank_read;
ctrl->bank_write = realtek_gpio_bank_write;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
- bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ gen_gc_flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
ctrl->bank_read = realtek_gpio_bank_read_swapped;
ctrl->bank_write = realtek_gpio_bank_write_swapped;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
- ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
- bgpio_flags);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + REALTEK_GPIO_REG_DATA,
+ .dirout = ctrl->base + REALTEK_GPIO_REG_DIR,
+ .flags = gen_gc_flags,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
dev_err(dev, "unable to init generic GPIO");
return err;
}
- ctrl->gc.ngpio = ngpios;
- ctrl->gc.owner = THIS_MODULE;
+ ctrl->chip.gc.ngpio = ngpios;
+ ctrl->chip.gc.owner = THIS_MODULE;
irq = platform_get_irq_optional(pdev, 0);
if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -442,7 +449,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
cpumask_set_cpu(cpu, &ctrl->cpu_irq_maskable);
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver realtek_gpio_driver = {
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 73c7260d89c0..f2238196faf1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -57,7 +57,7 @@ static int gpio_reg_direction_input(struct gpio_chip *gc, unsigned offset)
return r->direction & BIT(offset) ? 0 : -ENOTSUPP;
}
-static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
+static int gpio_reg_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -72,6 +72,8 @@ static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
r->out = val;
writel_relaxed(val, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
@@ -92,8 +94,8 @@ static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
return !!(val & mask);
}
-static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -102,6 +104,8 @@ static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
r->out = (r->out & ~*mask) | (*bits & *mask);
writel_relaxed(r->out, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 71684dee2ca5..ab9e4077fa60 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -17,6 +17,8 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/regmap.h>
+#include "gpiolib.h"
+
struct gpio_regmap {
struct device *parent;
struct regmap *regmap;
@@ -30,6 +32,11 @@ struct gpio_regmap {
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+#ifdef CONFIG_REGMAP_IRQ
+ int regmap_irq_line;
+ struct regmap_irq_chip_data *irq_chip_data;
+#endif
+
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
@@ -81,33 +88,43 @@ static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & mask);
}
-static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
unsigned int reg, mask;
+ int ret;
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
if (val)
- regmap_update_bits(gpio->regmap, reg, mask, mask);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, mask);
else
- regmap_update_bits(gpio->regmap, reg, mask, 0);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, 0);
+
+ return ret;
}
-static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base, reg, mask;
+ int ret;
if (val)
base = gpio_regmap_addr(gpio->reg_set_base);
else
base = gpio_regmap_addr(gpio->reg_clr_base);
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
- regmap_write(gpio->regmap, reg, mask);
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ return regmap_write(gpio->regmap, reg, mask);
}
static int gpio_regmap_get_direction(struct gpio_chip *chip,
@@ -203,6 +220,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
*/
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
{
+ struct irq_domain *irq_domain;
struct gpio_regmap *gpio;
struct gpio_chip *chip;
int ret;
@@ -210,9 +228,6 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!config->parent)
return ERR_PTR(-EINVAL);
- if (!config->ngpio)
- return ERR_PTR(-EINVAL);
-
/* we need at least one */
if (!config->reg_dat_base && !config->reg_set_base)
return ERR_PTR(-EINVAL);
@@ -233,35 +248,23 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
gpio->parent = config->parent;
gpio->driver_data = config->drvdata;
gpio->regmap = config->regmap;
- gpio->ngpio_per_reg = config->ngpio_per_reg;
- gpio->reg_stride = config->reg_stride;
- gpio->reg_mask_xlate = config->reg_mask_xlate;
gpio->reg_dat_base = config->reg_dat_base;
gpio->reg_set_base = config->reg_set_base;
gpio->reg_clr_base = config->reg_clr_base;
gpio->reg_dir_in_base = config->reg_dir_in_base;
gpio->reg_dir_out_base = config->reg_dir_out_base;
- /* if not set, assume there is only one register */
- if (!gpio->ngpio_per_reg)
- gpio->ngpio_per_reg = config->ngpio;
-
- /* if not set, assume they are consecutive */
- if (!gpio->reg_stride)
- gpio->reg_stride = 1;
-
- if (!gpio->reg_mask_xlate)
- gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
-
chip = &gpio->gpio_chip;
chip->parent = config->parent;
chip->fwnode = config->fwnode;
chip->base = -1;
- chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->init_valid_mask = config->init_valid_mask;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
chip->set = gpio_regmap_set_with_clear;
@@ -274,12 +277,47 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->direction_output = gpio_regmap_direction_output;
}
+ chip->ngpio = config->ngpio;
+ if (!chip->ngpio) {
+ ret = gpiochip_get_ngpios(chip, chip->parent);
+ if (ret)
+ goto err_free_gpio;
+ }
+
+ /* if not set, assume there is only one register */
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ gpio->reg_stride = config->reg_stride;
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
goto err_free_gpio;
- if (config->irq_domain) {
- ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+#ifdef CONFIG_REGMAP_IRQ
+ if (config->regmap_irq_chip) {
+ gpio->regmap_irq_line = config->regmap_irq_line;
+ ret = regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap,
+ config->regmap_irq_line, config->regmap_irq_flags,
+ 0, config->regmap_irq_chip, &gpio->irq_chip_data);
+ if (ret)
+ goto err_free_gpio;
+
+ irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
+ } else
+#endif
+ irq_domain = config->irq_domain;
+
+ if (irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, irq_domain);
if (ret)
goto err_remove_gpiochip;
}
@@ -300,6 +338,11 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register);
*/
void gpio_regmap_unregister(struct gpio_regmap *gpio)
{
+#ifdef CONFIG_REGMAP_IRQ
+ if (gpio->irq_chip_data)
+ regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data);
+#endif
+
gpiochip_remove(&gpio->gpio_chip);
kfree(gpio);
}
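
What the regmap-IRQ plumbing added above enables on the consumer side: a driver can hand gpio_regmap_register() a regmap_irq_chip and let gpio-regmap create and tear down the IRQ domain itself. A sketch under that assumption; all foo_* names and register offsets are hypothetical.

#include <linux/gpio/regmap.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG_LINE(0, 8),	/* one IRQ per line, 8-bit regs */
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name = "foo-gpio",
	.status_base = 0x10,
	.mask_base = 0x14,
	.num_regs = 1,
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),
};

static struct gpio_regmap *foo_register(struct device *parent,
					struct regmap *map, int irq)
{
	struct gpio_regmap_config config = {
		.parent = parent,
		.regmap = map,
		.ngpio = 8,
		.reg_dat_base = GPIO_REGMAP_ADDR(0x00),
		.reg_set_base = GPIO_REGMAP_ADDR(0x04),
		.regmap_irq_chip = &foo_irq_chip,
		.regmap_irq_line = irq,
	};

	return gpio_regmap_register(&config);
}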
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 365ab947983c..47174eb3ba76 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -26,9 +26,16 @@
#include "../pinctrl/core.h"
#include "../pinctrl/pinctrl-rockchip.h"
+/*
+ * Version ID Register
+ * Bits [31:24] - Major Version
+ * Bits [23:16] - Minor Version
+ * Bits [15:0] - Revision Number
+ */
#define GPIO_TYPE_V1 (0) /* GPIO Version ID reserved */
-#define GPIO_TYPE_V2 (0x01000C2B) /* GPIO Version ID 0x01000C2B */
-#define GPIO_TYPE_V2_1 (0x0101157C) /* GPIO Version ID 0x0101157C */
+#define GPIO_TYPE_V2 (0x01000C2B)
+#define GPIO_TYPE_V2_1 (0x0101157C)
+#define GPIO_TYPE_V2_2 (0x010219C8)
static const struct rockchip_gpio_regs gpio_regs_v1 = {
.port_dr = 0x00,
@@ -170,8 +177,8 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
return 0;
}
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
unsigned long flags;
@@ -179,6 +186,8 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ return 0;
}
static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -514,8 +523,8 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_add_linear(bank->of_node, 32,
- &irq_generic_chip_ops, NULL);
+ bank->domain = irq_domain_create_linear(dev_fwnode(bank->dev), 32, &irq_generic_chip_ops,
+ NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
bank->name);
@@ -602,7 +611,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
* files which don't set the "gpio-ranges" property or systems that
* utilize ACPI the driver has to call gpiochip_add_pin_range().
*/
- if (!of_property_read_bool(bank->of_node, "gpio-ranges")) {
+ if (!of_property_present(bank->of_node, "gpio-ranges")) {
struct device_node *pctlnp = of_get_parent(bank->of_node);
struct pinctrl_dev *pctldev = NULL;
@@ -661,8 +670,10 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
clk_prepare_enable(bank->clk);
id = readl(bank->reg_base + gpio_regs_v2.version_id);
- /* If not gpio v2, that is default to v1. */
- if (id == GPIO_TYPE_V2 || id == GPIO_TYPE_V2_1) {
+ switch (id) {
+ case GPIO_TYPE_V2:
+ case GPIO_TYPE_V2_1:
+ case GPIO_TYPE_V2_2:
bank->gpio_regs = &gpio_regs_v2;
bank->gpio_type = GPIO_TYPE_V2;
bank->db_clk = of_clk_get(bank->of_node, 1);
@@ -671,9 +682,14 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
clk_disable_unprepare(bank->clk);
return -EINVAL;
}
- } else {
+ break;
+ case GPIO_TYPE_V1:
bank->gpio_regs = &gpio_regs_v1;
bank->gpio_type = GPIO_TYPE_V1;
+ break;
+ default:
+ dev_err(bank->dev, "unsupported version ID: 0x%08x\n", id);
+ return -ENODEV;
}
return 0;
@@ -753,7 +769,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
list_del(&cfg->head);
switch (cfg->param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
if (ret)
dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
@@ -795,7 +811,7 @@ static const struct of_device_id rockchip_gpio_match[] = {
static struct platform_driver rockchip_gpio_driver = {
.probe = rockchip_gpio_probe,
- .remove_new = rockchip_gpio_remove,
+ .remove = rockchip_gpio_remove,
.driver = {
.name = "rockchip-gpio",
.of_match_table = rockchip_gpio_match,
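
A quick host-side check of the version-ID layout documented above (major [31:24], minor [23:16], revision [15:0]) against the three supported v2 IDs:

#include <stdio.h>

static void decode(unsigned int id)
{
	printf("0x%08x -> v%u.%u, revision 0x%04x\n",
	       id, (id >> 24) & 0xff, (id >> 16) & 0xff, id & 0xffff);
}

int main(void)
{
	decode(0x01000C2B);	/* GPIO_TYPE_V2   -> v1.0, rev 0x0c2b */
	decode(0x0101157C);	/* GPIO_TYPE_V2_1 -> v1.1, rev 0x157c */
	decode(0x010219C8);	/* GPIO_TYPE_V2_2 -> v1.2, rev 0x19c8 */
	return 0;
}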
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index bf7f008f58d7..d46b40dd5283 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -275,7 +275,7 @@ static int rtd_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
}
-static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct rtd_gpio *data = gpiochip_get_data(chip);
u32 mask = BIT(offset % 32);
@@ -292,6 +292,8 @@ static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
else
val &= ~mask;
writel_relaxed(val, data->base + dato_reg_offset);
+
+ return 0;
}
static int rtd_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 242dad763ac4..7f6a62f5d1ee 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -43,11 +43,14 @@ static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
BIT(offset);
}
-static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sa1100_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int reg = value ? R_GPSR : R_GPCR;
writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg);
+
+ return 0;
}
static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -319,7 +322,7 @@ void __init sa1100_init_gpio(void)
gpiochip_add_data(&sa1100_gpio_chip.chip, NULL);
- sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_gpio_irqdomain = irq_domain_create_simple(NULL,
28, IRQ_GPIO0,
&sa1100_gpio_irqdomain_ops, sgc);
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index d770a6f3d846..5005688f6e67 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -169,15 +169,15 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin)
/*
* sama5d2_piobu_set() - gpiochip set
*/
-static void sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
- int value)
+static int sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
if (!value)
value = PIOBU_LOW;
else
value = PIOBU_HIGH;
- sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
+ return sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
}
static int sama5d2_piobu_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index ff0341b1222f..966d16a6d515 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -117,7 +117,7 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num)
return sch_gpio_reg_get(sch, gpio_num, GLV);
}
-static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
+static int sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned long flags;
@@ -125,6 +125,8 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock_irqrestore(&sch->lock, flags);
+
+ return 0;
}
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
@@ -146,8 +148,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
* But we cannot prevent a short low pulse if direction is set to high
* and an external pull-up is connected.
*/
- sch_gpio_set(gc, gpio_num, val);
- return 0;
+ return sch_gpio_set(gc, gpio_num, val);
}
static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio_num)
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index ba4fccf3cc94..f95566998d30 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -178,14 +178,16 @@ static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
outb(data, block->runtime_reg + block->data_reg);
}
-static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int sch311x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
spin_lock(&block->lock);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
+
+ return 0;
}
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 067c8edb62e2..94ef2efbd14f 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -32,7 +33,7 @@
struct sifive_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
@@ -41,10 +42,10 @@ struct sifive_gpio {
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
{
- unsigned long flags;
unsigned int trigger;
- raw_spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+
trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
(trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
@@ -54,7 +55,6 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
(trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
(trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
- raw_spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
}
static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
@@ -72,13 +72,12 @@ static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static void sifive_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sifive_gpio *chip = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
int offset = hwirq % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
irq_chip_enable_parent(d);
@@ -86,13 +85,13 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
/* Switch to input */
gc->direction_input(gc, offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear any sticky pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
/* Enable interrupts */
assign_bit(offset, &chip->irq_state, 1);
@@ -118,15 +117,14 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear all pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
irq_chip_eoi_parent(d);
}
@@ -174,12 +172,12 @@ static const struct regmap_config sifive_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.disable_locking = true,
};
static int sifive_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct irq_domain *parent;
struct gpio_irq_chip *girq;
@@ -218,13 +216,17 @@ static int sifive_gpio_probe(struct platform_device *pdev)
*/
parent = irq_get_irq_data(chip->irq_number[0])->domain;
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + SIFIVE_GPIO_INPUT_VAL,
- chip->base + SIFIVE_GPIO_OUTPUT_VAL,
- NULL,
- chip->base + SIFIVE_GPIO_OUTPUT_EN,
- chip->base + SIFIVE_GPIO_INPUT_EN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + SIFIVE_GPIO_INPUT_VAL,
+ .set = chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ .dirout = chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ .dirin = chip->base + SIFIVE_GPIO_INPUT_EN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
@@ -237,12 +239,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
chip->irq_state = 0;
- chip->gc.base = -1;
- chip->gc.ngpio = ngpio;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
- girq = &chip->gc.irq;
+ chip->gen_gc.gc.base = -1;
+ chip->gen_gc.gc.ngpio = ngpio;
+ chip->gen_gc.gc.label = dev_name(dev);
+ chip->gen_gc.gc.parent = dev;
+ chip->gen_gc.gc.owner = THIS_MODULE;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &sifive_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -250,7 +252,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- return gpiochip_add_data(&chip->gc, chip);
+ return gpiochip_add_data(&chip->gen_gc.gc, chip);
}
static const struct of_device_id sifive_gpio_match[] = {
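The sifive conversion replaces bgpio_init() and its long positional argument list with gpio_generic_chip_init() plus a designated-initializer config struct, and replaces open-coded bgpio_lock critical sections with cleanup.h guards. A condensed sketch of the pattern, assuming a hypothetical priv struct that embeds a struct gpio_generic_chip gen_gc and illustrative register offsets:

static int foo_init_generic(struct device *dev, struct foo *priv)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,
		.dat = priv->base + FOO_IN,
		.set = priv->base + FOO_OUT,
		.dirout = priv->base + FOO_OE,
		.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
	};
	int ret;

	ret = gpio_generic_chip_init(&priv->gen_gc, &config);
	if (ret)
		return ret;

	/* The chip lock is now taken through a scope-bound guard: */
	scoped_guard(gpio_generic_lock_irqsave, &priv->gen_gc) {
		/* register accesses here are serialized; no explicit unlock */
	}

	return 0;
}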
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index dcca1d7f173e..a83f5238427c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -10,7 +10,6 @@
#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -37,8 +36,10 @@
#include <linux/sysfs.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_SIM_NGPIO_MAX 1024
-#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
+#define GPIO_SIM_PROP_MAX 5 /* Max 4 properties + sentinel. */
#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
static DEFINE_IDA(gpio_sim_ida);
@@ -119,12 +120,14 @@ static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset)
return !!test_bit(offset, chip->value_map);
}
-static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
__assign_bit(offset, chip->value_map, value);
+
+ return 0;
}
static int gpio_sim_get_multiple(struct gpio_chip *gc,
@@ -138,14 +141,16 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void gpio_sim_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_sim_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
bitmap_replace(chip->value_map, chip->value_map, bits, mask,
gc->ngpio);
+
+ return 0;
}
static int gpio_sim_direction_output(struct gpio_chip *gc,
@@ -257,8 +262,7 @@ static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
guard(mutex)(&chip->lock);
for_each_hwgpio(gc, i, label)
- seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
- gc->base + i,
+ seq_printf(seq, " gpio-%-3d (%s) %s,%s\n", i,
label ?: "<unused>",
test_bit(i, chip->direction_map) ? "input" :
test_bit(i, chip->value_map) ? "output-high" :
@@ -413,11 +417,6 @@ static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip)
return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip);
}
-static int gpio_sim_dev_match_fwnode(struct device *dev, void *data)
-{
- return device_match_fwnode(dev, data);
-}
-
static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
{
struct gpio_sim_chip *chip;
@@ -503,7 +502,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (ret)
return ret;
- chip->dev = device_find_child(dev, swnode, gpio_sim_dev_match_fwnode);
+ chip->dev = device_find_child(dev, swnode, device_match_fwnode);
if (!chip->dev)
return -ENODEV;
@@ -520,15 +519,12 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
static int gpio_sim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct fwnode_handle *swnode;
int ret;
- device_for_each_child_node(dev, swnode) {
+ device_for_each_child_node_scoped(dev, swnode) {
ret = gpio_sim_add_bank(swnode, dev);
- if (ret) {
- fwnode_handle_put(swnode);
+ if (ret)
return ret;
- }
}
return 0;
@@ -549,14 +545,9 @@ static struct platform_driver gpio_sim_driver = {
};
struct gpio_sim_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- /*
- * If pdev is NULL, the device is 'pending' (waiting for configuration).
- * Once the pointer is assigned, the device has been created and the
- * item is 'live'.
- */
- struct platform_device *pdev;
int id;
/*
@@ -570,46 +561,11 @@ struct gpio_sim_device {
*/
struct mutex lock;
- /*
- * This is used to synchronously wait for the driver's probe to complete
- * and notify the user-space about any errors.
- */
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_hog *hogs;
struct list_head bank_list;
};
-/* This is called with dev->lock already taken. */
-static int gpio_sim_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_sim_device *simdev = container_of(nb,
- struct gpio_sim_device,
- bus_notifier);
- struct device *dev = data;
- char devname[32];
-
- snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- simdev->driver_bound = true;
- else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
- simdev->driver_bound = false;
- else
- return NOTIFY_DONE;
-
- complete(&simdev->probe_completion);
-
- return NOTIFY_OK;
-}
-
static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item)
{
struct config_group *group = to_config_group(item);
@@ -672,6 +628,7 @@ struct gpio_sim_line {
unsigned int offset;
char *name;
+ bool valid;
/* There can only be one hog per line. */
struct gpio_sim_hog *hog;
@@ -716,7 +673,7 @@ static bool gpio_sim_device_is_live(struct gpio_sim_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
static char *gpio_sim_strdup_trimmed(const char *str, size_t count)
@@ -738,7 +695,7 @@ static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -787,6 +744,36 @@ gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
}
}
+static unsigned int gpio_sim_get_reserved_ranges_size(struct gpio_sim_bank *bank)
+{
+ struct gpio_sim_line *line;
+ unsigned int size = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ size += 2;
+ }
+
+ return size;
+}
+
+static void gpio_sim_set_reserved_ranges(struct gpio_sim_bank *bank,
+ u32 *ranges)
+{
+ struct gpio_sim_line *line;
+ int i = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ ranges[i++] = line->offset;
+ ranges[i++] = 1;
+ }
+}
+
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
{
struct gpiod_hog *hog;
@@ -887,9 +874,10 @@ static struct fwnode_handle *
gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
struct fwnode_handle *parent)
{
+ unsigned int prop_idx = 0, line_names_size, ranges_size;
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
+ u32 *ranges __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -913,6 +901,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
line_names, line_names_size);
}
+ ranges_size = gpio_sim_get_reserved_ranges_size(bank);
+ if (ranges_size) {
+ ranges = kcalloc(ranges_size, sizeof(u32), GFP_KERNEL);
+ if (!ranges)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_reserved_ranges(bank, ranges);
+
+ properties[prop_idx++] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ "gpio-reserved-ranges",
+ ranges, ranges_size);
+ }
+
return fwnode_create_software_node(properties, parent);
}
@@ -947,7 +948,6 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
struct gpio_sim_bank *bank;
int ret;
@@ -989,31 +989,13 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
pdevinfo.fwnode = swnode;
pdevinfo.id = dev->id;
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- gpio_sim_remove_hogs(dev);
- gpio_sim_remove_swnode_recursive(swnode);
- return PTR_ERR(pdev);
- }
-
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
-
- if (!dev->driver_bound) {
- /* Probe failed, check kernel log. */
- platform_device_unregister(pdev);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret) {
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- return -ENXIO;
+ return ret;
}
- dev->pdev = pdev;
-
return 0;
}
@@ -1023,11 +1005,37 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- dev->pdev = NULL;
+}
+
+static void
+gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_sim_bank *bank;
+ struct gpio_sim_line *line;
+ struct config_item *item;
+
+ /*
+ * The device only needs to depend on leaf entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
+ else
+ configfs_undepend_item_unlocked(item);
+ }
+ }
}
static ssize_t
@@ -1042,14 +1050,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_sim_device_lockup_configfs(dev, true);
- if (live == gpio_sim_device_is_live(dev))
- ret = -EPERM;
- else if (live)
- ret = gpio_sim_device_activate(dev);
- else
- gpio_sim_device_deactivate(dev);
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_sim_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_sim_device_activate(dev);
+ else
+ gpio_sim_device_deactivate(dev);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_sim_device_lockup_configfs(dev, false);
return ret ?: count;
}
@@ -1091,7 +1109,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
if (gpio_sim_device_is_live(dev))
- return device_for_each_child(&dev->pdev->dev, &ctx,
+ return device_for_each_child(&dev->probe_data.pdev->dev, &ctx,
gpio_sim_emit_chip_name);
return sprintf(page, "none\n");
@@ -1215,8 +1233,41 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
CONFIGFS_ATTR(gpio_sim_line_config_, name);
+static ssize_t
+gpio_sim_line_config_valid_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+
+ guard(mutex)(&dev->lock);
+
+ return sprintf(page, "%c\n", line->valid ? '1' : '0');
+}
+
+static ssize_t gpio_sim_line_config_valid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ bool valid;
+ int ret;
+
+ ret = kstrtobool(page, &valid);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ line->valid = valid;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_line_config_, valid);
+
static struct configfs_attribute *gpio_sim_line_config_attrs[] = {
&gpio_sim_line_config_attr_name,
+ &gpio_sim_line_config_attr_valid,
NULL
};
@@ -1425,6 +1476,7 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
line->parent = bank;
line->offset = offset;
+ line->valid = true;
list_add_tail(&line->siblings, &bank->line_list);
return &line->group;
@@ -1532,8 +1584,7 @@ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->bank_list);
- dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
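gpio-sim previously open-coded a bus notifier plus a completion to wait for its platform device to bind and to surface probe errors to configfs writers; that machinery now lives in the shared dev-sync-probe helper. The consumer side, reduced to a sketch that mirrors only the calls visible in the hunks above:

/* At configfs item creation time: */
dev_sync_probe_init(&dev->probe_data);

/*
 * At activation: registers the device, then blocks until the driver
 * either bound or failed to probe, and returns the outcome.
 */
ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
if (ret)
	return ret;	/* probe failed, device was already torn down */

/* dev->probe_data.pdev is valid from here until: */
dev_sync_probe_unregister(&dev->probe_data);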
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 051bc99bdfb2..958034b9f3f3 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -160,8 +160,8 @@ static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
return ret;
}
-static void gpio_siox_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int gpio_siox_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
u8 mask = 1 << (19 - offset);
@@ -174,6 +174,8 @@ static void gpio_siox_set(struct gpio_chip *chip,
ddata->setdata[0] &= ~mask;
mutex_unlock(&ddata->lock);
+
+ return 0;
}
static int gpio_siox_direction_input(struct gpio_chip *chip,
@@ -191,8 +193,7 @@ static int gpio_siox_direction_output(struct gpio_chip *chip,
if (offset < 12)
return -EINVAL;
- gpio_siox_set(chip, offset, value);
- return 0;
+ return gpio_siox_set(chip, offset, value);
}
static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index 07e0d7180579..969dddd3d6fa 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -234,7 +234,9 @@ static int gpio_la_poll_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- devm_mutex_init(dev, &priv->blob_lock);
+ ret = devm_mutex_init(dev, &priv->blob_lock);
+ if (ret)
+ return ret;
fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);
@@ -304,14 +306,14 @@ static void gpio_la_poll_remove(struct platform_device *pdev)
}
static const struct of_device_id gpio_la_poll_of_match[] = {
- { .compatible = GPIO_LA_NAME },
+ { .compatible = "gpio-sloppy-logic-analyzer" },
{ }
};
MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match);
static struct platform_driver gpio_la_poll_device_driver = {
.probe = gpio_la_poll_probe,
- .remove_new = gpio_la_poll_remove,
+ .remove = gpio_la_poll_remove,
.driver = {
.name = GPIO_LA_NAME,
.of_match_table = gpio_la_poll_of_match,
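devm_mutex_init() allocates a devres node so the mutex can be destroyed on detach; unlike plain mutex_init() it can therefore fail, and its return value must be checked, which is all this hunk does. The general shape, with a hypothetical driver struct carrying a .lock mutex:

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Can return -ENOMEM; never ignore the result. */
	ret = devm_mutex_init(&pdev->dev, &priv->lock);
	if (ret)
		return ret;

	return 0;
}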
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index c2a2c76c1652..37c133837729 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -39,7 +40,7 @@ struct sdv_gpio_chip_data {
void __iomem *gpio_pub_base;
struct irq_domain *id;
struct irq_chip_generic *gc;
- struct gpio_chip chip;
+ struct gpio_generic_chip gen_gc;
};
static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
@@ -169,8 +170,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
- sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+ sd->id = irq_domain_create_legacy(dev_fwnode(&pdev->dev), SDV_NUM_PUB_GPIOS, sd->irq_base,
+ 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
@@ -180,6 +181,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
static int sdv_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct gpio_generic_chip_config config;
struct sdv_gpio_chip_data *sd;
int ret;
u32 mux_val;
@@ -206,15 +208,21 @@ static int sdv_gpio_probe(struct pci_dev *pdev,
if (!ret)
writel(mux_val, sd->gpio_pub_base + GPMUXCTL);
- ret = bgpio_init(&sd->chip, &pdev->dev, 4,
- sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = sd->gpio_pub_base + GPINR,
+ .set = sd->gpio_pub_base + GPOUTR,
+ .dirout = sd->gpio_pub_base + GPOER,
+ };
+
+ ret = gpio_generic_chip_init(&sd->gen_gc, &config);
if (ret)
return ret;
- sd->chip.ngpio = SDV_NUM_PUB_GPIOS;
+ sd->gen_gc.gc.ngpio = SDV_NUM_PUB_GPIOS;
- ret = devm_gpiochip_add_data(&pdev->dev, &sd->chip, sd);
+ ret = devm_gpiochip_add_data(&pdev->dev, &sd->gen_gc.gc, sd);
if (ret < 0) {
dev_err(&pdev->dev, "gpiochip_add() failed.\n");
return ret;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
new file mode 100644
index 000000000000..eb66a15c002f
--- /dev/null
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* register offset */
+#define SPACEMIT_GPLR 0x00 /* port level - R */
+#define SPACEMIT_GPDR 0x0c /* port direction - R/W */
+#define SPACEMIT_GPSR 0x18 /* port set - W */
+#define SPACEMIT_GPCR 0x24 /* port clear - W */
+#define SPACEMIT_GRER 0x30 /* port rising edge R/W */
+#define SPACEMIT_GFER 0x3c /* port falling edge R/W */
+#define SPACEMIT_GEDR 0x48 /* edge detect status - R/W1C */
+#define SPACEMIT_GSDR 0x54 /* (set) direction - W */
+#define SPACEMIT_GCDR 0x60 /* (clear) direction - W */
+#define SPACEMIT_GSRER 0x6c /* (set) rising edge detect enable - W */
+#define SPACEMIT_GCRER 0x78 /* (clear) rising edge detect enable - W */
+#define SPACEMIT_GSFER 0x84 /* (set) falling edge detect enable - W */
+#define SPACEMIT_GCFER 0x90 /* (clear) falling edge detect enable - W */
+#define SPACEMIT_GAPMASK		0x9c  /* interrupt mask, 0 disable, 1 enable - R/W */
+
+#define SPACEMIT_NR_BANKS 4
+#define SPACEMIT_NR_GPIOS_PER_BANK 32
+
+#define to_spacemit_gpio_bank(x) container_of((x), struct spacemit_gpio_bank, gc)
+
+struct spacemit_gpio;
+
+struct spacemit_gpio_bank {
+ struct gpio_generic_chip chip;
+ struct spacemit_gpio *sg;
+ void __iomem *base;
+ u32 irq_mask;
+ u32 irq_rising_edge;
+ u32 irq_falling_edge;
+};
+
+struct spacemit_gpio {
+ struct device *dev;
+ struct spacemit_gpio_bank sgb[SPACEMIT_NR_BANKS];
+};
+
+static u32 spacemit_gpio_bank_index(struct spacemit_gpio_bank *gb)
+{
+ return (u32)(gb - gb->sg->sgb);
+}
+
+static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct spacemit_gpio_bank *gb = dev_id;
+ unsigned long pending;
+ u32 n, gedr;
+
+ gedr = readl(gb->base + SPACEMIT_GEDR);
+ if (!gedr)
+ return IRQ_NONE;
+ writel(gedr, gb->base + SPACEMIT_GEDR);
+
+ pending = gedr & gb->irq_mask;
+ if (!pending)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &pending, BITS_PER_LONG)
+ handle_nested_irq(irq_find_mapping(gb->chip.gc.irq.domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static void spacemit_gpio_irq_ack(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), gb->base + SPACEMIT_GEDR);
+}
+
+static void spacemit_gpio_irq_mask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask &= ~bit;
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GCRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GCFER);
+}
+
+static void spacemit_gpio_irq_unmask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask |= bit;
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GSRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GSFER);
+
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+}
+
+static int spacemit_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ if (type & IRQ_TYPE_EDGE_RISING) {
+ gb->irq_rising_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSRER);
+ } else {
+ gb->irq_rising_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCRER);
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ gb->irq_falling_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSFER);
+ } else {
+ gb->irq_falling_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCFER);
+ }
+
+ return 0;
+}
+
+static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, "%s-%d", dev_name(gb->chip.gc.parent), spacemit_gpio_bank_index(gb));
+}
+
+static struct irq_chip spacemit_gpio_chip = {
+ .name = "k1-gpio-irqchip",
+ .irq_ack = spacemit_gpio_irq_ack,
+ .irq_mask = spacemit_gpio_irq_mask,
+ .irq_unmask = spacemit_gpio_irq_unmask,
+ .irq_set_type = spacemit_gpio_irq_set_type,
+ .irq_print_chip = spacemit_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i)
+{
+ struct spacemit_gpio_bank *gb = gpiochip_get_data(gc);
+ struct spacemit_gpio *sg = gb->sg;
+
+ if (i >= SPACEMIT_NR_BANKS)
+ return false;
+
+ return (gc == &sg->sgb[i].chip.gc);
+}
+
+static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
+ void __iomem *regs,
+ int index, int irq)
+{
+ struct spacemit_gpio_bank *gb = &sg->sgb[index];
+ struct gpio_generic_chip_config config;
+ struct gpio_chip *gc = &gb->chip.gc;
+ struct device *dev = sg->dev;
+ struct gpio_irq_chip *girq;
+ void __iomem *dat, *set, *clr, *dirin, *dirout;
+ int ret, bank_base[] = { 0x0, 0x4, 0x8, 0x100 };
+
+ gb->base = regs + bank_base[index];
+
+ dat = gb->base + SPACEMIT_GPLR;
+ set = gb->base + SPACEMIT_GPSR;
+ clr = gb->base + SPACEMIT_GPCR;
+ dirin = gb->base + SPACEMIT_GCDR;
+ dirout = gb->base + SPACEMIT_GSDR;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = GPIO_GENERIC_UNREADABLE_REG_SET |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
+ /* This registers 32 GPIO lines per bank */
+ ret = gpio_generic_chip_init(&gb->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init gpio chip\n");
+
+ gb->sg = sg;
+
+ gc->label = dev_name(dev);
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->ngpio = SPACEMIT_NR_GPIOS_PER_BANK;
+ gc->base = -1;
+ gc->of_gpio_n_cells = 3;
+ gc->of_node_instance_match = spacemit_of_node_instance_match;
+
+ girq = &gc->irq;
+ girq->threaded = true;
+ girq->handler = handle_simple_irq;
+
+ gpio_irq_chip_set_chip(girq, &spacemit_gpio_chip);
+
+ /* Disable Interrupt */
+ writel(0, gb->base + SPACEMIT_GAPMASK);
+ /* Disable Edge Detection Settings */
+ writel(0x0, gb->base + SPACEMIT_GRER);
+ writel(0x0, gb->base + SPACEMIT_GFER);
+ /* Clear Interrupt */
+ writel(0xffffffff, gb->base + SPACEMIT_GCRER);
+ writel(0xffffffff, gb->base + SPACEMIT_GCFER);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ spacemit_gpio_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ gb->chip.gc.label, gb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ ret = devm_gpiochip_add_data(dev, gc, gb);
+ if (ret)
+ return ret;
+
+	/* Distinguish IRQ domain, for selecting three-cell mode */
+ irq_domain_update_bus_token(girq->domain, DOMAIN_BUS_WIRED);
+
+ return 0;
+}
+
+static int spacemit_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_gpio *sg;
+ struct clk *core_clk, *bus_clk;
+ void __iomem *regs;
+ int i, irq, ret;
+
+ sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ sg->dev = dev;
+
+ core_clk = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk), "failed to get clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
+ for (i = 0; i < SPACEMIT_NR_BANKS; i++) {
+ ret = spacemit_gpio_add_bank(sg, regs, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id spacemit_gpio_dt_ids[] = {
+ { .compatible = "spacemit,k1-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spacemit_gpio_dt_ids);
+
+static struct platform_driver spacemit_gpio_driver = {
+ .probe = spacemit_gpio_probe,
+ .driver = {
+ .name = "k1-gpio",
+ .of_match_table = spacemit_gpio_dt_ids,
+ },
+};
+module_platform_driver(spacemit_gpio_driver);
+
+MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
+MODULE_DESCRIPTION("GPIO driver for SpacemiT K1 SoC");
+MODULE_LICENSE("GPL");
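All four K1 banks share one interrupt line and each bank handler is registered with IRQF_SHARED, so a handler must return IRQ_NONE when its own status register shows nothing pending, otherwise interrupt sharing breaks down. The discipline, reduced to a sketch (foo_bank and FOO_STATUS are illustrative):

static irqreturn_t foo_bank_irq(int irq, void *dev_id)
{
	struct foo_bank *bank = dev_id;
	u32 pending = readl(bank->base + FOO_STATUS);

	if (!pending)
		return IRQ_NONE;	/* not ours; let the next sharer look */

	writel(pending, bank->base + FOO_STATUS);	/* W1C acknowledge */
	/* ... dispatch per-bit nested handlers ... */
	return IRQ_HANDLED;
}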
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 51539185400d..96a0e1211500 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -51,13 +51,8 @@ struct spear_spics {
struct gpio_chip chip;
};
-/* gpio framework specific routines */
-static int spics_get_value(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
-}
-
-static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
+static int spics_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct spear_spics *spics = gpiochip_get_data(chip);
u32 tmp;
@@ -74,18 +69,14 @@ static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
tmp &= ~(0x1 << spics->cs_value_bit);
tmp |= value << spics->cs_value_bit;
writel_relaxed(tmp, spics->base + spics->perip_cfg);
-}
-static int spics_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
+ return 0;
}
static int spics_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- spics_set_value(chip, offset, value);
- return 0;
+ return spics_set_value(chip, offset, value);
}
static int spics_request(struct gpio_chip *chip, unsigned offset)
@@ -148,9 +139,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.base = -1;
spics->chip.request = spics_request;
spics->chip.free = spics_free;
- spics->chip.direction_input = spics_direction_input;
spics->chip.direction_output = spics_direction_output;
- spics->chip.get = spics_get_value;
spics->chip.set = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index c117c11bfb29..413bcd0a4240 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -108,10 +108,12 @@ static int sprd_gpio_get(struct gpio_chip *chip, unsigned int offset)
return sprd_gpio_read(chip, offset, SPRD_GPIO_DATA);
}
-static void sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
sprd_gpio_update(chip, offset, SPRD_GPIO_DATA, value);
+
+ return 0;
}
static void sprd_gpio_irq_mask(struct irq_data *data)
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 75a3633ceddb..6faf30347a36 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -53,7 +54,7 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(ret & mask);
}
-static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int stmpe_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -66,9 +67,9 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
* For them we need to write 0 to clear and 1 to set.
*/
if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB])
- stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
- else
- stmpe_reg_write(stmpe, reg, mask);
+ return stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
+
+ return stmpe_reg_write(stmpe, reg, mask);
}
static int stmpe_gpio_get_direction(struct gpio_chip *chip,
@@ -97,8 +98,11 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip,
struct stmpe *stmpe = stmpe_gpio->stmpe;
u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
u8 mask = BIT(offset % 8);
+ int ret;
- stmpe_gpio_set(chip, offset, val);
+ ret = stmpe_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return stmpe_set_bits(stmpe, reg, mask, mask);
}
@@ -191,7 +195,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
- int i, j;
+ int ret, i, j;
/*
* STMPE1600: to be able to get IRQ from pins,
@@ -199,8 +203,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
* GPSR or GPCR registers
*/
if (stmpe->partnum == STMPE1600) {
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
+ goto err;
+ }
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
+ goto err;
+ }
}
for (i = 0; i < CACHE_NR_REGS; i++) {
@@ -222,6 +234,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
}
}
+err:
mutex_unlock(&stmpe_gpio->irq_lock);
}
@@ -249,9 +262,8 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
}
-static void stmpe_dbg_show_one(struct seq_file *s,
- struct gpio_chip *gc,
- unsigned offset, unsigned gpio)
+static void stmpe_dbg_show_one(struct seq_file *s, struct gpio_chip *gc,
+ unsigned int offset)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -273,8 +285,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)",
- val ? "hi" : "lo");
+ offset, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -342,8 +353,8 @@ static void stmpe_dbg_show_one(struct seq_file *s,
irqen = !!(ret & mask);
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
- gpio, label ?: "(none)",
- val ? "hi" : "lo",
+ offset, label ?: "(none)",
+ str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
rise_values[rise],
@@ -354,10 +365,9 @@ static void stmpe_dbg_show_one(struct seq_file *s,
static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
unsigned i;
- unsigned gpio = gc->base;
- for (i = 0; i < gc->ngpio; i++, gpio++) {
- stmpe_dbg_show_one(s, gc, i, gpio);
+ for (i = 0; i < gc->ngpio; i++) {
+ stmpe_dbg_show_one(s, gc, i);
seq_putc(s, '\n');
}
}
@@ -522,10 +532,16 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio);
}
+static const struct of_device_id stmpe_gpio_of_matches[] = {
+ { .compatible = "st,stmpe-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmpe_gpio_of_matches);
+
static struct platform_driver stmpe_gpio_driver = {
.driver = {
- .suppress_bind_attrs = true,
- .name = "stmpe-gpio",
+ .name = "stmpe-gpio",
+ .of_match_table = stmpe_gpio_of_matches,
},
.probe = stmpe_gpio_probe,
};
@@ -535,3 +551,13 @@ static int __init stmpe_gpio_init(void)
return platform_driver_register(&stmpe_gpio_driver);
}
subsys_initcall(stmpe_gpio_init);
+
+static void __exit stmpe_gpio_exit(void)
+{
+ platform_driver_unregister(&stmpe_gpio_driver);
+}
+module_exit(stmpe_gpio_exit);
+
+MODULE_DESCRIPTION("STMPE expander GPIO");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
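Besides gaining an OF match table, the stmpe hunk drops suppress_bind_attrs and adds module_exit() and the MODULE_*() tags, turning the driver into a properly unloadable module while keeping its early subsys_initcall() registration. The resulting boilerplate, generically:

static int __init foo_gpio_init(void)
{
	return platform_driver_register(&foo_gpio_driver);
}
subsys_initcall(foo_gpio_init);	/* early, ahead of ordinary module_init users */

static void __exit foo_gpio_exit(void)
{
	platform_driver_unregister(&foo_gpio_driver);
}
module_exit(foo_gpio_exit);

MODULE_DESCRIPTION("Example GPIO expander driver");
MODULE_LICENSE("GPL");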
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 5a6406d1f03a..493c027afdd6 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -113,7 +113,7 @@ static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
*
* Set the shadow value and call ltq_ebu_apply.
*/
-static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int xway_stp_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct xway_stp *chip = gpiochip_get_data(gc);
@@ -124,6 +124,8 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
if (!chip->reserved)
xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+
+ return 0;
}
/**
@@ -136,9 +138,7 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
*/
static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
{
- xway_stp_set(gc, gpio, val);
-
- return 0;
+ return xway_stp_set(gc, gpio, val);
}
/**
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 5ab394ec81e6..40064d4cf47f 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -40,8 +40,8 @@ struct syscon_gpio_data {
unsigned int bit_count;
unsigned int dat_bit_offset;
unsigned int dir_bit_offset;
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
+ int (*set)(struct gpio_chip *chip, unsigned int offset,
+ int value);
};
struct syscon_gpio_priv {
@@ -68,17 +68,17 @@ static int syscon_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(offs % SYSCON_REG_BITS));
}
-static void syscon_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int syscon_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
- regmap_update_bits(priv->syscon,
- (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
- BIT(offs % SYSCON_REG_BITS),
- val ? BIT(offs % SYSCON_REG_BITS) : 0);
+ return regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS),
+ val ? BIT(offs % SYSCON_REG_BITS) : 0);
}
static int syscon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
@@ -115,9 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- chip->set(chip, offset, val);
-
- return 0;
+ return chip->set(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -127,8 +125,8 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
-static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -144,6 +142,8 @@ static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
data);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
@@ -156,7 +156,8 @@ static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
#define KEYSTONE_LOCK_BIT BIT(0)
-static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int keystone_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -165,7 +166,7 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
if (!val)
- return;
+ return 0;
ret = regmap_update_bits(
priv->syscon,
@@ -174,6 +175,8 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS) | KEYSTONE_LOCK_BIT);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data keystone_dsp_gpio = {
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index 4b29abafecf6..ba5a8ede8912 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -90,7 +90,7 @@ static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(readl(gplr) & BIT(shift));
}
-static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
@@ -101,6 +101,8 @@ static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
guard(raw_spinlock_irqsave)(&priv->lock);
writel(BIT(shift), reg);
+
+ return 0;
}
static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -459,7 +461,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);
+EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, "GPIO_TANGIER");
static int tng_gpio_suspend(struct device *dev)
{
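The Tangier hunk is part of the kernel-wide switch of symbol namespaces from bare identifiers to string literals in EXPORT_SYMBOL_NS_GPL()'s second argument. Both halves of the contract, using the namespace shown above:

/* Exporter (this driver): */
EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, "GPIO_TANGIER");

/* Any consumer module must import the namespace, or modpost warns: */
MODULE_IMPORT_NS("GPIO_TANGIER");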
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index e8c1485b9c73..09a448ce3eec 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -7,20 +7,20 @@
* Christian Ruppert <christian.ruppert@abilis.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define TB10X_GPIO_DIR_IN (0x00000000)
#define TB10X_GPIO_DIR_OUT (0x00000001)
@@ -36,13 +36,13 @@
* @base: register base address
* @domain: IRQ domain of GPIO generated interrupts managed by this controller
* @irq: Interrupt line of parent interrupt controller
- * @gc: gpio_chip structure associated to this GPIO controller
+ * @chip: Generic GPIO chip structure associated with this GPIO controller
*/
struct tb10x_gpio {
void __iomem *base;
struct irq_domain *domain;
int irq;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
};
static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
@@ -60,16 +60,13 @@ static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
u32 mask, u32 val)
{
u32 r;
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gpio->chip);
r = tb10x_reg_read(gpio, offs);
r = (r & ~mask) | (val & mask);
tb10x_reg_write(gpio, offs, r);
-
- raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
}
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -107,6 +104,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct tb10x_gpio *tb10x_gpio;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -127,9 +125,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label =
+ tb10x_gpio->chip.gc.label =
devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
- if (!tb10x_gpio->gc.label)
+ if (!tb10x_gpio->chip.gc.label)
return -ENOMEM;
/*
@@ -137,29 +135,30 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
* the lines, no special set or clear registers and a data direction register
* wher 1 means "output".
*/
- ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
- tb10x_gpio->base + OFFSET_TO_REG_DATA,
- NULL,
- NULL,
- tb10x_gpio->base + OFFSET_TO_REG_DDR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = tb10x_gpio->base + OFFSET_TO_REG_DATA,
+ .dirout = tb10x_gpio->base + OFFSET_TO_REG_DDR,
+ };
+
+ ret = gpio_generic_chip_init(&tb10x_gpio->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- tb10x_gpio->gc.base = -1;
- tb10x_gpio->gc.parent = dev;
- tb10x_gpio->gc.owner = THIS_MODULE;
+ tb10x_gpio->chip.gc.base = -1;
+ tb10x_gpio->chip.gc.parent = dev;
+ tb10x_gpio->chip.gc.owner = THIS_MODULE;
/*
- * ngpio is set by bgpio_init() but we override it, this .request()
- * callback also overrides the one set up by generic GPIO.
+ * ngpio is set by gpio_generic_chip_init() but we override it, this
+ * .request() callback also overrides the one set up by generic GPIO.
*/
- tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.request = gpiochip_generic_request;
- tb10x_gpio->gc.free = gpiochip_generic_free;
+ tb10x_gpio->chip.gc.ngpio = ngpio;
+ tb10x_gpio->chip.gc.request = gpiochip_generic_request;
+ tb10x_gpio->chip.gc.free = gpiochip_generic_free;
- ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(dev, &tb10x_gpio->chip.gc, tb10x_gpio);
if (ret < 0) {
dev_err(dev, "Could not add gpiochip.\n");
return ret;
@@ -174,7 +173,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
+ tb10x_gpio->chip.gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
@@ -183,15 +182,15 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_add_linear(np,
- tb10x_gpio->gc.ngpio,
- &irq_generic_chip_ops, NULL);
+ tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev),
+ tb10x_gpio->chip.gc.ngpio,
+ &irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
- tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
+ tb10x_gpio->chip.gc.ngpio, 1, tb10x_gpio->chip.gc.label,
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
@@ -219,9 +218,9 @@ static void tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
- if (tb10x_gpio->gc.to_irq) {
+ if (tb10x_gpio->chip.gc.to_irq) {
irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
- BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
+ BIT(tb10x_gpio->chip.gc.ngpio) - 1, 0, 0);
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
@@ -235,7 +234,7 @@ MODULE_DEVICE_TABLE(of, tb10x_gpio_dt_ids);
static struct platform_driver tb10x_gpio_driver = {
.probe = tb10x_gpio_probe,
- .remove_new = tb10x_gpio_remove,
+ .remove = tb10x_gpio_remove,
.driver = {
.name = "tb10x-gpio",
.of_match_table = tb10x_gpio_dt_ids,
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index e62ee7e56908..90d048f9da08 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -49,7 +49,7 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(ret & mask);
}
-static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -57,7 +57,7 @@ static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
unsigned int pos = offset % 8;
u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
- tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
+ return tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}
static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
@@ -67,8 +67,11 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODIR0 + offset / 8;
unsigned int pos = offset % 8;
+ int ret;
- tc3589x_gpio_set(chip, offset, val);
+ ret = tc3589x_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos));
}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6d3a39a03f58..15a5762a82c2 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -146,12 +146,14 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
tegra_gpio_disable(tgi, offset);
}
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
+
+ return 0;
}
static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -600,7 +602,7 @@ static void tegra_gpio_irq_print_chip(struct irq_data *d, struct seq_file *s)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- seq_printf(s, dev_name(chip->parent));
+ seq_puts(s, dev_name(chip->parent));
}
static const struct irq_chip tegra_gpio_irq_chip = {
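The seq_printf() to seq_puts() change in the Tegra hunks is more than style: passing a runtime string as the format argument means any '%' in a device name would be parsed as a conversion specifier. When there is nothing to format, emit the string verbatim:

/* Fragile: dev_name() becomes the format string. */
seq_printf(s, dev_name(chip->parent));

/* Correct: print the string as-is. */
seq_puts(s, dev_name(chip->parent));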
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 1ecb733a5e88..4d3db6e06eeb 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -20,6 +20,7 @@
#include <dt-bindings/gpio/tegra194-gpio.h>
#include <dt-bindings/gpio/tegra234-gpio.h>
#include <dt-bindings/gpio/tegra241-gpio.h>
+#include <dt-bindings/gpio/tegra256-gpio.h>
/* security registers */
#define TEGRA186_GPIO_CTL_SCR 0x0c
@@ -202,6 +203,28 @@ static int tegra186_init_valid_mask(struct gpio_chip *chip,
return 0;
}
+static int tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ if (level == 0)
+ value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+ else
+ value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+
+ writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
+
+ return 0;
+}
+
static int tegra186_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -249,9 +272,12 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
struct tegra_gpio *gpio = gpiochip_get_data(chip);
void __iomem *base;
u32 value;
+ int ret;
/* configure output level first */
- chip->set(chip, offset, level);
+ ret = tegra186_gpio_set(chip, offset, level);
+ if (ret)
+ return ret;
base = tegra186_gpio_get_base(gpio, offset);
if (WARN_ON(base == NULL))
@@ -359,26 +385,6 @@ static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
return value & BIT(0);
}
-static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int level)
-{
- struct tegra_gpio *gpio = gpiochip_get_data(chip);
- void __iomem *base;
- u32 value;
-
- base = tegra186_gpio_get_base(gpio, offset);
- if (WARN_ON(base == NULL))
- return;
-
- value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
- if (level == 0)
- value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
- else
- value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
-
- writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
-}
-
static int tegra186_gpio_set_config(struct gpio_chip *chip,
unsigned int offset,
unsigned long config)
@@ -610,7 +616,7 @@ static void tegra186_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip tegra186_gpio_irq_chip = {
@@ -823,6 +829,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
struct device_node *np;
+ struct resource *res;
char **names;
int err;
@@ -842,19 +849,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->num_banks++;
/* get register apertures */
- gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
- if (IS_ERR(gpio->secure)) {
- gpio->secure = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(gpio->secure))
- return PTR_ERR(gpio->secure);
- }
-
- gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
- if (IS_ERR(gpio->base)) {
- gpio->base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(gpio->base))
- return PTR_ERR(gpio->base);
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->secure = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
err = platform_irq_count(pdev);
if (err < 0)
@@ -1273,6 +1280,30 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.has_vm_support = false,
};
+#define TEGRA256_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA256_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra256_main_ports[] = {
+ TEGRA256_MAIN_GPIO_PORT(A, 0, 0, 8),
+ TEGRA256_MAIN_GPIO_PORT(B, 0, 1, 8),
+ TEGRA256_MAIN_GPIO_PORT(C, 0, 2, 8),
+ TEGRA256_MAIN_GPIO_PORT(D, 0, 3, 8),
+};
+
+static const struct tegra_gpio_soc tegra256_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra256_main_ports),
+ .ports = tegra256_main_ports,
+ .name = "tegra256-gpio-main",
+ .instance = 1,
+ .num_irqs_per_bank = 8,
+ .has_vm_support = true,
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -1293,6 +1324,9 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra234-gpio-aon",
.data = &tegra234_aon_soc
}, {
+ .compatible = "nvidia,tegra256-gpio",
+ .data = &tegra256_main_soc
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 5b851e904c11..be96853063ba 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -116,8 +116,8 @@ static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
return 0;
}
-static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
- int value)
+static int thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
+ int value)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
int bank = line / 64;
@@ -127,6 +127,8 @@ static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
(bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);
writeq(BIT_ULL(bank_bit), reg);
+
+ return 0;
}
static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
@@ -269,9 +271,9 @@ static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
return masked_bits != 0;
}
-static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int thunderx_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
{
int bank;
u64 set_bits, clear_bits;
@@ -283,6 +285,8 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
}
+
+ return 0;
}
static void thunderx_gpio_irq_ack(struct irq_data *d)
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fad979797486..f488939dd00a 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -80,10 +80,9 @@ static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
}
-static void timbgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int timbgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
- timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+ return timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
}
static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
@@ -103,20 +102,26 @@ static void timbgpio_irq_disable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ gpiochip_disable_irq(&tgpio->gpio, hwirq);
}
static void timbgpio_irq_enable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
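+ /* Mark the line as in use by the IRQ subsystem before unmasking it */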
+ gpiochip_enable_irq(&tgpio->gpio, hwirq);
+
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
@@ -132,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
u32 ver;
int ret = 0;
- if (offset < 0 || offset > tgpio->gpio.ngpio)
+ if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;
ver = ioread32(tgpio->membase + TGPIO_VER);
@@ -205,11 +210,13 @@ static void timbgpio_irq(struct irq_desc *desc)
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
-static struct irq_chip timbgpio_irqchip = {
+static const struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
.irq_enable = timbgpio_irq_enable,
.irq_disable = timbgpio_irq_disable,
.irq_set_type = timbgpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index effb7b8ff81f..866ff2d436d5 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -25,7 +25,7 @@ struct tpic2810 {
struct mutex lock;
};
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value);
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value);
static int tpic2810_get_direction(struct gpio_chip *chip,
unsigned offset)
@@ -34,19 +34,11 @@ static int tpic2810_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
-static int tpic2810_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- /* This device is output only */
- return -EINVAL;
-}
-
static int tpic2810_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* This device is output-only */
- tpic2810_set(chip, offset, value);
- return 0;
+ return tpic2810_set(chip, offset, value);
}
static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
@@ -68,22 +60,25 @@ static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
mutex_unlock(&gpio->lock);
}
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value)
{
tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0);
+
+ return 0;
}
-static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
tpic2810_set_mask_bits(chip, *mask, *bits);
+
+ return 0;
}
static const struct gpio_chip template_chip = {
.label = "tpic2810",
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
- .direction_input = tpic2810_direction_input,
.direction_output = tpic2810_direction_output,
.set = tpic2810_set,
.set_multiple = tpic2810_set_multiple,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 8f5827554e1e..84b17b83476f 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -37,10 +37,8 @@ static int tps65086_gpio_direction_output(struct gpio_chip *chip,
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
-
- return 0;
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -55,13 +53,13 @@ static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
return val & BIT(4 + offset);
}
-static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int tps65086_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static const struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d7d9d50dcddf..3b4c41f5ef55 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -34,34 +34,28 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (TPS65218_ENABLE2_GPIO1 << offset));
}
-static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65218_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
struct tps65218 *tps65218 = tps65218_gpio->tps65218;
if (value)
- tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
- else
- tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
+ return tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+
+ return tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
}
static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
/* Only drives GPOs */
- tps65218_gpio_set(gc, offset, value);
- return 0;
-}
-
-static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
-{
- return -EPERM;
+ return tps65218_gpio_set(gc, offset, value);
}
static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
@@ -174,7 +168,6 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
- .direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
.set = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index cd1f17041f8c..158f63bcf10c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * GPIO driver for TI TPS65219 PMICs
+ * GPIO driver for TI TPS65214/TPS65215/TPS65219 PMICs
*
- * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2022, 2025 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/bits.h>
@@ -13,22 +13,48 @@
#include <linux/regmap.h>
#define TPS65219_GPIO0_DIR_MASK BIT(3)
-#define TPS65219_GPIO0_OFFSET 2
-#define TPS65219_GPIO0_IDX 0
-#define TPS65219_GPIO_DIR_IN 1
-#define TPS65219_GPIO_DIR_OUT 0
+#define TPS65214_GPIO0_DIR_MASK BIT(1)
+#define TPS6521X_GPIO0_OFFSET 2
+#define TPS6521X_GPIO0_IDX 0
+
+/*
+ * TPS65214 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin9 ) -> bit_offset 0
+ *
+ * TPS65215 & TPS65219 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin8 ) -> bit_offset 0
+ * Linux gpio offset 2 -> GPO2 (pin17) -> bit_offset 1
+ */
struct tps65219_gpio {
+ int (*change_dir)(struct gpio_chip *gc, unsigned int offset, unsigned int dir);
struct gpio_chip gpio_chip;
struct tps65219 *tps;
};
+static int tps65214_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ if (offset != TPS6521X_GPIO0_IDX)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, &val);
+ if (ret)
+ return ret;
+
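+ /* DIR bit set means output; maps onto GPIO_LINE_DIRECTION_OUT (0) vs _IN (1) */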
+ return !(val & TPS65214_GPIO0_DIR_MASK);
+}
+
static int tps65219_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
int ret, val;
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
@@ -44,7 +70,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct device *dev = gpio->tps->dev;
int ret, val;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot get\n", offset);
return -ENOTSUPP;
}
@@ -61,25 +87,24 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
* status bit.
*/
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return -ENOTSUPP;
return ret;
}
-static void tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
- struct device *dev = gpio->tps->dev;
int v, mask, bit;
- bit = (offset == TPS65219_GPIO0_IDX) ? TPS65219_GPIO0_OFFSET : offset - 1;
+ bit = (offset == TPS6521X_GPIO0_IDX) ? TPS6521X_GPIO0_OFFSET : offset - 1;
mask = BIT(bit);
v = value ? mask : 0;
- if (regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, mask, v))
- dev_err(dev, "GPIO%d, set to value %d failed.\n", offset, value);
+ return regmap_update_bits(gpio->tps->regmap,
+ TPS65219_REG_GENERAL_CONFIG, mask, v);
}
static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
@@ -114,34 +139,76 @@ static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int off
return -ENOTSUPP;
}
+static int tps65214_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
+ unsigned int direction)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = gpio->tps->dev;
+ int val, ret;
+
+ /*
+ * The caller already verified whether this is the GPIO pin or an
+ * output-only GPO. Masked value: 0 = GPIO, 1 = VSEL.
+ */
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ ret = !!(val & TPS65219_GPIO0_DIR_MASK);
+ if (ret)
+ dev_err(dev, "GPIO%d configured as VSEL, not GPIO\n", offset);
+
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG,
+ TPS65214_GPIO0_DIR_MASK, direction);
+ if (ret)
+ dev_err(dev, "Fail to change direction to %u for GPIO%d.\n", direction, offset);
+
+ return ret;
+}
+
static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
struct device *dev = gpio->tps->dev;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot change to input\n", offset);
return -ENOTSUPP;
}
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_IN)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_IN);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+
tps65219_gpio_set(gc, offset, value);
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return 0;
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_OUT);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
+static const struct gpio_chip tps65214_template_chip = {
+ .label = "tps65214-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps65214_gpio_get_direction,
+ .direction_input = tps65219_gpio_direction_input,
+ .direction_output = tps65219_gpio_direction_output,
+ .get = tps65219_gpio_get,
+ .set = tps65219_gpio_set,
+ .base = -1,
+ .ngpio = 2,
+ .can_sleep = true,
+};
+
static const struct gpio_chip tps65219_template_chip = {
.label = "tps65219-gpio",
.owner = THIS_MODULE,
@@ -157,6 +224,7 @@ static const struct gpio_chip tps65219_template_chip = {
static int tps65219_gpio_probe(struct platform_device *pdev)
{
+ enum pmic_id chip = platform_get_device_id(pdev)->driver_data;
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65219_gpio *gpio;
@@ -164,22 +232,38 @@ static int tps65219_gpio_probe(struct platform_device *pdev)
if (!gpio)
return -ENOMEM;
+ if (chip == TPS65214) {
+ gpio->gpio_chip = tps65214_template_chip;
+ gpio->change_dir = tps65214_gpio_change_direction;
+ } else if (chip == TPS65219) {
+ gpio->gpio_chip = tps65219_template_chip;
+ gpio->change_dir = tps65219_gpio_change_direction;
+ } else {
+ return -ENODATA;
+ }
+
gpio->tps = tps;
- gpio->gpio_chip = tps65219_template_chip;
gpio->gpio_chip.parent = tps->dev;
return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
}
+static const struct platform_device_id tps6521x_gpio_id_table[] = {
+ { "tps65214-gpio", TPS65214 },
+ { "tps65219-gpio", TPS65219 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_gpio_id_table);
+
static struct platform_driver tps65219_gpio_driver = {
.driver = {
.name = "tps65219-gpio",
},
.probe = tps65219_gpio_probe,
+ .id_table = tps6521x_gpio_id_table,
};
module_platform_driver(tps65219_gpio_driver);
-MODULE_ALIAS("platform:tps65219-gpio");
MODULE_AUTHOR("Jonathan Cormier <jcormier@criticallink.com>");
-MODULE_DESCRIPTION("TPS65219 GPIO driver");
+MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index d277aa951143..aaacbb54bf5d 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -40,13 +40,13 @@ static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (1 << offset));
}
-static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps6586x_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
- tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
- value << offset, 1 << offset);
+ return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
+ value << offset, 1 << offset);
}
static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -54,8 +54,11 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
uint8_t val, mask;
+ int ret;
- tps6586x_gpio_set(gc, offset, value);
+ ret = tps6586x_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
val = 0x1 << (offset * 2);
mask = 0x3 << (offset * 2);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 187d21580573..25e9f41efe78 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -36,18 +36,18 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65910_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
- else
- regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
+ return regmap_set_bits(tps65910->regmap,
+ TPS65910_GPIO0 + offset, GPIO_SET_MASK);
+
+ return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -55,9 +55,12 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ int ret;
/* Set the initial value */
- tps65910_gpio_set(gc, offset, value);
+ ret = tps65910_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index fab771cb6a87..7a2c5685c2fd 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
GPIO_CFG_MASK, GPIO_CFG_MASK);
@@ -73,13 +76,13 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
static const struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 532deaddfd4e..d4fbdf90e190 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -70,8 +70,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
GPIO_LINE_DIRECTION_IN;
}
-static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
@@ -82,7 +82,8 @@ static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
offset -= TPS68470_N_REGULAR_GPIO;
}
- regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(regmap, reg, BIT(offset),
+ value ? BIT(offset) : 0);
}
static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
@@ -90,9 +91,12 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ int ret;
/* Set the initial value */
- tps68470_gpio_set(gc, offset, value);
+ ret = tps68470_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index f2e7e8754d95..27dd09273292 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -29,18 +29,22 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
-#define TQMX86_GPII_NONE 0
-#define TQMX86_GPII_FALLING BIT(0)
-#define TQMX86_GPII_RISING BIT(1)
-/* Stored in irq_type as a trigger type, but not actually valid as a register
- * value, so the name doesn't use "GPII"
+/*
+ * NONE, FALLING and RISING use the same bit patterns that can be programmed to
+ * the GPII register (after passing them to the TQMX86_GPII_ macros to shift
+ * them to the right position)
*/
-#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
-#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
-#define TQMX86_GPII_BITS 2
+#define TQMX86_INT_TRIG_NONE 0
+#define TQMX86_INT_TRIG_FALLING BIT(0)
+#define TQMX86_INT_TRIG_RISING BIT(1)
+#define TQMX86_INT_TRIG_BOTH (BIT(0) | BIT(1))
+#define TQMX86_INT_TRIG_MASK (BIT(0) | BIT(1))
/* Stored in irq_type with GPII bits */
#define TQMX86_INT_UNMASKED BIT(2)
+#define TQMX86_GPIIC_CONFIG(i, v) ((v) << (2 * (i)))
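+/* Each GPI has a 2-bit trigger field in the GPIIC register */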
+#define TQMX86_GPIIC_MASK(i) TQMX86_GPIIC_CONFIG(i, TQMX86_INT_TRIG_MASK)
+
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
@@ -48,7 +52,7 @@ struct tqmx86_gpio_data {
/* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
DECLARE_BITMAP(output, TQMX86_NGPIO);
- u8 irq_type[TQMX86_NGPI];
+ u8 irq_type[TQMX86_NGPIO];
};
static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg)
@@ -62,6 +66,18 @@ static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val,
iowrite8(val, gd->io_base + reg);
}
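+/* Read-modify-write helper; the locking requirement is annotated for sparse via __must_hold() */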
+static void tqmx86_gpio_clrsetbits(struct tqmx86_gpio_data *gpio,
+ u8 clr, u8 set, unsigned int reg)
+ __must_hold(&gpio->spinlock)
+{
+ u8 val = tqmx86_gpio_read(gpio, reg);
+
+ val &= ~clr;
+ val |= set;
+
+ tqmx86_gpio_write(gpio, val, reg);
+}
+
static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
@@ -69,127 +85,139 @@ static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset));
}
-static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
+ int value)
+ __must_hold(&gpio->spinlock)
{
- struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
__assign_bit(offset, gpio->output, value);
tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+}
+
+static int tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
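+ /* guard() from <linux/cleanup.h> releases the lock automatically at scope exit */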
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
+
+ return 0;
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
- /* Direction cannot be changed. Validate is an input. */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return 0;
- else
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ tqmx86_gpio_clrsetbits(gpio, BIT(offset), 0, TQMX86_GPIODD);
+
+ return 0;
}
static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset,
int value)
{
- /* Direction cannot be changed, validate is an output */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
+ tqmx86_gpio_clrsetbits(gpio, 0, BIT(offset), TQMX86_GPIODD);
- tqmx86_gpio_set(chip, offset, value);
return 0;
}
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- if (TQMX86_DIR_INPUT_MASK & BIT(offset))
- return GPIO_LINE_DIRECTION_IN;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ u8 val;
+
+ val = tqmx86_gpio_read(gpio, TQMX86_GPIODD);
+
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
-static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int hwirq)
__must_hold(&gpio->spinlock)
{
- u8 type = TQMX86_GPII_NONE, gpiic;
+ u8 type = TQMX86_INT_TRIG_NONE;
+ int gpiic_irq = hwirq - TQMX86_NGPO;
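+ /* hwirq space starts with the GPOs; the GPIIC register only covers the GPIs */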
- if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
- type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+ if (gpio->irq_type[hwirq] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK;
- if (type == TQMX86_INT_BOTH)
- type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
- ? TQMX86_GPII_FALLING
- : TQMX86_GPII_RISING;
+ if (type == TQMX86_INT_TRIG_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, hwirq)
+ ? TQMX86_INT_TRIG_FALLING
+ : TQMX86_INT_TRIG_RISING;
}
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
- gpiic |= type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ tqmx86_gpio_clrsetbits(gpio,
+ TQMX86_GPIIC_MASK(gpiic_irq),
+ TQMX86_GPIIC_CONFIG(gpiic_irq, type),
+ TQMX86_GPIIC);
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
+ }
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
}
static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
- unsigned long flags;
u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
- new_type = TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_TRIG_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- new_type = TQMX86_GPII_FALLING;
+ new_type = TQMX86_INT_TRIG_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_INT_BOTH;
+ new_type = TQMX86_INT_TRIG_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
- gpio->irq_type[offset] |= new_type;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_TRIG_MASK;
+ gpio->irq_type[data->hwirq] |= new_type;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
return 0;
}
@@ -199,8 +227,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits, flags;
- int i;
+ unsigned long irq_bits;
+ int i, hwirq;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -210,32 +238,38 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
irq_bits = irq_status;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
- /*
- * Edge-both triggers are implemented by flipping the edge
- * trigger after each interrupt, as the controller only supports
- * either rising or falling edge triggers, but not both.
- *
- * Internally, the TQMx86 GPIO controller has separate status
- * registers for rising and falling edge interrupts. GPIIC
- * configures which bits from which register are visible in the
- * interrupt status register GPIIS and defines what triggers the
- * parent IRQ line. Writing to GPIIS always clears both rising
- * and falling interrupt flags internally, regardless of the
- * currently configured trigger.
- *
- * In consequence, we can cleanly implement the edge-both
- * trigger in software by first clearing the interrupt and then
- * setting the new trigger based on the current GPIO input in
- * tqmx86_gpio_irq_config() - even if an edge arrives between
- * reading the input and setting the trigger, we will have a new
- * interrupt pending.
- */
- if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
- tqmx86_gpio_irq_config(gpio, i);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ hwirq = i + TQMX86_NGPO;
+
+ /*
+ * Edge-both triggers are implemented by flipping the
+ * edge trigger after each interrupt, as the controller
+ * only supports either rising or falling edge triggers,
+ * but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate
+ * status registers for rising and falling edge
+ * interrupts. GPIIC configures which bits from which
+ * register are visible in the interrupt status register
+ * GPIIS and defines what triggers the parent IRQ line.
+ * Writing to GPIIS always clears both rising and
+ * falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the
+ * edge-both trigger in software by first clearing the
+ * interrupt and then setting the new trigger based on
+ * the current GPIO input in tqmx86_gpio_irq_config() -
+ * even if an edge arrives between reading the input and
+ * setting the trigger, we will have a new interrupt
+ * pending.
+ */
+ if ((gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK) ==
+ TQMX86_INT_TRIG_BOTH)
+ tqmx86_gpio_irq_config(gpio, hwirq);
+ }
}
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
@@ -275,7 +309,7 @@ static void tqmx86_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- seq_printf(p, gc->label);
+ seq_puts(p, gc->label);
}
static const struct irq_chip tqmx86_gpio_irq_chip = {
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 4748e3d47106..992ee231db9f 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -6,9 +6,10 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define DEFAULT_PIN_NUMBER 16
#define INPUT_REG_OFFSET 0x00
@@ -17,13 +18,14 @@
static int ts4800_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node;
- struct gpio_chip *chip;
+ struct gpio_generic_chip_config config;
+ struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
void __iomem *base_addr;
int retval;
u32 ngpios;
- chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -31,29 +33,28 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
- node = pdev->dev.of_node;
- if (!node)
- return -EINVAL;
-
- retval = of_property_read_u32(node, "ngpios", &ngpios);
+ retval = device_property_read_u32(dev, "ngpios", &ngpios);
if (retval == -EINVAL)
ngpios = DEFAULT_PIN_NUMBER;
else if (retval)
return retval;
- retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
- base_addr + OUTPUT_REG_OFFSET, NULL,
- base_addr + DIRECTION_REG_OFFSET, NULL, 0);
- if (retval) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- return retval;
- }
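+ /* sz is the register width in bytes, so 2 selects 16-bit registers */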
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 2,
+ .dat = base_addr + INPUT_REG_OFFSET,
+ .set = base_addr + OUTPUT_REG_OFFSET,
+ .dirout = base_addr + DIRECTION_REG_OFFSET,
+ };
- chip->ngpio = ngpios;
+ retval = gpio_generic_chip_init(chip, &config);
+ if (retval)
+ return dev_err_probe(dev, retval,
+ "failed to initialize the generic GPIO chip\n");
- platform_set_drvdata(pdev, chip);
+ chip->gc.ngpio = ngpios;
- return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+ return devm_gpiochip_add_data(dev, &chip->gc, NULL);
}
static const struct of_device_id ts4800_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 0f6397b77c9d..d9ee8fc77ccd 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -8,8 +8,8 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
-#include <linux/of.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#define DEFAULT_PIN_NUMBER 32
@@ -95,16 +95,16 @@ static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg & priv->input_bit);
}
-static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
if (value)
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
- TS4900_GPIO_OUT);
- else
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
+ return regmap_update_bits(priv->regmap, offset,
+ TS4900_GPIO_OUT, TS4900_GPIO_OUT);
+
+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
}
static const struct regmap_config ts4900_regmap_config = {
@@ -142,7 +142,7 @@ static int ts4900_gpio_probe(struct i2c_client *client)
u32 ngpio;
int ret;
- if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
+ if (device_property_read_u32(&client->dev, "ngpios", &ngpio))
ngpio = DEFAULT_PIN_NUMBER;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
@@ -153,7 +153,7 @@ static int ts4900_gpio_probe(struct i2c_client *client)
priv->gpio_chip.label = "ts4900-gpio";
priv->gpio_chip.ngpio = ngpio;
priv->gpio_chip.parent = &client->dev;
- priv->input_bit = (uintptr_t)of_device_get_match_data(&client->dev);
+ priv->input_bit = (uintptr_t)device_get_match_data(&client->dev);
priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
if (IS_ERR(priv->regmap)) {
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 90f8e9e9915e..3c7f2efe10fd 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -244,7 +244,7 @@ static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val)
return 0;
}
-static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
@@ -256,6 +256,8 @@ static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
else
ts5500_clear_mask(line.value_mask, line.value_addr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -433,7 +435,7 @@ static struct platform_driver ts5500_dio_driver = {
.name = "ts5500-dio",
},
.probe = ts5500_dio_probe,
- .remove_new = ts5500_dio_remove,
+ .remove = ts5500_dio_remove,
.id_table = ts5500_dio_ids,
};
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index bcd692229c7c..a851702befde 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -120,7 +120,7 @@ static u8 cached_leden;
* external pullup is needed. We could also expose the integrated PWM
* as a LED brightness control; we initialize it as "always on".
*/
-static void twl4030_led_set_value(int led, int value)
+static int twl4030_led_set_value(int led, int value)
{
u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
@@ -132,8 +132,8 @@ static void twl4030_led_set_value(int led, int value)
else
cached_leden |= mask;
- WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
- TWL4030_LED_LEDEN_REG));
+ return twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN_REG);
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
@@ -278,7 +278,7 @@ static void twl_free(struct gpio_chip *chip, unsigned offset)
mutex_lock(&priv->mutex);
if (offset >= TWL4030_GPIO_MAX) {
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
+ WARN_ON_ONCE(twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1));
goto out;
}
@@ -334,15 +334,16 @@ out:
return ret;
}
-static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
- twl4030_set_gpio_dataout(offset, value);
+ ret = twl4030_set_gpio_dataout(offset, value);
else
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+ ret = twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
if (value)
priv->out_state |= BIT(offset);
@@ -350,6 +351,8 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
priv->out_state &= ~BIT(offset);
mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
@@ -373,9 +376,7 @@ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
- twl_set(chip, offset, value);
-
- return ret;
+ return twl_set(chip, offset, value);
}
static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -502,7 +503,6 @@ static void gpio_twl4030_power_off_action(void *data)
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
struct gpio_twl4030_priv *priv;
int ret, irq_base;
@@ -524,8 +524,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ irq_domain_create_legacy(dev_fwnode(&pdev->dev), TWL4030_GPIO_MAX, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
if (ret < 0)
@@ -597,9 +597,7 @@ no_irqs:
ret = devm_add_action_or_reset(&pdev->dev, gpio_twl4030_power_off_action, d);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to install power off handler\n");
-
+ return ret;
}
return 0;
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 6c3fbf382dba..4ec9bcd40439 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -22,7 +22,7 @@
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret = 0;
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
@@ -37,29 +37,30 @@ static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- /* This only drives GPOs, and can't change direction */
- return 0;
-}
-
-static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl6040gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
u8 gpoctl;
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
if (ret < 0)
- return;
+ return ret;
if (value)
gpoctl = ret | BIT(offset);
else
gpoctl = ret & ~BIT(offset);
- twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+ return twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+}
+
+static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ /* This only drives GPOs, and can't change direction */
+ return twl6040gpo_set(chip, offset, value);
}
static struct gpio_chip twl6040gpo_chip = {
@@ -91,7 +92,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, twl6040);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index da99ba13e82d..197bb1d22b3c 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -138,14 +138,16 @@ static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
}
-static void uniphier_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int uniphier_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+
+ return 0;
}
-static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int uniphier_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
@@ -156,6 +158,8 @@ static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);
}
+
+ return 0;
}
static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -481,7 +485,7 @@ MODULE_DEVICE_TABLE(of, uniphier_gpio_match);
static struct platform_driver uniphier_gpio_driver = {
.probe = uniphier_gpio_probe,
- .remove_new = uniphier_gpio_remove,
+ .remove = uniphier_gpio_remove,
.driver = {
.name = "uniphier-gpio",
.of_match_table = uniphier_gpio_match,
diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c
new file mode 100644
index 000000000000..34d42c743d5b
--- /dev/null
+++ b/drivers/gpio/gpio-usbio.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+struct usbio_gpio_bank {
+ u8 config[USBIO_GPIOSPERBANK];
+ u32 bitmap;
+};
+
+struct usbio_gpio {
+ struct mutex config_mutex; /* Protects banks[x].config */
+ struct usbio_gpio_bank banks[USBIO_MAX_GPIOBANKS];
+ struct gpio_chip gc;
+ struct auxiliary_device *adev;
+};
+
+static const struct acpi_device_id usbio_gpio_acpi_hids[] = {
+ { "INTC1007" }, /* MTL */
+ { "INTC10B2" }, /* ARL */
+ { "INTC10B5" }, /* LNL */
+ { "INTC10D1" }, /* MTL-CVF */
+ { "INTC10E2" }, /* PTL */
+ { }
+};
+
+static void usbio_gpio_get_bank_and_pin(struct gpio_chip *gc, unsigned int offset,
+ struct usbio_gpio_bank **bank_ret,
+ unsigned int *pin_ret)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = &gpio->adev->dev;
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+
+ bank = &gpio->banks[offset / USBIO_GPIOSPERBANK];
+ pin = offset % USBIO_GPIOSPERBANK;
+ if (~bank->bitmap & BIT(pin)) {
+ /* The FW bitmap is sometimes invalid; warn and continue */
+ dev_warn_once(dev, FW_BUG "GPIO %u is not in FW pins bitmap\n", offset);
+ }
+
+ *bank_ret = bank;
+ *pin_ret = pin;
+}
+
+static int usbio_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+ u8 cfg;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ cfg = bank->config[pin] & USBIO_GPIO_PINMOD_MASK;
+
+ return (cfg == USBIO_GPIO_PINMOD_OUTPUT) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int usbio_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+ int ret;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
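+ /* The request omits the value field; the reply comes back with it filled in */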
+ ret = usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_READ,
+ &gbuf, sizeof(gbuf) - sizeof(gbuf.value),
+ &gbuf, sizeof(gbuf));
+ if (ret != sizeof(gbuf))
+ return (ret < 0) ? ret : -EPROTO;
+
+ return (le32_to_cpu(gbuf.value) >> pin) & 1;
+}
+
+static int usbio_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+ gbuf.value = cpu_to_le32(value << pin);
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_WRITE,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_update_config(struct gpio_chip *gc, unsigned int offset,
+ u8 mask, u8 value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_init gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ guard(mutex)(&gpio->config_mutex);
+
+ bank->config[pin] &= ~mask;
+ bank->config[pin] |= value;
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.config = bank->config[pin];
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_INIT,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_INPUT));
+}
+
+static int usbio_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+
+ ret = usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_OUTPUT));
+ if (ret)
+ return ret;
+
+ return usbio_gpio_set(gc, offset, value);
+}
+
+static int usbio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ u8 value;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_DEFAULT);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLUP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLDOWN);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PUSHPULL);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINCFG_MASK, value);
+}
+
+static int usbio_gpio_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_gpio_bank_desc *bank_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_gpio *gpio;
+ int bank, ret;
+
+ bank_desc = dev_get_platdata(dev);
+ if (!bank_desc)
+ return -EINVAL;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &gpio->config_mutex);
+ if (ret)
+ return ret;
+
+ gpio->adev = adev;
+
+ usbio_acpi_bind(gpio->adev, usbio_gpio_acpi_hids);
+
+ for (bank = 0; bank < USBIO_MAX_GPIOBANKS && bank_desc[bank].bmap; bank++)
+ gpio->banks[bank].bitmap = le32_to_cpu(bank_desc[bank].bmap);
+
+ gpio->gc.label = ACPI_COMPANION(dev) ?
+ acpi_dev_name(ACPI_COMPANION(dev)) : dev_name(dev);
+ gpio->gc.parent = dev;
+ gpio->gc.owner = THIS_MODULE;
+ gpio->gc.get_direction = usbio_gpio_get_direction;
+ gpio->gc.direction_input = usbio_gpio_direction_input;
+ gpio->gc.direction_output = usbio_gpio_direction_output;
+ gpio->gc.get = usbio_gpio_get;
+ gpio->gc.set = usbio_gpio_set;
+ gpio->gc.set_config = usbio_gpio_set_config;
+ gpio->gc.base = -1;
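+ /* 'bank' holds the number of populated banks counted in the loop above */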
+ gpio->gc.ngpio = bank * USBIO_GPIOSPERBANK;
+ gpio->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+
+ return 0;
+}
+
+static const struct auxiliary_device_id usbio_gpio_id_table[] = {
+ { "usbio.usbio-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_gpio_id_table);
+
+static struct auxiliary_driver usbio_gpio_driver = {
+ .name = USBIO_GPIO_CLIENT,
+ .probe = usbio_gpio_probe,
+ .id_table = usbio_gpio_id_table
+};
+module_auxiliary_driver(usbio_gpio_driver);
+
+MODULE_DESCRIPTION("Intel USBIO GPIO driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 27eff741fe9a..aa8586d8a787 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -10,15 +10,15 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#define VF610_GPIO_PER_PORT 32
@@ -29,7 +29,7 @@ struct fsl_gpio_soc_data {
};
struct vf610_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -94,76 +94,6 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
return readl_relaxed(reg);
}
-static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = GPIO_PDIR;
-
- if (port->sdata->have_paddr) {
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- if (mask)
- offset = GPIO_PDOR;
- }
-
- return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
-}
-
-static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
-
- vf610_gpio_writel(mask, port->gpio_base + offset);
-}
-
-static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- if (port->sdata->have_paddr) {
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val &= ~mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_input(chip, gpio);
-}
-
-static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
- int value)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- vf610_gpio_set(chip, gpio, value);
-
- if (port->sdata->have_paddr) {
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val |= mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_output(chip, gpio);
-}
-
-static int vf610_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
-
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
-
- if (mask)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
static void vf610_gpio_irq_handler(struct irq_desc *desc)
{
struct vf610_gpio_port *port =
@@ -179,7 +109,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_domain_irq(port->gc.irq.domain, pin);
+ generic_handle_domain_irq(port->chip.gc.irq.domain, pin);
}
chained_irq_exit(chip, desc);
@@ -285,10 +215,12 @@ static void vf610_gpio_disable_clk(void *data)
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
+ unsigned long flags;
int i;
int ret;
bool dual_base;
@@ -297,7 +229,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- port->sdata = of_device_get_match_data(dev);
+ port->sdata = device_get_match_data(dev);
dual_base = port->sdata->have_dual_base;
@@ -363,24 +295,31 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return ret;
}
- gc = &port->gc;
- gc->parent = dev;
- gc->label = dev_name(dev);
- gc->ngpio = VF610_GPIO_PER_PORT;
- gc->base = -1;
-
- gc->request = gpiochip_generic_request;
- gc->free = gpiochip_generic_free;
- gc->direction_input = vf610_gpio_direction_input;
- gc->get = vf610_gpio_get;
- gc->direction_output = vf610_gpio_direction_output;
- gc->set = vf610_gpio_set;
+ gc = &port->chip.gc;
+ flags = GPIO_GENERIC_PINCTRL_BACKEND;
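+ /* The pinctrl backend takes over the pinctrl_gpio_* calls dropped above */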
/*
- * only IP has Port Data Direction Register(PDDR) can
- * support get direction
+ * Only read the output register back for the current value of output
+ * lines when a direction register is available, since only then can
+ * the direction actually be switched.
*/
if (port->sdata->have_paddr)
- gc->get_direction = vf610_gpio_get_direction;
+ flags |= GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = port->gpio_base + GPIO_PDIR,
+ .set = port->gpio_base + GPIO_PDOR,
+ .dirout = port->sdata->have_paddr ?
+ port->gpio_base + GPIO_PDDR : NULL,
+ .flags = flags,
+ };
+
+ ret = gpio_generic_chip_init(&port->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
+ gc->label = dev_name(dev);
+ gc->base = -1;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
@@ -413,4 +352,6 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-builtin_platform_driver(vf610_gpio_driver);
+module_platform_driver(vf610_gpio_driver);
+MODULE_DESCRIPTION("VF610 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e55d28a8a66f..15e495c109d2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -128,45 +128,50 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip,
return answer;
}
-static void vprbrd_gpioa_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpioa_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
+ int ret = 0;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
- if (gpio->gpioa_out & (1 << offset)) {
- if (value)
- gpio->gpioa_val |= (1 << offset);
- else
- gpio->gpioa_val &= ~(1 << offset);
-
- mutex_lock(&vb->lock);
-
- gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
- gamsg->clk = 0x00;
- gamsg->offset = offset;
- gamsg->t1 = 0x00;
- gamsg->t2 = 0x00;
- gamsg->invert = 0x00;
- gamsg->pwmlevel = 0x00;
- gamsg->outval = value;
- gamsg->risefall = 0x00;
- gamsg->answer = 0x00;
- gamsg->__fill = 0x00;
-
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gamsg,
- sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
-
- mutex_unlock(&vb->lock);
-
- if (ret != sizeof(struct vprbrd_gpioa_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (!(gpio->gpioa_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
@@ -304,37 +309,42 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip,
return (gpio->gpiob_val >> offset) & 0x1;
}
-static void vprbrd_gpiob_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpiob_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
- if (gpio->gpiob_out & (1 << offset)) {
- if (value)
- gpio->gpiob_val |= (1 << offset);
- else
- gpio->gpiob_val &= ~(1 << offset);
+ if (!(gpio->gpiob_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
- mutex_lock(&vb->lock);
+ mutex_lock(&vb->lock);
- gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
- gbmsg->val = cpu_to_be16(value << offset);
- gbmsg->mask = cpu_to_be16(0x0001 << offset);
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gbmsg,
- sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
- mutex_unlock(&vb->lock);
+ mutex_unlock(&vb->lock);
- if (ret != sizeof(struct vprbrd_gpiob_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (ret != sizeof(struct vprbrd_gpiob_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
@@ -368,16 +378,14 @@ static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
gpio->gpiob_out |= (1 << offset);
mutex_lock(&vb->lock);
-
ret = vprbrd_gpiob_setdir(vb, offset, 1);
- if (ret)
- dev_err(chip->parent, "usb error setting pin to output\n");
-
mutex_unlock(&vb->lock);
+ if (ret) {
+ dev_err(chip->parent, "usb error setting pin to output\n");
+ return ret;
+ }
- vprbrd_gpiob_set(chip, offset, value);
-
- return ret;
+ return vprbrd_gpiob_set(chip, offset, value);
}
/* ----- end of gpio b chip ---------------------------------------------- */
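
Both viperboard setters illustrate the other change that runs through this series: gpio_chip .set callbacks are converted from void to int so that bus errors can propagate to callers instead of only being logged. The contract, sketched with hypothetical foo_* names:

    static int foo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
    {
            struct foo *priv = gpiochip_get_data(chip);
            int ret;

            ret = foo_bus_write(priv, offset, value);       /* hypothetical bus helper */
            if (ret < 0) {
                    dev_err(chip->parent, "bus error setting pin value\n");
                    return ret;     /* propagated instead of swallowed */
            }

            return 0;
    }

A nice side effect is visible in vprbrd_gpiob_direction_output() above: once the setter reports a result, direction_output can simply tail-call it rather than logging and returning a stale ret.
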
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 93544ff62513..17e040991e46 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -194,11 +194,12 @@ static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return ret ? ret : value;
}
-static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
- virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
+ return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value,
+ NULL);
}
/* Interrupt handling */
@@ -350,19 +351,6 @@ static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
mutex_unlock(&vgpio->irq_lock);
}
-static struct irq_chip vgpio_irq_chip = {
- .name = "virtio-gpio",
- .irq_enable = virtio_gpio_irq_enable,
- .irq_disable = virtio_gpio_irq_disable,
- .irq_mask = virtio_gpio_irq_mask,
- .irq_unmask = virtio_gpio_irq_unmask,
- .irq_set_type = virtio_gpio_irq_set_type,
-
- /* These are required to implement irqchip for slow busses */
- .irq_bus_lock = virtio_gpio_irq_bus_lock,
- .irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
-};
-
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
struct vgpio_irq_line *irq_line)
{
@@ -539,9 +527,9 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
static int virtio_gpio_probe(struct virtio_device *vdev)
{
- struct virtio_gpio_config config;
struct device *dev = &vdev->dev;
struct virtio_gpio *vgpio;
+ struct irq_chip *gpio_irq_chip;
u32 gpio_names_size;
u16 ngpio;
int ret, i;
@@ -551,9 +539,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Read configuration */
- virtio_cread_bytes(vdev, 0, &config, sizeof(config));
- gpio_names_size = le32_to_cpu(config.gpio_names_size);
- ngpio = le16_to_cpu(config.ngpio);
+ gpio_names_size =
+ virtio_cread32(vdev, offsetof(struct virtio_gpio_config,
+ gpio_names_size));
+ ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config,
+ ngpio));
if (!ngpio) {
dev_err(dev, "Number of GPIOs can't be zero\n");
return -EINVAL;
@@ -591,13 +581,26 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
if (!vgpio->irq_lines)
return -ENOMEM;
+ gpio_irq_chip = devm_kzalloc(dev, sizeof(*gpio_irq_chip), GFP_KERNEL);
+ if (!gpio_irq_chip)
+ return -ENOMEM;
+
+ gpio_irq_chip->name = dev_name(dev);
+ gpio_irq_chip->irq_enable = virtio_gpio_irq_enable;
+ gpio_irq_chip->irq_disable = virtio_gpio_irq_disable;
+ gpio_irq_chip->irq_mask = virtio_gpio_irq_mask;
+ gpio_irq_chip->irq_unmask = virtio_gpio_irq_unmask;
+ gpio_irq_chip->irq_set_type = virtio_gpio_irq_set_type;
+ gpio_irq_chip->irq_bus_lock = virtio_gpio_irq_bus_lock;
+ gpio_irq_chip->irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock;
+
/* The event comes from the outside so no parent handler */
vgpio->gc.irq.parent_handler = NULL;
vgpio->gc.irq.num_parents = 0;
vgpio->gc.irq.parents = NULL;
vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
vgpio->gc.irq.handler = handle_level_irq;
- vgpio->gc.irq.chip = &vgpio_irq_chip;
+ vgpio->gc.irq.chip = gpio_irq_chip;
for (i = 0; i < ngpio; i++) {
vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
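
The static vgpio_irq_chip shared by every instance is replaced with a devm-allocated copy named after the device. A sketch of the idea; the foo_* callbacks are placeholders:

    static int foo_setup_irq_chip(struct device *dev, struct gpio_chip *gc)
    {
            struct irq_chip *ic;

            ic = devm_kzalloc(dev, sizeof(*ic), GFP_KERNEL);
            if (!ic)
                    return -ENOMEM;

            ic->name = dev_name(dev);       /* unique per controller instance */
            ic->irq_mask = foo_irq_mask;
            ic->irq_unmask = foo_irq_unmask;

            gc->irq.chip = ic;
            return 0;
    }

With several virtio-gpio devices present, each irqchip now shows up under its own device name rather than a shared "virtio-gpio" label.
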
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 91b6352c957c..a10eab7d2617 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -11,7 +11,6 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -37,6 +36,8 @@
#include <linux/string_helpers.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_VIRTUSER_NAME_BUF_LEN 32
static DEFINE_IDA(gpio_virtuser_ida);
@@ -214,9 +215,7 @@ static int gpio_virtuser_set_array_value(struct gpio_descs *descs,
struct gpio_virtuser_irq_work_context ctx;
if (!atomic)
- return gpiod_set_array_value_cansleep(descs->ndescs,
- descs->desc,
- descs->info, values);
+ return gpiod_multi_set_value_cansleep(descs, values);
gpio_virtuser_init_irq_work_context(&ctx);
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic);
@@ -400,10 +399,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
char buf[32], *trimmed;
int ret, dir, val = 0;
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret < 0)
return ret;
+ buf[ret] = '\0';
+
trimmed = strim(buf);
if (strcmp(trimmed, "input") == 0) {
@@ -622,12 +626,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
user_buf, count);
if (ret < 0)
return ret;
- buf[strlen(buf) - 1] = '\0';
+ buf[ret] = '\0';
ret = gpiod_set_consumer_name(data->ad.desc, buf);
if (ret)
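
Both virtuser write handlers had the same latent bug: simple_write_to_buffer() does not NUL-terminate, and the old code ran strlen() over a stack buffer that was never zeroed. The fixed pattern, sketched with an illustrative handler:

    static ssize_t foo_write(struct file *file, const char __user *user_buf,
                             size_t count, loff_t *ppos)
    {
            char buf[32];
            int ret;

            if (count >= sizeof(buf))       /* reject what cannot fit plus NUL */
                    return -EINVAL;

            ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
            if (ret < 0)
                    return ret;

            buf[ret] = '\0';        /* terminate at the bytes actually copied */

            /* ... parse the now NUL-terminated string ... */
            return count;
    }
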
@@ -973,49 +980,17 @@ static struct platform_driver gpio_virtuser_driver = {
};
struct gpio_virtuser_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- struct platform_device *pdev;
int id;
struct mutex lock;
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_lookup_table *lookup_table;
struct list_head lookup_list;
};
-static int gpio_virtuser_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_virtuser_device *vdev;
- struct device *dev = data;
- char devname[32];
-
- vdev = container_of(nb, struct gpio_virtuser_device, bus_notifier);
- snprintf(devname, sizeof(devname), "gpio-virtuser.%d", vdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_BOUND_DRIVER:
- vdev->driver_bound = true;
- break;
- case BUS_NOTIFY_DRIVER_NOT_BOUND:
- vdev->driver_bound = false;
- break;
- default:
- return NOTIFY_DONE;
- }
-
- complete(&vdev->probe_completion);
- return NOTIFY_OK;
-}
-
static struct gpio_virtuser_device *
to_gpio_virtuser_device(struct config_item *item)
{
@@ -1029,7 +1004,7 @@ gpio_virtuser_device_is_live(struct gpio_virtuser_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
struct gpio_virtuser_lookup {
@@ -1369,7 +1344,7 @@ gpio_virtuser_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -1410,7 +1385,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
struct gpio_virtuser_lookup_entry *entry;
struct gpio_virtuser_lookup *lookup;
- unsigned int i = 0;
+ unsigned int i = 0, idx;
lockdep_assert_held(&dev->lock);
@@ -1424,12 +1399,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return -ENOMEM;
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ idx = 0;
list_for_each_entry(entry, &lookup->entry_list, siblings) {
- table->table[i] =
+ table->table[i++] =
GPIO_LOOKUP_IDX(entry->key,
entry->offset < 0 ? U16_MAX : entry->offset,
- lookup->con_id, i, entry->flags);
- i++;
+ lookup->con_id, idx++, entry->flags);
}
}
@@ -1439,6 +1414,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return 0;
}
+static void
+gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
+{
+ gpiod_remove_lookup_table(dev->lookup_table);
+ kfree(dev->lookup_table->dev_id);
+ kfree(dev->lookup_table);
+ dev->lookup_table = NULL;
+}
+
static struct fwnode_handle *
gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
{
@@ -1469,7 +1453,6 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
int ret;
lockdep_assert_held(&dev->lock);
@@ -1487,34 +1470,21 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdevinfo.fwnode = swnode;
ret = gpio_virtuser_make_lookup_table(dev);
- if (ret) {
- fwnode_remove_software_node(swnode);
- return ret;
- }
-
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- fwnode_remove_software_node(swnode);
- return PTR_ERR(pdev);
- }
+ if (ret)
+ goto err_remove_swnode;
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret)
+ goto err_remove_lookup_table;
- if (!dev->driver_bound) {
- platform_device_unregister(pdev);
- fwnode_remove_software_node(swnode);
- return -ENXIO;
- }
+ return 0;
- dev->pdev = pdev;
+err_remove_lookup_table:
+ gpio_virtuser_remove_lookup_table(dev);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
- return 0;
+ return ret;
}
static void
@@ -1524,12 +1494,34 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
+ gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
- dev->pdev = NULL;
- gpiod_remove_lookup_table(dev->lookup_table);
- kfree(dev->lookup_table);
+}
+
+static void
+gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_virtuser_lookup_entry *entry;
+ struct gpio_virtuser_lookup *lookup;
+
+ /*
+ * The device only needs to depend on leaf lookup entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ list_for_each_entry(entry, &lookup->entry_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &entry->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &entry->group.cg_item);
+ }
+ }
}
static ssize_t
@@ -1544,15 +1536,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_virtuser_device_lockup_configfs(dev, true);
- if (live == gpio_virtuser_device_is_live(dev))
- return -EPERM;
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_virtuser_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_virtuser_device_activate(dev);
+ else
+ gpio_virtuser_device_deactivate(dev);
+ }
- if (live)
- ret = gpio_virtuser_device_activate(dev);
- else
- gpio_virtuser_device_deactivate(dev);
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_virtuser_device_lockup_configfs(dev, false);
return ret ?: count;
}
@@ -1725,8 +1726,7 @@ gpio_virtuser_config_make_device_group(struct config_group *group,
&gpio_virtuser_device_config_group_type);
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->lookup_list);
- dev->bus_notifier.notifier_call = gpio_virtuser_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
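
The subtle part of the new live_store logic is the undepend condition. Dependencies are taken up front only when live == 1, and `live == !!ret` covers exactly the cases where they must not stay held; a sketch of the case analysis:

    /*
     * live == 1, ret == 0:  activation succeeded, the live device keeps
     *                       the configfs dependencies             -> keep
     * live == 1, ret != 0:  activation failed (or was already live),
     *                       drop the dependencies taken up front  -> undepend
     * live == 0, ret == 0:  deactivation succeeded, release what the
     *                       formerly live device held             -> undepend
     * live == 0, ret != 0:  -EPERM, device was not live and nothing
     *                       was taken up front                    -> keep (no-op)
     */
    if (live == !!ret)
            gpio_virtuser_device_lockup_configfs(dev, false);
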
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index ebc71ecdb6cf..6d5d829634ad 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -32,7 +33,7 @@
struct visconti_gpio {
void __iomem *base;
spinlock_t lock; /* protect gpio register */
- struct gpio_chip gpio_chip;
+ struct gpio_generic_chip chip;
struct device *dev;
};
@@ -142,7 +143,7 @@ static void visconti_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct visconti_gpio *priv = gpiochip_get_data(gc);
- seq_printf(p, dev_name(priv->dev));
+ seq_puts(p, dev_name(priv->dev));
}
static const struct irq_chip visconti_gpio_irq_chip = {
@@ -158,6 +159,7 @@ static const struct irq_chip visconti_gpio_irq_chip = {
static int visconti_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct visconti_gpio *priv;
struct gpio_irq_chip *girq;
@@ -189,19 +191,22 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gpio_chip, dev, 4,
- priv->base + GPIO_IDATA,
- priv->base + GPIO_OSET,
- priv->base + GPIO_OCLR,
- priv->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = priv->base + GPIO_IDATA,
+ .set = priv->base + GPIO_OSET,
+ .clr = priv->base + GPIO_OCLR,
+ .dirout = priv->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- girq = &priv->gpio_chip.irq;
+ girq = &priv->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -210,7 +215,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- return devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ return devm_gpiochip_add_data(dev, &priv->chip.gc, priv);
}
static const struct of_device_id visconti_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 8fd6c3913d69..84b3a973a503 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -127,8 +127,7 @@ static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr)
return ret;
}
-static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
- int val)
+static int vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct vx855_gpio *vg = gpiochip_get_data(gpio);
unsigned long flags;
@@ -136,7 +135,7 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
/* True GPI cannot be switched to output mode */
if (nr < NR_VX855_GPI)
- return;
+ return -EPERM;
spin_lock_irqsave(&vg->lock, flags);
reg_out = inl(vg->io_gpo);
@@ -153,6 +152,8 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
}
outl(reg_out, vg->io_gpo);
spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
}
static int vx855gpio_direction_output(struct gpio_chip *gpio,
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 2bba27b13947..572b85e77370 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ int ret;
- regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
- WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ if (ret)
+ return ret;
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
WCD_PIN_MASK(pin),
@@ -65,12 +68,13 @@ static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
return !!(value & WCD_PIN_MASK(pin));
}
-static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+static int wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
- regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
- WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0);
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
}
static int wcd_gpio_probe(struct platform_device *pdev)
@@ -99,7 +103,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->can_sleep = false;
+ chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data);
}
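
Flipping can_sleep to true here is a correctness fix rather than a cosmetic one: the WCD934x sits behind a regmap on a slow bus, so its accessors may sleep, and gpiolib uses this flag to steer consumers to the sleeping API and warn on atomic-context use. In consumer terms, a small sketch:

    #include <linux/gpio/consumer.h>

    static void foo_toggle(struct gpio_desc *desc)
    {
            /* With chip->can_sleep == true, the atomic gpiod_set_value()
             * would trigger a warning here; the _cansleep variant is
             * the correct call outside atomic context. */
            gpiod_set_value_cansleep(desc, 1);
    }
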
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 94ca9d03c094..4a5e20e936a9 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
/*
* Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks:
@@ -199,18 +200,15 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
- if (value)
- regmap_set_bits(wg->regmap, reg, 1);
- else
- regmap_clear_bits(wg->regmap, reg, 1);
+ return regmap_assign_bits(wg->regmap, reg, 1, value);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
@@ -393,7 +391,7 @@ static void wcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
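
regmap_assign_bits(), as used in the wcove hunk above, collapses the usual set-or-clear branch into one call. A sketch of the equivalence, wrapped in an illustrative helper:

    static int foo_assign(struct regmap *map, unsigned int reg,
                          unsigned int mask, bool value)
    {
            /* Open-coded form replaced by the hunk above: */
            if (value)
                    return regmap_set_bits(map, reg, mask);
            return regmap_clear_bits(map, reg, mask);

            /* ... equivalent to the single call:
             *     return regmap_assign_bits(map, reg, mask, value);
             */
    }
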
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 4b61d975cc0e..dcfda738fd69 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -458,17 +458,19 @@ static int winbond_gpio_direction_out(struct gpio_chip *gc,
return 0;
}
-static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
unsigned long *base = gpiochip_get_data(gc);
const struct winbond_gpio_info *info;
+ int ret;
if (!winbond_gpio_get_info(&offset, &info))
- return;
+ return -EACCES;
- if (winbond_sio_enter(*base) != 0)
- return;
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
winbond_sio_select_logical(*base, info->dev);
@@ -481,6 +483,8 @@ static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
winbond_sio_reg_bclear(*base, info->datareg, offset);
winbond_sio_leave(*base);
+
+ return 0;
}
static struct gpio_chip winbond_gpio_chip = {
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index f7d5120ff8f1..489479d6f32b 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -16,6 +16,7 @@
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -57,13 +58,14 @@ static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm831x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
- value << offset);
+ return wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
+ value << offset);
}
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
@@ -84,9 +86,7 @@ static int wm831x_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Can only set GPIO state once it's in output mode */
- wm831x_gpio_set(chip, offset, value);
-
- return 0;
+ return wm831x_gpio_set(chip, offset, value);
}
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -159,7 +159,6 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
const char *pull, *powerdomain;
@@ -175,13 +174,13 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
seq_printf(s, " gpio-%-3d (%-20.20s) ",
- gpio, label ?: "Unrequested");
+ i, label ?: "Unrequested");
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
- gpio, reg);
+ i, reg);
seq_putc(s, '\n');
continue;
}
@@ -234,7 +233,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " %s %s %s %s%s\n"
" %s%s (0x%4x)\n",
reg & WM831X_GPN_DIR ? "in" : "out",
- wm831x_gpio_get(chip, i) ? "high" : "low",
+ str_high_low(wm831x_gpio_get(chip, i)),
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 2421cf606ed6..46923b23a72e 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -48,15 +48,16 @@ static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8350_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip);
struct wm8350 *wm8350 = wm8350_gpio->wm8350;
if (value)
- wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
- else
- wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+ return wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+
+ return wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
}
static int wm8350_gpio_direction_out(struct gpio_chip *chip,
@@ -72,9 +73,7 @@ static int wm8350_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Don't have an atomic direction/value setup */
- wm8350_gpio_set(chip, offset, value);
-
- return 0;
+ return wm8350_gpio_set(chip, offset, value);
}
static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index bf05c9b5882b..a0665cf3ff2f 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,7 +89,8 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
-static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8994_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
@@ -97,7 +98,8 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = WM8994_GPN_LVL;
- wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL,
+ value);
}
static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -192,7 +194,6 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
/* We report the GPIO even if it's not requested since
@@ -206,14 +207,13 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio,
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", i,
label ?: "Unrequested");
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
dev_err(wm8994->dev,
- "GPIO control %d read failed: %d\n",
- gpio, reg);
+ "GPIO control %d read failed: %d\n", i, reg);
seq_printf(s, "\n");
continue;
}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index bd5befa807c3..661259f026e1 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -8,20 +8,23 @@
* Quan Nguyen <qnguyen@apm.com>.
*/
-#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
#include <linux/gpio/driver.h>
-#include <linux/acpi.h>
+#include <linux/gpio/generic.h>
#include "gpiolib-acpi.h"
-/* Common property names */
-#define XGENE_NIRQ_PROPERTY "apm,nr-irqs"
-#define XGENE_NGPIO_PROPERTY "apm,nr-gpios"
-#define XGENE_IRQ_START_PROPERTY "apm,irq-start"
-
#define XGENE_DFLT_MAX_NGPIO 22
#define XGENE_DFLT_MAX_NIRQ 6
#define XGENE_DFLT_IRQ_START_PIN 8
@@ -38,7 +41,7 @@
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
- * @gc: memory-mapped GPIO controllers.
+ * @chip: Generic GPIO chip data
* @regs: GPIO register base offset
* @irq_domain: GPIO interrupt domain
* @irq_start: first GPIO pin that supports interrupts
@@ -46,7 +49,7 @@
* @parent_irq_base: Start parent HWIRQ
*/
struct xgene_gpio_sb {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct irq_domain *irq_domain;
u16 irq_start;
@@ -60,14 +63,15 @@ struct xgene_gpio_sb {
static void xgene_gpio_set_bit(struct gpio_chip *gc,
void __iomem *reg, u32 gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
u32 data;
- data = gc->read_reg(reg);
+ data = gpio_generic_read_reg(chip, reg);
if (val)
data |= GPIO_MASK(gpio);
else
data &= ~GPIO_MASK(gpio);
- gc->write_reg(reg, data);
+ gpio_generic_write_reg(chip, reg, data);
}
static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
@@ -89,9 +93,9 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_INT_LVL,
d->hwirq, lvl_type);
/* Propagate IRQ type setting to parent */
@@ -101,12 +105,32 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip xgene_gpio_sb_irq_chip = {
+static void xgene_gpio_sb_irq_mask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
+}
+
+static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
+
+ irq_chip_unmask_parent(d);
+}
+
+static const struct irq_chip xgene_gpio_sb_irq_chip = {
.name = "sbgpio",
.irq_eoi = irq_chip_eoi_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = xgene_gpio_sb_irq_mask,
+ .irq_unmask = xgene_gpio_sb_irq_unmask,
.irq_set_type = xgene_gpio_sb_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
@@ -133,15 +157,15 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
int ret;
- ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ ret = gpiochip_lock_as_irq(&priv->chip.gc, gpio);
if (ret) {
- dev_err(priv->gc.parent,
+ dev_err(priv->chip.gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
return ret;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
return 0;
}
@@ -152,8 +176,8 @@ static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
- gpiochip_unlock_as_irq(&priv->gc, gpio);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpiochip_unlock_as_irq(&priv->chip.gc, gpio);
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 0);
}
@@ -215,6 +239,7 @@ static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct xgene_gpio_sb *priv;
int ret;
void __iomem *regs;
@@ -241,33 +266,37 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gc, &pdev->dev, 4,
- regs + MPA_GPIO_IN_ADDR,
- regs + MPA_GPIO_OUT_ADDR, NULL,
- regs + MPA_GPIO_OE_ADDR, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = regs + MPA_GPIO_IN_ADDR,
+ .set = regs + MPA_GPIO_OUT_ADDR,
+ .dirout = regs + MPA_GPIO_OE_ADDR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret)
return ret;
- priv->gc.to_irq = xgene_gpio_sb_to_irq;
+ priv->chip.gc.to_irq = xgene_gpio_sb_to_irq;
/* Retrieve the start IRQ pin, use default if property not found */
priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
- if (!device_property_read_u32(&pdev->dev,
- XGENE_IRQ_START_PROPERTY, &val32))
+ if (!device_property_read_u32(&pdev->dev, "apm,irq-start", &val32))
priv->irq_start = val32;
/* Retrieve the number of IRQs, use default if property not found */
priv->nirq = XGENE_DFLT_MAX_NIRQ;
- if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
+ if (!device_property_read_u32(&pdev->dev, "apm,nr-irqs", &val32))
priv->nirq = val32;
/* Retrieve the number of GPIOs, use default if property not found */
- priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
- if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
- priv->gc.ngpio = val32;
+ priv->chip.gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ if (!device_property_read_u32(&pdev->dev, "apm,nr-gpios", &val32))
+ priv->chip.gc.ngpio = val32;
dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
- priv->gc.ngpio, priv->nirq, priv->irq_start);
+ priv->chip.gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
@@ -277,9 +306,9 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irq.domain = priv->irq_domain;
+ priv->chip.gc.irq.domain = priv->irq_domain;
- ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip.gc, priv);
if (ret) {
dev_err(&pdev->dev,
"failed to register X-Gene GPIO Standby driver\n");
@@ -290,7 +319,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
/* Register interrupt handlers for GPIO signaled ACPI Events */
- acpi_gpiochip_request_interrupts(&priv->gc);
+ acpi_gpiochip_request_interrupts(&priv->chip.gc);
return ret;
}
@@ -299,33 +328,31 @@ static void xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- acpi_gpiochip_free_interrupts(&priv->gc);
+ acpi_gpiochip_free_interrupts(&priv->chip.gc);
irq_domain_remove(priv->irq_domain);
}
static const struct of_device_id xgene_gpio_sb_of_match[] = {
- {.compatible = "apm,xgene-gpio-sb", },
- {},
+ { .compatible = "apm,xgene-gpio-sb" },
+ {}
};
MODULE_DEVICE_TABLE(of, xgene_gpio_sb_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_gpio_sb_acpi_match[] = {
- {"APMC0D15", 0},
- {},
+ { "APMC0D15" },
+ {}
};
MODULE_DEVICE_TABLE(acpi, xgene_gpio_sb_acpi_match);
-#endif
static struct platform_driver xgene_gpio_sb_driver = {
.driver = {
.name = "xgene-gpio-sb",
.of_match_table = xgene_gpio_sb_of_match,
- .acpi_match_table = ACPI_PTR(xgene_gpio_sb_acpi_match),
- },
+ .acpi_match_table = xgene_gpio_sb_acpi_match,
+ },
.probe = xgene_gpio_sb_probe,
- .remove_new = xgene_gpio_sb_remove,
+ .remove = xgene_gpio_sb_remove,
};
module_platform_driver(xgene_gpio_sb_driver);
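
The xgene-sb irqchip conversion above follows the immutable-irqchip recipe: the struct becomes const with IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS, so gpiolib no longer patches the chip at runtime, which in turn means the driver must do the gpiochip_{enable,disable}_irq() bookkeeping in its own mask/unmask paths. The shape of the pattern, mirroring the hunk with illustrative foo_* names:

    struct foo_gpio {
            struct gpio_chip gc;
    };

    static void foo_irq_mask(struct irq_data *d)
    {
            struct foo_gpio *priv = irq_data_get_irq_chip_data(d);

            irq_chip_mask_parent(d);                    /* mask first ... */
            gpiochip_disable_irq(&priv->gc, d->hwirq);  /* ... then drop bookkeeping */
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
            struct foo_gpio *priv = irq_data_get_irq_chip_data(d);

            gpiochip_enable_irq(&priv->gc, d->hwirq);   /* bookkeeping first ... */
            irq_chip_unmask_parent(d);                  /* ... then unmask */
    }

    static const struct irq_chip foo_irq_chip = {
            .name           = "foo",
            .irq_mask       = foo_irq_mask,
            .irq_unmask     = foo_irq_unmask,
            .flags          = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };
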
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index fb4b0c67aeef..4f627de3f56c 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -62,7 +62,7 @@ static void __xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
iowrite32(setval, chip->base + bank_offset);
}
-static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct xgene_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -70,6 +70,8 @@ static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
spin_lock_irqsave(&chip->lock, flags);
__xgene_gpio_set(gc, offset, val);
spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
}
static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index d445eea03687..77eb29dcc217 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -3,11 +3,12 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -28,7 +29,7 @@
#define IPROC_GPIO_CCA_INT_EDGE 0x24
struct iproc_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
spinlock_t lock;
struct device *dev;
void __iomem *base;
@@ -38,7 +39,7 @@ struct iproc_gpio_chip {
static inline struct iproc_gpio_chip *
to_iproc_gpio(struct gpio_chip *gc)
{
- return container_of(gc, struct iproc_gpio_chip, gc);
+ return container_of(to_gpio_generic_chip(gc), struct iproc_gpio_chip, gen_gc);
}
static void iproc_gpio_irq_ack(struct irq_data *d)
@@ -198,7 +199,7 @@ static void iproc_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
- seq_printf(p, dev_name(chip->dev));
+ seq_puts(p, dev_name(chip->dev));
}
static const struct irq_chip iproc_gpio_irq_chip = {
@@ -213,6 +214,7 @@ static const struct irq_chip iproc_gpio_irq_chip = {
static int iproc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
struct iproc_gpio_chip *chip;
@@ -231,21 +233,23 @@ static int iproc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + IPROC_GPIO_CCA_DIN,
- chip->base + IPROC_GPIO_CCA_DOUT,
- NULL,
- chip->base + IPROC_GPIO_CCA_OUT_EN,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + IPROC_GPIO_CCA_DIN,
+ .set = chip->base + IPROC_GPIO_CCA_DOUT,
+ .dirout = chip->base + IPROC_GPIO_CCA_OUT_EN,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init GPIO chip\n");
return ret;
}
- chip->gc.label = dev_name(dev);
+ chip->gen_gc.gc.label = dev_name(dev);
if (!of_property_read_u32(dn, "ngpios", &num_gpios))
- chip->gc.ngpio = num_gpios;
+ chip->gen_gc.gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
@@ -266,13 +270,13 @@ static int iproc_gpio_probe(struct platform_device *pdev)
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
- IRQF_SHARED, chip->gc.label, &chip->gc);
+ IRQF_SHARED, chip->gen_gc.gc.label, &chip->gen_gc.gc);
if (ret) {
dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
return ret;
}
- girq = &chip->gc.irq;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &iproc_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -282,7 +286,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ ret = devm_gpiochip_add_data(dev, &chip->gen_gc.gc, chip);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -316,7 +320,7 @@ static struct platform_driver bcm_iproc_gpio_driver = {
.of_match_table = bcm_iproc_gpio_of_match,
},
.probe = iproc_gpio_probe,
- .remove_new = iproc_gpio_remove,
+ .remove = iproc_gpio_remove,
};
module_platform_driver(bcm_iproc_gpio_driver);
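
With the gpio_chip now nested inside struct gpio_generic_chip, which is itself nested in the driver data, the downcast in to_iproc_gpio() walks out two levels via to_gpio_generic_chip(). A generalized sketch; foo_gpio is illustrative:

    struct foo_gpio {
            struct gpio_generic_chip gen_gc;    /* contains struct gpio_chip gc */
            void __iomem *base;
    };

    static inline struct foo_gpio *to_foo_gpio(struct gpio_chip *gc)
    {
            struct gpio_generic_chip *gen = to_gpio_generic_chip(gc);

            return container_of(gen, struct foo_gpio, gen_gc);
    }

gpiochip_get_data() would also work once the chip is registered with a data pointer; the container_of() form simply does not depend on what was passed to devm_gpiochip_add_data().
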
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index afcf432a1573..83675ac81077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -15,9 +15,9 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -45,8 +45,7 @@
* struct xgpio_instance - Stores information about GPIO device
* @gc: GPIO chip
* @regs: register block
- * @hw_map: GPIO pin mapping on hardware side
- * @sw_map: GPIO pin mapping on software side
+ * @map: GPIO pin mapping on hardware side
* @state: GPIO write state shadow register
* @last_irq_read: GPIO read state register from last interrupt
* @dir: GPIO direction shadow register
@@ -60,12 +59,11 @@
struct xgpio_instance {
struct gpio_chip gc;
void __iomem *regs;
- DECLARE_BITMAP(hw_map, 64);
- DECLARE_BITMAP(sw_map, 64);
+ DECLARE_BITMAP(map, 64);
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -73,33 +71,6 @@ struct xgpio_instance {
struct clk *clk;
};
-static inline int xgpio_from_bit(struct xgpio_instance *chip, int bit)
-{
- return bitmap_bitremap(bit, chip->hw_map, chip->sw_map, 64);
-}
-
-static inline int xgpio_to_bit(struct xgpio_instance *chip, int gpio)
-{
- return bitmap_bitremap(gpio, chip->sw_map, chip->hw_map, 64);
-}
-
-static inline u32 xgpio_get_value32(const unsigned long *map, int bit)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- return (map[index] >> offset) & 0xFFFFFFFFul;
-}
-
-static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= (unsigned long)v << offset;
-}
-
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
{
switch (ch) {
@@ -115,20 +86,23 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = xgpio_readreg(addr);
- xgpio_set_value32(a, bit, xgpio_readreg(addr));
+ bitmap_write(a, value, round_down(bit, 32), 32);
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = bitmap_read(a, round_down(bit, 32), 32);
- xgpio_writereg(addr, xgpio_get_value32(a, bit));
+ xgpio_writereg(addr, value);
}
static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_read_ch(chip, reg, bit, a);
@@ -136,7 +110,8 @@ static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned lon
static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_write_ch(chip, reg, bit, a);
@@ -156,7 +131,7 @@ static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned lo
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
DECLARE_BITMAP(state, 64);
xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, state);
@@ -173,20 +148,22 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
* This function writes the specified value in to the specified signal of the
* GPIO device.
*/
-static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -198,8 +175,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
* This function writes the specified values into the specified signals of the
* GPIO devices.
*/
-static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
DECLARE_BITMAP(hw_mask, 64);
DECLARE_BITMAP(hw_bits, 64);
@@ -207,10 +184,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
- bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
+ bitmap_scatter(hw_mask, mask, chip->map, 64);
+ bitmap_scatter(hw_bits, bits, chip->map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +195,9 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -234,15 +213,15 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -263,9 +242,9 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +254,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -395,20 +374,21 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
- if (xgpio_get_value32(chip->enable, bit) == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Disable per channel interrupt */
temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -422,17 +402,15 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
- u32 old_enable = xgpio_get_value32(chip->enable, bit);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), val;
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
-
- __set_bit(bit, chip->enable);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
- if (old_enable == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Clear any existing per-channel interrupts */
val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
val &= mask;
@@ -447,7 +425,9 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ __set_bit(bit, chip->enable);
+
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -462,7 +442,7 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
{
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset);
/*
* The Xilinx GPIO hardware provides a single interrupt status
@@ -502,41 +482,40 @@ static void xgpio_irqhandler(struct irq_desc *desc)
struct irq_chip *irqchip = irq_desc_get_chip(desc);
DECLARE_BITMAP(rising, 64);
DECLARE_BITMAP(falling, 64);
- DECLARE_BITMAP(all, 64);
+ DECLARE_BITMAP(hw, 64);
+ DECLARE_BITMAP(sw, 64);
int irq_offset;
u32 status;
- u32 bit;
status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
- xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
+ xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, hw);
bitmap_complement(rising, chip->last_irq_read, 64);
- bitmap_and(rising, rising, all, 64);
+ bitmap_and(rising, rising, hw, 64);
bitmap_and(rising, rising, chip->enable, 64);
bitmap_and(rising, rising, chip->rising_edge, 64);
- bitmap_complement(falling, all, 64);
+ bitmap_complement(falling, hw, 64);
bitmap_and(falling, falling, chip->last_irq_read, 64);
bitmap_and(falling, falling, chip->enable, 64);
bitmap_and(falling, falling, chip->falling_edge, 64);
- bitmap_copy(chip->last_irq_read, all, 64);
- bitmap_or(all, rising, falling, 64);
+ bitmap_copy(chip->last_irq_read, hw, 64);
+ bitmap_or(hw, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
- for_each_set_bit(bit, all, 64) {
- irq_offset = xgpio_from_bit(chip, bit);
+ bitmap_gather(sw, hw, chip->map, 64);
+ for_each_set_bit(irq_offset, sw, 64)
generic_handle_domain_irq(gc->irq.domain, irq_offset);
- }
chained_irq_exit(irqchip, desc);
}
@@ -561,9 +540,9 @@ static const struct irq_chip xgpio_irq_chip = {
*/
static int xgpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct xgpio_instance *chip;
int status = 0;
- struct device_node *np = pdev->dev.of_node;
u32 is_dual = 0;
u32 width[2];
u32 state[2];
@@ -571,14 +550,14 @@ static int xgpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
u32 temp;
- chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
/* First, check if the device is dual-channel */
- of_property_read_u32(np, "xlnx,is-dual", &is_dual);
+ device_property_read_u32(dev, "xlnx,is-dual", &is_dual);
/* Setup defaults */
memset32(width, 0, ARRAY_SIZE(width));
@@ -586,14 +565,14 @@ static int xgpio_probe(struct platform_device *pdev)
memset32(dir, 0xFFFFFFFF, ARRAY_SIZE(dir));
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &state[0]);
- of_property_read_u32(np, "xlnx,dout-default-2", &state[1]);
+ device_property_read_u32(dev, "xlnx,dout-default", &state[0]);
+ device_property_read_u32(dev, "xlnx,dout-default-2", &state[1]);
bitmap_from_arr32(chip->state, state, 64);
/* Update GPIO direction shadow register with default value */
- of_property_read_u32(np, "xlnx,tri-default", &dir[0]);
- of_property_read_u32(np, "xlnx,tri-default-2", &dir[1]);
+ device_property_read_u32(dev, "xlnx,tri-default", &dir[0]);
+ device_property_read_u32(dev, "xlnx,tri-default-2", &dir[1]);
bitmap_from_arr32(chip->dir, dir, 64);
@@ -601,30 +580,27 @@ static int xgpio_probe(struct platform_device *pdev)
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &width[0]))
+ if (device_property_read_u32(dev, "xlnx,gpio-width", &width[0]))
width[0] = 32;
if (width[0] > 32)
return -EINVAL;
- if (is_dual && of_property_read_u32(np, "xlnx,gpio2-width", &width[1]))
+ if (is_dual && device_property_read_u32(dev, "xlnx,gpio2-width", &width[1]))
width[1] = 32;
if (width[1] > 32)
return -EINVAL;
- /* Setup software pin mapping */
- bitmap_set(chip->sw_map, 0, width[0] + width[1]);
-
/* Setup hardware pin mapping */
- bitmap_set(chip->hw_map, 0, width[0]);
- bitmap_set(chip->hw_map, 32, width[1]);
+ bitmap_set(chip->map, 0, width[0]);
+ bitmap_set(chip->map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
- chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
- chip->gc.parent = &pdev->dev;
+ chip->gc.ngpio = bitmap_weight(chip->map, 64);
+ chip->gc.parent = dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
@@ -633,21 +609,21 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.free = xgpio_free;
chip->gc.set_multiple = xgpio_set_multiple;
- chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.label = dev_name(dev);
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs)) {
- dev_err(&pdev->dev, "failed to ioremap memory resource\n");
+ dev_err(dev, "failed to ioremap memory resource\n");
return PTR_ERR(chip->regs);
}
- chip->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ chip->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(chip->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n");
+ return dev_err_probe(dev, PTR_ERR(chip->clk), "input clock not found.\n");
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
xgpio_save_regs(chip);
@@ -667,8 +643,7 @@ static int xgpio_probe(struct platform_device *pdev)
gpio_irq_chip_set_chip(girq, &xgpio_irq_chip);
girq->parent_handler = xgpio_irqhandler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1,
- sizeof(*girq->parents),
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
status = -ENOMEM;
@@ -679,18 +654,18 @@ static int xgpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
skip_irq:
- status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ status = devm_gpiochip_add_data(dev, &chip->gc, chip);
if (status) {
- dev_err(&pdev->dev, "failed to add GPIO chip\n");
+ dev_err(dev, "failed to add GPIO chip\n");
goto err_pm_put;
}
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
return 0;
err_pm_put:
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
return status;
}
@@ -703,7 +678,7 @@ MODULE_DEVICE_TABLE(of, xgpio_of_match);
static struct platform_driver xgpio_plat_driver = {
.probe = xgpio_probe,
- .remove_new = xgpio_remove,
+ .remove = xgpio_remove,
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
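
The xilinx rework replaces the sw_map/hw_map pair and bitmap_bitremap() with a single map plus bitmap_scatter()/bitmap_gather(), and find_nth_bit() for single-line lookups; the lock also becomes a raw_spinlock_t, presumably because it is taken from irqchip callbacks that must not sleep under PREEMPT_RT. A worked example of the remapping, for a dual-channel chip with widths 3 and 2:

    static void foo_remap_example(void)
    {
            /*
             * map has bits 0..2 (channel 1) and 32..33 (channel 2) set:
             *
             *   software GPIO:  0  1  2   3   4
             *   hardware bit :  0  1  2  32  33
             *
             * find_nth_bit(map, 64, 3) == 32: software GPIO 3 lives at
             * hardware bit 32 (n is counted from 0).
             */
            DECLARE_BITMAP(map, 64);
            DECLARE_BITMAP(hw, 64);
            DECLARE_BITMAP(sw, 64);

            bitmap_zero(map, 64);
            bitmap_set(map, 0, 3);
            bitmap_set(map, 32, 2);

            /* Dense software mask covering GPIOs 3 and 4 ... */
            bitmap_zero(sw, 64);
            bitmap_set(sw, 3, 2);

            /* ... scatters onto hardware bits 32 and 33 ... */
            bitmap_scatter(hw, sw, map, 64);

            /* ... and gathers back to the dense software view. */
            bitmap_gather(sw, hw, map, 64);
    }
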
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index b4b52213bcd9..aede6324387f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -206,7 +206,6 @@ static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1);
return 0;
@@ -216,7 +215,6 @@ static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0);
return 0;
@@ -226,16 +224,16 @@ static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
return xlp_gpio_get_reg(priv->gpio_paddrv, gpio);
}
-static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state)
+static int xlp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state);
+
+ return 0;
}
static int xlp_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index dc2710c21c50..7f3c98f9f902 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/spi/spi.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
/* XRA1403 registers */
@@ -101,16 +102,13 @@ static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset % 8));
}
-static void xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- int ret;
struct xra1403 *xra = gpiochip_get_data(chip);
- ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
- BIT(offset % 8), value ? BIT(offset % 8) : 0);
- if (ret)
- dev_err(chip->parent, "Failed to set pin: %d, ret: %d\n",
- offset, ret);
+ return regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
+ BIT(offset % 8),
+ value ? BIT(offset % 8) : 0);
}
#ifdef CONFIG_DEBUG_FS
@@ -137,10 +135,9 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
gcr = value[XRA_GCR + 1] << 8 | value[XRA_GCR];
gsr = value[XRA_GSR + 1] << 8 | value[XRA_GSR];
for_each_requested_gpio(chip, i, label) {
- seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
- chip->base + i, label,
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s\n", i, label,
(gcr & BIT(i)) ? "in" : "out",
- (gsr & BIT(i)) ? "hi" : "lo");
+ str_hi_lo(gsr & BIT(i)));
}
}
#else
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index c8af34a6368f..4418947a10e5 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -86,12 +86,6 @@ static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
return !!(impwire & BIT(offset));
}
-static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- BUG(); /* output only; should never be called */
-}
-
static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
{
return GPIO_LINE_DIRECTION_OUT; /* output only */
@@ -109,7 +103,7 @@ static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
return !!(expstate & BIT(offset));
}
-static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
+static int xtensa_expstate_set_value(struct gpio_chip *gc, unsigned int offset,
int value)
{
unsigned long flags, saved_cpenable;
@@ -120,6 +114,8 @@ static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
__asm__ __volatile__("wrmsk_expstate %0, %1"
:: "a" (val), "a" (mask));
disable_cp(flags, saved_cpenable);
+
+ return 0;
}
static struct gpio_chip impwire_chip = {
@@ -128,7 +124,6 @@ static struct gpio_chip impwire_chip = {
.ngpio = 32,
.get_direction = xtensa_impwire_get_direction,
.get = xtensa_impwire_get_value,
- .set = xtensa_impwire_set_value,
};
static struct gpio_chip expstate_chip = {
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 2de61337ad3b..29375bea2289 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -90,7 +91,7 @@ static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
}
-static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int zevio_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct zevio_gpio *controller = gpiochip_get_data(chip);
u32 val;
@@ -104,6 +105,8 @@ static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
spin_unlock(&controller->lock);
+
+ return 0;
}
static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
@@ -169,6 +172,7 @@ static const struct gpio_chip zevio_gpio_chip = {
/* Initialization */
static int zevio_gpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct zevio_gpio *controller;
int status, i;
@@ -180,6 +184,10 @@ static int zevio_gpio_probe(struct platform_device *pdev)
controller->chip = zevio_gpio_chip;
controller->chip.parent = &pdev->dev;
+ controller->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
+ if (!controller->chip.label)
+ return -ENOMEM;
+
controller->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(controller->regs))
return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs),
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 1a42336dfc1d..0ffd76e8951f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -265,8 +265,8 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
* upper 16 bits) based on the given pin number and sets the state of a
* gpio pin to the specified value. The state is either 0 or non-zero.
*/
-static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
unsigned int reg_offset, bank_num, bank_pin_num;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
@@ -290,6 +290,8 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK);
writel_relaxed(state, gpio->base_addr + reg_offset);
+
+ return 0;
}
/**
@@ -1011,6 +1013,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
+ device_init_wakeup(&pdev->dev, 0);
gpiochip_remove(&gpio->chip);
device_set_wakeup_capable(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
@@ -1023,7 +1026,7 @@ static struct platform_driver zynq_gpio_driver = {
.of_match_table = zynq_gpio_of_match,
},
.probe = zynq_gpio_probe,
- .remove_new = zynq_gpio_remove,
+ .remove = zynq_gpio_remove,
};
module_platform_driver(zynq_gpio_driver);
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 2f3c9ebfa78d..5e651482e985 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -57,8 +57,8 @@ static int modepin_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
*
- * Return: None.
+ * Return: 0 on success, negative error otherwise.
*/
-static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
u32 bootpin_val = 0;
int ret;
@@ -77,6 +77,8 @@ static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
ret = zynqmp_pm_bootmode_write(bootpin_val);
if (ret)
pr_err("modepin: set value error %d for pin %d\n", ret, pin);
+
+ return ret;
}
/**
@@ -102,7 +104,7 @@ static int modepin_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
static int modepin_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
int state)
{
- return 0;
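+	/* Drive the requested level as part of switching to output */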
+ return modepin_gpio_set_value(chip, pin, state);
}
/**
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
new file mode 100644
index 000000000000..284e762d92c4
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI helpers for GPIO API
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+
+#include "gpiolib.h"
+#include "gpiolib-acpi.h"
+
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node: list-entry of the events list of the struct acpi_gpio_chip
+ * @handle: handle of ACPI method to execute when the IRQ triggers
+ * @handler: handler function to pass to request_irq() when requesting the IRQ
+ * @pin: GPIO pin number on the struct gpio_chip
+ * @irq: Linux IRQ number for the event, for request_irq() / free_irq()
+ * @irqflags: flags to pass to request_irq() when requesting the IRQ
+ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
+ * @irq_requested: True if request_irq() has been done
+ * @desc: struct gpio_desc for the GPIO pin for this event
+ */
+struct acpi_gpio_event {
+ struct list_head node;
+ acpi_handle handle;
+ irq_handler_t handler;
+ unsigned int pin;
+ unsigned int irq;
+ unsigned long irqflags;
+ bool irq_is_wake;
+ bool irq_requested;
+ struct gpio_desc *desc;
+};
+
+struct acpi_gpio_connection {
+ struct list_head node;
+ unsigned int pin;
+ struct gpio_desc *desc;
+};
+
+struct acpi_gpio_chip {
+ /*
+ * ACPICA requires that the first field of the context parameter
+ * passed to acpi_install_address_space_handler() is large enough
+ * to hold struct acpi_connection_info.
+ */
+ struct acpi_connection_info conn_info;
+ struct list_head conns;
+ struct mutex conn_lock;
+ struct gpio_chip *chip;
+ struct list_head events;
+ struct list_head deferred_req_irqs_list_entry;
+};
+
+/**
+ * struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
+ * @flags: GPIO initialization flags
+ * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @wake_capable: wake capability as provided by ACPI
+ * @pin_config: pin bias as provided by ACPI
+ * @polarity: interrupt polarity as provided by ACPI
+ * @triggering: triggering type as provided by ACPI
+ * @debounce: debounce timeout as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
+ */
+struct acpi_gpio_info {
+ struct acpi_device *adev;
+ enum gpiod_flags flags;
+ bool gpioint;
+ bool wake_capable;
+ int pin_config;
+ int polarity;
+ int triggering;
+ unsigned int debounce;
+ unsigned int quirks;
+};
+
+static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
+{
+ /* First check the actual GPIO device */
+ if (device_match_acpi_handle(&gc->gpiodev->dev, data))
+ return true;
+
+ /*
+ * When the ACPI device is artificially split to the banks of GPIOs,
+ * where each of them is represented by a separate GPIO device,
+ * the firmware node of the physical device may not be shared among
+ * the banks as they may require different values for the same property,
+	 * e.g., the number of GPIOs in a certain bank. In such a case the ACPI
+	 * handle of a GPIO device is NULL and cannot be used. Hence we have to check
+ * the parent device to be sure that there is no match before bailing
+ * out.
+ */
+ if (gc->parent)
+ return device_match_acpi_handle(gc->parent, data);
+
+ return false;
+}
+
+/**
+ * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
+ * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
+ * @pin: ACPI GPIO pin number (0-based, controller-relative)
+ *
+ * Returns:
+ * GPIO descriptor to use with Linux generic GPIO API.
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
+ * returned.
+ *
+ * Specifically returns %-EPROBE_DEFER if the referenced GPIO
+ * controller does not have a GPIO chip registered at the moment. This is to
+ * support probe deferral.
+ */
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ struct gpio_device *gdev __free(gpio_device_put) =
+ gpio_device_find(handle, acpi_gpiochip_find);
+ if (!gdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /*
+ * FIXME: keep track of the reference to the GPIO device somehow
+ * instead of putting it here.
+ */
+ return gpio_device_get_desc(gdev, pin);
+}
+
+static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+{
+ struct acpi_gpio_event *event = data;
+
+ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
+{
+ struct acpi_gpio_event *event = data;
+
+ acpi_execute_simple_method(event->handle, NULL, event->pin);
+
+ return IRQ_HANDLED;
+}
+
+static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
+{
+ /* The address of this function is used as a key. */
+}
+
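+/**
+ * acpi_gpio_get_irq_resource - Fetch details of an ACPI resource if it is a GPIO
+ * interrupt resource
+ * @ares: Pointer to the ACPI resource to fetch
+ * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer
+ *
+ * Returns:
+ * %true if GpioInt resource is found, %false otherwise.
+ */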
+bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ struct acpi_resource_gpio *gpio;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return false;
+
+ gpio = &ares->data.gpio;
+ if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return false;
+
+ *agpio = gpio;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+
+/**
+ * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO
+ * I/O resource
+ * @ares: Pointer to the ACPI resource to fetch
+ * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer
+ *
+ * Returns:
+ * %true if GpioIo resource is found, %false otherwise.
+ */
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ struct acpi_resource_gpio *gpio;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return false;
+
+ gpio = &ares->data.gpio;
+ if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
+ return false;
+
+ *agpio = gpio;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
+
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+ struct acpi_gpio_event *event)
+{
+ struct device *parent = acpi_gpio->chip->parent;
+ int ret, value;
+
+ ret = request_threaded_irq(event->irq, NULL, event->handler,
+ event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
+ if (ret) {
+ dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq);
+ return;
+ }
+
+ if (event->irq_is_wake)
+ enable_irq_wake(event->irq);
+
+ event->irq_requested = true;
+
+ /* Make sure we trigger the initial state of edge-triggered IRQs */
+ if (acpi_gpio_need_run_edge_events_on_boot() &&
+ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+ value = gpiod_get_raw_value_cansleep(event->desc);
+ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+ event->handler(event->irq, event);
+ }
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+ struct acpi_gpio_event *event;
+
+ list_for_each_entry(event, &acpi_gpio->events, node)
+ acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static enum gpiod_flags
+acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
+{
+ /* GpioInt() implies input configuration */
+ if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+ return GPIOD_IN;
+
+ switch (agpio->io_restriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ return GPIOD_IN;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ /*
+ * ACPI GPIO resources don't contain an initial value for the
+ * GPIO. Therefore we deduce that value from the pull field
+ * and the polarity instead. If the pin is pulled up we assume
+		 * the default to be high, if it is pulled down we assume the
+		 * default to be low, otherwise we leave the pin untouched. For
+		 * active-low polarity the values are switched. See also
+ * Documentation/firmware-guide/acpi/gpio-properties.rst.
+ */
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
+}
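+
+/*
+ * Illustrative reading of the mapping above: GpioInt() and input-only
+ * GpioIo() resources map to GPIOD_IN; an output-only GpioIo() with PullUp
+ * maps to GPIOD_OUT_HIGH (GPIOD_OUT_LOW for active-low polarity), PullDown
+ * is the mirror image, and everything else falls back to GPIOD_ASIS.
+ */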
+
+static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+ struct acpi_resource_gpio *agpio,
+ unsigned int index,
+ const char *label)
+{
+ int polarity = GPIO_ACTIVE_HIGH;
+ enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
+ unsigned int pin = agpio->pin_table[index];
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
+ if (IS_ERR(desc))
+ return desc;
+
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
+ if (ret)
+ dev_warn(chip->parent,
+ "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+ pin, ret);
+
+ return desc;
+}
+
+static bool acpi_gpio_irq_is_wake(struct device *parent,
+ const struct acpi_resource_gpio *agpio)
+{
+ unsigned int pin = agpio->pin_table[0];
+
+ if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
+ return false;
+
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
+ dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
+ return false;
+ }
+
+ return true;
+}
+
+/* Always returns AE_OK so that we keep looping over the resources */
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+ void *context)
+{
+ struct acpi_gpio_chip *acpi_gpio = context;
+ struct gpio_chip *chip = acpi_gpio->chip;
+ struct acpi_resource_gpio *agpio;
+ acpi_handle handle, evt_handle;
+ struct acpi_gpio_event *event;
+ irq_handler_t handler = NULL;
+ struct gpio_desc *desc;
+ unsigned int pin;
+ int ret, irq;
+
+ if (!acpi_gpio_get_irq_resource(ares, &agpio))
+ return AE_OK;
+
+ handle = ACPI_HANDLE(chip->parent);
+ pin = agpio->pin_table[0];
+
+ if (pin <= 255) {
+ char ev_name[8];
+ sprintf(ev_name, "_%c%02X",
+ agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
+ pin);
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+ handler = acpi_gpio_irq_handler;
+ }
+ if (!handler) {
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+ handler = acpi_gpio_irq_handler_evt;
+ }
+ if (!handler)
+ return AE_OK;
+
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
+ dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
+ return AE_OK;
+ }
+
+ desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
+ if (IS_ERR(desc)) {
+ dev_err(chip->parent,
+ "Failed to request GPIO for pin 0x%04X, err %ld\n",
+ pin, PTR_ERR(desc));
+ return AE_OK;
+ }
+
+ ret = gpiochip_lock_as_irq(chip, pin);
+ if (ret) {
+ dev_err(chip->parent,
+ "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
+ pin, ret);
+ goto fail_free_desc;
+ }
+
+ irq = gpiod_to_irq(desc);
+ if (irq < 0) {
+ dev_err(chip->parent,
+ "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
+ pin, irq);
+ goto fail_unlock_irq;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ goto fail_unlock_irq;
+
+ event->irqflags = IRQF_ONESHOT;
+ if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+ if (agpio->polarity == ACPI_ACTIVE_HIGH)
+ event->irqflags |= IRQF_TRIGGER_HIGH;
+ else
+ event->irqflags |= IRQF_TRIGGER_LOW;
+ } else {
+ switch (agpio->polarity) {
+ case ACPI_ACTIVE_HIGH:
+ event->irqflags |= IRQF_TRIGGER_RISING;
+ break;
+ case ACPI_ACTIVE_LOW:
+ event->irqflags |= IRQF_TRIGGER_FALLING;
+ break;
+ default:
+ event->irqflags |= IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING;
+ break;
+ }
+ }
+
+ event->handle = evt_handle;
+ event->handler = handler;
+ event->irq = irq;
+ event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
+ event->pin = pin;
+ event->desc = desc;
+
+ list_add_tail(&event->node, &acpi_gpio->events);
+
+ return AE_OK;
+
+fail_unlock_irq:
+ gpiochip_unlock_as_irq(chip, pin);
+fail_free_desc:
+ gpiochip_free_own_desc(desc);
+
+ return AE_OK;
+}
+
+/**
+ * acpi_gpiochip_request_interrupts() - Register ISRs for GPIO chip ACPI events
+ * @chip: GPIO chip
+ *
+ * ACPI5 platforms can use GPIO-signaled ACPI events. These GPIO interrupts are
+ * handled by ACPI event methods which need to be called from the GPIO
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
+ * GPIO pins have ACPI event methods and assigns interrupt handlers that call
+ * the ACPI event methods for those pins.
+ */
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+{
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!chip->parent || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->parent);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (acpi_quirk_skip_gpio_event_handlers())
+ return;
+
+ acpi_walk_resources(handle, METHOD_NAME__AEI,
+ acpi_gpiochip_alloc_event, acpi_gpio);
+
+ if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
+ return;
+
+ acpi_gpiochip_request_irqs(acpi_gpio);
+}
+EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
+
+/**
+ * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
+ * @chip: GPIO chip
+ *
+ * Free interrupts associated with the GPIO ACPI event methods for the given
+ * GPIO chip.
+ */
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+{
+ struct acpi_gpio_chip *acpi_gpio;
+ struct acpi_gpio_event *event, *ep;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!chip->parent || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->parent);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status))
+ return;
+
+ acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
+
+ list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+ if (event->irq_requested) {
+ if (event->irq_is_wake)
+ disable_irq_wake(event->irq);
+
+ free_irq(event->irq, event);
+ }
+
+ gpiochip_unlock_as_irq(chip, event->pin);
+ gpiochip_free_own_desc(event->desc);
+ list_del(&event->node);
+ kfree(event);
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
+
+void __init acpi_gpio_process_deferred_list(struct list_head *list)
+{
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
+ acpi_gpiochip_request_irqs(acpi_gpio);
+}
+
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ if (adev && gpios) {
+ adev->driver_gpios = gpios;
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
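+
+/*
+ * Illustrative (hypothetical) driver-side usage, mapping a "reset" line to
+ * line 1 of the first GpioIo() entry in _CRS:
+ *
+ *	static const struct acpi_gpio_params reset_gpio = { 0, 1, false };
+ *	static const struct acpi_gpio_mapping my_acpi_gpios[] = {
+ *		{ "reset-gpios", &reset_gpio, 1 },
+ *		{}
+ *	};
+ *
+ *	ret = acpi_dev_add_driver_gpios(adev, my_acpi_gpios);
+ */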
+
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+{
+ if (adev)
+ adev->driver_gpios = NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
+
+static void acpi_dev_release_driver_gpios(void *adev)
+{
+ acpi_dev_remove_driver_gpios(adev);
+}
+
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ int ret;
+
+ ret = acpi_dev_add_driver_gpios(adev, gpios);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev);
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
+
+static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
+ const char *name, int index,
+ struct fwnode_reference_args *args,
+ unsigned int *quirks)
+{
+ const struct acpi_gpio_mapping *gm;
+
+ if (!adev || !adev->driver_gpios)
+ return false;
+
+ for (gm = adev->driver_gpios; gm->name; gm++)
+ if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
+ const struct acpi_gpio_params *params = gm->data + index;
+
+ args->fwnode = acpi_fwnode_handle(adev);
+ args->args[0] = params->crs_entry_index;
+ args->args[1] = params->line_index;
+ args->args[2] = params->active_low;
+ args->nargs = 3;
+
+ *quirks = gm->quirks;
+ return true;
+ }
+
+ return false;
+}
+
+static int
+__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+{
+ const enum gpiod_flags mask =
+ GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
+ GPIOD_FLAGS_BIT_DIR_VAL;
+ int ret = 0;
+
+ /*
+ * Check if the BIOS has IoRestriction with explicitly set direction
+	 * and update @flags accordingly. Otherwise use whatever the caller
+	 * asked for.
+ */
+ if (update & GPIOD_FLAGS_BIT_DIR_SET) {
+ enum gpiod_flags diff = *flags ^ update;
+
+ /*
+		 * Check if the caller supplied incompatible GPIO initialization
+ * flags.
+ *
+ * Return %-EINVAL to notify that firmware has different
+ * settings and we are going to use them.
+ */
+ if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
+ ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
+ ret = -EINVAL;
+ *flags = (*flags & ~mask) | (update & mask);
+ }
+ return ret;
+}
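+
+/*
+ * For example (illustrative): if the caller asked for GPIOD_IN but the
+ * BIOS IoRestriction mandates output, the direction bits from @update win
+ * and -EINVAL is returned to flag the conflict.
+ */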
+
+static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
+ struct acpi_gpio_info *info)
+{
+ struct device *dev = &info->adev->dev;
+ enum gpiod_flags old = *flags;
+ int ret;
+
+ ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
+ if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+ if (ret)
+ dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+ } else {
+ if (ret)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+ *flags = old;
+ }
+
+ return ret;
+}
+
+static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ switch (info->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ *lookupflags |= GPIO_PULL_UP;
+ break;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ *lookupflags |= GPIO_PULL_DOWN;
+ break;
+ case ACPI_PIN_CONFIG_NOPULL:
+ *lookupflags |= GPIO_PULL_DISABLE;
+ break;
+ default:
+ break;
+ }
+
+ if (info->polarity == GPIO_ACTIVE_LOW)
+ *lookupflags |= GPIO_ACTIVE_LOW;
+
+ return 0;
+}
+
+struct acpi_gpio_lookup {
+ struct acpi_gpio_params params;
+ struct acpi_gpio_info *info;
+ struct gpio_desc *desc;
+ int n;
+};
+
+static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
+{
+ struct acpi_gpio_lookup *lookup = data;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return 1;
+
+ if (!lookup->desc) {
+ const struct acpi_resource_gpio *agpio = &ares->data.gpio;
+ bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+ struct gpio_desc *desc;
+ u16 pin_index;
+
+ if (info->quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+ params->crs_entry_index++;
+
+ if (lookup->n++ != params->crs_entry_index)
+ return 1;
+
+ pin_index = params->line_index;
+ if (pin_index >= agpio->pin_table_length)
+ return 1;
+
+ if (info->quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+ desc = gpio_to_desc(agpio->pin_table[pin_index]);
+ else
+ desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
+ agpio->pin_table[pin_index]);
+ lookup->desc = desc;
+ info->pin_config = agpio->pin_config;
+ info->debounce = agpio->debounce_timeout;
+ info->gpioint = gpioint;
+ info->wake_capable = acpi_gpio_irq_is_wake(&info->adev->dev, agpio);
+
+ /*
+ * Polarity and triggering are only specified for GpioInt
+		 * resources.
+ * Note: we expect here:
+ * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
+ * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
+ */
+ if (info->gpioint) {
+ info->polarity = agpio->polarity;
+ info->triggering = agpio->triggering;
+ } else {
+ info->polarity = params->active_low;
+ }
+
+ info->flags = acpi_gpio_to_gpiod_flags(agpio, info->polarity);
+ }
+
+ return 1;
+}
+
+static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup)
+{
+ struct acpi_gpio_info *info = lookup->info;
+ struct acpi_device *adev = info->adev;
+ struct list_head res_list;
+ int ret;
+
+ INIT_LIST_HEAD(&res_list);
+
+ ret = acpi_dev_get_resources(adev, &res_list,
+ acpi_populate_gpio_lookup,
+ lookup);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&res_list);
+
+ if (!lookup->desc)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, const char *propname,
+ struct acpi_gpio_lookup *lookup)
+{
+ struct fwnode_reference_args args;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+ unsigned int index = params->crs_entry_index;
+ unsigned int quirks = 0;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, &args);
+ if (ret) {
+ struct acpi_device *adev;
+
+ adev = to_acpi_device_node(fwnode);
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks))
+ return ret;
+ }
+ /*
+	 * The property was found and resolved, so we need to look up the GPIO
+	 * based on the returned args.
+ */
+ if (!to_acpi_device_node(args.fwnode))
+ return -EINVAL;
+ if (args.nargs != 3)
+ return -EPROTO;
+
+ params->crs_entry_index = args.args[0];
+ params->line_index = args.args[1];
+ params->active_low = !!args.args[2];
+
+ info->adev = to_acpi_device_node(args.fwnode);
+ info->quirks = quirks;
+
+ return 0;
+}
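+
+/*
+ * A sketch (names hypothetical) of the _DSD property parsed above, in the
+ * documented cell order of GPIO reference, _CRS entry index, line index
+ * and active-low flag:
+ *
+ *	Package () { "reset-gpios", Package () { ^GPO1, 0, 0, 1 } }
+ *
+ * which resolves to crs_entry_index 0, line_index 0 and active_low true.
+ */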
+
+/**
+ * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
+ * @adev: pointer to a ACPI device to get GPIO from
+ * @propname: Property name of the GPIO (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
+ *
+ * The function goes through the ACPI resources of @adev and, based on the
+ * index stored in @lookup, looks up a GpioIo/GpioInt resource, translates it
+ * to the Linux GPIO descriptor, and returns it. The index matches
+ * GpioIo/GpioInt resources only, so if there are three GPIO resources in
+ * total, the index goes from 0 to 2.
+ *
+ * If @propname is specified, the GPIO is looked up using the device property.
+ * In that case the index selects the GPIO entry in the property value (in
+ * case there are multiple).
+ *
+ * Returns:
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with GPIO descriptor to use with Linux generic GPIO API.
+ * If the GPIO cannot be translated an error will be returned.
+ *
+ * Note: if the GPIO resource has multiple entries in the pin list, this
+ * function only returns the first.
+ */
+static int acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
+ struct acpi_gpio_lookup *lookup)
+{
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+ int ret;
+
+ if (propname) {
+ dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
+
+ ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), propname, lookup);
+ if (ret)
+ return ret;
+
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %u %u %u\n",
+ dev_name(&info->adev->dev),
+ params->crs_entry_index, params->line_index, params->active_low);
+ } else {
+ dev_dbg(&adev->dev, "GPIO: looking up %u in _CRS\n", params->crs_entry_index);
+ info->adev = adev;
+ }
+
+ return acpi_gpio_resource_lookup(lookup);
+}
+
+/**
+ * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
+ * @fwnode: pointer to an ACPI firmware node to get the GPIO information from
+ * @propname: Property name of the GPIO
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
+ *
+ * This function uses the property-based GPIO lookup to get to the GPIO
+ * resource with the relevant information from a data-only ACPI firmware node
+ * and uses that to obtain the GPIO descriptor to return.
+ *
+ * Returns:
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with GPIO descriptor to use with Linux generic GPIO API.
+ * If the GPIO cannot be translated an error will be returned.
+ */
+static int acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, const char *propname,
+ struct acpi_gpio_lookup *lookup)
+{
+ int ret;
+
+ if (!is_acpi_data_node(fwnode))
+ return -ENODEV;
+
+ if (!propname)
+ return -EINVAL;
+
+ ret = acpi_gpio_property_lookup(fwnode, propname, lookup);
+ if (ret)
+ return ret;
+
+ return acpi_gpio_resource_lookup(lookup);
+}
+
+static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
+ const char *con_id)
+{
+ /* If there is no ACPI device, there is no _CRS to fall back to */
+ if (!adev)
+ return false;
+
+ /* Never allow fallback if the device has properties */
+ if (acpi_dev_has_props(adev) || adev->driver_gpios)
+ return false;
+
+ return con_id == NULL;
+}
+
+static struct gpio_desc *
+__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx,
+ bool can_fallback, struct acpi_gpio_info *info)
+{
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_gpio_lookup lookup;
+ struct gpio_desc *desc;
+ char propname[32];
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.params.crs_entry_index = idx;
+ lookup.info = info;
+
+ /* Try first from _DSD */
+ for_each_gpio_property_name(propname, con_id) {
+ if (adev)
+ ret = acpi_get_gpiod_by_index(adev, propname, &lookup);
+ else
+ ret = acpi_get_gpiod_from_data(fwnode, propname, &lookup);
+ if (ret)
+ continue;
+
+ desc = lookup.desc;
+ if (PTR_ERR(desc) == -EPROBE_DEFER)
+ return desc;
+
+ if (!IS_ERR(desc))
+ return desc;
+ }
+
+ /* Then from plain _CRS GPIOs */
+ if (can_fallback) {
+ ret = acpi_get_gpiod_by_index(adev, NULL, &lookup);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return lookup.desc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *dflags,
+ unsigned long *lookupflags)
+{
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
+ bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
+ struct acpi_gpio_info info = {};
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
+ if (IS_ERR(desc))
+ return desc;
+
+ if (info.gpioint &&
+ (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
+ dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ acpi_gpio_update_gpiod_flags(dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return desc;
+}
+
+/**
+ * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
+ * @adev: pointer to a ACPI device to get IRQ from
+ * @con_id: optional name of GpioInt resource
+ * @index: index of GpioInt resource (starting from %0)
+ * @wake_capable: Set to true if the IRQ is wake capable
+ *
+ * If the device has one or more GpioInt resources, this function can be
+ * used to translate from the GPIO offset in the resource to the Linux IRQ
+ * number.
+ *
+ * The function is idempotent, though each time it runs it will configure GPIO
+ * pin direction according to the flags in the GpioInt resource.
+ *
+ * The function takes an optional @con_id parameter. If the resource has
+ * a @con_id in a property, then only those resources will be taken into account.
+ *
+ * The GPIO is considered wake capable if the GpioInt resource specifies
+ * SharedAndWake or ExclusiveAndWake.
+ *
+ * Returns:
+ * Linux IRQ number (> 0) on success, negative errno on failure.
+ */
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
+ bool *wake_capable)
+{
+ struct fwnode_handle *fwnode = acpi_fwnode_handle(adev);
+ int idx, i;
+ unsigned int irq_flags;
+ int ret;
+
+ for (i = 0, idx = 0; idx <= index; i++) {
+ struct acpi_gpio_info info = {};
+ struct gpio_desc *desc;
+
+ /* Ignore -EPROBE_DEFER, it only matters if idx matches */
+ desc = __acpi_find_gpio(fwnode, con_id, i, true, &info);
+ if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+ return PTR_ERR(desc);
+
+ if (info.gpioint && idx++ == index) {
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ enum gpiod_flags dflags = GPIOD_ASIS;
+ char label[32];
+ int irq;
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ irq = gpiod_to_irq(desc);
+ if (irq < 0)
+ return irq;
+
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+
+ snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index);
+ ret = gpiod_set_consumer_name(desc, con_id ?: label);
+ if (ret)
+ return ret;
+
+ ret = gpiod_configure_flags(desc, label, lflags, dflags);
+ if (ret < 0)
+ return ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+ if (ret)
+ return ret;
+
+ irq_flags = acpi_dev_get_irq_type(info.triggering,
+ info.polarity);
+
+ /*
+ * If the IRQ is not already in use then set type
+			 * if specified and different from the current one.
+ */
+ if (can_request_irq(irq, irq_flags)) {
+ if (irq_flags != IRQ_TYPE_NONE &&
+ irq_flags != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_flags);
+ } else {
+ dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
+ }
+
+ /* avoid suspend issues with GPIOs when systems are using S3 */
+ if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
+ *wake_capable = info.wake_capable;
+
+ return irq;
+ }
+
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
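+
+/*
+ * Illustrative call site (variable names hypothetical): translate the first
+ * GpioInt() of an ACPI device to a Linux IRQ and query its wake capability:
+ *
+ *	bool wakeirq;
+ *	int irq = acpi_dev_gpio_irq_wake_get_by(adev, NULL, 0, &wakeirq);
+ *	if (irq < 0)
+ *		return irq;
+ */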
+
+static acpi_status
+acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value, void *handler_context,
+ void *region_context)
+{
+ struct acpi_gpio_chip *achip = region_context;
+ struct gpio_chip *chip = achip->chip;
+ struct acpi_resource_gpio *agpio;
+ struct acpi_resource *ares;
+ u16 pin_index = address;
+ acpi_status status;
+ int length;
+ int i;
+
+ status = acpi_buffer_to_resource(achip->conn_info.connection,
+ achip->conn_info.length, &ares);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) {
+ ACPI_FREE(ares);
+ return AE_BAD_PARAMETER;
+ }
+
+ agpio = &ares->data.gpio;
+
+ if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
+ function == ACPI_WRITE)) {
+ ACPI_FREE(ares);
+ return AE_BAD_PARAMETER;
+ }
+
+ length = min_t(u16, agpio->pin_table_length, pin_index + bits);
+ for (i = pin_index; i < length; ++i) {
+ unsigned int pin = agpio->pin_table[i];
+ struct acpi_gpio_connection *conn;
+ struct gpio_desc *desc;
+ bool found;
+
+ mutex_lock(&achip->conn_lock);
+
+ found = false;
+ list_for_each_entry(conn, &achip->conns, node) {
+ if (conn->pin == pin) {
+ found = true;
+ desc = conn->desc;
+ break;
+ }
+ }
+
+ /*
+		 * The same GPIO can be shared between an operation region and
+		 * an event, but only if the access here is ACPI_READ. In that
+ * case we "borrow" the event GPIO instead.
+ */
+ if (!found && agpio->shareable == ACPI_SHARED &&
+ function == ACPI_READ) {
+ struct acpi_gpio_event *event;
+
+ list_for_each_entry(event, &achip->events, node) {
+ if (event->pin == pin) {
+ desc = event->desc;
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
+ if (IS_ERR(desc)) {
+ mutex_unlock(&achip->conn_lock);
+ status = AE_ERROR;
+ goto out;
+ }
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn) {
+ gpiochip_free_own_desc(desc);
+ mutex_unlock(&achip->conn_lock);
+ status = AE_NO_MEMORY;
+ goto out;
+ }
+
+ conn->pin = pin;
+ conn->desc = desc;
+ list_add_tail(&conn->node, &achip->conns);
+ }
+
+ mutex_unlock(&achip->conn_lock);
+
+ if (function == ACPI_WRITE)
+ gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
+ else
+ *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
+ }
+
+out:
+ ACPI_FREE(ares);
+ return status;
+}
+
+static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ acpi_handle handle = ACPI_HANDLE(chip->parent);
+ acpi_status status;
+
+ INIT_LIST_HEAD(&achip->conns);
+ mutex_init(&achip->conn_lock);
+ status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+ acpi_gpio_adr_space_handler,
+ NULL, achip);
+ if (ACPI_FAILURE(status))
+ dev_err(chip->parent,
+ "Failed to install GPIO OpRegion handler\n");
+}
+
+static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ acpi_handle handle = ACPI_HANDLE(chip->parent);
+ struct acpi_gpio_connection *conn, *tmp;
+ acpi_status status;
+
+ status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+ acpi_gpio_adr_space_handler);
+ if (ACPI_FAILURE(status)) {
+ dev_err(chip->parent,
+ "Failed to remove GPIO OpRegion handler\n");
+ return;
+ }
+
+ list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
+ gpiochip_free_own_desc(conn->desc);
+ list_del(&conn->node);
+ kfree(conn);
+ }
+}
+
+static struct gpio_desc *
+acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
+ struct fwnode_handle *fwnode,
+ const char **name,
+ unsigned long *lflags,
+ enum gpiod_flags *dflags)
+{
+ struct gpio_chip *chip = achip->chip;
+ struct gpio_desc *desc;
+ u32 gpios[2];
+ int ret;
+
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ *dflags = GPIOD_ASIS;
+ *name = NULL;
+
+ ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
+ ARRAY_SIZE(gpios));
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ desc = gpiochip_get_desc(chip, gpios[0]);
+ if (IS_ERR(desc))
+ return desc;
+
+ if (gpios[1])
+ *lflags |= GPIO_ACTIVE_LOW;
+
+ if (fwnode_property_present(fwnode, "input"))
+ *dflags |= GPIOD_IN;
+ else if (fwnode_property_present(fwnode, "output-low"))
+ *dflags |= GPIOD_OUT_LOW;
+ else if (fwnode_property_present(fwnode, "output-high"))
+ *dflags |= GPIOD_OUT_HIGH;
+ else
+ return ERR_PTR(-EINVAL);
+
+ fwnode_property_read_string(fwnode, "line-name", name);
+
+ return desc;
+}
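+
+/*
+ * Sketch of the ACPI data node consumed above: a "gpios" array holding
+ * { line, active_low }, one of the "input" / "output-low" / "output-high"
+ * boolean properties, and an optional "line-name" string.
+ */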
+
+static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+
+ device_for_each_child_node_scoped(chip->parent, fwnode) {
+ unsigned long lflags;
+ enum gpiod_flags dflags;
+ struct gpio_desc *desc;
+ const char *name;
+ int ret;
+
+ if (!fwnode_property_present(fwnode, "gpio-hog"))
+ continue;
+
+ desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
+ &lflags, &dflags);
+ if (IS_ERR(desc))
+ continue;
+
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret) {
+ dev_err(chip->parent, "Failed to hog GPIO\n");
+ return;
+ }
+ }
+}
+
+void acpi_gpiochip_add(struct gpio_chip *chip)
+{
+ struct acpi_gpio_chip *acpi_gpio;
+ struct acpi_device *adev;
+ acpi_status status;
+
+ if (!chip || !chip->parent)
+ return;
+
+ adev = ACPI_COMPANION(chip->parent);
+ if (!adev)
+ return;
+
+ acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
+ if (!acpi_gpio) {
+ dev_err(chip->parent,
+ "Failed to allocate memory for ACPI GPIO chip\n");
+ return;
+ }
+
+ acpi_gpio->chip = chip;
+ INIT_LIST_HEAD(&acpi_gpio->events);
+ INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
+
+ status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
+ if (ACPI_FAILURE(status)) {
+ dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
+ kfree(acpi_gpio);
+ return;
+ }
+
+ acpi_gpiochip_request_regions(acpi_gpio);
+ acpi_gpiochip_scan_gpios(acpi_gpio);
+ acpi_dev_clear_dependencies(adev);
+}
+
+void acpi_gpiochip_remove(struct gpio_chip *chip)
+{
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!chip || !chip->parent)
+ return;
+
+ handle = ACPI_HANDLE(chip->parent);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n");
+ return;
+ }
+
+ acpi_gpiochip_free_regions(acpi_gpio);
+
+ acpi_detach_data(handle, acpi_gpio_chip_dh);
+ kfree(acpi_gpio);
+}
+
+static int acpi_gpio_package_count(const union acpi_object *obj)
+{
+ const union acpi_object *element = obj->package.elements;
+ const union acpi_object *end = element + obj->package.count;
+ unsigned int count = 0;
+
+ while (element < end) {
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ element += 3;
+ fallthrough;
+ case ACPI_TYPE_INTEGER:
+ element++;
+ count++;
+ break;
+
+ default:
+ return -EPROTO;
+ }
+ }
+
+ return count;
+}
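+
+/*
+ * Example (illustrative): a GPIO reference spans four package elements
+ * (the reference itself plus three integer cells), so a package holding
+ * two such references counts as two GPIOs; any other element type makes
+ * the package malformed and yields -EPROTO.
+ */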
+
+static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
+{
+ unsigned int *count = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_GPIO)
+ *count += ares->data.gpio.pin_table_length;
+
+ return 1;
+}
+
+/**
+ * acpi_gpio_count - count the GPIOs associated with a firmware node / function
+ * @fwnode: firmware node of the GPIO consumer
+ * @con_id: function within the GPIO consumer
+ *
+ * Returns:
+ * The number of GPIOs associated with a firmware node / function, or %-ENOENT
+ * if no GPIO has been assigned to the requested function.
+ */
+int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
+{
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
+ const union acpi_object *obj;
+ const struct acpi_gpio_mapping *gm;
+ int count = -ENOENT;
+ int ret;
+ char propname[32];
+
+ /* Try first from _DSD */
+ for_each_gpio_property_name(propname, con_id) {
+ ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
+ if (ret == 0) {
+ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
+ count = 1;
+ else if (obj->type == ACPI_TYPE_PACKAGE)
+ count = acpi_gpio_package_count(obj);
+ } else if (adev->driver_gpios) {
+ for (gm = adev->driver_gpios; gm->name; gm++)
+ if (strcmp(propname, gm->name) == 0) {
+ count = gm->size;
+ break;
+ }
+ }
+ if (count > 0)
+ break;
+ }
+
+ /* Then from plain _CRS GPIOs */
+ if (count < 0) {
+ struct list_head resource_list;
+ unsigned int crs_count = 0;
+
+ if (!acpi_can_fallback_to_crs(adev, con_id))
+ return count;
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(adev, &resource_list,
+ acpi_find_gpio_count, &crs_count);
+ acpi_dev_free_resource_list(&resource_list);
+ if (crs_count > 0)
+ count = crs_count;
+ }
+ return count ? count : -ENOENT;
+}
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
new file mode 100644
index 000000000000..7b95d1b03361
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI quirks for GPIO ACPI helpers
+ *
+ * Author: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+ "controller@pin combos on which to ignore the ACPI wake flag "
+ "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
+/*
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
+ * (i.e., builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list)
+{
+ bool defer;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+ list_add(list, &acpi_gpio_deferred_req_irqs_list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return defer;
+}
+
+void acpi_gpio_remove_from_deferred_list(struct list_head *list)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ if (!list_empty(list))
+ list_del_init(list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+}
+
+int acpi_gpio_need_run_edge_events_on_boot(void)
+{
+ return run_edge_events_on_boot;
+}
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in)
+{
+ const char *ignore_list, *controller, *pin_str;
+ unsigned int pin;
+ char *endp;
+ int len;
+
+ switch (list) {
+ case ACPI_GPIO_IGNORE_WAKE:
+ ignore_list = ignore_wake;
+ break;
+ case ACPI_GPIO_IGNORE_INTERRUPT:
+ ignore_list = ignore_interrupt;
+ break;
+ default:
+ return false;
+ }
+
+ controller = ignore_list;
+ while (controller) {
+ pin_str = strchr(controller, '@');
+ if (!pin_str)
+ goto err;
+
+ len = pin_str - controller;
+ if (len == strlen(controller_in) &&
+ strncmp(controller, controller_in, len) == 0) {
+ pin = simple_strtoul(pin_str + 1, &endp, 10);
+ if (*endp != 0 && *endp != ',')
+ goto err;
+
+ if (pin == pin_in)
+ return true;
+ }
+
+ controller = strchr(controller, ',');
+ if (controller)
+ controller++;
+ }
+
+ return false;
+err:
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
+ return false;
+}
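+
+/*
+ * Example (illustrative) of the format parsed above, given on the kernel
+ * command line:
+ *
+ *	gpiolib_acpi.ignore_wake=INT33FC:02@12,INT33FF:01@0
+ *
+ * which ignores the ACPI wake flag on pin 12 of INT33FC:02 and on pin 0
+ * of INT33FF:01.
+ */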
+
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+struct acpi_gpiolib_dmi_quirk {
+ bool no_edge_events_on_boot;
+ char *ignore_wake;
+ char *ignore_interrupt;
+};
+
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ {
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+	 * a non-existent micro-USB-B connector, which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ * When suspending by closing the LID, the power to the USB
+ * keyboard is turned off, causing INT0002 ACPI events to
+ * trigger once the XHCI controller notices the keyboard is
+ * gone. So INT0002 events cause spurious wakeups too. Ignoring
+		 * EC wakes breaks wakeup when opening the lid; the user then
+		 * needs to press the power button to wake up the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@28",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "813E"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0",
+ },
+ },
+ {
+ /*
+		 * Interrupt storm caused by an edge-triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
+ {
+ /*
+ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
+ * a "dolby" button. At the ACPI level an _AEI event-handler
+ * is connected which sets an ACPI variable to 1 on both
+ * edges. This variable can be polled + cleared to 0 using
+ * WMI. But since the variable is set on both edges the WMI
+ * interface is pretty useless even when polling.
+ * So instead the x86-android-tablets code instantiates
+ * a gpio-keys platform device for it.
+ * Ignore the _AEI handler for the pin, so that it is not busy.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "INT33FC:00@3",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
+ * Same as G1619-04. New model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
+ {
+ /*
+		 * Wakeup only works when the keyboard backlight is turned off
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4169
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 5.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ASCP1A00:00@8",
+ },
+ },
+ {} /* Terminating entry */
+};
+
+static int __init acpi_gpio_setup_params(void)
+{
+ const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirk = id->driver_data;
+
+ if (run_edge_events_on_boot < 0) {
+ if (quirk && quirk->no_edge_events_on_boot)
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+ ignore_wake = quirk->ignore_wake;
+
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
+ return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
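The setup function above only fills in values the user has not already set:
the run_edge_events_on_boot, ignore_wake and ignore_interrupt module
parameters always take precedence over the DMI quirk table. A hedged
example, reusing a value from the table above and assuming the parameters
keep the gpiolib_acpi prefix used by pr_err_once() in the removed
gpiolib-acpi.c below:

    # kernel command line: suppress one wake source, keep edge events off
    gpiolib_acpi.ignore_wake=ELAN0415:00@9 gpiolib_acpi.run_edge_events_on_boot=0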
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
deleted file mode 100644
index 78ecd56123a3..000000000000
--- a/drivers/gpio/gpiolib-acpi.c
+++ /dev/null
@@ -1,1723 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ACPI helpers for GPIO API
- *
- * Copyright (C) 2012, Intel Corporation
- * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
- * Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/machine.h>
-
-#include "gpiolib.h"
-#include "gpiolib-acpi.h"
-
-static int run_edge_events_on_boot = -1;
-module_param(run_edge_events_on_boot, int, 0444);
-MODULE_PARM_DESC(run_edge_events_on_boot,
- "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
-
-static char *ignore_wake;
-module_param(ignore_wake, charp, 0444);
-MODULE_PARM_DESC(ignore_wake,
- "controller@pin combos on which to ignore the ACPI wake flag "
- "ignore_wake=controller@pin[,controller@pin[,...]]");
-
-static char *ignore_interrupt;
-module_param(ignore_interrupt, charp, 0444);
-MODULE_PARM_DESC(ignore_interrupt,
- "controller@pin combos on which to ignore interrupt "
- "ignore_interrupt=controller@pin[,controller@pin[,...]]");
-
-struct acpi_gpiolib_dmi_quirk {
- bool no_edge_events_on_boot;
- char *ignore_wake;
- char *ignore_interrupt;
-};
-
-/**
- * struct acpi_gpio_event - ACPI GPIO event handler data
- *
- * @node: list-entry of the events list of the struct acpi_gpio_chip
- * @handle: handle of ACPI method to execute when the IRQ triggers
- * @handler: handler function to pass to request_irq() when requesting the IRQ
- * @pin: GPIO pin number on the struct gpio_chip
- * @irq: Linux IRQ number for the event, for request_irq() / free_irq()
- * @irqflags: flags to pass to request_irq() when requesting the IRQ
- * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
- * @irq_requested: True if request_irq() has been done
- * @desc: struct gpio_desc for the GPIO pin for this event
- */
-struct acpi_gpio_event {
- struct list_head node;
- acpi_handle handle;
- irq_handler_t handler;
- unsigned int pin;
- unsigned int irq;
- unsigned long irqflags;
- bool irq_is_wake;
- bool irq_requested;
- struct gpio_desc *desc;
-};
-
-struct acpi_gpio_connection {
- struct list_head node;
- unsigned int pin;
- struct gpio_desc *desc;
-};
-
-struct acpi_gpio_chip {
- /*
- * ACPICA requires that the first field of the context parameter
- * passed to acpi_install_address_space_handler() is large enough
- * to hold struct acpi_connection_info.
- */
- struct acpi_connection_info conn_info;
- struct list_head conns;
- struct mutex conn_lock;
- struct gpio_chip *chip;
- struct list_head events;
- struct list_head deferred_req_irqs_list_entry;
-};
-
-/**
- * struct acpi_gpio_info - ACPI GPIO specific information
- * @adev: reference to ACPI device which consumes GPIO resource
- * @flags: GPIO initialization flags
- * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
- * @pin_config: pin bias as provided by ACPI
- * @polarity: interrupt polarity as provided by ACPI
- * @triggering: triggering type as provided by ACPI
- * @wake_capable: wake capability as provided by ACPI
- * @debounce: debounce timeout as provided by ACPI
- * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
- */
-struct acpi_gpio_info {
- struct acpi_device *adev;
- enum gpiod_flags flags;
- bool gpioint;
- int pin_config;
- int polarity;
- int triggering;
- bool wake_capable;
- unsigned int debounce;
- unsigned int quirks;
-};
-
-/*
- * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
- * (i.e. builtin drivers), we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync() handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains GPIO chips
- * for which the acpi_gpiochip_request_irqs() call has been deferred.
- */
-static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
-static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
-static bool acpi_gpio_deferred_req_irqs_done;
-
-static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
-{
- /* First check the actual GPIO device */
- if (device_match_acpi_handle(&gc->gpiodev->dev, data))
- return true;
-
- /*
- * When the ACPI device is artificially split to the banks of GPIOs,
- * where each of them is represented by a separate GPIO device,
- * the firmware node of the physical device may not be shared among
- * the banks as they may require different values for the same property,
- * e.g., number of GPIOs in a certain bank. In such case the ACPI handle
- * of a GPIO device is NULL and can not be used. Hence we have to check
- * the parent device to be sure that there is no match before bailing
- * out.
- */
- if (gc->parent)
- return device_match_acpi_handle(gc->parent, data);
-
- return false;
-}
-
-/**
- * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
- * @path: ACPI GPIO controller full path name (e.g. "\\_SB.GPO1")
- * @pin: ACPI GPIO pin number (0-based, controller-relative)
- *
- * Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
- *
- * Specifically returns %-EPROBE_DEFER if the referenced GPIO
- * controller does not have a GPIO chip registered at the moment. This is to
- * support probe deferral.
- */
-static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
-{
- acpi_handle handle;
- acpi_status status;
-
- status = acpi_get_handle(NULL, path, &handle);
- if (ACPI_FAILURE(status))
- return ERR_PTR(-ENODEV);
-
- struct gpio_device *gdev __free(gpio_device_put) =
- gpio_device_find(handle, acpi_gpiochip_find);
- if (!gdev)
- return ERR_PTR(-EPROBE_DEFER);
-
- /*
- * FIXME: keep track of the reference to the GPIO device somehow
- * instead of putting it here.
- */
- return gpio_device_get_desc(gdev, pin);
-}
-
-static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
-{
- struct acpi_gpio_event *event = data;
-
- acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
-{
- struct acpi_gpio_event *event = data;
-
- acpi_execute_simple_method(event->handle, NULL, event->pin);
-
- return IRQ_HANDLED;
-}
-
-static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
-{
- /* The address of this function is used as a key. */
-}
-
-bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
- struct acpi_resource_gpio **agpio)
-{
- struct acpi_resource_gpio *gpio;
-
- if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
- return false;
-
- gpio = &ares->data.gpio;
- if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
- return false;
-
- *agpio = gpio;
- return true;
-}
-EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
-
-/**
- * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a
- * GpioIo resource, or return %false if not.
- * @ares: Pointer to the ACPI resource to fetch
- * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer
- *
- * Returns:
- * %true if GpioIo resource is found, %false otherwise.
- */
-bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
- struct acpi_resource_gpio **agpio)
-{
- struct acpi_resource_gpio *gpio;
-
- if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
- return false;
-
- gpio = &ares->data.gpio;
- if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
- return false;
-
- *agpio = gpio;
- return true;
-}
-EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
-
-static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
- struct acpi_gpio_event *event)
-{
- struct device *parent = acpi_gpio->chip->parent;
- int ret, value;
-
- ret = request_threaded_irq(event->irq, NULL, event->handler,
- event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
- if (ret) {
- dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq);
- return;
- }
-
- if (event->irq_is_wake)
- enable_irq_wake(event->irq);
-
- event->irq_requested = true;
-
- /* Make sure we trigger the initial state of edge-triggered IRQs */
- if (run_edge_events_on_boot &&
- (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
- value = gpiod_get_raw_value_cansleep(event->desc);
- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
- event->handler(event->irq, event);
- }
-}
-
-static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
-{
- struct acpi_gpio_event *event;
-
- list_for_each_entry(event, &acpi_gpio->events, node)
- acpi_gpiochip_request_irq(acpi_gpio, event);
-}
-
-static enum gpiod_flags
-acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
-{
- /* GpioInt() implies input configuration */
- if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
- return GPIOD_IN;
-
- switch (agpio->io_restriction) {
- case ACPI_IO_RESTRICT_INPUT:
- return GPIOD_IN;
- case ACPI_IO_RESTRICT_OUTPUT:
- /*
- * ACPI GPIO resources don't contain an initial value for the
- * GPIO. Therefore we deduce that value from the pull field
-		 * and the polarity instead. If the pin is pulled up we assume the
-		 * default to be high, if it is pulled down we assume the default
-		 * to be low, otherwise we leave the pin untouched. For active-low
-		 * polarity the values are inverted. See also
- * Documentation/firmware-guide/acpi/gpio-properties.rst.
- */
- switch (agpio->pin_config) {
- case ACPI_PIN_CONFIG_PULLUP:
- return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
- case ACPI_PIN_CONFIG_PULLDOWN:
- return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
-}
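For reference, the GpioIo() output deduction above collapses to this table
(derived directly from the switch statements; GpioInt() and input-restricted
resources always yield GPIOD_IN):

    /*
     *   pin_config                 polarity           returned flags
     *   ACPI_PIN_CONFIG_PULLUP     GPIO_ACTIVE_HIGH   GPIOD_OUT_HIGH
     *   ACPI_PIN_CONFIG_PULLUP     GPIO_ACTIVE_LOW    GPIOD_OUT_LOW
     *   ACPI_PIN_CONFIG_PULLDOWN   GPIO_ACTIVE_HIGH   GPIOD_OUT_LOW
     *   ACPI_PIN_CONFIG_PULLDOWN   GPIO_ACTIVE_LOW    GPIOD_OUT_HIGH
     *   anything else              any                GPIOD_ASIS
     */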
-
-static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
- struct acpi_resource_gpio *agpio,
- unsigned int index,
- const char *label)
-{
- int polarity = GPIO_ACTIVE_HIGH;
- enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
- unsigned int pin = agpio->pin_table[index];
- struct gpio_desc *desc;
- int ret;
-
- desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
- if (IS_ERR(desc))
- return desc;
-
-	/* ACPI debounce timeout is in hundredths of a millisecond, i.e. 10 us units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
-
- return desc;
-}
-
-static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
- unsigned int pin_in)
-{
- const char *controller, *pin_str;
- unsigned int pin;
- char *endp;
- int len;
-
- controller = ignore_list;
- while (controller) {
- pin_str = strchr(controller, '@');
- if (!pin_str)
- goto err;
-
- len = pin_str - controller;
- if (len == strlen(controller_in) &&
- strncmp(controller, controller_in, len) == 0) {
- pin = simple_strtoul(pin_str + 1, &endp, 10);
- if (*endp != 0 && *endp != ',')
- goto err;
-
- if (pin == pin_in)
- return true;
- }
-
- controller = strchr(controller, ',');
- if (controller)
- controller++;
- }
-
- return false;
-err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
- return false;
-}
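To illustrate the controller@pin list syntax parsed above, a short sketch
(call values invented; the results follow from the code):

    /* with ignore list "INT33FF:01@0,INT0002:00@2" */
    acpi_gpio_in_ignore_list("INT33FF:01@0,INT0002:00@2", "INT33FF:01", 0); /* true */
    acpi_gpio_in_ignore_list("INT33FF:01@0,INT0002:00@2", "INT0002:00", 2); /* true */
    acpi_gpio_in_ignore_list("INT33FF:01@0,INT0002:00@2", "INT33FF:01", 1); /* false, pin differs */
    acpi_gpio_in_ignore_list("INT33FF:01@0,INT0002:00@2", "INT33FF:02", 0); /* false, controller differs */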
-
-static bool acpi_gpio_irq_is_wake(struct device *parent,
- const struct acpi_resource_gpio *agpio)
-{
- unsigned int pin = agpio->pin_table[0];
-
- if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
- return false;
-
- if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
- dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
- return false;
- }
-
- return true;
-}
-
-/* Always returns AE_OK so that we keep looping over the resources */
-static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
- void *context)
-{
- struct acpi_gpio_chip *acpi_gpio = context;
- struct gpio_chip *chip = acpi_gpio->chip;
- struct acpi_resource_gpio *agpio;
- acpi_handle handle, evt_handle;
- struct acpi_gpio_event *event;
- irq_handler_t handler = NULL;
- struct gpio_desc *desc;
- unsigned int pin;
- int ret, irq;
-
- if (!acpi_gpio_get_irq_resource(ares, &agpio))
- return AE_OK;
-
- handle = ACPI_HANDLE(chip->parent);
- pin = agpio->pin_table[0];
-
- if (pin <= 255) {
- char ev_name[8];
- sprintf(ev_name, "_%c%02X",
- agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
- pin);
- if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
- handler = acpi_gpio_irq_handler;
- }
- if (!handler) {
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
- handler = acpi_gpio_irq_handler_evt;
- }
- if (!handler)
- return AE_OK;
-
- if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
- dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
- return AE_OK;
- }
-
- desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
- if (IS_ERR(desc)) {
- dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
- return AE_OK;
- }
-
- ret = gpiochip_lock_as_irq(chip, pin);
- if (ret) {
- dev_err(chip->parent,
- "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
- pin, ret);
- goto fail_free_desc;
- }
-
- irq = gpiod_to_irq(desc);
- if (irq < 0) {
- dev_err(chip->parent,
- "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
- pin, irq);
- goto fail_unlock_irq;
- }
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- goto fail_unlock_irq;
-
- event->irqflags = IRQF_ONESHOT;
- if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
- if (agpio->polarity == ACPI_ACTIVE_HIGH)
- event->irqflags |= IRQF_TRIGGER_HIGH;
- else
- event->irqflags |= IRQF_TRIGGER_LOW;
- } else {
- switch (agpio->polarity) {
- case ACPI_ACTIVE_HIGH:
- event->irqflags |= IRQF_TRIGGER_RISING;
- break;
- case ACPI_ACTIVE_LOW:
- event->irqflags |= IRQF_TRIGGER_FALLING;
- break;
- default:
- event->irqflags |= IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING;
- break;
- }
- }
-
- event->handle = evt_handle;
- event->handler = handler;
- event->irq = irq;
- event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
- event->pin = pin;
- event->desc = desc;
-
- list_add_tail(&event->node, &acpi_gpio->events);
-
- return AE_OK;
-
-fail_unlock_irq:
- gpiochip_unlock_as_irq(chip, pin);
-fail_free_desc:
- gpiochip_free_own_desc(desc);
-
- return AE_OK;
-}
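The _AEI event-method name construction above, spelled out with example pins
(hex per the %02X format; pin values illustrative only):

    /*
     * edge-sensitive GpioInt on pin 5    ->  method _E05
     * level-sensitive GpioInt on pin 18  ->  method _L12 (0x12 == 18)
     * pin > 255 or no _Exx/_Lxx method   ->  fall back to _EVT, which
     *                                        receives the pin as argument
     */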
-
-/**
- * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
- * @chip: GPIO chip
- *
- * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
- * handled by ACPI event methods which need to be called from the GPIO
- * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
- * GPIO pins have ACPI event methods and assigns interrupt handlers that call
- * the ACPI event methods for those pins.
- */
-void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
-{
- struct acpi_gpio_chip *acpi_gpio;
- acpi_handle handle;
- acpi_status status;
- bool defer;
-
- if (!chip->parent || !chip->to_irq)
- return;
-
- handle = ACPI_HANDLE(chip->parent);
- if (!handle)
- return;
-
- status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
- if (ACPI_FAILURE(status))
- return;
-
- if (acpi_quirk_skip_gpio_event_handlers())
- return;
-
- acpi_walk_resources(handle, METHOD_NAME__AEI,
- acpi_gpiochip_alloc_event, acpi_gpio);
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- defer = !acpi_gpio_deferred_req_irqs_done;
- if (defer)
- list_add(&acpi_gpio->deferred_req_irqs_list_entry,
- &acpi_gpio_deferred_req_irqs_list);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- if (defer)
- return;
-
- acpi_gpiochip_request_irqs(acpi_gpio);
-}
-EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
-
-/**
- * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
- * @chip: GPIO chip
- *
- * Free interrupts associated with GPIO ACPI event method for the given
- * GPIO chip.
- */
-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
-{
- struct acpi_gpio_chip *acpi_gpio;
- struct acpi_gpio_event *event, *ep;
- acpi_handle handle;
- acpi_status status;
-
- if (!chip->parent || !chip->to_irq)
- return;
-
- handle = ACPI_HANDLE(chip->parent);
- if (!handle)
- return;
-
- status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
- if (ACPI_FAILURE(status))
- return;
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
- if (event->irq_requested) {
- if (event->irq_is_wake)
- disable_irq_wake(event->irq);
-
- free_irq(event->irq, event);
- }
-
- gpiochip_unlock_as_irq(chip, event->pin);
- gpiochip_free_own_desc(event->desc);
- list_del(&event->node);
- kfree(event);
- }
-}
-EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
-
-int acpi_dev_add_driver_gpios(struct acpi_device *adev,
- const struct acpi_gpio_mapping *gpios)
-{
- if (adev && gpios) {
- adev->driver_gpios = gpios;
- return 0;
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
-
-void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
-{
- if (adev)
- adev->driver_gpios = NULL;
-}
-EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
-
-static void acpi_dev_release_driver_gpios(void *adev)
-{
- acpi_dev_remove_driver_gpios(adev);
-}
-
-int devm_acpi_dev_add_driver_gpios(struct device *dev,
- const struct acpi_gpio_mapping *gpios)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- int ret;
-
- ret = acpi_dev_add_driver_gpios(adev, gpios);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev);
-}
-EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
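A hedged consumer sketch for the driver-gpios mapping API above (driver and
property names invented; the acpi_gpio_params fields follow the
crs_entry_index/line_index/active_low order used by
acpi_get_driver_gpio_data() below):

    /* con_id "reset" -> first pin of the first GpioIo/GpioInt in _CRS */
    static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

    static const struct acpi_gpio_mapping foo_acpi_gpios[] = {
    	{ "reset-gpios", &reset_gpio, 1 },
    	{ }
    };

    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	/* released automatically when the driver unbinds */
    	ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, foo_acpi_gpios);
    	if (ret)
    		return ret;

    	/* gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW) now resolves */
    	return 0;
    }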
-
-static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
- const char *name, int index,
- struct fwnode_reference_args *args,
- unsigned int *quirks)
-{
- const struct acpi_gpio_mapping *gm;
-
- if (!adev || !adev->driver_gpios)
- return false;
-
- for (gm = adev->driver_gpios; gm->name; gm++)
- if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
- const struct acpi_gpio_params *par = gm->data + index;
-
- args->fwnode = acpi_fwnode_handle(adev);
- args->args[0] = par->crs_entry_index;
- args->args[1] = par->line_index;
- args->args[2] = par->active_low;
- args->nargs = 3;
-
- *quirks = gm->quirks;
- return true;
- }
-
- return false;
-}
-
-static int
-__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
-{
- const enum gpiod_flags mask =
- GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
- GPIOD_FLAGS_BIT_DIR_VAL;
- int ret = 0;
-
- /*
- * Check if the BIOS has IoRestriction with explicitly set direction
- * and update @flags accordingly. Otherwise use whatever caller asked
- * for.
- */
- if (update & GPIOD_FLAGS_BIT_DIR_SET) {
- enum gpiod_flags diff = *flags ^ update;
-
- /*
- * Check if caller supplied incompatible GPIO initialization
- * flags.
- *
- * Return %-EINVAL to notify that firmware has different
- * settings and we are going to use them.
- */
- if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
- ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
- ret = -EINVAL;
- *flags = (*flags & ~mask) | (update & mask);
- }
- return ret;
-}
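A worked case for the merge rule above (flag values as defined in
<linux/gpio/consumer.h>; GPIOD_OUT_HIGH = DIR_SET | DIR_OUT | DIR_VAL):

    /*
     * Caller asks for GPIOD_IN, firmware IoRestriction says output-high:
     *   diff = GPIOD_IN ^ GPIOD_OUT_HIGH       -> DIR_OUT (and DIR_VAL) set
     *   (*flags & DIR_SET) && (diff & DIR_OUT) -> ret = -EINVAL
     *   *flags becomes GPIOD_OUT_HIGH anyway: the firmware settings win,
     *   the error code merely notifies the caller of the override.
     */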
-
-static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- struct acpi_gpio_info *info)
-{
- struct device *dev = &info->adev->dev;
- enum gpiod_flags old = *flags;
- int ret;
-
- ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
- if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
- if (ret)
- dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
- } else {
- if (ret)
- dev_dbg(dev, "Override GPIO initialization flags\n");
- *flags = old;
- }
-
- return ret;
-}
-
-static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
- struct acpi_gpio_info *info)
-{
- switch (info->pin_config) {
- case ACPI_PIN_CONFIG_PULLUP:
- *lookupflags |= GPIO_PULL_UP;
- break;
- case ACPI_PIN_CONFIG_PULLDOWN:
- *lookupflags |= GPIO_PULL_DOWN;
- break;
- case ACPI_PIN_CONFIG_NOPULL:
- *lookupflags |= GPIO_PULL_DISABLE;
- break;
- default:
- break;
- }
-
- if (info->polarity == GPIO_ACTIVE_LOW)
- *lookupflags |= GPIO_ACTIVE_LOW;
-
- return 0;
-}
-
-struct acpi_gpio_lookup {
- struct acpi_gpio_info info;
- int index;
- u16 pin_index;
- bool active_low;
- struct gpio_desc *desc;
- int n;
-};
-
-static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
-{
- struct acpi_gpio_lookup *lookup = data;
-
- if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
- return 1;
-
- if (!lookup->desc) {
- const struct acpi_resource_gpio *agpio = &ares->data.gpio;
- bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
- struct gpio_desc *desc;
- u16 pin_index;
-
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
- lookup->index++;
-
- if (lookup->n++ != lookup->index)
- return 1;
-
- pin_index = lookup->pin_index;
- if (pin_index >= agpio->pin_table_length)
- return 1;
-
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
- desc = gpio_to_desc(agpio->pin_table[pin_index]);
- else
- desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
- agpio->pin_table[pin_index]);
- lookup->desc = desc;
- lookup->info.pin_config = agpio->pin_config;
- lookup->info.debounce = agpio->debounce_timeout;
- lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
-
- /*
- * Polarity and triggering are only specified for GpioInt
- * resource.
- * Note: we expect here:
- * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
- * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
- */
- if (lookup->info.gpioint) {
- lookup->info.polarity = agpio->polarity;
- lookup->info.triggering = agpio->triggering;
- } else {
- lookup->info.polarity = lookup->active_low;
- }
-
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
- }
-
- return 1;
-}
-
-static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
- struct acpi_gpio_info *info)
-{
- struct acpi_device *adev = lookup->info.adev;
- struct list_head res_list;
- int ret;
-
- INIT_LIST_HEAD(&res_list);
-
- ret = acpi_dev_get_resources(adev, &res_list,
- acpi_populate_gpio_lookup,
- lookup);
- if (ret < 0)
- return ret;
-
- acpi_dev_free_resource_list(&res_list);
-
- if (!lookup->desc)
- return -ENOENT;
-
- if (info)
- *info = lookup->info;
- return 0;
-}
-
-static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
- const char *propname, int index,
- struct acpi_gpio_lookup *lookup)
-{
- struct fwnode_reference_args args;
- unsigned int quirks = 0;
- int ret;
-
- memset(&args, 0, sizeof(args));
- ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
- &args);
- if (ret) {
- struct acpi_device *adev;
-
- adev = to_acpi_device_node(fwnode);
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks))
- return ret;
- }
- /*
-	 * The property was found and resolved, so we need to look up the GPIO
-	 * based on the returned args.
- */
- if (!to_acpi_device_node(args.fwnode))
- return -EINVAL;
- if (args.nargs != 3)
- return -EPROTO;
-
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- lookup->active_low = !!args.args[2];
-
- lookup->info.adev = to_acpi_device_node(args.fwnode);
- lookup->info.quirks = quirks;
-
- return 0;
-}
-
-/**
- * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
- * @adev: pointer to a ACPI device to get GPIO from
- * @propname: Property name of the GPIO (optional)
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
- *
- * The function goes through the ACPI resources for @adev and, based on
- * @index, looks up a GpioIo/GpioInt resource, translates it to the Linux
- * GPIO descriptor, and returns it. @index counts GpioIo/GpioInt resources
- * only, so if there are %3 GPIO resources in total, the index goes from
- * %0 to %2.
- *
- * If @propname is specified the GPIO is looked up using the device property. In
- * that case @index is used to select the GPIO entry in the property value
- * (in case of multiple).
- *
- * Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
- *
- * Note: if the GPIO resource has multiple entries in the pin list, this
- * function only returns the first.
- */
-static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
-{
- struct acpi_gpio_lookup lookup;
- int ret;
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
-
- if (propname) {
- dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
-
- ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
- propname, index, &lookup);
- if (ret)
- return ERR_PTR(ret);
-
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
- dev_name(&lookup.info.adev->dev), lookup.index,
- lookup.pin_index, lookup.active_low);
- } else {
- dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.info.adev = adev;
- }
-
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
-}
-
-/**
- * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
- * @fwnode: pointer to an ACPI firmware node to get the GPIO information from
- * @propname: Property name of the GPIO
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
- *
- * This function uses the property-based GPIO lookup to get to the GPIO
- * resource with the relevant information from a data-only ACPI firmware node
- * and uses that to obtain the GPIO descriptor to return.
- *
- * Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
- */
-static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
-{
- struct acpi_gpio_lookup lookup;
- int ret;
-
- if (!is_acpi_data_node(fwnode))
- return ERR_PTR(-ENODEV);
-
- if (!propname)
- return ERR_PTR(-EINVAL);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
-
- ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
- if (ret)
- return ERR_PTR(ret);
-
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
-}
-
-static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
- const char *con_id)
-{
- /* If there is no ACPI device, there is no _CRS to fall back to */
- if (!adev)
- return false;
-
- /* Never allow fallback if the device has properties */
- if (acpi_dev_has_props(adev) || adev->driver_gpios)
- return false;
-
- return con_id == NULL;
-}
-
-static struct gpio_desc *
-__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx,
- bool can_fallback, struct acpi_gpio_info *info)
-{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct gpio_desc *desc;
- char propname[32];
-
- /* Try first from _DSD */
- for_each_gpio_property_name(propname, con_id) {
- if (adev)
- desc = acpi_get_gpiod_by_index(adev,
- propname, idx, info);
- else
- desc = acpi_get_gpiod_from_data(fwnode,
- propname, idx, info);
- if (PTR_ERR(desc) == -EPROBE_DEFER)
- return ERR_CAST(desc);
-
- if (!IS_ERR(desc))
- return desc;
- }
-
- /* Then from plain _CRS GPIOs */
- if (can_fallback)
- return acpi_get_gpiod_by_index(adev, NULL, idx, info);
-
- return ERR_PTR(-ENOENT);
-}
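For orientation, the _DSD property names tried by the
for_each_gpio_property_name() loop above (order assumed from gpiolib's
generic "-gpios"/"-gpio" suffix handling):

    /*
     * con_id "reset":  "reset-gpios", then "reset-gpio"
     * con_id == NULL:  plain "gpios" (then "gpio")
     * The _CRS fallback runs only after every _DSD name has failed,
     * and only when acpi_can_fallback_to_crs() permitted it.
     */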
-
-struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags *dflags,
- unsigned long *lookupflags)
-{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
- struct gpio_desc *desc;
-
- desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
- if (IS_ERR(desc))
- return desc;
-
- if (info.gpioint &&
- (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
- dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
- return ERR_PTR(-ENOENT);
- }
-
- acpi_gpio_update_gpiod_flags(dflags, &info);
- acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
- return desc;
-}
-
-/**
- * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
- * @adev: pointer to a ACPI device to get IRQ from
- * @con_id: optional name of GpioInt resource
- * @index: index of GpioInt resource (starting from %0)
- * @wake_capable: Set to true if the IRQ is wake capable
- *
- * If the device has one or more GpioInt resources, this function can be
- * used to translate from the GPIO offset in the resource to the Linux IRQ
- * number.
- *
- * The function is idempotent, though each time it runs it will configure GPIO
- * pin direction according to the flags in the GpioInt resource.
- *
- * The function takes optional @con_id parameter. If the resource has
- * a @con_id in a property, then only those will be taken into account.
- *
- * The GPIO is considered wake capable if the GpioInt resource specifies
- * SharedAndWake or ExclusiveAndWake.
- *
- * Returns:
- * Linux IRQ number (> 0) on success, negative errno on failure.
- */
-int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
- bool *wake_capable)
-{
- struct fwnode_handle *fwnode = acpi_fwnode_handle(adev);
- int idx, i;
- unsigned int irq_flags;
- int ret;
-
- for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
- struct gpio_desc *desc;
-
- /* Ignore -EPROBE_DEFER, it only matters if idx matches */
- desc = __acpi_find_gpio(fwnode, con_id, i, true, &info);
- if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
- return PTR_ERR(desc);
-
- if (info.gpioint && idx++ == index) {
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- enum gpiod_flags dflags = GPIOD_ASIS;
- char label[32];
- int irq;
-
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- irq = gpiod_to_irq(desc);
- if (irq < 0)
- return irq;
-
- acpi_gpio_update_gpiod_flags(&dflags, &info);
- acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
-
- snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index);
- ret = gpiod_set_consumer_name(desc, con_id ?: label);
- if (ret)
- return ret;
-
- ret = gpiod_configure_flags(desc, label, lflags, dflags);
- if (ret < 0)
- return ret;
-
-			/* ACPI debounce timeout is in hundredths of a millisecond, i.e. 10 us units */
- ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
- if (ret)
- return ret;
-
- irq_flags = acpi_dev_get_irq_type(info.triggering,
- info.polarity);
-
- /*
- * If the IRQ is not already in use then set type
-			 * if specified and different from the current one.
- */
- if (can_request_irq(irq, irq_flags)) {
- if (irq_flags != IRQ_TYPE_NONE &&
- irq_flags != irq_get_trigger_type(irq))
- irq_set_irq_type(irq, irq_flags);
- } else {
- dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
- }
-
-			/*
-			 * Only report wake capability on low-power S0 (s2idle)
-			 * systems, to avoid suspend issues with GPIOs when
-			 * systems are using S3.
-			 */
- if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
- *wake_capable = info.wake_capable;
-
- return irq;
- }
-
- }
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
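A hedged usage sketch for the exported helper above (device and handler
names invented; the trigger type is already set by the helper via
irq_set_irq_type(), so no IRQF_TRIGGER_* flags are needed):

    bool wake_capable = false;
    int irq, ret;

    irq = acpi_dev_gpio_irq_wake_get_by(adev, "irq", 0, &wake_capable);
    if (irq < 0)
    	return irq;

    ret = devm_request_irq(dev, irq, foo_irq_handler, 0, "foo", foo);
    if (ret)
    	return ret;

    if (wake_capable)
    	device_init_wakeup(dev, true);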
-
-static acpi_status
-acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
- u32 bits, u64 *value, void *handler_context,
- void *region_context)
-{
- struct acpi_gpio_chip *achip = region_context;
- struct gpio_chip *chip = achip->chip;
- struct acpi_resource_gpio *agpio;
- struct acpi_resource *ares;
- u16 pin_index = address;
- acpi_status status;
- int length;
- int i;
-
- status = acpi_buffer_to_resource(achip->conn_info.connection,
- achip->conn_info.length, &ares);
- if (ACPI_FAILURE(status))
- return status;
-
- if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) {
- ACPI_FREE(ares);
- return AE_BAD_PARAMETER;
- }
-
- agpio = &ares->data.gpio;
-
- if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
- function == ACPI_WRITE)) {
- ACPI_FREE(ares);
- return AE_BAD_PARAMETER;
- }
-
- length = min_t(u16, agpio->pin_table_length, pin_index + bits);
- for (i = pin_index; i < length; ++i) {
- unsigned int pin = agpio->pin_table[i];
- struct acpi_gpio_connection *conn;
- struct gpio_desc *desc;
- bool found;
-
- mutex_lock(&achip->conn_lock);
-
- found = false;
- list_for_each_entry(conn, &achip->conns, node) {
- if (conn->pin == pin) {
- found = true;
- desc = conn->desc;
- break;
- }
- }
-
- /*
- * The same GPIO can be shared between operation region and
- * event but only if the access here is ACPI_READ. In that
- * case we "borrow" the event GPIO instead.
- */
- if (!found && agpio->shareable == ACPI_SHARED &&
- function == ACPI_READ) {
- struct acpi_gpio_event *event;
-
- list_for_each_entry(event, &achip->events, node) {
- if (event->pin == pin) {
- desc = event->desc;
- found = true;
- break;
- }
- }
- }
-
- if (!found) {
- desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
- if (IS_ERR(desc)) {
- mutex_unlock(&achip->conn_lock);
- status = AE_ERROR;
- goto out;
- }
-
- conn = kzalloc(sizeof(*conn), GFP_KERNEL);
- if (!conn) {
- gpiochip_free_own_desc(desc);
- mutex_unlock(&achip->conn_lock);
- status = AE_NO_MEMORY;
- goto out;
- }
-
- conn->pin = pin;
- conn->desc = desc;
- list_add_tail(&conn->node, &achip->conns);
- }
-
- mutex_unlock(&achip->conn_lock);
-
- if (function == ACPI_WRITE)
- gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
- else
- *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
- }
-
-out:
- ACPI_FREE(ares);
- return status;
-}
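A worked read through the OpRegion handler above (pin numbers hypothetical;
note the value shift uses the pin_table index i, not a 0-based field bit):

    /*
     * GpioIo() connection with pin_table = { 4, 5, 6 }:
     * ACPI_READ, address = 1, bits = 2  ->  the loop runs i = 1..2,
     * i.e. GPIOs 5 and 6, and ORs their raw values into *value
     * at bit positions 1 and 2.
     */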
-
-static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip)
-{
- struct gpio_chip *chip = achip->chip;
- acpi_handle handle = ACPI_HANDLE(chip->parent);
- acpi_status status;
-
- INIT_LIST_HEAD(&achip->conns);
- mutex_init(&achip->conn_lock);
- status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
- acpi_gpio_adr_space_handler,
- NULL, achip);
- if (ACPI_FAILURE(status))
- dev_err(chip->parent,
- "Failed to install GPIO OpRegion handler\n");
-}
-
-static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
-{
- struct gpio_chip *chip = achip->chip;
- acpi_handle handle = ACPI_HANDLE(chip->parent);
- struct acpi_gpio_connection *conn, *tmp;
- acpi_status status;
-
- status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
- acpi_gpio_adr_space_handler);
- if (ACPI_FAILURE(status)) {
- dev_err(chip->parent,
- "Failed to remove GPIO OpRegion handler\n");
- return;
- }
-
- list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
- gpiochip_free_own_desc(conn->desc);
- list_del(&conn->node);
- kfree(conn);
- }
-}
-
-static struct gpio_desc *
-acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
- struct fwnode_handle *fwnode,
- const char **name,
- unsigned long *lflags,
- enum gpiod_flags *dflags)
-{
- struct gpio_chip *chip = achip->chip;
- struct gpio_desc *desc;
- u32 gpios[2];
- int ret;
-
- *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- *dflags = GPIOD_ASIS;
- *name = NULL;
-
- ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
- ARRAY_SIZE(gpios));
- if (ret < 0)
- return ERR_PTR(ret);
-
- desc = gpiochip_get_desc(chip, gpios[0]);
- if (IS_ERR(desc))
- return desc;
-
- if (gpios[1])
- *lflags |= GPIO_ACTIVE_LOW;
-
- if (fwnode_property_present(fwnode, "input"))
- *dflags |= GPIOD_IN;
- else if (fwnode_property_present(fwnode, "output-low"))
- *dflags |= GPIOD_OUT_LOW;
- else if (fwnode_property_present(fwnode, "output-high"))
- *dflags |= GPIOD_OUT_HIGH;
- else
- return ERR_PTR(-EINVAL);
-
- fwnode_property_read_string(fwnode, "line-name", name);
-
- return desc;
-}
-
-static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
-{
- struct gpio_chip *chip = achip->chip;
- struct fwnode_handle *fwnode;
-
- device_for_each_child_node(chip->parent, fwnode) {
- unsigned long lflags;
- enum gpiod_flags dflags;
- struct gpio_desc *desc;
- const char *name;
- int ret;
-
- if (!fwnode_property_present(fwnode, "gpio-hog"))
- continue;
-
- desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
- &lflags, &dflags);
- if (IS_ERR(desc))
- continue;
-
- ret = gpiod_hog(desc, name, lflags, dflags);
- if (ret) {
- dev_err(chip->parent, "Failed to hog GPIO\n");
- fwnode_handle_put(fwnode);
- return;
- }
- }
-}
-
-void acpi_gpiochip_add(struct gpio_chip *chip)
-{
- struct acpi_gpio_chip *acpi_gpio;
- struct acpi_device *adev;
- acpi_status status;
-
- if (!chip || !chip->parent)
- return;
-
- adev = ACPI_COMPANION(chip->parent);
- if (!adev)
- return;
-
- acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
- if (!acpi_gpio) {
- dev_err(chip->parent,
- "Failed to allocate memory for ACPI GPIO chip\n");
- return;
- }
-
- acpi_gpio->chip = chip;
- INIT_LIST_HEAD(&acpi_gpio->events);
- INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
-
- status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
- if (ACPI_FAILURE(status)) {
- dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
- kfree(acpi_gpio);
- return;
- }
-
- acpi_gpiochip_request_regions(acpi_gpio);
- acpi_gpiochip_scan_gpios(acpi_gpio);
- acpi_dev_clear_dependencies(adev);
-}
-
-void acpi_gpiochip_remove(struct gpio_chip *chip)
-{
- struct acpi_gpio_chip *acpi_gpio;
- acpi_handle handle;
- acpi_status status;
-
- if (!chip || !chip->parent)
- return;
-
- handle = ACPI_HANDLE(chip->parent);
- if (!handle)
- return;
-
- status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
- if (ACPI_FAILURE(status)) {
- dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n");
- return;
- }
-
- acpi_gpiochip_free_regions(acpi_gpio);
-
- acpi_detach_data(handle, acpi_gpio_chip_dh);
- kfree(acpi_gpio);
-}
-
-static int acpi_gpio_package_count(const union acpi_object *obj)
-{
- const union acpi_object *element = obj->package.elements;
- const union acpi_object *end = element + obj->package.count;
- unsigned int count = 0;
-
- while (element < end) {
- switch (element->type) {
- case ACPI_TYPE_LOCAL_REFERENCE:
- element += 3;
- fallthrough;
- case ACPI_TYPE_INTEGER:
- element++;
- count++;
- break;
-
- default:
- return -EPROTO;
- }
- }
-
- return count;
-}
-
-static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
-{
- unsigned int *count = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_GPIO)
- *count += ares->data.gpio.pin_table_length;
-
- return 1;
-}
-
-/**
- * acpi_gpio_count - count the GPIOs associated with a firmware node / function
- * @fwnode: firmware node of the GPIO consumer
- * @con_id: function within the GPIO consumer
- *
- * Returns:
- * The number of GPIOs associated with a firmware node / function or %-ENOENT,
- * if no GPIO has been assigned to the requested function.
- */
-int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
-{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- const union acpi_object *obj;
- const struct acpi_gpio_mapping *gm;
- int count = -ENOENT;
- int ret;
- char propname[32];
-
- /* Try first from _DSD */
- for_each_gpio_property_name(propname, con_id) {
- ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
- if (ret == 0) {
- if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
- count = 1;
- else if (obj->type == ACPI_TYPE_PACKAGE)
- count = acpi_gpio_package_count(obj);
- } else if (adev->driver_gpios) {
- for (gm = adev->driver_gpios; gm->name; gm++)
- if (strcmp(propname, gm->name) == 0) {
- count = gm->size;
- break;
- }
- }
- if (count > 0)
- break;
- }
-
- /* Then from plain _CRS GPIOs */
- if (count < 0) {
- struct list_head resource_list;
- unsigned int crs_count = 0;
-
- if (!acpi_can_fallback_to_crs(adev, con_id))
- return count;
-
- INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(adev, &resource_list,
- acpi_find_gpio_count, &crs_count);
- acpi_dev_free_resource_list(&resource_list);
- if (crs_count > 0)
- count = crs_count;
- }
- return count ? count : -ENOENT;
-}
-
-/* Run deferred acpi_gpiochip_request_irqs() */
-static int __init acpi_gpio_handle_deferred_request_irqs(void)
-{
- struct acpi_gpio_chip *acpi_gpio, *tmp;
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- list_for_each_entry_safe(acpi_gpio, tmp,
- &acpi_gpio_deferred_req_irqs_list,
- deferred_req_irqs_list_entry)
- acpi_gpiochip_request_irqs(acpi_gpio);
-
- acpi_gpio_deferred_req_irqs_done = true;
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- return 0;
-}
-/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-
-static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
- {
- /*
-		 * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
-		 * a nonexistent micro-USB-B connector, which puts the HDMI
-		 * DDC pins in GPIO mode, breaking HDMI support.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
-		 * The Terra Pad 1061 has a micro-USB-B id-pin handler which,
-		 * instead of controlling the actual micro-USB-B connector,
-		 * turns off the 5V boost for its USB-A connector. The actual
-		 * micro-USB-B connector is wired for charging only.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
-		 * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
-		 * external embedded-controller connected via I2C + an ACPI GPIO
-		 * event handler on INT33FC:02 pin 12, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@12",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
-		 * When suspending by closing the lid, the power to the USB
-		 * keyboard is turned off, causing INT0002 ACPI events to
-		 * trigger once the XHCI controller notices the keyboard is
-		 * gone. So INT0002 events cause spurious wakeups too. Ignoring
-		 * EC wakes breaks wakeup when opening the lid; the user needs
-		 * to press the power button to wake up the system. The
-		 * alternative is suspend simply not working, which is worse.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0,INT0002:00@2",
- },
- },
- {
- /*
- * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FC:02 pin 28, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "815D"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@28",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "813E"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0",
- },
- },
- {
- /*
-		 * Interrupt storm caused by an edge-triggered floating pin
- * Found in BIOS UX325UAZ.300
- * https://bugzilla.kernel.org/show_bug.cgi?id=216208
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@18",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.7
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "SYNA1202:00@16",
- },
- },
- {
- /*
- * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
- * a "dolby" button. At the ACPI level an _AEI event-handler
- * is connected which sets an ACPI variable to 1 on both
- * edges. This variable can be polled + cleared to 0 using
- * WMI. But since the variable is set on both edges the WMI
- * interface is pretty useless even when polling.
- * So instead the x86-android-tablets code instantiates
- * a gpio-keys platform device for it.
-		 * Ignore the _AEI handler for the pin, so that it is left
-		 * free for the gpio-keys device.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "INT33FC:00@3",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 0.35
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
- DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "PNP0C50:00@8",
- },
- },
- {} /* Terminating entry */
-};
-
-static int __init acpi_gpio_setup_params(void)
-{
- const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
- const struct dmi_system_id *id;
-
- id = dmi_first_match(gpiolib_acpi_quirks);
- if (id)
- quirk = id->driver_data;
-
- if (run_edge_events_on_boot < 0) {
- if (quirk && quirk->no_edge_events_on_boot)
- run_edge_events_on_boot = 0;
- else
- run_edge_events_on_boot = 1;
- }
-
- if (ignore_wake == NULL && quirk && quirk->ignore_wake)
- ignore_wake = quirk->ignore_wake;
-
- if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
- ignore_interrupt = quirk->ignore_interrupt;
-
- return 0;
-}
-
-/* Directly after dmi_setup() which runs as core_initcall() */
-postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 7e1c51d04040..a90267470a4e 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -58,4 +58,19 @@ static inline int acpi_gpio_count(const struct fwnode_handle *fwnode,
}
#endif
+void acpi_gpio_process_deferred_list(struct list_head *list);
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list);
+void acpi_gpio_remove_from_deferred_list(struct list_head *list);
+
+int acpi_gpio_need_run_edge_events_on_boot(void);
+
+enum acpi_gpio_ignore_list {
+ ACPI_GPIO_IGNORE_WAKE,
+ ACPI_GPIO_IGNORE_INTERRUPT,
+};
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in);
+
#endif /* GPIOLIB_ACPI_H */
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 78c9d9ed3d68..175836467f21 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -16,16 +16,15 @@
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
-#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
-#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -143,18 +142,22 @@ static int linehandle_validate_flags(u32 flags)
static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
- assign_bit(FLAG_ACTIVE_LOW, flagsp,
+ unsigned long flags = READ_ONCE(*flagsp);
+
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- assign_bit(FLAG_OPEN_DRAIN, flagsp,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, flagsp,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, flagsp,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, flagsp,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, flagsp,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
+
+ WRITE_ONCE(*flagsp, flags);
}
static long linehandle_set_config(struct linehandle_state *lh,
@@ -184,11 +187,11 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
return ret;
} else {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
return ret;
}
@@ -235,7 +238,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
* All line descriptors were created at once with the same
* flags so just check if the first one is really output.
*/
- if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ if (!test_bit(GPIOD_FLAG_IS_OUT, &lh->descs[0]->flags))
return -EPERM;
if (copy_from_user(&ghd, ip, sizeof(ghd)))
@@ -359,11 +362,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!handlereq.default_values[i];
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
goto out_free_lh;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
goto out_free_lh;
}
@@ -417,7 +420,6 @@ out_free_lh:
/**
* struct line - contains the state of a requested line
- * @node: to store the object in supinfo_tree if supplemental
* @desc: the GPIO descriptor for this line.
* @req: the corresponding line request
* @irq: the interrupt triggered in response to events on this GPIO
@@ -430,7 +432,6 @@ out_free_lh:
* @line_seqno: the seqno for the current edge event in the sequence of
* events for this line.
* @work: the worker that implements software debouncing
- * @debounce_period_us: the debounce period in microseconds
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
* @hdesc: the Hardware Timestamp Engine (HTE) descriptor
@@ -439,7 +440,6 @@ out_free_lh:
* @last_seqno: the last sequence number before debounce period expires
*/
struct line {
- struct rb_node node;
struct gpio_desc *desc;
/*
* -- edge detector specific fields --
@@ -450,7 +450,7 @@ struct line {
* The flags for the active edge detector configuration.
*
* edflags is set by linereq_create(), linereq_free(), and
- * linereq_set_config_unlocked(), which are themselves mutually
+ * linereq_set_config(), which are themselves mutually
* exclusive, and is accessed by edge_irq_thread(),
* process_hw_ts_thread() and debounce_work_func(),
* which can all live with a slightly stale value.
@@ -474,15 +474,6 @@ struct line {
*/
struct delayed_work work;
/*
- * debounce_period_us is accessed by debounce_irq_handler() and
- * process_hw_ts() which are disabled when modified by
- * debounce_setup(), edge_detector_setup() or edge_detector_stop()
- * or can live with a stale version when updated by
- * edge_detector_update().
- * The modifying functions are themselves mutually exclusive.
- */
- unsigned int debounce_period_us;
- /*
* sw_debounce is accessed by linereq_set_config(), which is the
* only setter, and linereq_get_values(), which can live with a
* slightly stale value.
@@ -514,17 +505,6 @@ struct line {
#endif /* CONFIG_HTE */
};
-/*
- * a rbtree of the struct lines containing supplemental info.
- * Used to populate gpio_v2_line_info with cdev specific fields not contained
- * in the struct gpio_desc.
- * A line is determined to contain supplemental information by
- * line_has_supinfo().
- */
-static struct rb_root supinfo_tree = RB_ROOT;
-/* covers supinfo_tree */
-static DEFINE_SPINLOCK(supinfo_lock);
-
/**
* struct linereq - contains the state of a userspace line request
* @gdev: the GPIO device the line request pertains to
@@ -538,8 +518,7 @@ static DEFINE_SPINLOCK(supinfo_lock);
* this line request. Note that this is not used when @num_lines is 1, as
* the line_seqno is then the same and is cheaper to calculate.
* @config_mutex: mutex for serializing ioctl() calls to ensure consistency
- * of configuration, particularly multi-step accesses to desc flags and
- * changes to supinfo status.
+ * of configuration, particularly multi-step accesses to desc flags.
* @lines: the lines held by this line request, with @num_lines elements.
*/
struct linereq {
@@ -555,103 +534,6 @@ struct linereq {
struct line lines[] __counted_by(num_lines);
};
-static void supinfo_insert(struct line *line)
-{
- struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
- struct line *entry;
-
- guard(spinlock)(&supinfo_lock);
-
- while (*new) {
- entry = container_of(*new, struct line, node);
-
- parent = *new;
- if (line->desc < entry->desc) {
- new = &((*new)->rb_left);
- } else if (line->desc > entry->desc) {
- new = &((*new)->rb_right);
- } else {
- /* this should never happen */
- WARN(1, "duplicate line inserted");
- return;
- }
- }
-
- rb_link_node(&line->node, parent, new);
- rb_insert_color(&line->node, &supinfo_tree);
-}
-
-static void supinfo_erase(struct line *line)
-{
- guard(spinlock)(&supinfo_lock);
-
- rb_erase(&line->node, &supinfo_tree);
-}
-
-static struct line *supinfo_find(struct gpio_desc *desc)
-{
- struct rb_node *node = supinfo_tree.rb_node;
- struct line *line;
-
- while (node) {
- line = container_of(node, struct line, node);
- if (desc < line->desc)
- node = node->rb_left;
- else if (desc > line->desc)
- node = node->rb_right;
- else
- return line;
- }
- return NULL;
-}
-
-static void supinfo_to_lineinfo(struct gpio_desc *desc,
- struct gpio_v2_line_info *info)
-{
- struct gpio_v2_line_attribute *attr;
- struct line *line;
-
- guard(spinlock)(&supinfo_lock);
-
- line = supinfo_find(desc);
- if (!line)
- return;
-
- attr = &info->attrs[info->num_attrs];
- attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
- attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
- info->num_attrs++;
-}
-
-static inline bool line_has_supinfo(struct line *line)
-{
- return READ_ONCE(line->debounce_period_us);
-}
-
-/*
- * Checks line_has_supinfo() before and after the change to avoid unnecessary
- * supinfo_tree access.
- * Called indirectly by linereq_create() or linereq_set_config() so line
- * is already protected from concurrent changes.
- */
-static void line_set_debounce_period(struct line *line,
- unsigned int debounce_period_us)
-{
- bool was_suppl = line_has_supinfo(line);
-
- WRITE_ONCE(line->debounce_period_us, debounce_period_us);
-
- /* if supinfo status is unchanged then we're done */
- if (line_has_supinfo(line) == was_suppl)
- return;
-
- /* supinfo status has changed, so update the tree */
- if (was_suppl)
- supinfo_erase(line);
- else
- supinfo_insert(line);
-}
-
#define GPIO_V2_LINE_BIAS_FLAGS \
(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
@@ -717,10 +599,10 @@ static void linereq_put_event(struct linereq *lr,
static u64 line_event_timestamp(struct line *line)
{
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
else if (IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
@@ -819,7 +701,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
line->total_discard_seq++;
line->last_seqno = ts->seq;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
return HTE_CB_HANDLED;
@@ -843,11 +725,11 @@ static int hte_edge_setup(struct line *line, u64 eflags)
struct hte_ts_desc *hdesc = &line->hdesc;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_FALLING_EDGE_TS :
HTE_RISING_EDGE_TS;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_RISING_EDGE_TS :
HTE_FALLING_EDGE_TS;
@@ -949,7 +831,7 @@ static bool debounced_value(struct line *line)
*/
value = READ_ONCE(line->level);
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags))
value = !value;
return value;
@@ -960,7 +842,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
struct line *line = p;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
return IRQ_HANDLED;
}
@@ -1040,12 +922,13 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
int ret, level, irq;
char *label;
- /* try hardware */
- ret = gpiod_set_debounce(line->desc, debounce_period_us);
- if (!ret) {
- line_set_debounce_period(line, debounce_period_us);
- return ret;
- }
+ /*
+ * Try hardware. Skip gpiod_set_config() to avoid emitting two
+ * CHANGED_CONFIG line state events.
+ */
+ ret = gpio_do_set_config(line->desc,
+ pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce_period_us));
if (ret != -ENOTSUPP)
return ret;
@@ -1056,7 +939,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
return level;
if (!(IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
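The flow above is the usual gpiolib shape for debounce: ask the hardware first, treat -ENOTSUPP as "fall back to the software debouncer", and treat any other error as fatal. A minimal, self-contained model of that decision (hw_debounce() and sw_debounce() are illustrative stubs, not functions from this patch; userspace EOPNOTSUPP stands in for the kernel's ENOTSUPP):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the two paths: 0 = handled, EOPNOTSUPP = no hardware
 * support, anything else = hard failure. */
static int hw_debounce(unsigned int period_us)
{
	return -EOPNOTSUPP;	/* pretend the controller can't do it */
}

static int sw_debounce(unsigned int period_us)
{
	return 0;		/* pretend the software debouncer is armed */
}

static int set_debounce(unsigned int period_us)
{
	int ret = hw_debounce(period_us);

	if (!ret)
		return 0;		/* hardware took it - done */
	if (ret != -EOPNOTSUPP)
		return ret;		/* real error - give up */
	return sw_debounce(period_us);	/* fall back to software */
}

int main(void)
{
	printf("set_debounce(5000) = %d\n", set_debounce(5000));
	return 0;
}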
@@ -1128,7 +1011,8 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->edflags, 0);
- line_set_debounce_period(line, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -1161,7 +1045,7 @@ static int edge_detector_setup(struct line *line,
ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
- line_set_debounce_period(line, debounce_period_us);
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
}
/* detection disabled or sw debouncer will provide edge detection */
@@ -1177,10 +1061,10 @@ static int edge_detector_setup(struct line *line,
return -ENXIO;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -1209,12 +1093,12 @@ static int edge_detector_update(struct line *line,
gpio_v2_line_config_debounce_period(lc, line_idx);
if ((active_edflags == edflags) &&
- (READ_ONCE(line->debounce_period_us) == debounce_period_us))
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- line_set_debounce_period(line, debounce_period_us);
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
/*
* ensure event fifo is initialised if edge detection
* is now enabled.
@@ -1331,7 +1215,7 @@ static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
return -EINVAL;
- if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
+ if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
return -EINVAL;
for (i = 0; i < num_lines; i++) {
@@ -1348,38 +1232,42 @@ static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
return 0;
}
-static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
+static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
unsigned long *flagsp)
{
- assign_bit(FLAG_ACTIVE_LOW, flagsp,
- flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
+ unsigned long flags = READ_ONCE(*flagsp);
+
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
+ lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
- if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
- set_bit(FLAG_IS_OUT, flagsp);
- else if (flags & GPIO_V2_LINE_FLAG_INPUT)
- clear_bit(FLAG_IS_OUT, flagsp);
+ if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
+ set_bit(GPIOD_FLAG_IS_OUT, &flags);
+ else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
+ clear_bit(GPIOD_FLAG_IS_OUT, &flags);
- assign_bit(FLAG_EDGE_RISING, flagsp,
- flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
- assign_bit(FLAG_EDGE_FALLING, flagsp,
- flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ assign_bit(GPIOD_FLAG_EDGE_RISING, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
+ assign_bit(GPIOD_FLAG_EDGE_FALLING, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
- assign_bit(FLAG_OPEN_DRAIN, flagsp,
- flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, flagsp,
- flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
+ lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
- assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
- assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+
+ WRITE_ONCE(*flagsp, flags);
}
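The rework above snapshots the flags word once with READ_ONCE(), edits the local copy, and publishes the result with a single WRITE_ONCE(), so lockless readers never observe a half-applied set of bits. A short sketch of that snapshot/publish pattern (bit numbers 0 and 1 are illustrative, not the real GPIOD_FLAG_* values):

/* Assumes concurrent readers use READ_ONCE() on the same word. */
static void update_two_bits(unsigned long *flagsp, bool first, bool second)
{
	unsigned long flags = READ_ONCE(*flagsp);	/* one snapshot */

	assign_bit(0, &flags, first);			/* edit the copy */
	assign_bit(1, &flags, second);

	WRITE_ONCE(*flagsp, flags);			/* one publish */
}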
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1478,9 +1366,6 @@ static long linereq_set_values(struct linereq *lr, void __user *ip)
/* scan requested lines to determine the subset to be set */
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
- /* setting inputs is not allowed */
- if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
- return -EPERM;
/* add to compacted values */
if (lv.bits & BIT_ULL(i))
__set_bit(num_set, vals);
@@ -1546,11 +1431,11 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
int val = gpio_v2_line_config_output_value(&lc, i);
edge_detector_stop(line);
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
return ret;
} else {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
return ret;
@@ -1669,7 +1554,6 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
static void linereq_free(struct linereq *lr)
{
- struct line *line;
unsigned int i;
if (lr->device_unregistered_nb.notifier_call)
@@ -1677,14 +1561,10 @@ static void linereq_free(struct linereq *lr)
&lr->device_unregistered_nb);
for (i = 0; i < lr->num_lines; i++) {
- line = &lr->lines[i];
- if (!line->desc)
- continue;
-
- edge_detector_stop(line);
- if (line_has_supinfo(line))
- supinfo_erase(line);
- gpiod_free(line->desc);
+ if (lr->lines[i].desc) {
+ edge_detector_stop(&lr->lines[i]);
+ gpiod_free(lr->lines[i].desc);
+ }
}
kfifo_free(&lr->events);
kfree(lr->label);
@@ -1746,7 +1626,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
return -EINVAL;
- if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
+ if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
return -EINVAL;
lc = &ulr.config;
@@ -1818,11 +1698,11 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
goto out_free_linereq;
} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
goto out_free_linereq;
@@ -2235,10 +2115,10 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -2353,8 +2233,9 @@ static void gpio_v2_line_info_changed_to_v1(
#endif /* CONFIG_GPIO_CDEV_V1 */
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
- struct gpio_v2_line_info *info)
+ struct gpio_v2_line_info *info, bool atomic)
{
+ u32 debounce_period_us;
unsigned long dflags;
const char *label;
@@ -2372,7 +2253,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
- if (label && test_bit(FLAG_REQUESTED, &dflags))
+ if (label && test_bit(GPIOD_FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
sizeof(info->consumer));
}
@@ -2389,44 +2270,54 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* The definitive test that a line is available to userspace is to
* request it.
*/
- if (test_bit(FLAG_REQUESTED, &dflags) ||
- test_bit(FLAG_IS_HOGGED, &dflags) ||
- test_bit(FLAG_USED_AS_IRQ, &dflags) ||
- test_bit(FLAG_EXPORT, &dflags) ||
- test_bit(FLAG_SYSFS, &dflags) ||
- !gpiochip_line_is_valid(guard.gc, info->offset) ||
- !pinctrl_gpio_can_use_line(guard.gc, info->offset))
+ if (test_bit(GPIOD_FLAG_REQUESTED, &dflags) ||
+ test_bit(GPIOD_FLAG_IS_HOGGED, &dflags) ||
+ test_bit(GPIOD_FLAG_EXPORT, &dflags) ||
+ test_bit(GPIOD_FLAG_SYSFS, &dflags) ||
+ !gpiochip_line_is_valid(guard.gc, info->offset)) {
info->flags |= GPIO_V2_LINE_FLAG_USED;
+ } else if (!atomic) {
+ if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
+ info->flags |= GPIO_V2_LINE_FLAG_USED;
+ }
- if (test_bit(FLAG_IS_OUT, &dflags))
+ if (test_bit(GPIOD_FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
- if (test_bit(FLAG_ACTIVE_LOW, &dflags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
- if (test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
- if (test_bit(FLAG_BIAS_DISABLE, &dflags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
- if (test_bit(FLAG_PULL_DOWN, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_DOWN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_UP, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
- if (test_bit(FLAG_EDGE_RISING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_RISING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
- if (test_bit(FLAG_EDGE_FALLING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_FALLING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
+ else if (test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+
+ debounce_period_us = READ_ONCE(desc->debounce_period_us);
+ if (debounce_period_us) {
+ info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ info->attrs[info->num_attrs].debounce_period_us =
+ debounce_period_us;
+ info->num_attrs++;
+ }
}
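With the period stored in the descriptor, userspace sees it as an ordinary GPIO_V2_LINE_ATTR_ID_DEBOUNCE attribute in struct gpio_v2_line_info. A hedged userspace sketch reading it back through the v2 line-info ioctl; /dev/gpiochip0 and line offset 0 are assumptions for the example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpio_v2_line_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));	/* padding must be zeroed */
	info.offset = 0;
	if (ioctl(fd, GPIO_V2_GET_LINEINFO_IOCTL, &info) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	for (unsigned int i = 0; i < info.num_attrs; i++)
		if (info.attrs[i].id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE)
			printf("debounce: %u us\n",
			       info.attrs[i].debounce_period_us);

	close(fd);
	return 0;
}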
struct gpio_chardev_data {
@@ -2439,6 +2330,7 @@ struct gpio_chardev_data {
#ifdef CONFIG_GPIO_CDEV_V1
atomic_t watch_abi_version;
#endif
+ struct file *fp;
};
static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
@@ -2494,7 +2386,7 @@ static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
return -EBUSY;
}
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
@@ -2516,7 +2408,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
+ if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
return -EINVAL;
desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
@@ -2531,8 +2423,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
return -EBUSY;
}
- gpio_desc_to_lineinfo(desc, &lineinfo);
- supinfo_to_lineinfo(desc, &lineinfo);
+ gpio_desc_to_lineinfo(desc, &lineinfo, false);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
@@ -2609,29 +2500,86 @@ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
}
#endif
+struct lineinfo_changed_ctx {
+ struct work_struct work;
+ struct gpio_v2_line_info_changed chg;
+ struct gpio_device *gdev;
+ struct gpio_chardev_data *cdev;
+};
+
+static void lineinfo_changed_func(struct work_struct *work)
+{
+ struct lineinfo_changed_ctx *ctx =
+ container_of(work, struct lineinfo_changed_ctx, work);
+ struct gpio_chip *gc;
+ int ret;
+
+ if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
+ /*
+		 * If nobody set the USED flag earlier, let's check with pinctrl
+		 * now. We're doing this late because it's a sleeping function.
+		 * Pin functions are in general much more static, and while this
+		 * is not 100% bullet-proof, it's good enough for most cases.
+ */
+ scoped_guard(srcu, &ctx->gdev->srcu) {
+ gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
+ if (gc &&
+ !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
+ ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
+ }
+ }
+
+ ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
+ &ctx->cdev->wait.lock);
+ if (ret)
+ wake_up_poll(&ctx->cdev->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+
+ gpio_device_put(ctx->gdev);
+ fput(ctx->cdev->fp);
+ kfree(ctx);
+}
+
static int lineinfo_changed_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct gpio_chardev_data *cdev =
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
- struct gpio_v2_line_info_changed chg;
+ struct lineinfo_changed_ctx *ctx;
struct gpio_desc *desc = data;
- int ret;
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
return NOTIFY_DONE;
- memset(&chg, 0, sizeof(chg));
- chg.event_type = action;
- chg.timestamp_ns = ktime_get_ns();
- gpio_desc_to_lineinfo(desc, &chg.info);
- supinfo_to_lineinfo(desc, &chg.info);
+ /*
+	 * If this is called from atomic context (for instance, with a spinlock
+	 * taken by the atomic notifier chain), any sleeping calls must be done
+	 * outside of this function, in the process context of the dedicated
+	 * workqueue.
+ *
+ * Let's gather as much info as possible from the descriptor and
+ * postpone just the call to pinctrl_gpio_can_use_line() until the work
+ * is executed.
+ */
- ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
- if (ret)
- wake_up_poll(&cdev->wait, EPOLLIN);
- else
- pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx) {
+ pr_err("Failed to allocate memory for line info notification\n");
+ return NOTIFY_DONE;
+ }
+
+ ctx->chg.event_type = action;
+ ctx->chg.timestamp_ns = ktime_get_ns();
+ gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
+ /* Keep the GPIO device alive until we emit the event. */
+ ctx->gdev = gpio_device_get(desc->gdev);
+ ctx->cdev = cdev;
+ /* Keep the file descriptor alive too. */
+ get_file(ctx->cdev->fp);
+
+ INIT_WORK(&ctx->work, lineinfo_changed_func);
+ queue_work(ctx->gdev->line_state_wq, &ctx->work);
return NOTIFY_OK;
}
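Since the notifier may now run in atomic context, everything that could sleep is captured in a heap-allocated context and replayed from an ordered workqueue, with the GPIO device and the file pinned until the work has run. A stripped-down sketch of that defer-to-workqueue shape (defer_ctx is illustrative, and system_wq stands in for the ordered gdev->line_state_wq used by the real code):

struct defer_ctx {
	struct work_struct work;
	/* ...snapshot of everything gathered in atomic context... */
};

static void defer_func(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);

	/* sleeping calls (pinctrl checks, puts) are safe here */
	kfree(ctx);
}

static int defer_notify(void)
{
	struct defer_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return NOTIFY_DONE;	/* must not sleep or block here */

	INIT_WORK(&ctx->work, defer_func);
	queue_work(system_wq, &ctx->work);

	return NOTIFY_OK;
}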
@@ -2778,8 +2726,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ ret = raw_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2791,6 +2740,7 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
goto out_unregister_line_notifier;
file->private_data = cdev;
+ cdev->fp = file;
ret = nonseekable_open(inode, file);
if (ret)
@@ -2802,8 +2752,9 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- blocking_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2827,8 +2778,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- blocking_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
@@ -2857,6 +2809,11 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
gdev->chrdev.owner = THIS_MODULE;
gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
+ gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
+ dev_name(&gdev->dev));
+ if (!gdev->line_state_wq)
+ return -ENOMEM;
+
ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
if (ret)
return ret;
@@ -2873,6 +2830,7 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
+ destroy_workqueue(gdev->line_state_wq);
cdev_device_del(&gdev->chrdev, &gdev->dev);
blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 08205f355ceb..72422c5db364 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -6,7 +6,7 @@
* Copyright (c) 2011 John Crispin <john@phrozen.org>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -19,32 +19,14 @@
struct fwnode_handle;
struct lock_class_key;
-static void devm_gpiod_release(struct device *dev, void *res)
+static void devm_gpiod_release(void *desc)
{
- struct gpio_desc **desc = res;
-
- gpiod_put(*desc);
-}
-
-static int devm_gpiod_match(struct device *dev, void *res, void *data)
-{
- struct gpio_desc **this = res, **gpio = data;
-
- return *this == *gpio;
-}
-
-static void devm_gpiod_release_array(struct device *dev, void *res)
-{
- struct gpio_descs **descs = res;
-
- gpiod_put_array(*descs);
+ gpiod_put(desc);
}
-static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
+static void devm_gpiod_release_array(void *descs)
{
- struct gpio_descs **this = res, **gpios = data;
-
- return *this == *gpios;
+ gpiod_put_array(descs);
}
/**
@@ -114,8 +96,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
+ int ret;
desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc))
@@ -126,23 +108,16 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
* already under resource management by this device.
*/
if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
+ bool dres;
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+ dres = devm_is_action_added(dev, devm_gpiod_release, desc);
if (dres)
return desc;
}
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
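The conversion above replaces the devres_alloc()/devres_add() boilerplate with a single devm_add_action_or_reset() call; if registering the action fails, the release callback runs immediately, so no manual unwind branch is needed. A sketch of the idiom in a hypothetical consumer (my_resource_get()/my_resource_put() are stand-ins, not real APIs):

static void my_resource_release(void *res)
{
	my_resource_put(res);	/* hypothetical put */
}

static int my_probe(struct device *dev)
{
	struct my_resource *res = my_resource_get(dev);	/* hypothetical get */

	if (IS_ERR(res))
		return PTR_ERR(res);

	/* On failure this calls my_resource_release(res) before returning. */
	return devm_add_action_or_reset(dev, my_resource_release, res);
}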
@@ -171,22 +146,16 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
- if (IS_ERR(desc)) {
- devres_free(dr);
+ if (IS_ERR(desc))
return desc;
- }
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -244,22 +213,16 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
- struct gpio_descs **dr;
struct gpio_descs *descs;
-
- dr = devres_alloc(devm_gpiod_release_array,
- sizeof(struct gpio_descs *), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
descs = gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs)) {
- devres_free(dr);
+ if (IS_ERR(descs))
return descs;
- }
- *dr = descs;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release_array, descs);
+ if (ret)
+ return ERR_PTR(ret);
return descs;
}
@@ -307,8 +270,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
*/
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
- WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
- &desc));
+ devm_release_action(dev, devm_gpiod_release, desc);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put);
@@ -317,24 +279,28 @@ EXPORT_SYMBOL_GPL(devm_gpiod_put);
* @dev: GPIO consumer
* @desc: GPIO descriptor to remove resource management from
*
+ * *DEPRECATED*
+ * This function should not be used. It's been provided as a workaround for
+ * resource ownership issues in the regulator framework and should be replaced
+ * with a better solution.
+ *
* Remove resource management from a GPIO descriptor. This is needed when
* you want to hand over lifecycle management of a descriptor to another
* mechanism.
*/
-
void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
{
int ret;
if (IS_ERR_OR_NULL(desc))
return;
- ret = devres_destroy(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+
/*
* If the GPIO descriptor is requested as nonexclusive, we
* may call this function several times on the same descriptor
-	 * so it is OK if devres_destroy() returns -ENOENT.
+	 * so it is OK if devm_remove_action_nowarn() returns -ENOENT.
*/
+ ret = devm_remove_action_nowarn(dev, devm_gpiod_release, desc);
if (ret == -ENOENT)
return;
/* Anything else we should warn about */
@@ -353,8 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- WARN_ON(devres_release(dev, devm_gpiod_release_array,
- devm_gpiod_match_array, &descs));
+ devm_release_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 28f1046fb670..3bc93ccadb5b 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -46,9 +46,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (err)
return err;
- if (flags & GPIOF_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
if (flags & GPIOF_IN)
err = gpiod_direction_input(desc);
else
@@ -89,44 +86,6 @@ static void devm_gpio_release(struct device *dev, void *res)
}
/**
- * devm_gpio_request - request a GPIO for a managed device
- * @dev: device to request the GPIO for
- * @gpio: GPIO to allocate
- * @label: the name of the requested GPIO
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_request().
- * GPIOs requested with this function will be automatically freed
- * on driver detach.
- *
- * **DEPRECATED** This function is deprecated and must not be used in new code.
- *
- * Returns:
- * 0 on success, or negative errno on failure.
- */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
-{
- unsigned *dr;
- int rc;
-
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
- rc = gpio_request(gpio, label);
- if (rc) {
- devres_free(dr);
- return rc;
- }
-
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpio_request);
-
-/**
* devm_gpio_request_one - request a single GPIO with initial setup
* @dev: device to request for
* @gpio: the GPIO number
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 880f1efcaca5..fad4edf9cc5c 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "himax,hx8357", "gpios-reset", false },
{ "himax,hx8369", "gpios-reset", false },
+#endif
+#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780)
/*
* The rb-gpios semantics was undocumented and qi,lb60 (along with
* the ingenic driver) got it wrong. The active state encodes the
@@ -203,6 +205,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "qi,lb60", "rb-gpios", true },
#endif
+#if IS_ENABLED(CONFIG_IEEE802154_CA8210)
+ /*
+ * According to the datasheet, the NRST pin 27 is an active-low
+	 * signal. However, the device tree schema and, admittedly, the
+	 * out-of-tree implementations have long incorrectly described
+	 * the reset GPIO as active-high.
+ */
+ { "cascoda,ca8210", "reset-gpio", false },
+#endif
#if IS_ENABLED(CONFIG_PCI_LANTIQ)
/*
* According to the PCI specification, the RST# pin is an
@@ -213,6 +224,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "lantiq,pci-xway", "gpio-reset", false },
#endif
+#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
+ /*
+	 * According to the S5M8767 datasheet, the DVS and DS pins are
+	 * active-high signals. However, exynos5250-spring.dts uses an
+	 * active-low setting.
+ */
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
+#endif
#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
/*
* DTS for Nokia N900 incorrectly specified "active high"
@@ -257,6 +277,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -283,9 +306,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
- { "atmel,hsmci", "cd-gpios", "cd-inverted" },
-#endif
};
unsigned int i;
bool active_high;
@@ -337,7 +357,7 @@ static void of_gpio_flags_quirks(const struct device_node *np,
* to determine if the flags should have inverted semantics.
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
- of_property_read_bool(np, "cs-gpios")) {
+ of_property_present(np, "cs-gpios")) {
u32 cs;
int ret;
@@ -688,7 +708,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id,
unsigned int idx, unsigned long *flags)
{
char propname[32]; /* 32 is max size of property name */
- enum of_gpio_flags of_flags;
+ enum of_gpio_flags of_flags = 0;
const of_find_gpio_quirk *q;
struct gpio_desc *desc;
@@ -858,7 +878,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_IS_HOGGED)
if (READ_ONCE(desc->hog) == hog)
gpiochip_free_own_desc(desc);
}
@@ -929,7 +949,7 @@ struct notifier_block gpio_of_notifier = {
#endif /* CONFIG_OF_DYNAMIC */
/**
- * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
+ * of_gpio_twocell_xlate - translate twocell gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @gpiospec: GPIO specifier as found in the device tree
* @flags: a flags pointer to fill in
@@ -941,9 +961,9 @@ struct notifier_block gpio_of_notifier = {
* Returns:
* GPIO number (>= 0) on success, negative errno on failure.
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags)
+static int of_gpio_twocell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
{
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
@@ -951,7 +971,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
* number and the flags from a single gpio cell -- this is possible,
* but not recommended).
*/
- if (gc->of_gpio_n_cells < 2) {
+ if (gc->of_gpio_n_cells != 2) {
WARN_ON(1);
return -EINVAL;
}
@@ -968,6 +988,49 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
+/**
+ * of_gpio_threecell_xlate - translate threecell gpiospec to the GPIO number and flags
+ * @gc: pointer to the gpio_chip structure
+ * @gpiospec: GPIO specifier as found in the device tree
+ * @flags: a flags pointer to fill in
+ *
+ * This is a simple translation function, suitable for most 1:n mapped
+ * GPIO chips, i.e. several GPIO chip instances from one device tree node.
+ * In this case the following binding is implied:
+ *
+ * foo-gpios = <&gpio instance offset flags>;
+ *
+ * Returns:
+ * GPIO number (>= 0) on success, negative errno on failure.
+ */
+static int of_gpio_threecell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gc->of_gpio_n_cells != 3) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(gpiospec->args_count != 3))
+ return -EINVAL;
+
+ /*
+	 * Check the chip instance number; the driver responds with true
+	 * if this is the chip we are looking for.
+ */
+ if (!gc->of_node_instance_match(gc, gpiospec->args[0]))
+ return -EINVAL;
+
+ if (gpiospec->args[1] >= gc->ngpio)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ return gpiospec->args[1];
+}
+
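A driver opting into the three-cell binding only needs to publish the cell count and an instance-match callback before registering the chip; of_gpiochip_add() below then selects of_gpio_threecell_xlate() automatically and rejects the setup if the callback is missing. A sketch, with struct my_bank and its index field as illustrative stand-ins:

static bool my_instance_match(struct gpio_chip *gc, unsigned int instance)
{
	struct my_bank *bank = gpiochip_get_data(gc);	/* hypothetical data */

	return bank->index == instance;
}

static void my_bank_setup(struct gpio_chip *gc)
{
	gc->of_gpio_n_cells = 3;  /* foo-gpios = <&gpio instance offset flags>; */
	gc->of_node_instance_match = my_instance_match;
	/* leaving gc->of_xlate unset selects of_gpio_threecell_xlate() */
}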
#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
#include <linux/gpio/legacy-of-mm-gpiochip.h>
/**
@@ -1057,6 +1120,9 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
bool has_group_names;
+ int offset; /* Offset of the first GPIO line on the chip */
+ int pin; /* Pin base number in the range */
+ int count; /* Number of pins/GPIO lines to map */
np = dev_of_node(&chip->gpiodev->dev);
if (!np)
@@ -1065,7 +1131,15 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
has_group_names = of_property_present(np, group_names_propname);
for (;; index++) {
- ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ /*
+ * Ordinary phandles contain 2-3 cells:
+ * gpios = <&gpio [instance] offset flags>;
+ * Ranges always contain one more cell:
+		 * gpio-ranges = <&pinctrl [gpio_instance] gpio_offset pin_offset count>;
+		 * This is why we parse chip->of_gpio_n_cells + 1 cells.
+ */
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges",
+ chip->of_gpio_n_cells + 1,
index, &pinspec);
if (ret)
break;
@@ -1075,13 +1149,33 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!pctldev)
return -EPROBE_DEFER;
+ if (chip->of_gpio_n_cells == 3) {
+ /* First cell is the gpiochip instance number */
+ offset = pinspec.args[1];
+ pin = pinspec.args[2];
+ count = pinspec.args[3];
+ } else {
+ offset = pinspec.args[0];
+ pin = pinspec.args[1];
+ count = pinspec.args[2];
+ }
+
+ /*
+ * With multiple GPIO chips per node, check that this chip is the
+ * right instance.
+ */
+ if (chip->of_node_instance_match &&
+ (chip->of_gpio_n_cells == 3) &&
+ !chip->of_node_instance_match(chip, pinspec.args[0]))
+ continue;
+
/* Ignore ranges outside of this GPIO chip */
- if (pinspec.args[0] >= (chip->offset + chip->ngpio))
+ if (offset >= (chip->offset + chip->ngpio))
continue;
- if (pinspec.args[0] + pinspec.args[2] <= chip->offset)
+ if (offset + count <= chip->offset)
continue;
- if (pinspec.args[2]) {
+ if (count) {
/* npins != 0: linear range */
if (has_group_names) {
of_property_read_string_index(np,
@@ -1095,27 +1189,27 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
/* Trim the range to fit this GPIO chip */
- if (chip->offset > pinspec.args[0]) {
- trim = chip->offset - pinspec.args[0];
- pinspec.args[2] -= trim;
- pinspec.args[1] += trim;
- pinspec.args[0] = 0;
+ if (chip->offset > offset) {
+ trim = chip->offset - offset;
+ count -= trim;
+ pin += trim;
+ offset = 0;
} else {
- pinspec.args[0] -= chip->offset;
+ offset -= chip->offset;
}
- if ((pinspec.args[0] + pinspec.args[2]) > chip->ngpio)
- pinspec.args[2] = chip->ngpio - pinspec.args[0];
+ if ((offset + count) > chip->ngpio)
+ count = chip->ngpio - offset;
ret = gpiochip_add_pin_range(chip,
pinctrl_dev_get_devname(pctldev),
- pinspec.args[0],
- pinspec.args[1],
- pinspec.args[2]);
+ offset,
+ pin,
+ count);
if (ret)
return ret;
} else {
/* npins == 0: special range */
- if (pinspec.args[1]) {
+ if (pin) {
pr_err("%pOF: Illegal gpio-range format.\n",
np);
break;
@@ -1140,7 +1234,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
ret = gpiochip_add_pingroup_range(chip, pctldev,
- pinspec.args[0], name);
+ offset, name);
if (ret)
return ret;
}
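Worked numbers may help with the trim arithmetic above. Take one 64-line range, <&pinctrl 0 32 64>, shared by two 32-line chips: the first chip (chip->offset = 0) keeps lines 0..31 mapped to pins 32..63, while the second (chip->offset = 32) trims 32 entries and maps its lines 0..31 to pins 64..95. A standalone model of that clamping:

#include <stdio.h>

/* Clamp a gpio-ranges entry (offset/pin/count) to one chip's window,
 * mirroring the trim logic in of_gpiochip_add_pin_range(). */
static void trim_range(int chip_offset, int ngpio,
		       int offset, int pin, int count)
{
	if (offset >= chip_offset + ngpio || offset + count <= chip_offset)
		return;			/* range misses this chip entirely */

	if (chip_offset > offset) {	/* range starts before the chip */
		int trim = chip_offset - offset;

		count -= trim;
		pin += trim;
		offset = 0;
	} else {
		offset -= chip_offset;	/* make the offset chip-relative */
	}
	if (offset + count > ngpio)	/* range runs past the chip */
		count = ngpio - offset;

	printf("chip@%d: lines %d..%d -> pins %d..%d\n",
	       chip_offset, offset, offset + count - 1,
	       pin, pin + count - 1);
}

int main(void)
{
	trim_range(0, 32, 0, 32, 64);	/* chip A: lines 0..31 -> pins 32..63 */
	trim_range(32, 32, 0, 32, 64);	/* chip B: lines 0..31 -> pins 64..95 */
	return 0;
}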
@@ -1163,8 +1257,14 @@ int of_gpiochip_add(struct gpio_chip *chip)
return 0;
if (!chip->of_xlate) {
- chip->of_gpio_n_cells = 2;
- chip->of_xlate = of_gpio_simple_xlate;
+ if (chip->of_gpio_n_cells == 3) {
+ if (!chip->of_node_instance_match)
+ return -EINVAL;
+ chip->of_xlate = of_gpio_threecell_xlate;
+ } else {
+ chip->of_gpio_n_cells = 2;
+ chip->of_xlate = of_gpio_twocell_xlate;
+ }
}
if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS)
@@ -1187,3 +1287,11 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(dev_of_node(&chip->gpiodev->dev));
}
+
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index)
+{
+ if (gc->of_node_instance_match)
+ return gc->of_node_instance_match(gc, index);
+
+ return false;
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 16d6ac8cb156..2257f7a498a1 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -8,7 +8,7 @@
#include <linux/notifier.h>
-struct device;
+struct device_node;
struct fwnode_handle;
struct gpio_chip;
@@ -22,6 +22,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
unsigned long *lookupflags);
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index);
int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
@@ -33,6 +34,11 @@ static inline struct gpio_desc *of_find_gpio(struct device_node *np,
}
static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline bool of_gpiochip_instance_match(struct gpio_chip *gc,
+ unsigned int index)
+{
+ return false;
+}
static inline int of_gpio_count(const struct fwnode_handle *fwnode,
const char *con_id)
{
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index 2b2dd7e92211..f21dbc28cf2c 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -64,7 +64,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
struct fwnode_reference_args args;
struct gpio_desc *desc;
char propname[32]; /* 32 is max size of property name */
- int ret;
+ int ret = 0;
swnode = to_software_node(fwnode);
if (!swnode)
@@ -141,7 +141,7 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
const struct software_node swnode_gpio_undefined = {
.name = GPIOLIB_SWNODE_UNDEFINED_NAME,
};
-EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, GPIO_SWNODE);
+EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, "GPIO_SWNODE");
static int __init swnode_gpio_init(void)
{
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 17ed229412af..9a849245b358 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -3,7 +3,6 @@
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/device.h>
-#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
@@ -12,7 +11,6 @@
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/srcu.h>
#include <linux/sysfs.h>
@@ -21,9 +19,13 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <uapi/linux/gpio.h>
+
#include "gpiolib.h"
#include "gpiolib-sysfs.h"
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+
struct kernfs_node;
#define GPIO_IRQF_TRIGGER_NONE 0
@@ -32,15 +34,64 @@ struct kernfs_node;
#define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \
GPIO_IRQF_TRIGGER_RISING)
+enum {
+ GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CLASS_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_EDGE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SIZE,
+};
+
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+enum {
+ GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CHIP_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SIZE,
+};
+
struct gpiod_data {
+ struct list_head list;
+
struct gpio_desc *desc;
+ struct device *dev;
struct mutex mutex;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct kernfs_node *value_kn;
int irq;
unsigned char irq_flags;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
bool direction_can_change;
+
+ struct kobject *parent;
+ struct device_attribute dir_attr;
+ struct device_attribute val_attr;
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device_attribute edge_attr;
+ struct device_attribute active_low_attr;
+
+ struct attribute *class_attrs[GPIO_SYSFS_LINE_CLASS_ATTR_SIZE];
+ struct attribute_group class_attr_group;
+ const struct attribute_group *class_attr_groups[2];
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ struct attribute *chip_attrs[GPIO_SYSFS_LINE_CHIP_ATTR_SIZE];
+ struct attribute_group chip_attr_group;
+ const struct attribute_group *chip_attr_groups[2];
+};
+
+struct gpiodev_data {
+ struct list_head exported_lines;
+ struct gpio_device *gdev;
+ struct device *cdev_id; /* Class device by GPIO device ID */
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device *cdev_base; /* Class device by GPIO base */
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
};
/*
@@ -71,30 +122,31 @@ static DEFINE_MUTEX(sysfs_lock);
*/
static ssize_t direction_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
int value;
- mutex_lock(&data->mutex);
-
- gpiod_get_direction(desc);
- value = !!test_bit(FLAG_IS_OUT, &desc->flags);
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex) {
+ gpiod_get_direction(desc);
+ value = !!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
+ }
return sysfs_emit(buf, "%s\n", value ? "out" : "in");
}
static ssize_t direction_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr, const char *buf,
+ size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (sysfs_streq(buf, "high"))
status = gpiod_direction_output_raw(desc, 1);
@@ -105,24 +157,19 @@ static ssize_t direction_store(struct device *dev,
else
status = -EINVAL;
- mutex_unlock(&data->mutex);
-
return status ? : size;
}
-static DEVICE_ATTR_RW(direction);
-static ssize_t value_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t value_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
-
- mutex_lock(&data->mutex);
-
- status = gpiod_get_value_cansleep(desc);
+ ssize_t status;
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ status = gpiod_get_value_cansleep(desc);
if (status < 0)
return status;
@@ -130,31 +177,29 @@ static ssize_t value_show(struct device *dev,
return sysfs_emit(buf, "%zd\n", status);
}
-static ssize_t value_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t value_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
ssize_t status;
long value;
status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
- mutex_lock(&data->mutex);
-
- if (!test_bit(FLAG_IS_OUT, &desc->flags)) {
- status = -EPERM;
- } else if (status == 0) {
- gpiod_set_value_cansleep(desc, value);
- status = size;
- }
+ guard(mutex)(&data->mutex);
- mutex_unlock(&data->mutex);
+ status = gpiod_set_value_cansleep(desc, value);
+ if (status)
+ return status;
- return status;
+ return size;
}
-static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
struct gpiod_data *data = priv;
@@ -165,9 +210,8 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
}
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
+static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
unsigned long irq_flags;
int ret;
@@ -180,29 +224,29 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
if (data->irq < 0)
return -EIO;
- data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value");
- if (!data->value_kn)
- return -ENODEV;
-
irq_flags = IRQF_SHARED;
- if (flags & GPIO_IRQF_TRIGGER_FALLING)
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- if (flags & GPIO_IRQF_TRIGGER_RISING)
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ if (flags & GPIO_IRQF_TRIGGER_FALLING) {
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ set_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
+ }
+ if (flags & GPIO_IRQF_TRIGGER_RISING) {
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ set_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ }
/*
* FIXME: This should be done in the irq_request_resources callback
- * when the irq is requested, but a few drivers currently fail
- * to do so.
+ * when the irq is requested, but a few drivers currently fail to do
+ * so.
*
- * Remove this redundant call (along with the corresponding
- * unlock) when those drivers have been fixed.
+ * Remove this redundant call (along with the corresponding unlock)
+ * when those drivers have been fixed.
*/
ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
if (ret < 0)
- goto err_put_kn;
+ goto err_clr_bits;
ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags,
"gpiolib", data);
@@ -215,8 +259,9 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
err_unlock:
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
-err_put_kn:
- sysfs_put(data->value_kn);
+err_clr_bits:
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
return ret;
}
@@ -225,9 +270,8 @@ err_put_kn:
* Caller holds gpiod-data mutex (unless called after class-device
* deregistration).
*/
-static void gpio_sysfs_free_irq(struct device *dev)
+static void gpio_sysfs_free_irq(struct gpiod_data *data)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
CLASS(gpio_chip_guard, guard)(desc);
@@ -237,27 +281,26 @@ static void gpio_sysfs_free_irq(struct device *dev)
data->irq_flags = 0;
free_irq(data->irq, data);
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
- sysfs_put(data->value_kn);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
-static const char * const trigger_names[] = {
+static const char *const trigger_names[] = {
[GPIO_IRQF_TRIGGER_NONE] = "none",
[GPIO_IRQF_TRIGGER_FALLING] = "falling",
[GPIO_IRQF_TRIGGER_RISING] = "rising",
[GPIO_IRQF_TRIGGER_BOTH] = "both",
};
-static ssize_t edge_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t edge_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
int flags;
- mutex_lock(&data->mutex);
-
- flags = data->irq_flags;
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ flags = data->irq_flags;
if (flags >= ARRAY_SIZE(trigger_names))
return 0;
@@ -265,10 +308,11 @@ static ssize_t edge_show(struct device *dev,
return sysfs_emit(buf, "%s\n", trigger_names[flags]);
}
-static ssize_t edge_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t edge_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
ssize_t status = size;
int flags;
@@ -276,73 +320,70 @@ static ssize_t edge_store(struct device *dev,
if (flags < 0)
return flags;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
- if (flags == data->irq_flags) {
- status = size;
- goto out_unlock;
- }
+ if (flags == data->irq_flags)
+ return size;
if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ gpio_sysfs_free_irq(data);
- if (flags) {
- status = gpio_sysfs_request_irq(dev, flags);
- if (!status)
- status = size;
- }
+ if (!flags)
+ return size;
-out_unlock:
- mutex_unlock(&data->mutex);
+ status = gpio_sysfs_request_irq(data, flags);
+ if (status)
+ return status;
- return status;
+ gpiod_line_state_notify(data->desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return size;
}
-static DEVICE_ATTR_RW(edge);
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_set_active_low(struct device *dev, int value)
+static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
unsigned int flags = data->irq_flags;
struct gpio_desc *desc = data->desc;
int status = 0;
-
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ if (!!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
- assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value);
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags, value);
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
- flags == GPIO_IRQF_TRIGGER_RISING) {
- gpio_sysfs_free_irq(dev);
- status = gpio_sysfs_request_irq(dev, flags);
+ flags == GPIO_IRQF_TRIGGER_RISING) {
+ gpio_sysfs_free_irq(data);
+ status = gpio_sysfs_request_irq(data, flags);
}
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
return status;
}
static ssize_t active_low_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
struct gpio_desc *desc = data->desc;
int value;
- mutex_lock(&data->mutex);
-
- value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ value = !!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t active_low_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
ssize_t status;
long value;
@@ -350,90 +391,191 @@ static ssize_t active_low_store(struct device *dev,
if (status)
return status;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
- status = gpio_sysfs_set_active_low(dev, value);
-
- mutex_unlock(&data->mutex);
-
- return status ? : size;
+ return gpio_sysfs_set_active_low(data, value) ?: size;
}
-static DEVICE_ATTR_RW(active_low);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
- struct device *dev = kobj_to_dev(kobj);
- struct gpiod_data *data = dev_get_drvdata(dev);
- struct gpio_desc *desc = data->desc;
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
umode_t mode = attr->mode;
- bool show_direction = data->direction_can_change;
+ struct gpiod_data *data;
- if (attr == &dev_attr_direction.attr) {
- if (!show_direction)
+ if (strcmp(attr->name, "direction") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, dir_attr);
+
+ if (!data->direction_can_change)
mode = 0;
- } else if (attr == &dev_attr_edge.attr) {
- if (gpiod_to_irq(desc) < 0)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ } else if (strcmp(attr->name, "edge") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, edge_attr);
+
+ if (gpiod_to_irq(data->desc) < 0)
mode = 0;
- if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+
+ if (!data->direction_can_change &&
+ test_bit(GPIOD_FLAG_IS_OUT, &data->desc->flags))
mode = 0;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
return mode;
}
-static struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
- &dev_attr_edge.attr,
- &dev_attr_value.attr,
- &dev_attr_active_low.attr,
- NULL,
-};
-
-static const struct attribute_group gpio_group = {
- .attrs = gpio_attrs,
- .is_visible = gpio_is_visible,
-};
-
-static const struct attribute_group *gpio_groups[] = {
- &gpio_group,
- NULL
-};
-
/*
* /sys/class/gpio/gpiochipN/
* /base ... matching gpio_chip.base (N)
* /label ... matching gpio_chip.label
* /ngpio ... matching gpio_chip.ngpio
+ *
+ * AND
+ *
+ * /sys/class/gpio/chipX/
+ * /export ... export GPIO at given offset
+ * /unexport ... unexport GPIO at given offset
+ * /label ... matching gpio_chip.label
+ * /ngpio ... matching gpio_chip.ngpio
*/
-static ssize_t base_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+static ssize_t base_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->base);
+ return sysfs_emit(buf, "%u\n", data->gdev->base);
}
static DEVICE_ATTR_RO(base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
-static ssize_t label_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", gdev->label);
+ return sysfs_emit(buf, "%s\n", data->gdev->label);
}
static DEVICE_ATTR_RO(label);
-static ssize_t ngpio_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t ngpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->ngpio);
+ return sysfs_emit(buf, "%u\n", data->gdev->ngpio);
}
static DEVICE_ATTR_RO(ngpio);
+static int export_gpio_desc(struct gpio_desc *desc)
+{
+ int offset, ret;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset)) {
+ pr_debug_ratelimited("%s: GPIO %d masked\n", __func__,
+ gpio_chip_hwgpio(desc));
+ return -EINVAL;
+ }
+
+ /*
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
+	 * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+
+ ret = gpiod_request_user(desc, "sysfs");
+ if (ret)
+ return ret;
+
+ ret = gpiod_set_transitory(desc, false);
+ if (ret) {
+ gpiod_free(desc);
+ return ret;
+ }
+
+ ret = gpiod_export(desc, true);
+ if (ret < 0) {
+ gpiod_free(desc);
+ } else {
+ set_bit(GPIOD_FLAG_SYSFS, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+ }
+
+ return ret;
+}
+
+static int unexport_gpio_desc(struct gpio_desc *desc)
+{
+ /*
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
+ * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+ if (!test_and_clear_bit(GPIOD_FLAG_SYSFS, &desc->flags))
+ return -EINVAL;
+
+ gpiod_unexport(desc);
+ gpiod_free(desc);
+
+ return 0;
+}
+
+static ssize_t do_chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, ssize_t size,
+ int (*handler)(struct gpio_desc *desc))
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ struct gpio_device *gdev = data->gdev;
+ struct gpio_desc *desc;
+ unsigned int gpio;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &gpio);
+ if (ret)
+ return ret;
+
+ desc = gpio_device_get_desc(gdev, gpio);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ ret = handler(desc);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, export_gpio_desc);
+}
+
+static struct device_attribute dev_attr_export = __ATTR(export, 0200, NULL,
+ chip_export_store);
+
+static ssize_t chip_unexport_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, unexport_gpio_desc);
+}
+
+static struct device_attribute dev_attr_unexport = __ATTR(unexport, 0200,
+ NULL,
+ chip_unexport_store);
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
@@ -441,7 +583,18 @@ static struct attribute *gpiochip_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpiochip);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+static struct attribute *gpiochip_ext_attrs[] = {
+ &dev_attr_label.attr,
+ &dev_attr_ngpio.attr,
+ &dev_attr_export.attr,
+ &dev_attr_unexport.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpiochip_ext);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -449,11 +602,11 @@ ATTRIBUTE_GROUPS(gpiochip);
* integer N ... number of GPIO to unexport
*/
static ssize_t export_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
- int status, offset;
+ int status;
long gpio;
status = kstrtol(buf, 0, &gpio);
@@ -463,42 +616,11 @@ static ssize_t export_store(const struct class *class,
desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
- return -EINVAL;
- }
-
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
-
- offset = gpio_chip_hwgpio(desc);
- if (!gpiochip_line_is_valid(guard.gc, offset)) {
- pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ pr_debug_ratelimited("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
-
- status = gpiod_request_user(desc, "sysfs");
- if (status)
- goto done;
-
- status = gpiod_set_transitory(desc, false);
- if (status) {
- gpiod_free(desc);
- goto done;
- }
-
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
-
-done:
+ status = export_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -506,8 +628,8 @@ done:
static CLASS_ATTR_WO(export);
static ssize_t unexport_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
int status;
@@ -515,27 +637,16 @@ static ssize_t unexport_store(const struct class *class,
status = kstrtol(buf, 0, &gpio);
if (status < 0)
- goto done;
+ return status;
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpiod_unexport() ignores them) */
if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ pr_debug_ratelimited("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
- status = -EINVAL;
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
- if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
- gpiod_unexport(desc);
- gpiod_free(desc);
- status = 0;
- }
-done:
+ status = unexport_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -548,12 +659,54 @@ static struct attribute *gpio_class_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpio_class);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
-static struct class gpio_class = {
+static const struct class gpio_class = {
.name = "gpio",
- .class_groups = gpio_class_groups,
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ .class_groups = gpio_class_groups,
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
};
+static int match_gdev(struct device *dev, const void *desc)
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = desc;
+
+ return data && data->gdev == gdev;
+}
+
+static struct gpiodev_data *
+gdev_get_data(struct gpio_device *gdev) __must_hold(&sysfs_lock)
+{
+ /*
+ * Find the first device in the GPIO class that matches. Whether it's
+ * the one indexed by GPIO base or by device ID doesn't matter: both
+ * have the same address set as their driver data.
+ */
+ struct device *cdev __free(put_device) = class_find_device(&gpio_class,
+ NULL, gdev,
+ match_gdev);
+ if (!cdev)
+ return NULL;
+
+ return dev_get_drvdata(cdev);
+}
+
+static void gpiod_attr_init(struct device_attribute *dev_attr, const char *name,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
+{
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = name;
+ dev_attr->attr.mode = 0644;
+ dev_attr->show = show;
+ dev_attr->store = store;
+}
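+/*
+ * Note: sysfs_attr_init() above is what gives each dynamically
+ * allocated attribute its own lockdep key; without it, lockdep
+ * complains about dynamically allocated struct attribute keys when
+ * CONFIG_DEBUG_LOCK_ALLOC is enabled.
+ */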
/**
* gpiod_export - export a GPIO through sysfs
@@ -573,11 +726,12 @@ static struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
- const char *ioname = NULL;
+ char *path __free(kfree) = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpiod_data *desc_data;
struct gpio_device *gdev;
- struct gpiod_data *data;
- struct device *dev;
- int status, offset;
+ struct attribute **attrs;
+ int status;
/* can't export until sysfs is available ... */
if (!class_is_registered(&gpio_class)) {
@@ -594,70 +748,138 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_EXPORT, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return -EPERM;
gdev = desc->gdev;
- mutex_lock(&sysfs_lock);
-
- /* check if chip is being removed */
- if (!gdev->mockdev) {
- status = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&sysfs_lock);
- if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
- goto err_unlock;
+ goto err_clear_bit;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
+ desc_data = kzalloc(sizeof(*desc_data), GFP_KERNEL);
+ if (!desc_data) {
status = -ENOMEM;
- goto err_unlock;
+ goto err_clear_bit;
}
- data->desc = desc;
- mutex_init(&data->mutex);
+ desc_data->desc = desc;
+ mutex_init(&desc_data->mutex);
if (guard.gc->direction_input && guard.gc->direction_output)
- data->direction_can_change = direction_may_change;
+ desc_data->direction_can_change = direction_may_change;
else
- data->direction_can_change = false;
+ desc_data->direction_can_change = false;
- offset = gpio_chip_hwgpio(desc);
- if (guard.gc->names && guard.gc->names[offset])
- ioname = guard.gc->names[offset];
-
- dev = device_create_with_groups(&gpio_class, &gdev->dev,
- MKDEV(0, 0), data, gpio_groups,
- ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
- if (IS_ERR(dev)) {
- status = PTR_ERR(dev);
+ gpiod_attr_init(&desc_data->dir_attr, "direction",
+ direction_show, direction_store);
+ gpiod_attr_init(&desc_data->val_attr, "value", value_show, value_store);
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ gpiod_attr_init(&desc_data->edge_attr, "edge", edge_show, edge_store);
+ gpiod_attr_init(&desc_data->active_low_attr, "active_low",
+ active_low_show, active_low_store);
+
+ attrs = desc_data->class_attrs;
+ desc_data->class_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_VALUE] = &desc_data->val_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_EDGE] = &desc_data->edge_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW] = &desc_data->active_low_attr.attr;
+
+ desc_data->class_attr_group.attrs = desc_data->class_attrs;
+ desc_data->class_attr_groups[0] = &desc_data->class_attr_group;
+
+ /*
+ * Note: we need to continue passing desc_data here as there's still
+ * at least one known user of gpiod_export_link() in the tree, and
+ * that function still uses class_find_device() internally.
+ */
+ desc_data->dev = device_create_with_groups(&gpio_class, &gdev->dev,
+ MKDEV(0, 0), desc_data,
+ desc_data->class_attr_groups,
+ "gpio%u",
+ desc_to_gpio(desc));
+ if (IS_ERR(desc_data->dev)) {
+ status = PTR_ERR(desc_data->dev);
goto err_free_data;
}
- mutex_unlock(&sysfs_lock);
+ desc_data->value_kn = sysfs_get_dirent(desc_data->dev->kobj.sd,
+ "value");
+ if (!desc_data->value_kn) {
+ status = -ENODEV;
+ goto err_unregister_device;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data) {
+ status = -ENODEV;
+ goto err_put_dirent;
+ }
+
+ desc_data->chip_attr_group.name = kasprintf(GFP_KERNEL, "gpio%u",
+ gpio_chip_hwgpio(desc));
+ if (!desc_data->chip_attr_group.name) {
+ status = -ENOMEM;
+ goto err_put_dirent;
+ }
+
+ attrs = desc_data->chip_attrs;
+ desc_data->chip_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_VALUE] = &desc_data->val_attr.attr;
+
+ desc_data->chip_attr_group.attrs = attrs;
+ desc_data->chip_attr_groups[0] = &desc_data->chip_attr_group;
+
+ desc_data->parent = &gdev_data->cdev_id->kobj;
+ status = sysfs_create_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
+ if (status)
+ goto err_free_name;
+
+ path = kasprintf(GFP_KERNEL, "gpio%u/value", gpio_chip_hwgpio(desc));
+ if (!path) {
+ status = -ENOMEM;
+ goto err_remove_groups;
+ }
+
+ list_add(&desc_data->list, &gdev_data->exported_lines);
+
return 0;
+err_remove_groups:
+ sysfs_remove_groups(desc_data->parent, desc_data->chip_attr_groups);
+err_free_name:
+ kfree(desc_data->chip_attr_group.name);
+err_put_dirent:
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+err_unregister_device:
+ device_unregister(desc_data->dev);
err_free_data:
- kfree(data);
-err_unlock:
- mutex_unlock(&sysfs_lock);
- clear_bit(FLAG_EXPORT, &desc->flags);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ kfree(desc_data);
+err_clear_bit:
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
EXPORT_SYMBOL_GPL(gpiod_export);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static int match_export(struct device *dev, const void *desc)
{
struct gpiod_data *data = dev_get_drvdata(dev);
- return data->desc == desc;
+ return gpiod_is_equal(data->desc, desc);
}
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
/**
* gpiod_export_link - create a sysfs link to an exported GPIO node
@@ -674,6 +896,7 @@ static int match_export(struct device *dev, const void *desc)
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
{
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct device *cdev;
int ret;
@@ -690,6 +913,9 @@ int gpiod_export_link(struct device *dev, const char *name,
put_device(cdev);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
EXPORT_SYMBOL_GPL(gpiod_export_link);
@@ -701,52 +927,63 @@ EXPORT_SYMBOL_GPL(gpiod_export_link);
*/
void gpiod_unexport(struct gpio_desc *desc)
{
- struct gpiod_data *data;
- struct device *dev;
+ struct gpiod_data *tmp, *desc_data = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpio_device *gdev;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return;
}
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- goto err_unlock;
-
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (!dev)
- goto err_unlock;
-
- data = dev_get_drvdata(dev);
-
- clear_bit(FLAG_EXPORT, &desc->flags);
-
- device_unregister(dev);
+ scoped_guard(mutex, &sysfs_lock) {
+ if (!test_bit(GPIOD_FLAG_EXPORT, &desc->flags))
+ return;
- /*
- * Release irq after deregistration to prevent race with edge_store.
- */
- if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ gdev = gpiod_to_gpio_device(desc);
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data)
+ return;
- mutex_unlock(&sysfs_lock);
+ list_for_each_entry(tmp, &gdev_data->exported_lines, list) {
+ if (gpiod_is_equal(desc, tmp->desc)) {
+ desc_data = tmp;
+ break;
+ }
+ }
- put_device(dev);
- kfree(data);
+ if (!desc_data)
+ return;
- return;
+ list_del(&desc_data->list);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+ device_unregister(desc_data->dev);
+
+ /*
+ * Release irq after deregistration to prevent race with
+ * edge_store.
+ */
+ if (desc_data->irq_flags)
+ gpio_sysfs_free_irq(desc_data);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ sysfs_remove_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
+ }
-err_unlock:
- mutex_unlock(&sysfs_lock);
+ mutex_destroy(&desc_data->mutex);
+ kfree(desc_data);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
int gpiochip_sysfs_register(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_chip *chip;
struct device *parent;
- struct device *dev;
+ int err;
/*
* Many systems add gpio chips for SOC support very early,
@@ -772,33 +1009,61 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
else
parent = &gdev->dev;
- /* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), gdev,
- gpiochip_groups, GPIOCHIP_NAME "%d",
- chip->base);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->gdev = gdev;
+ INIT_LIST_HEAD(&data->exported_lines);
- mutex_lock(&sysfs_lock);
- gdev->mockdev = dev;
- mutex_unlock(&sysfs_lock);
+ guard(mutex)(&sysfs_lock);
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ /* use chip->base for the ID; it's already known to be unique */
+ data->cdev_base = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_groups,
+ GPIOCHIP_NAME "%d",
+ chip->base);
+ if (IS_ERR(data->cdev_base)) {
+ err = PTR_ERR(data->cdev_base);
+ kfree(data);
+ return err;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ data->cdev_id = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_ext_groups,
+ "chip%d", gdev->id);
+ if (IS_ERR(data->cdev_id)) {
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ err = PTR_ERR(data->cdev_id);
+ kfree(data);
+ return err;
+ }
return 0;
}
void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_desc *desc;
struct gpio_chip *chip;
scoped_guard(mutex, &sysfs_lock) {
- if (!gdev->mockdev)
+ data = gdev_get_data(gdev);
+ if (!data)
return;
- device_unregister(gdev->mockdev);
-
- /* prevent further gpiod exports */
- gdev->mockdev = NULL;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ device_unregister(data->cdev_id);
+ kfree(data);
}
guard(srcu)(&gdev->srcu);
@@ -808,7 +1073,7 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
return;
/* unregister gpiod class devices owned by sysfs */
- for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_SYSFS) {
gpiod_unexport(desc);
gpiod_free(desc);
}
@@ -824,9 +1089,6 @@ static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data)
struct gpio_device *gdev = gc->gpiodev;
int ret;
- if (gdev->mockdev)
- return 0;
-
ret = gpiochip_sysfs_register(gdev);
if (ret)
chip_err(gc, "failed to register the sysfs entry: %d\n", ret);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d5952ab7752c..9952e412da50 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -14,6 +14,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
@@ -23,9 +24,9 @@
#include <linux/pinctrl/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -74,6 +75,19 @@ static const struct bus_type gpio_bus_type = {
};
/*
+ * Eventually we want all GPIOs to be dynamically allocated from 0.
+ * However, some legacy drivers still perform fixed allocation.
+ * Until they are all fixed, leave the 0-511 range for them.
+ */
+#define GPIO_DYNAMIC_BASE 512
+/*
+ * Define the maximum possible GPIO number in the global numberspace.
+ * While GPIO bases and numbers are positive, we limit them to the signed
+ * maximum, as a lot of code uses negative values for special cases.
+ */
+#define GPIO_DYNAMIC_MAX INT_MAX
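+/*
+ * For example: a chip registered with gc->base == -1 (dynamic base) is
+ * assigned the first free base at or above GPIO_DYNAMIC_BASE, so legacy
+ * fixed allocations below it are never clobbered.
+ */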
+
+/*
* Number of GPIOs to use for the fast path in set array
*/
#define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT
@@ -113,10 +127,10 @@ const char *gpiod_get_label(struct gpio_desc *desc)
label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
srcu_read_lock_held(&desc->gdev->desc_srcu));
- if (test_bit(FLAG_USED_AS_IRQ, &flags))
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags))
return label ? label->str : "interrupt";
- if (!test_bit(FLAG_REQUESTED, &flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &flags))
return NULL;
return label ? label->str : NULL;
@@ -341,6 +355,71 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
+ */
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer: %pe)\n", func, desc);
+ return PTR_ERR(desc);
+ }
+
+ return 1;
+}
+
+#define VALIDATE_DESC(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return; \
+ } while (0)
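+/*
+ * Usage sketch (gpiod_example() is hypothetical; the consumer API
+ * below uses the same pattern): the macro returns 0 early for an
+ * optional (NULL) descriptor and propagates the error for an
+ * ERR_PTR() before the function body proper runs.
+ *
+ *   int gpiod_example(struct gpio_desc *desc)
+ *   {
+ *           VALIDATE_DESC(desc);
+ *           ...
+ *   }
+ */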
+
+/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
+{
+ return validate_desc(desc, __func__) > 0 &&
+ !IS_ERR_OR_NULL(other) && desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
+static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->get_direction))
+ return -EOPNOTSUPP;
+
+ ret = gc->get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret != GPIO_LINE_DIRECTION_OUT && ret != GPIO_LINE_DIRECTION_IN)
+ ret = -EBADE;
+
+ return ret;
+}
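+/*
+ * Note: like the other gpiochip_*() wrappers introduced below
+ * (direction_input/output, get, set, set_multiple), this normalizes
+ * out-of-spec positive return values from the driver callback to
+ * -EBADE, so callers only ever see a valid result or a negative errno.
+ */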
+
/**
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
@@ -356,11 +435,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
unsigned int offset;
int ret;
- /*
- * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
- * descriptor like we usually do.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
CLASS(gpio_chip_guard, guard)(desc);
@@ -374,14 +450,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
* Open drain emulation using input mode may incorrectly report
* input here, fix that up.
*/
- if (test_bit(FLAG_OPEN_DRAIN, &flags) &&
- test_bit(FLAG_IS_OUT, &flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags) &&
+ test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
if (!guard.gc->get_direction)
return -ENOTSUPP;
- ret = guard.gc->get_direction(guard.gc, offset);
+ ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;
@@ -392,7 +468,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret > 0)
ret = 1;
- assign_bit(FLAG_IS_OUT, &flags, !ret);
+ assign_bit(GPIOD_FLAG_IS_OUT, &flags, !ret);
WRITE_ONCE(desc->flags, flags);
return ret;
@@ -652,7 +728,7 @@ static int gpiochip_apply_reserved_ranges(struct gpio_chip *gc)
if (start >= gc->ngpio || start + count > gc->ngpio)
continue;
- bitmap_clear(gc->valid_mask, start, count);
+ bitmap_clear(gc->gpiodev->valid_mask, start, count);
}
kfree(ranges);
@@ -666,8 +742,8 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (!(gpiochip_count_reserved_ranges(gc) || gc->init_valid_mask))
return 0;
- gc->valid_mask = gpiochip_allocate_mask(gc);
- if (!gc->valid_mask)
+ gc->gpiodev->valid_mask = gpiochip_allocate_mask(gc);
+ if (!gc->gpiodev->valid_mask)
return -ENOMEM;
ret = gpiochip_apply_reserved_ranges(gc);
@@ -676,7 +752,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (gc->init_valid_mask)
return gc->init_valid_mask(gc,
- gc->valid_mask,
+ gc->gpiodev->valid_mask,
gc->ngpio);
return 0;
@@ -684,7 +760,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
static void gpiochip_free_valid_mask(struct gpio_chip *gc)
{
- gpiochip_free_mask(&gc->valid_mask);
+ gpiochip_free_mask(&gc->gpiodev->valid_mask);
}
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
@@ -703,16 +779,77 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
return 0;
}
+/**
+ * gpiochip_query_valid_mask - return the GPIO validity information
+ * @gc: gpio chip whose validity information is queried
+ *
+ * Returns: bitmap representing valid GPIOs or NULL if all GPIOs are valid
+ *
+ * Some GPIO chips may support configurations where some of the pins aren't
+ * available. These chips can have valid_mask set to represent the valid
+ * GPIOs. This function can be used to retrieve this information.
+ */
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc)
+{
+ return gc->gpiodev->valid_mask;
+}
+EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
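+/*
+ * Usage sketch (hypothetical driver snippet): skip masked-off lines
+ * when iterating over a chip; a NULL return means all lines are valid.
+ *
+ *   const unsigned long *valid = gpiochip_query_valid_mask(gc);
+ *   unsigned int i;
+ *
+ *   for (i = 0; i < gc->ngpio; i++) {
+ *           if (valid && !test_bit(i, valid))
+ *                   continue;
+ *           ...
+ *   }
+ */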
+
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
+ /* Hog pins may be requested before the GPIO chip is registered. */
+ if (!gc->gpiodev)
+ return true;
+
/* No mask means all valid */
- if (likely(!gc->valid_mask))
+ if (likely(!gc->gpiodev->valid_mask))
return true;
- return test_bit(offset, gc->valid_mask);
+ return test_bit(offset, gc->gpiodev->valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
+static void gpiod_free_irqs(struct gpio_desc *desc)
+{
+ int irq = gpiod_to_irq(desc);
+ struct irq_desc *irqd = irq_to_desc(irq);
+ void *cookie;
+
+ for (;;) {
+ /*
+ * Make sure the action doesn't go away while we're
+ * dereferencing it. Retrieve and store the cookie value.
+ * If the irq is freed after we release the lock, that's
+ * alright - the underlying maple tree lookup will return NULL
+ * and nothing will happen in free_irq().
+ */
+ scoped_guard(mutex, &irqd->request_mutex) {
+ if (!irq_desc_has_action(irqd))
+ return;
+
+ cookie = irqd->action->dev_id;
+ }
+
+ free_irq(irq, cookie);
+ }
+}
+
+/*
+ * The chip is going away but there may be users who had requested interrupts
+ * on its GPIO lines who have no idea about its removal and have no way of
+ * being notified about it. We need to free any interrupts still in use here or
+ * we'll leak memory and resources (like procfs files).
+ */
+static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
+{
+ struct gpio_desc *desc;
+
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_USED_AS_IRQ)
+ gpiod_free_irqs(desc);
+}
+
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
@@ -799,14 +936,12 @@ static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
if (!strcmp(gc->label, hog->chip_label))
gpiochip_machine_hog(gc, hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
static void gpiochip_setup_devs(void)
@@ -843,13 +978,29 @@ void *gpiochip_get_data(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
+/*
+ * If the calling driver provides the specific firmware node,
+ * use it. Otherwise use the one from the parent device, if any.
+ */
+static struct fwnode_handle *gpiochip_choose_fwnode(struct gpio_chip *gc)
+{
+ if (gc->fwnode)
+ return gc->fwnode;
+
+ if (gc->parent)
+ return dev_fwnode(gc->parent);
+
+ return NULL;
+}
+
int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
{
+ struct fwnode_handle *fwnode = gpiochip_choose_fwnode(gc);
u32 ngpios = gc->ngpio;
int ret;
if (ngpios == 0) {
- ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ ret = fwnode_property_read_u32(fwnode, "ngpios", &ngpios);
if (ret == -ENODATA)
/*
* -ENODATA means that there is no property found and
@@ -865,13 +1016,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
}
if (gc->ngpio == 0) {
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
+ dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
return -EINVAL;
}
if (gc->ngpio > FASTPATH_NGPIO)
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
- gc->ngpio, FASTPATH_NGPIO);
+ dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
return 0;
}
@@ -884,7 +1035,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct gpio_device *gdev;
unsigned int desc_index;
int base = 0;
- int ret = 0;
+ int ret;
/*
* First: allocate and populate the internal stat container, and
@@ -902,20 +1053,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gc->gpiodev = gdev;
gpiochip_set_data(gc, data);
- /*
- * If the calling driver did not initialize firmware node,
- * do it here using the parent device, if any.
- */
- if (gc->fwnode)
- device_set_node(&gdev->dev, gc->fwnode);
- else if (gc->parent)
- device_set_node(&gdev->dev, dev_fwnode(gc->parent));
+ device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
- gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
- if (gdev->id < 0) {
- ret = gdev->id;
+ ret = ida_alloc(&gpio_ida, GFP_KERNEL);
+ if (ret < 0)
goto err_free_gdev;
- }
+ gdev->id = ret;
ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
if (ret)
@@ -986,10 +1129,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- for (desc_index = 0; desc_index < gc->ngpio; desc_index++)
- gdev->descs[desc_index].gdev = gdev;
-
- BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1018,13 +1159,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
- if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- assign_bit(FLAG_IS_OUT,
- &desc->flags, !gc->get_direction(gc, desc_index));
- } else {
- assign_bit(FLAG_IS_OUT,
+ desc->gdev = gdev;
+
+ /*
+ * We would typically want to use gpiochip_get_direction() here
+ * but we must not check the return value and bail out, as pin
+ * controllers can have pins configured to alternate functions
+ * and return -EINVAL. Also: there's no need to take the SRCU
+ * lock here.
+ */
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
+ assign_bit(GPIOD_FLAG_IS_OUT, &desc->flags,
+ !gc->get_direction(gc, desc_index));
+ else
+ assign_bit(GPIOD_FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
- }
}
ret = of_gpiochip_add(gc);
@@ -1125,6 +1274,7 @@ void gpiochip_remove(struct gpio_chip *gc)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
gpiochip_free_hogs(gc);
+ gpiochip_free_remaining_irqs(gc);
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
@@ -1183,11 +1333,6 @@ struct gpio_device *gpio_device_find(const void *data,
struct gpio_device *gdev;
struct gpio_chip *gc;
- /*
- * Not yet but in the future the spinlock below will become a mutex.
- * Annotate this function before anyone tries to use it in interrupt
- * context like it happened with gpiochip_find().
- */
might_sleep();
guard(srcu)(&gpio_devices_srcu);
@@ -1416,9 +1561,8 @@ static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
unsigned int *type)
{
/* We support standard DT translation */
- if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- return irq_domain_translate_twocell(d, fwspec, hwirq, type);
- }
+ if (is_of_node(fwspec->fwnode))
+ return irq_domain_translate_twothreecell(d, fwspec, hwirq, type);
/* This is for board files and others not using DT */
if (is_fwnode_irqchip(fwspec->fwnode)) {
@@ -1720,11 +1864,26 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
irq_set_chip_data(irq, NULL);
}
+static int gpiochip_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct fwnode_handle *fwnode = fwspec->fwnode;
+ struct gpio_chip *gc = d->host_data;
+ unsigned int index = fwspec->param[0];
+
+ if (fwspec->param_count == 3 && is_of_node(fwnode))
+ return of_gpiochip_instance_match(gc, index);
+
+ /* Fallback for two-cell specifiers */
+ return (fwnode && (d->fwnode == fwnode) && (d->bus_token == bus_token));
+}
+
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
.unmap = gpiochip_irq_unmap,
+ .select = gpiochip_irq_select,
/* Virtually all GPIO irqchips are twocell:ed */
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twothreecell,
};
static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
@@ -1744,7 +1903,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
-#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* Avoid race condition with other code, which tries to lookup
* an IRQ before the irqchip has been properly registered,
@@ -1752,7 +1910,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
*/
if (!gc->irq.initialized)
return -EPROBE_DEFER;
-#endif
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -2192,11 +2349,13 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
- * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
+ * gpiochip_add_pin_range_with_pins() - add a range for GPIO <-> pin mapping
* @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
+ * @pins: the list of non-consecutive pins to accumulate in this range (if not
+ * NULL, pin_offset is ignored by the pinctrl core)
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
*
@@ -2208,9 +2367,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Returns:
* 0 on success, or a negative errno on failure.
*/
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins)
{
struct gpio_pin_range *pin_range;
struct gpio_device *gdev = gc->gpiodev;
@@ -2228,6 +2390,7 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
+ pin_range->range.pins = pins;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
&pin_range->range);
@@ -2237,16 +2400,21 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
kfree(pin_range);
return ret;
}
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
- gpio_offset, gpio_offset + npins - 1,
- pinctl_name,
- pin_offset, pin_offset + npins - 1);
+ if (pin_range->range.pins)
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s %d sparse PIN range { %d, ... }",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name, npins, pins[0]);
+ else
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name,
+ pin_offset, pin_offset + npins - 1);
list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
+EXPORT_SYMBOL_GPL(gpiochip_add_pin_range_with_pins);
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
@@ -2281,19 +2449,21 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return -EBUSY;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset))
+ return -EINVAL;
+
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled, for non-sleeping (SOC) GPIOs.
*/
if (guard.gc->request) {
- offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(guard.gc, offset))
- ret = guard.gc->request(guard.gc, offset);
- else
- ret = -EINVAL;
+ ret = guard.gc->request(guard.gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
if (ret)
goto out_clear_bit;
}
@@ -2308,41 +2478,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
return 0;
out_clear_bit:
- clear_bit(FLAG_REQUESTED, &desc->flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &desc->flags);
return ret;
}
-/*
- * This descriptor validation needs to be inserted verbatim into each
- * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication. If the desc is NULL it is an
- * optional GPIO and calls should just bail out.
- */
-static int validate_desc(const struct gpio_desc *desc, const char *func)
-{
- if (!desc)
- return 0;
-
- if (IS_ERR(desc)) {
- pr_warn("%s: invalid GPIO (errorpointer)\n", func);
- return PTR_ERR(desc);
- }
-
- return 1;
-}
-
-#define VALIDATE_DESC(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return __valid; \
- } while (0)
-
-#define VALIDATE_DESC_VOID(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return; \
- } while (0)
-
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
@@ -2373,27 +2512,29 @@ static void gpiod_free_commit(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (guard.gc && test_bit(FLAG_REQUESTED, &flags)) {
+ if (guard.gc && test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
if (guard.gc->free)
guard.gc->free(guard.gc, gpio_chip_hwgpio(desc));
- clear_bit(FLAG_ACTIVE_LOW, &flags);
- clear_bit(FLAG_REQUESTED, &flags);
- clear_bit(FLAG_OPEN_DRAIN, &flags);
- clear_bit(FLAG_OPEN_SOURCE, &flags);
- clear_bit(FLAG_PULL_UP, &flags);
- clear_bit(FLAG_PULL_DOWN, &flags);
- clear_bit(FLAG_BIAS_DISABLE, &flags);
- clear_bit(FLAG_EDGE_RISING, &flags);
- clear_bit(FLAG_EDGE_FALLING, &flags);
- clear_bit(FLAG_IS_HOGGED, &flags);
+ clear_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_DRAIN, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_SOURCE, &flags);
+ clear_bit(GPIOD_FLAG_PULL_UP, &flags);
+ clear_bit(GPIOD_FLAG_PULL_DOWN, &flags);
+ clear_bit(GPIOD_FLAG_BIAS_DISABLE, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &flags);
#ifdef CONFIG_OF_DYNAMIC
WRITE_ONCE(desc->hog, NULL);
#endif
desc_set_label(desc, NULL);
WRITE_ONCE(desc->flags, flags);
-
- gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED);
+#ifdef CONFIG_GPIO_CDEV
+ WRITE_ONCE(desc->debounce_period_us, 0);
+#endif
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_RELEASED);
}
}
@@ -2427,7 +2568,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return NULL;
- if (!test_bit(FLAG_REQUESTED, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return NULL;
guard(srcu)(&desc->gdev->desc_srcu);
@@ -2492,6 +2633,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
return ERR_PTR(ret);
}
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+
return desc;
}
EXPORT_SYMBOL_GPL(gpiochip_request_own_desc);
@@ -2520,13 +2663,31 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
- unsigned long config)
+int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
{
- if (!gc->set_config)
+ int ret;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (!guard.gc->set_config)
return -ENOTSUPP;
- return gc->set_config(gc, offset, config);
+ ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ if (ret > 0)
+ ret = -EBADE;
+
+#ifdef CONFIG_GPIO_CDEV
+ /*
+ * Special case - if we're setting debounce period, we need to store
+ * it in the descriptor in case user-space wants to know it.
+ */
+ if (!ret && pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE)
+ WRITE_ONCE(desc->debounce_period_us,
+ pinconf_to_config_argument(config));
+#endif
+ return ret;
}
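+/*
+ * Example (a sketch; the debounce argument of PIN_CONFIG_INPUT_DEBOUNCE
+ * is in microseconds, so 5000 means 5 ms): requesting debounce through
+ * this helper packs the parameter and argument into one config word:
+ *
+ *   config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);
+ *   ret = gpio_do_set_config(desc, config);
+ */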
static int gpio_set_config_with_argument(struct gpio_desc *desc,
@@ -2535,12 +2696,8 @@ static int gpio_set_config_with_argument(struct gpio_desc *desc,
{
unsigned long config;
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
-
config = pinconf_to_config_packed(mode, argument);
- return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(desc, config);
}
static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
@@ -2579,11 +2736,11 @@ static int gpio_set_bias(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_BIAS_DISABLE, &flags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &flags))
bias = PIN_CONFIG_BIAS_DISABLE;
- else if (test_bit(FLAG_PULL_UP, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_UP, &flags))
bias = PIN_CONFIG_BIAS_PULL_UP;
- else if (test_bit(FLAG_PULL_DOWN, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_DOWN, &flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
else
return 0;
@@ -2615,9 +2772,48 @@ static int gpio_set_bias(struct gpio_desc *desc)
*/
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
- return gpio_set_config_with_argument_optional(desc,
- PIN_CONFIG_INPUT_DEBOUNCE,
- debounce);
+ int ret;
+
+ ret = gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce);
+ if (!ret)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
+}
+
+static int gpiochip_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_input))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_input(gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
+
+static int gpiochip_direction_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_output))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_output(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
}
/**
@@ -2632,10 +2828,22 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
+ ret = gpiod_direction_input_nonotify(desc);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_direction_input);
+
+int gpiod_direction_input_nonotify(struct gpio_desc *desc)
+{
+ int ret = 0, dir;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
return -ENODEV;
@@ -2659,18 +2867,22 @@ int gpiod_direction_input(struct gpio_desc *desc)
* assume we are in input mode after this.
*/
if (guard.gc->direction_input) {
- ret = guard.gc->direction_input(guard.gc,
- gpio_chip_hwgpio(desc));
- } else if (guard.gc->get_direction &&
- (guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc)) != 1)) {
- gpiod_warn(desc,
- "%s: missing direction_input() operation and line is output\n",
- __func__);
- return -EIO;
+ ret = gpiochip_direction_input(guard.gc,
+ gpio_chip_hwgpio(desc));
+ } else if (guard.gc->get_direction) {
+ dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_IN) {
+ gpiod_warn(desc,
+ "%s: missing direction_input() operation and line is output\n",
+ __func__);
+ return -EIO;
+ }
}
if (ret == 0) {
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
ret = gpio_set_bias(desc);
}
@@ -2678,11 +2890,26 @@ int gpiod_direction_input(struct gpio_desc *desc)
return ret;
}
-EXPORT_SYMBOL_GPL(gpiod_direction_input);
+
+static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(unlikely(!gc->set)))
+ return -EOPNOTSUPP;
+
+ ret = gc->set(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
- int val = !!value, ret = 0;
+ int val = !!value, ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2701,26 +2928,34 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
}
if (guard.gc->direction_output) {
- ret = guard.gc->direction_output(guard.gc,
- gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_direction_output(guard.gc,
+ gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
- if (guard.gc->get_direction &&
- guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) {
- gpiod_warn(desc,
- "%s: missing direction_output() operation\n",
- __func__);
- return -EIO;
+ if (guard.gc->get_direction) {
+ dir = gpiochip_get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_OUT) {
+ gpiod_warn(desc,
+ "%s: missing direction_output() operation\n",
+ __func__);
+ return -EIO;
+ }
}
/*
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), val);
+ if (ret)
+ return ret;
}
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
@@ -2740,8 +2975,15 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
*/
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
+ int ret;
+
VALIDATE_DESC(desc);
- return gpiod_direction_output_raw_commit(desc, value);
+
+ ret = gpiod_direction_output_raw_commit(desc, value);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -2760,46 +3002,54 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- unsigned long flags;
int ret;
VALIDATE_DESC(desc);
+ ret = gpiod_direction_output_nonotify(desc, value);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_direction_output);
+
+int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
+{
+ unsigned long flags;
+ int ret;
+
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_ACTIVE_LOW, &flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags))
value = !value;
else
value = !!value;
/* GPIOs used for enabled IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
- test_bit(FLAG_IRQ_IS_ENABLED, &flags)) {
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags) &&
+ test_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
return -EIO;
}
- if (test_bit(FLAG_OPEN_DRAIN, &flags)) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
- if (value) {
- ret = gpiod_direction_input(desc);
+ if (value)
goto set_output_flag;
- }
- } else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
- if (!value) {
- ret = gpiod_direction_input(desc);
+ if (!value)
goto set_output_flag;
- }
} else {
gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL);
}
@@ -2811,18 +3061,20 @@ set_output_value:
return gpiod_direction_output_raw_commit(desc, value);
set_output_flag:
+ ret = gpiod_direction_input_nonotify(desc);
+ if (ret)
+ return ret;
/*
* When emulating open-source or open-drain functionalities by not
* actively driving the line (setting mode to input) we still need to
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- if (ret == 0)
- set_bit(FLAG_IS_OUT, &desc->flags);
- return ret;
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
+ return 0;
}
-EXPORT_SYMBOL_GPL(gpiod_direction_output);
+#if IS_ENABLED(CONFIG_HTE)
/**
* gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
*
@@ -2834,7 +3086,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
*/
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -2867,7 +3119,7 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
*/
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -2888,6 +3140,7 @@ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+#endif /* CONFIG_HTE */
/**
* gpiod_set_config - sets @config for a GPIO
@@ -2900,13 +3153,30 @@ EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
*/
int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
{
+ int ret;
+
VALIDATE_DESC(desc);
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
+ ret = gpio_do_set_config(desc, config);
+ if (!ret) {
+ /* These are the only options we notify the userspace about. */
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ gpiod_line_state_notify(desc,
+ GPIO_V2_LINE_CHANGED_CONFIG);
+ break;
+ default:
+ break;
+ }
+ }
- return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_set_config);
@@ -2940,10 +3210,10 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
VALIDATE_DESC(desc);
/*
- * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * Handle GPIOD_FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
+ assign_bit(GPIOD_FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
return gpio_set_config_with_argument_optional(desc,
@@ -2961,7 +3231,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
int gpiod_is_active_low(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ return test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -2972,13 +3242,28 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
void gpiod_toggle_active_low(struct gpio_desc *desc)
{
VALIDATE_DESC_VOID(desc);
- change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ change_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+static int gpiochip_get(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ /* Make sure this is called after checking for gc->get(). */
+ ret = gc->get(gc, offset);
+ if (ret > 1)
+ ret = -EBADE;
+
+ return ret;
+}
+
static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc)
{
- return gc->get ? gc->get(gc, gpio_chip_hwgpio(desc)) : -EIO;
+ return gc->get ? gpiochip_get(gc, gpio_chip_hwgpio(desc)) : -EIO;
}
/* I/O calls are only valid after configuration completed; the relevant
@@ -3027,13 +3312,22 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- if (gc->get_multiple)
- return gc->get_multiple(gc, mask, bits);
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (gc->get_multiple) {
+ int ret;
+
+ ret = gc->get_multiple(gc, mask, bits);
+ if (ret > 0)
+ return -EBADE;
+ return ret;
+ }
+
if (gc->get) {
int i, value;
for_each_set_bit(i, mask, gc->ngpio) {
- value = gc->get(gc, i);
+ value = gpiochip_get(gc, i);
if (value < 0)
return value;
__assign_bit(i, bits, value);
@@ -3057,6 +3351,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int ret, i = 0;
/*
@@ -3068,10 +3363,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
- ret = gpio_chip_get_multiple(array_info->chip,
- array_info->get_mask,
+ ret = gpio_chip_get_multiple(gc, array_info->get_mask,
value_bitmap);
if (ret)
return ret;
@@ -3148,7 +3448,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(hwgpio, bits);
- if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (!raw && test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
__assign_bit(j, value_bitmap, value);
trace_gpio_value(desc_to_gpio(desc), 1, value);
@@ -3210,7 +3510,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -3280,26 +3580,28 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
} else {
- ret = guard.gc->direction_output(guard.gc, offset, 0);
+ ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), value, ret);
if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open drain err %d\n",
__func__, ret);
+
+ return ret;
}
/*
@@ -3307,36 +3609,41 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_output(guard.gc, offset, 1);
+ ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
} else {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open source err %d\n",
__func__, ret);
+
+ return ret;
}
-static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
+static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), value);
+ return gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), value);
}
/*
@@ -3348,19 +3655,33 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
* defines which outputs are to be changed
* @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
* defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, negative error number on failure.
*/
-static void gpio_chip_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpiochip_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
+ unsigned int i;
+ int ret = 0;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->set_multiple) {
- gc->set_multiple(gc, mask, bits);
- } else {
- unsigned int i;
+ ret = gc->set_multiple(gc, mask, bits);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+ }
- /* set outputs if the corresponding mask bit is set */
- for_each_set_bit(i, mask, gc->ngpio)
- gc->set(gc, i, test_bit(i, bits));
+ /* set outputs if the corresponding mask bit is set */
+ for_each_set_bit(i, mask, gc->ngpio) {
+ ret = gpiochip_set(gc, i, test_bit(i, bits));
+ if (ret)
+ break;
}
+
+ return ret;
}
int gpiod_set_array_value_complex(bool raw, bool can_sleep,
@@ -3369,7 +3690,8 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- int i = 0;
+ struct gpio_chip *gc;
+ int i = 0, ret;
/*
* Validate array_info against desc_array and its size.
@@ -3380,14 +3702,28 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ for (i = 0; i < array_size; i++) {
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT,
+ &desc_array[i]->flags)))
+ return -EPERM;
+ }
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
- value_bitmap);
+ ret = gpiochip_set_multiple(gc, array_info->set_mask,
+ value_bitmap);
+ if (ret)
+ return ret;
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -3433,6 +3769,9 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(i, value_bitmap);
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
/*
* Pins applicable for fast input but not for
* fast output processing may have been already
@@ -3440,16 +3779,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*/
if (!raw && !(array_info &&
test_bit(i, array_info->invert_mask)) &&
- test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
trace_gpio_value(desc_to_gpio(desc), 0, value);
/*
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
gpio_set_open_drain_value_commit(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
@@ -3464,8 +3803,11 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
} while ((i < array_size) &&
gpio_device_chip_cmp(desc_array[i]->gdev, guard.gc));
/* push collected bits to outputs */
- if (count != 0)
- gpio_chip_set_multiple(guard.gc, mask, bits);
+ if (count != 0) {
+ ret = gpiochip_set_multiple(guard.gc, mask, bits);
+ if (ret)
+ return ret;
+ }
if (mask != fastpath_mask)
bitmap_free(mask);
@@ -3485,13 +3827,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_raw_value_commit(desc, value);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -3503,17 +3848,21 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
* This sets the value of a GPIO line backing a descriptor, applying
* different semantic quirks like active low and open drain/source
* handling.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags))
+ return gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags))
+ return gpio_set_open_source_value_commit(desc, value);
+
+ return gpiod_set_raw_value_commit(desc, value);
}
/**
@@ -3526,13 +3875,16 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value(struct gpio_desc *desc, int value)
+int gpiod_set_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_value_nocheck(desc, value);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -3617,9 +3969,15 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
*/
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
+ int ret;
+
VALIDATE_DESC(desc);
- return desc_set_label(desc, name);
+ ret = desc_set_label(desc, name);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
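/*
 * Usage sketch (assumed consumer code): renaming a requested line now also
 * emits a GPIO_V2_LINE_CHANGED_CONFIG event, so user-space watchers pick up
 * the new label:
 *
 *	ret = gpiod_set_consumer_name(desc, "lid-switch");
 *	if (ret)
 *		return ret;
 */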
@@ -3635,13 +3993,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_device *gdev;
struct gpio_chip *gc;
int offset;
+ int ret;
- /*
- * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
- * requires this function to not return zero on an invalid descriptor
- * but rather a negative error number.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
gdev = desc->gdev;
@@ -3653,13 +4008,12 @@ int gpiod_to_irq(const struct gpio_desc *desc)
offset = gpio_chip_hwgpio(desc);
if (gc->to_irq) {
- int retirq = gc->to_irq(gc, offset);
+ ret = gc->to_irq(gc, offset);
+ if (ret)
+ return ret;
/* Zero means NO_IRQ */
- if (!retirq)
- return -ENXIO;
-
- return retirq;
+ return -ENXIO;
}
#ifdef CONFIG_GPIOLIB_IRQCHIP
if (gc->irq.chip) {
@@ -3709,16 +4063,16 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
}
/* To be valid for IRQ the line needs to be input or open drain */
- if (test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ if (test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags)) {
chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &desc->flags);
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ set_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
return 0;
}
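/*
 * Consumer-side sketch (handler and cookie names assumed): mapping a line
 * to an interrupt and requesting it; gpiod_to_irq() never returns 0, so a
 * non-negative result is always a valid Linux IRQ number:
 *
 *	int irq = gpiod_to_irq(desc);
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = devm_request_irq(dev, irq, my_handler, IRQF_TRIGGER_FALLING,
 *			       dev_name(dev), priv);
 */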
@@ -3740,8 +4094,8 @@ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ clear_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -3750,8 +4104,8 @@ void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags)))
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
@@ -3760,14 +4114,14 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags))) {
/*
* We must not be output when using IRQ UNLESS we are
* open drain.
*/
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ WARN_ON(test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags));
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
@@ -3777,7 +4131,7 @@ bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
@@ -3810,7 +4164,7 @@ bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
@@ -3819,7 +4173,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
@@ -3828,7 +4182,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
+ return !test_bit(GPIOD_FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -3870,7 +4224,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -3944,12 +4298,15 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
* regard for its ACTIVE_LOW status.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_raw_value_commit(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -3962,12 +4319,15 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
* account
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_value_nocheck(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -4008,12 +4368,10 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
{
unsigned int i;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
for (i = 0; i < n; i++)
list_add_tail(&tables[i]->list, &gpio_lookup_list);
-
- mutex_unlock(&gpio_lookup_lock);
}
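/*
 * Board-file sketch (device and chip names assumed) of the tables this
 * helper registers; each table is matched against consumers by .dev_id:
 *
 *	static struct gpiod_lookup_table foo_gpios_table = {
 *		.dev_id = "foo.0",
 *		.table = {
 *			GPIO_LOOKUP("gpiochip0", 12, "reset",
 *				    GPIO_ACTIVE_LOW),
 *			{ }
 *		},
 *	};
 *
 *	gpiod_add_lookup_table(&foo_gpios_table);
 */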
/**
@@ -4047,8 +4405,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- blocking_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ guard(read_lock_irqsave)(&desc->gdev->line_state_lock);
+
+ raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc);
}
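/*
 * Note: switching from a blocking to a raw notifier chain (serialized by
 * the new line_state_lock) means this may now be called from atomic
 * context; handlers must not sleep and should defer heavier work, e.g. to
 * the line_state_wq workqueue added to struct gpio_device in this series.
 */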
/**
@@ -4071,11 +4430,9 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
if (!table)
return;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
list_del(&table->list);
-
- mutex_unlock(&gpio_lookup_lock);
}
EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
@@ -4087,7 +4444,7 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
for (hog = &hogs[0]; hog->chip_label; hog++) {
list_add_tail(&hog->list, &gpio_machine_hogs);
@@ -4101,8 +4458,6 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
if (gdev)
gpiochip_machine_hog(gpio_device_get_chip(gdev), hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_add_hogs);
@@ -4110,10 +4465,10 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
+
for (hog = &hogs[0]; hog->chip_label; hog++)
list_del(&hog->list);
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
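/*
 * Machine-code sketch (labels assumed) of a hog table handled by the two
 * helpers above; the array must be terminated by an empty entry, as the
 * loops stop at the first NULL chip_label:
 *
 *	static struct gpiod_hog board_hogs[] = {
 *		GPIO_HOG("gpiochip0", 3, "power-enable",
 *			 GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH),
 *		{ }
 *	};
 *
 *	gpiod_add_hogs(board_hogs);
 */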
@@ -4260,6 +4615,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
+static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
+ struct device *consumer,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *flags,
+ unsigned long *lookupflags)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
+ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
+ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
+ idx, flags, lookupflags);
+
+ return desc;
+}
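+/*
+ * Note: falling back to fwnode->secondary lets the lookup also cover a
+ * software node attached alongside a device's primary DT/ACPI node, which
+ * is a common way for platform quirks to supply extra GPIO properties.
+ */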
+
struct gpio_desc *gpiod_find_and_request(struct device *consumer,
struct fwnode_handle *fwnode,
const char *con_id,
@@ -4278,8 +4650,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
int ret = 0;
scoped_guard(srcu, &gpio_devices_srcu) {
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
+ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup
@@ -4325,7 +4697,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
return ERR_PTR(ret);
}
- gpiod_line_state_notify(desc, GPIOLINE_CHANGED_REQUESTED);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
return desc;
}
@@ -4451,10 +4823,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
int ret;
if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ set_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
/*
* This enforces open drain mode from the consumer side.
@@ -4462,13 +4834,13 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* should *REALLY* have specified them as open drain in the
* first place, so print a little warning here.
*/
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
gpiod_warn(desc,
"enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
}
if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags);
if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
@@ -4479,11 +4851,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
}
if (lflags & GPIO_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_DOWN, &desc->flags);
else if (lflags & GPIO_PULL_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ set_bit(GPIOD_FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
@@ -4497,10 +4869,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* Process flags */
if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
- ret = gpiod_direction_output(desc,
+ ret = gpiod_direction_output_nonotify(desc,
!!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
else
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
return ret;
}
@@ -4588,7 +4960,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_IS_HOGGED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags))
return 0;
hwnum = gpio_chip_hwgpio(desc);
@@ -4596,17 +4968,17 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
local_desc = gpiochip_request_own_desc(guard.gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
- clear_bit(FLAG_IS_HOGGED, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags);
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, gdev->label, hwnum, ret);
return ret;
}
- gpiod_dbg(desc, "hogged as %s%s\n",
+ gpiod_dbg(desc, "hogged as %s/%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
- (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
+ str_high_low(dflags & GPIOD_FLAGS_BIT_DIR_VAL) : "?");
return 0;
}
@@ -4619,7 +4991,7 @@ static void gpiochip_free_hogs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_IS_HOGGED)
gpiochip_free_own_desc(desc);
}
@@ -4643,9 +5015,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
{
struct gpio_desc *desc;
struct gpio_descs *descs;
+ struct gpio_device *gdev;
struct gpio_array *array_info = NULL;
- struct gpio_chip *gc;
int count, bitmap_size;
+ unsigned long dflags;
size_t descs_size;
count = gpiod_count(dev, con_id);
@@ -4666,7 +5039,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- gc = gpiod_to_chip(desc);
+ gdev = gpiod_to_gpio_device(desc);
/*
* If the hardware pin number of array member 0 is also 0, select
* its chip as a candidate for the fast bitmap processing path.
@@ -4674,8 +5047,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
- gc->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
+ gdev->ngpio : count);
array = krealloc(descs, descs_size +
struct_size(array_info, invert_mask, 3 * bitmap_size),
@@ -4695,7 +5068,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = gc;
+ array_info->gdev = gdev;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -4708,7 +5081,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
continue;
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info->chip != gc) {
+ if (array_info->gdev != gdev) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -4731,9 +5104,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->set_mask);
}
} else {
+ dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
- gpiochip_line_is_open_source(gc, descs->ndescs))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require inversion */
@@ -4745,7 +5119,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (array_info)
dev_dbg(dev,
"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
- array_info->chip->label, array_info->size,
+ array_info->gdev->label, array_info->size,
*array_info->get_mask, *array_info->set_mask,
*array_info->invert_mask);
return descs;
@@ -4790,8 +5164,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- if (desc)
- gpiod_free(desc);
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
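/*
 * Lifecycle sketch (assumed consumer code) for the plain get/put pair;
 * most drivers use the devm_ variants and never call gpiod_put() directly:
 *
 *	desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	...
 *	gpiod_put(desc);
 */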
@@ -4875,9 +5248,10 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
bool active_low, is_irq, is_out;
- unsigned int gpio = gdev->base;
struct gpio_desc *desc;
+ unsigned int gpio = 0;
struct gpio_chip *gc;
+ unsigned long flags;
int value;
guard(srcu)(&gdev->srcu);
@@ -4890,16 +5264,17 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &desc->flags)) {
+ flags = READ_ONCE(desc->flags);
+ is_irq = test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &desc->flags);
+ is_out = test_bit(GPIOD_FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ active_low = test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
- value >= 0 ? (value ? "hi" : "lo") : "? ",
+ value >= 0 ? str_hi_lo(value) : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
} else if (desc->name) {
@@ -4926,6 +5301,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
s->private = priv;
+ if (*pos > 0)
+ priv->newline = true;
priv->idx = srcu_read_lock(&gpio_devices_srcu);
list_for_each_entry_srcu(gdev, &gpio_devices, list,
@@ -4965,19 +5342,18 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
struct gpio_chip *gc;
struct device *parent;
+ if (priv->newline)
+ seq_putc(s, '\n');
+
guard(srcu)(&gdev->srcu);
gc = srcu_dereference(gdev->chip, &gdev->srcu);
if (!gc) {
- seq_printf(s, "%s%s: (dangling chip)",
- priv->newline ? "\n" : "",
- dev_name(&gdev->dev));
+ seq_printf(s, "%s: (dangling chip)\n", dev_name(&gdev->dev));
return 0;
}
- seq_printf(s, "%s%s: GPIOs %u-%u", priv->newline ? "\n" : "",
- dev_name(&gdev->dev),
- gdev->base, gdev->base + gdev->ngpio - 1);
+ seq_printf(s, "%s: %u GPIOs", dev_name(&gdev->dev), gdev->ngpio);
parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 067197d61d57..2a003a7311e7 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,7 +16,9 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/workqueue.h>
#define GPIOCHIP_NAME "gpiochip"
@@ -25,12 +27,12 @@
* @dev: the GPIO device struct
* @chrdev: character device for the GPIO device
* @id: numerical ID number for the GPIO chip
- * @mockdev: class device used by the deprecated sysfs interface (may be
- * NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
* @descs: array of ngpio descriptors.
+ * @valid_mask: If not %NULL, holds bitmask of GPIOs which are valid to be
+ * used from the chip.
* @desc_srcu: ensures consistent state of GPIO descriptors exposed to users
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
@@ -44,6 +46,9 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_lock: RW-spinlock protecting the line state notifier
+ * @line_state_wq: used to emit line state events from a separate thread in
+ * process context
* @device_notifier: used to notify character device wait queues about the GPIO
* device being unregistered
* @srcu: protects the pointer to the underlying GPIO chip
@@ -58,10 +63,10 @@ struct gpio_device {
struct device dev;
struct cdev chrdev;
int id;
- struct device *mockdev;
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
+ unsigned long *valid_mask;
struct srcu_struct desc_srcu;
unsigned int base;
u16 ngpio;
@@ -69,7 +74,9 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct blocking_notifier_head line_state_notifier;
+ struct raw_notifier_head line_state_notifier;
+ rwlock_t line_state_lock;
+ struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
@@ -110,7 +117,7 @@ extern const char *const gpio_suffixes[];
*
* @desc: Array of pointers to the GPIO descriptors
* @size: Number of elements in desc
- * @chip: Parent GPIO chip
+ * @gdev: Parent GPIO device
* @get_mask: Get mask used in fastpath
* @set_mask: Set mask used in fastpath
* @invert_mask: Invert mask used in fastpath
@@ -122,7 +129,7 @@ extern const char *const gpio_suffixes[];
struct gpio_array {
struct gpio_desc **desc;
unsigned int size;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long *get_mask;
unsigned long *set_mask;
unsigned long invert_mask[];
@@ -151,6 +158,8 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
+int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value);
+int gpiod_direction_input_nonotify(struct gpio_desc *desc);
struct gpio_desc_label {
struct rcu_head rh;
@@ -165,6 +174,7 @@ struct gpio_desc_label {
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
+ * @debounce_period_us: Debounce period in microseconds
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -176,24 +186,24 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_REQUESTED 0 /* GPIO is in use */
+#define GPIOD_FLAG_IS_OUT 1 /* GPIO is in output mode */
+#define GPIOD_FLAG_EXPORT 2 /* GPIO is exported to user-space */
+#define GPIOD_FLAG_SYSFS 3 /* GPIO is exported via /sys/class/gpio */
+#define GPIOD_FLAG_ACTIVE_LOW 6 /* GPIO is active-low */
+#define GPIOD_FLAG_OPEN_DRAIN 7 /* GPIO is open drain type */
+#define GPIOD_FLAG_OPEN_SOURCE 8 /* GPIO is open source type */
+#define GPIOD_FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define GPIOD_FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define GPIOD_FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define GPIOD_FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define GPIOD_FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define GPIOD_FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define GPIOD_FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define GPIOD_FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define GPIOD_FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define GPIOD_FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define GPIOD_FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
struct gpio_desc_label __rcu *label;
@@ -202,6 +212,10 @@ struct gpio_desc {
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
+#ifdef CONFIG_GPIO_CDEV
+ /* debounce period in microseconds */
+ unsigned int debounce_period_us;
+#endif
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
@@ -249,6 +263,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
const char *label,
bool platform_lookup_allowed);
+int gpio_do_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 8997f0096545..36a54d456630 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-y += host1x/ drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
+obj-$(CONFIG_NOVA_CORE) += nova-core/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1cb5a4f19293..7e6bc0b3a589 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,9 +9,6 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
- select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
- select FB_CORE if DRM_FBDEV_EMULATION
- select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select HDMI
select I2C
select DMA_SHARED_BUFFER
@@ -29,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -40,75 +42,22 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
+ select FB_CORE if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
+config DRM_DRAW
+ bool
+ depends on DRM
+
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
select FONT_SUPPORT
+ select DRM_DRAW
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs. It's useful when using a user-space
@@ -152,6 +101,7 @@ config DRM_PANIC_SCREEN
config DRM_PANIC_SCREEN_QR_CODE
bool "Add a panic screen with a QR code"
depends on DRM_PANIC && RUST
+ select ZLIB_DEFLATE
help
This option adds a QR code generator, and a panic screen with a QR
code. The QR code will contain the last lines of kmsg and other debug
@@ -184,7 +134,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
@@ -210,46 +160,16 @@ config DRM_DEBUG_MODESET_LOCK
If in doubt, say "N".
-config DRM_FBDEV_EMULATION
- bool "Enable legacy fbdev support for your modesetting driver"
+config DRM_CLIENT
+ bool
depends on DRM
- select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default FB
help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provides the linux console
- support on top of your modesetting driver.
+ Enables support for DRM clients. DRM drivers that need
+ struct drm_client_dev and its interfaces should select this
+ option. Drivers that support the default clients should
+ select DRM_CLIENT_SELECTION instead.
- If in doubt, say "Y".
-
-config DRM_FBDEV_OVERALLOC
- int "Overallocation of the fbdev buffer"
- depends on DRM_FBDEV_EMULATION
- default 100
- help
- Defines the fbdev buffer overallocation in percent. Default
- is 100. Typical values for double buffering will be 200,
- triple buffering 300.
-
-config DRM_FBDEV_LEAK_PHYS_SMEM
- bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
- depends on DRM_FBDEV_EMULATION && EXPERT
- default n
- help
- In order to keep user-space compatibility, we want in certain
- use-cases to keep leaking the fbdev physical address to the
- user-space program handling the fbdev buffer.
- This affects, not only, Amlogic, Allwinner or Rockchip devices
- with ARM Mali GPUs using an userspace Blob.
- This option is not supported by upstream developers and should be
- removed as soon as possible and be considered as a broken and
- legacy behaviour from a modern fbdev device driver.
-
- Please send any bug reports when using this to your proprietary
- software vendor that requires this.
-
- If in doubt, say "N" or spread the word to your closed source
- library vendor.
+source "drivers/gpu/drm/clients/Kconfig"
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -268,28 +188,12 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -304,6 +208,15 @@ config DRM_GPUVM
GPU-VM representation providing helpers to manage a GPU's virtual
address space
+config DRM_GPUSVM
+ tristate
+ depends on DRM && DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ GPU-SVM representation providing helpers to manage a GPU's shared
+ virtual memory
+
config DRM_BUDDY
tristate
depends on DRM
@@ -320,19 +233,27 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Helpers for ttm-based gem objects
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM shmem helper functions
@@ -344,7 +265,7 @@ config DRM_SCHED
tristate
depends on DRM
-source "drivers/gpu/drm/i2c/Kconfig"
+source "drivers/gpu/drm/sysfb/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -354,6 +275,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -459,19 +382,26 @@ source "drivers/gpu/drm/mcde/Kconfig"
source "drivers/gpu/drm/tidss/Kconfig"
+source "drivers/gpu/drm/adp/Kconfig"
+
source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV_VMBUS
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -482,8 +412,9 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
+# Separate option as not all DRM drivers use it
+config DRM_PANEL_BACKLIGHT_QUIRKS
+ tristate
config DRM_LIB_RANDOM
bool
@@ -493,20 +424,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..05dc43c0b8c5
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,117 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure that the DRM subsystem headers, both under drivers/gpu/drm and
+ include/drm, compile, are self-contained, have header guards, and
+ produce no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 784229d4504d..4b2f7d794275 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,15 +34,12 @@ endif
subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
drm-y := \
- drm_aperture.o \
drm_atomic.o \
drm_atomic_uapi.o \
drm_auth.o \
drm_blend.o \
drm_bridge.o \
drm_cache.o \
- drm_client.o \
- drm_client_modeset.o \
drm_color_mgmt.o \
drm_connector.o \
drm_crtc.o \
@@ -68,6 +65,7 @@ drm-y := \
drm_prime.o \
drm_print.o \
drm_property.o \
+ drm_rect.o \
drm_syncobj.o \
drm_sysfs.o \
drm_trace_points.o \
@@ -75,6 +73,10 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
+drm-$(CONFIG_DRM_CLIENT) += \
+ drm_client.o \
+ drm_client_event.o \
+ drm_client_modeset.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -89,10 +91,12 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_DRAW) += drm_draw.o
drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
+obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
# Memory-management helpers
@@ -101,6 +105,11 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
+drm_gpusvm_helper-y := \
+ drm_gpusvm.o\
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
+
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
drm_dma_helper-y := drm_gem_dma_helper.o
@@ -129,9 +138,9 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
- drm_encoder_slave.o \
drm_flip_work.o \
drm_format_helper.o \
drm_gem_atomic_helper.o \
@@ -140,7 +149,6 @@ drm_kms_helper-y := \
drm_modeset_helper.o \
drm_plane_helper.o \
drm_probe_helper.o \
- drm_rect.o \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
@@ -156,6 +164,7 @@ obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-y += arm/
+obj-y += clients/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
@@ -172,6 +181,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -194,28 +204,49 @@ obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_LOGICVC) += logicvc/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
-obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_ADP) += adp/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
obj-$(CONFIG_DRM_POWERVR) += imagination/
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \
+ $(shell cd $(src) && find display lib -name '*.h')
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig
new file mode 100644
index 000000000000..9fcc27eb200d
--- /dev/null
+++ b/drivers/gpu/drm/adp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_ADP
+ tristate "DRM Support for pre-DCP Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_PANEL_BRIDGE
+ select VIDEOMODE_HELPERS
+ select DRM_MIPI_DSI
+ help
+ Choose this option if you have an Apple Arm laptop with a Touch Bar.
+
+ If M is selected, this module will be called adpdrm.
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile
new file mode 100644
index 000000000000..8e7b618edd35
--- /dev/null
+++ b/drivers/gpu/drm/adp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+adpdrm-y := adp_drv.o
+adpdrm-mipi-y := adp-mipi.o
+obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
new file mode 100644
index 000000000000..cba7d32150a9
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define DSI_GEN_HDR 0x6c
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct adp_mipi_drv_private {
+ struct mipi_dsi_host dsi;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ void __iomem *mipi;
+};
+
+#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi)
+
+static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ writel(hdr_val, adp->mipi + DSI_GEN_HDR);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
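+/*
+ * Note on the pattern above: readl_poll_timeout(addr, val, cond, sleep_us,
+ * timeout_us) keeps re-reading the register into @val until @cond becomes
+ * true or @timeout_us elapses, and returns 0 or -ETIMEDOUT; both waits in
+ * this function simply propagate that return value.
+ */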
+
+static int adp_dsi_write(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_packet *packet)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+ __le32 word;
+ u32 val;
+
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word));
+}
+
+static int adp_dsi_read(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_msg *msg)
+{
+ int i, j, ret, len = msg->rx_len;
+ u8 *buf = msg->rx_buf;
+ u32 val;
+
+ /* Wait end of the read operation */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_RD_CMD_BUSY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Timeout during read operation\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ /* Read fifo must not be empty before all bytes are read */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_R_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Read payload FIFO is empty\n");
+ return ret;
+ }
+
+ val = readl(adp->mipi + DSI_GEN_PLD_DATA);
+ for (j = 0; j < 4 && j + i < len; j++)
+ buf[i + j] = val >> (8 * j);
+ }
+
+ return ret;
+}
+
+static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = adp_dsi_write(adp, &packet);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = adp_dsi_read(adp, msg);
+ if (ret)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static int adp_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void adp_dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops adp_dsi_component_ops = {
+ .bind = adp_dsi_bind,
+ .unbind = adp_dsi_unbind,
+};
+
+static int adp_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct drm_bridge *next;
+ int ret;
+
+ next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
+ adp->next_bridge = next;
+
+ drm_bridge_add(&adp->bridge);
+
+ ret = component_add(host->dev, &adp_dsi_component_ops);
+ if (ret) {
+ pr_err("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&adp->bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+
+ component_del(host->dev, &adp_dsi_component_ops);
+ drm_bridge_remove(&adp->bridge);
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
+ .transfer = adp_dsi_host_transfer,
+ .attach = adp_dsi_host_attach,
+ .detach = adp_dsi_host_detach,
+};
+
+static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct adp_mipi_drv_private *adp =
+ container_of(bridge, struct adp_mipi_drv_private, bridge);
+
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
+ .attach = adp_dsi_bridge_attach,
+};
+
+static int adp_mipi_probe(struct platform_device *pdev)
+{
+ struct adp_mipi_drv_private *adp;
+
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ adp->mipi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adp->mipi)) {
+ dev_err(&pdev->dev, "failed to map mipi mmio");
+ return PTR_ERR(adp->mipi);
+ }
+
+ adp->dsi.dev = &pdev->dev;
+ adp->dsi.ops = &adp_dsi_host_ops;
+ adp->bridge.of_node = pdev->dev.of_node;
+ adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
+ dev_set_drvdata(&pdev->dev, adp);
+ return mipi_dsi_host_register(&adp->dsi);
+}
+
+static void adp_mipi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adp_mipi_drv_private *adp = dev_get_drvdata(dev);
+
+ mipi_dsi_host_unregister(&adp->dsi);
+}
+
+static const struct of_device_id adp_mipi_of_match[] = {
+ { .compatible = "apple,h7-display-pipe-mipi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_mipi_of_match);
+
+static struct platform_driver adp_mipi_platform_driver = {
+ .driver = {
+ .name = "adp-mipi",
+ .of_match_table = adp_mipi_of_match,
+ },
+ .probe = adp_mipi_probe,
+ .remove = adp_mipi_remove,
+};
+
+module_platform_driver(adp_mipi_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe MIPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
new file mode 100644
index 000000000000..54cde090c3f4
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#define ADP_INT_STATUS 0x34
+#define ADP_INT_STATUS_INT_MASK 0x7
+#define ADP_INT_STATUS_VBLANK 0x1
+#define ADP_CTRL 0x100
+#define ADP_CTRL_VBLANK_ON 0x12
+#define ADP_CTRL_FIFO_ON 0x601
+#define ADP_SCREEN_SIZE 0x0c
+#define ADP_SCREEN_HSIZE GENMASK(15, 0)
+#define ADP_SCREEN_VSIZE GENMASK(31, 16)
+
+#define ADBE_FIFO 0x10c0
+#define ADBE_FIFO_SYNC 0xc0000000
+
+#define ADBE_BLEND_BYPASS 0x2020
+#define ADBE_BLEND_EN1 0x2028
+#define ADBE_BLEND_EN2 0x2074
+#define ADBE_BLEND_EN3 0x202c
+#define ADBE_BLEND_EN4 0x2034
+#define ADBE_MASK_BUF 0x2200
+
+#define ADBE_SRC_START 0x4040
+#define ADBE_SRC_SIZE 0x4048
+#define ADBE_DST_START 0x4050
+#define ADBE_DST_SIZE 0x4054
+#define ADBE_STRIDE 0x4038
+#define ADBE_FB_BASE 0x4030
+
+#define ADBE_LAYER_EN1 0x4020
+#define ADBE_LAYER_EN2 0x4068
+#define ADBE_LAYER_EN3 0x40b4
+#define ADBE_LAYER_EN4 0x40f4
+#define ADBE_SCALE_CTL 0x40ac
+#define ADBE_SCALE_CTL_BYPASS 0x100000
+
+#define ADBE_LAYER_CTL 0x1038
+#define ADBE_LAYER_CTL_ENABLE 0x10000
+
+#define ADBE_PIX_FMT 0x402c
+#define ADBE_PIX_FMT_XRGB32 0x53e4001
+
+static int adp_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The modesetting driver does not check the non-desktop connector
+ * property and keeps the device open and locked. If the touchbar daemon
+ * opens the device first, modesetting breaks the whole X session.
+	 * Simply refuse to open the device for X11 server processes as a
+	 * workaround.
+ */
+ if (current->comm[0] == 'X')
+ return -EBUSY;
+
+ return drm_open(inode, filp);
+}
+
+static const struct file_operations adp_fops = {
+ .owner = THIS_MODULE,
+ .open = adp_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+};
+
+static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ args->height = ALIGN(args->height, 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver adp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &adp_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create),
+ .name = "adp",
+ .desc = "Apple Display Pipe DRM Driver",
+ .major = 0,
+ .minor = 1,
+};
+
+struct adp_drv_private {
+ struct drm_device drm;
+ struct drm_crtc crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ void __iomem *be;
+ void __iomem *fe;
+ u32 *mask_buf;
+ u64 mask_buf_size;
+ dma_addr_t mask_iova;
+ int be_irq;
+ int fe_irq;
+ struct drm_pending_vblank_event *event;
+};
+
+#define to_adp(x) container_of(x, struct adp_drv_private, drm)
+#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc)
+
+static int adp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void adp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp;
+ struct drm_rect src_rect;
+ struct drm_gem_dma_object *obj;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ u32 src_pos, src_size, dst_pos, dst_size;
+
+ if (!plane || !new_state)
+ return;
+
+ fb = new_state->fb;
+ if (!fb)
+ return;
+ adp = to_adp(plane->dev);
+
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+ src_pos = src_rect.x1 << 16 | src_rect.y1;
+ dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1;
+ src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect);
+ dst_size = drm_rect_width(&new_state->dst) << 16 |
+ drm_rect_height(&new_state->dst);
+ writel(src_pos, adp->be + ADBE_SRC_START);
+ writel(src_size, adp->be + ADBE_SRC_SIZE);
+ writel(dst_pos, adp->be + ADBE_DST_START);
+ writel(dst_size, adp->be + ADBE_DST_SIZE);
+ writel(fb->pitches[0], adp->be + ADBE_STRIDE);
+ obj = drm_fb_dma_get_gem_obj(fb, 0);
+ if (obj)
+ writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE);
+
+ writel(BIT(0), adp->be + ADBE_LAYER_EN1);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN2);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN3);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL);
+ writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL);
+ writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT);
+}
+
+static void adp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = to_adp(plane->dev);
+
+ writel(0x0, adp->be + ADBE_LAYER_EN1);
+ writel(0x0, adp->be + ADBE_LAYER_EN2);
+ writel(0x0, adp->be + ADBE_LAYER_EN3);
+ writel(0x0, adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL);
+}
+
+static const struct drm_plane_helper_funcs adp_plane_helper_funcs = {
+ .atomic_check = adp_plane_atomic_check,
+ .atomic_update = adp_plane_atomic_update,
+ .atomic_disable = adp_plane_atomic_disable,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS
+};
+
+static const struct drm_plane_funcs adp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static const u32 plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+#define ALL_CRTCS 1
+
+static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *plane;
+
+ plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0,
+ ALL_CRTCS, &adp_plane_funcs,
+ plane_formats, ARRAY_SIZE(plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
+ if (IS_ERR(plane)) {
+ drm_err(drm, "failed to allocate plane");
+ return plane;
+ }
+
+ drm_plane_helper_add(plane, &adp_plane_helper_funcs);
+ return plane;
+}
+
+static void adp_enable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+}
+
+static int adp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_enable_vblank(adp);
+
+ return 0;
+}
+
+static void adp_disable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+}
+
+static void adp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_disable_vblank(adp);
+}
+
+static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+
+ writel(BIT(0), adp->be + ADBE_BLEND_EN2);
+ writel(BIT(4), adp->be + ADBE_BLEND_EN1);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN3);
+ writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
+ writel(0x0, adp->be + ADBE_BLEND_EN2);
+ writel(0x0, adp->be + ADBE_BLEND_EN1);
+ writel(0x0, adp->be + ADBE_BLEND_EN3);
+ writel(0x0, adp->be + ADBE_BLEND_BYPASS);
+ writel(0x0, adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ u32 frame_num = 1;
+ unsigned long flags;
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ u64 new_size = ALIGN(new_state->mode.hdisplay *
+ new_state->mode.vdisplay * 4, PAGE_SIZE);
+
+ if (new_size != adp->mask_buf_size) {
+ if (adp->mask_buf)
+ dma_free_coherent(crtc->dev->dev, adp->mask_buf_size,
+ adp->mask_buf, adp->mask_iova);
+ adp->mask_buf = NULL;
+		if (new_size != 0) {
+			adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size,
+							   &adp->mask_iova, GFP_KERNEL);
+			/* Guard against allocation failure before touching the buffer. */
+			if (adp->mask_buf) {
+				memset(adp->mask_buf, 0xFF, new_size);
+				writel(adp->mask_iova, adp->be + ADBE_MASK_BUF);
+			}
+		}
+ adp->mask_buf_size = new_size;
+ }
+ writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
+ //FIXME: use adbe flush interrupt
+ if (crtc->state->event) {
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ adp->event = event;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static const struct drm_crtc_funcs adp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = adp_crtc_enable_vblank,
+ .disable_vblank = adp_crtc_disable_vblank,
+};
+
+
+static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = {
+ .atomic_enable = adp_crtc_atomic_enable,
+ .atomic_disable = adp_crtc_atomic_disable,
+ .atomic_flush = adp_crtc_atomic_flush,
+};
+
+static int adp_setup_crtc(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = adp_plane_new(adp);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary,
+ NULL, &adp_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs);
+ return 0;
+}
+
+static const struct drm_mode_config_funcs adp_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int adp_setup_mode_config(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ int ret;
+ u32 size;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+	/*
+	 * Query the screen size and restrict the frame buffer size to the
+	 * screen size aligned to the next multiple of 64. This is not strictly
+	 * necessary but can be used as a simple check for non-desktop devices.
+	 * Xorg's modesetting driver does not care about the connector
+	 * "non-desktop" property. The max frame buffer width or height can be
+	 * easily checked and a device can be rejected if the max width/height
+	 * is smaller than 120, for example.
+	 * A touchbar daemon is not limited by this small framebuffer size.
+	 */
+ size = readl(adp->fe + ADP_SCREEN_SIZE);
+
+ drm->mode_config.min_width = 32;
+ drm->mode_config.min_height = 32;
+ drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64);
+ drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64);
+ drm->mode_config.preferred_depth = 24;
+ drm->mode_config.prefer_shadow = 0;
+ drm->mode_config.funcs = &adp_mode_config_funcs;
+
+ ret = adp_setup_crtc(adp);
+ if (ret) {
+ drm_err(drm, "failed to create crtc");
+ return ret;
+ }
+
+ adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(adp->encoder)) {
+ drm_err(drm, "failed to init encoder");
+ return PTR_ERR(adp->encoder);
+ }
+ adp->encoder->possible_crtcs = ALL_CRTCS;
+
+ ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ drm_err(drm, "failed to init bridge chain");
+ return ret;
+ }
+
+ adp->connector = drm_bridge_connector_init(drm, adp->encoder);
+ if (IS_ERR(adp->connector))
+ return PTR_ERR(adp->connector);
+
+ drm_connector_attach_encoder(adp->connector, adp->encoder);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ drm_err(drm, "failed to initialize vblank");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp)
+{
+ struct device *dev = &pdev->dev;
+
+ adp->be = devm_platform_ioremap_resource_byname(pdev, "be");
+ if (IS_ERR(adp->be)) {
+ dev_err(dev, "failed to map display backend mmio");
+ return PTR_ERR(adp->be);
+ }
+
+ adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(adp->fe)) {
+ dev_err(dev, "failed to map display pipe mmio");
+ return PTR_ERR(adp->fe);
+ }
+
+ adp->be_irq = platform_get_irq_byname(pdev, "be");
+ if (adp->be_irq < 0)
+ return adp->be_irq;
+
+ adp->fe_irq = platform_get_irq_byname(pdev, "fe");
+ if (adp->fe_irq < 0)
+ return adp->fe_irq;
+
+ return 0;
+}
+
+static irqreturn_t adp_fe_irq(int irq, void *arg)
+{
+ struct adp_drv_private *adp = (struct adp_drv_private *)arg;
+ u32 int_status;
+ u32 int_ctl;
+
+ int_status = readl(adp->fe + ADP_INT_STATUS);
+ if (int_status & ADP_INT_STATUS_VBLANK) {
+ drm_crtc_handle_vblank(&adp->crtc);
+ spin_lock(&adp->crtc.dev->event_lock);
+ if (adp->event) {
+ int_ctl = readl(adp->fe + ADP_CTRL);
+ if ((int_ctl & 0xF00) == 0x600) {
+ drm_crtc_send_vblank_event(&adp->crtc, adp->event);
+ adp->event = NULL;
+ drm_crtc_vblank_put(&adp->crtc);
+ }
+ }
+ spin_unlock(&adp->crtc.dev->event_lock);
+ }
+
+ writel(int_status, adp->fe + ADP_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int adp_drm_bind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+ int err;
+
+ writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
+
+ adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
+ if (IS_ERR(adp->next_bridge)) {
+ dev_err(dev, "failed to find next bridge");
+ return PTR_ERR(adp->next_bridge);
+ }
+
+ err = adp_setup_mode_config(adp);
+ if (err < 0)
+ return err;
+
+ err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp);
+ if (err)
+ return err;
+
+ err = drm_dev_register(&adp->drm, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void adp_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ free_irq(adp->fe_irq, adp);
+}
+
+static const struct component_master_ops adp_master_ops = {
+ .bind = adp_drm_bind,
+ .unbind = adp_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int adp_probe(struct platform_device *pdev)
+{
+ struct device_node *port;
+ struct component_match *match = NULL;
+ struct adp_drv_private *adp;
+ int err;
+
+ adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ dev_set_drvdata(&pdev->dev, &adp->drm);
+
+ err = adp_parse_of(pdev, adp);
+ if (err < 0)
+ return err;
+
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
+ return -ENODEV;
+
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
+
+ return component_master_add_with_match(&pdev->dev, &adp_master_ops, match);
+}
+
+static void adp_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &adp_master_ops);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id adp_of_match[] = {
+ { .compatible = "apple,h7-display-pipe", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_of_match);
+
+static struct platform_driver adp_platform_driver = {
+ .driver = {
+ .name = "adp",
+ .of_match_table = adp_of_match,
+ },
+ .probe = adp_probe,
+ .remove = adp_remove,
+};
+
+module_platform_driver(adp_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
+MODULE_LICENSE("GPL");
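adp_crtc_atomic_flush() completes the pageflip event from the front-end interrupt handler whenever a vblank reference could be taken. For comparison, the conventional helper-based completion pattern used by many KMS drivers is sketched below (illustration only; adp instead parks the event in adp->event and sends it from adp_fe_irq()):

static void example_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;
	unsigned long flags;

	if (!event)
		return;
	crtc->state->event = NULL;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* The vblank core sends the event at the next vblank. */
		drm_crtc_arm_vblank_event(crtc, event);
	else
		/* No vblank reference available; complete immediately. */
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
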
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 0051fb1b437f..1acfed2f92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,10 +2,13 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
+ select DRM_CLIENT
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
@@ -23,6 +26,7 @@ config DRM_AMDGPU
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
select DRM_EXEC
+ select DRM_PANEL_BACKLIGHT_QUIRKS
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
@@ -64,7 +68,6 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
@@ -73,7 +76,7 @@ config DRM_AMDGPU_USERPTR
config DRM_AMD_ISP
bool "Enable AMD Image Signal Processor IP support"
- depends on DRM_AMDGPU
+ depends on DRM_AMDGPU && ACPI
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7b18c52825d..64e7acff8f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -65,7 +65,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -83,7 +84,8 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -105,7 +107,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o umc_v8_14.o
# add IH block
amdgpu-y += \
@@ -136,7 +138,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
@@ -173,7 +174,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -200,6 +204,7 @@ amdgpu-y += \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
vcn_v5_0_0.o \
+ vcn_v5_0_1.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -208,7 +213,8 @@ amdgpu-y += \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
jpeg_v4_0_5.o \
- jpeg_v5_0_0.o
+ jpeg_v5_0_0.o \
+ jpeg_v5_0_1.o
# add VPE block
amdgpu-y += \
@@ -250,6 +256,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index b0f95a7649bf..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,30 +71,34 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -207,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -246,10 +252,22 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to get BIF handle\n");
return -EINVAL;
}
- r = cmn_block->version->funcs->resume(adev);
+ r = amdgpu_ip_block_resume(cmn_block);
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
@@ -282,15 +300,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- adev->ip_blocks[i].status.hw = true;
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -304,7 +317,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
@@ -342,8 +355,12 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
}
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
+		/* TBD: Ideally should clear only GFX, SDMA blocks */
+ amdgpu_ras_clear_err_state(tmp_adev);
r = aldebaran_mode2_restore_ip(tmp_adev);
if (r)
goto end;
@@ -387,6 +404,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
tmp_adev);
if (!r) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
r = amdgpu_ib_ring_tests(tmp_adev);
@@ -417,6 +436,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = {
static struct amdgpu_reset_handler
*aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
&aldebaran_mode2_handler,
+ &xgmi_reset_on_init_handler,
};
int aldebaran_reset_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9b1e0ede05a4..2a0df4cabb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -109,16 +110,19 @@
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
+#include "amdgpu_cper.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
#define MAX_GPU_INSTANCE 64
-#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+#define GFX_SLICE_PERIOD_MS 250
struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
@@ -131,10 +135,6 @@ struct amdgpu_mgpu_info {
uint32_t num_gpu;
uint32_t num_dgpu;
uint32_t num_apu;
-
- /* delayed reset_func for XGMI configuration if necessary */
- struct delayed_work delayed_reset_work;
- bool pending_reset;
};
enum amdgpu_ss {
@@ -231,7 +231,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -269,8 +269,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -303,6 +305,12 @@ extern int amdgpu_wbrf;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
+/* reset mask */
+#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
+#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
+#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
+#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
+
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -350,7 +358,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -365,8 +372,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
#define AMDGPU_MAX_IP_NUM 16
@@ -389,6 +399,7 @@ struct amdgpu_ip_block_version {
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
+ struct amdgpu_device *adev;
};
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
@@ -409,6 +420,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+void amdgpu_bios_release(struct amdgpu_device *adev);
/*
* Clocks
*/
@@ -423,7 +435,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -459,9 +470,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -481,7 +489,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -494,6 +501,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -505,12 +517,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocations.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states into memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading its registers may be
+ * problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
- volatile uint32_t *wb;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. In terms of CPU address
+ * this value can be accessed directly by using the offset as an index.
+ * For the GPU address, it is necessary to use gpu_addr and the offset.
+ */
+ uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+	 * Bitmap tracking the writeback slots already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects read and write of the used field array.
+ */
spinlock_t lock;
};
@@ -544,6 +606,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -561,8 +624,10 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
+ AMD_RESET_METHOD_ON_INIT,
};
struct amdgpu_video_codec_info {
@@ -656,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -687,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
@@ -754,6 +820,20 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -813,6 +893,12 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
@@ -821,15 +907,45 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
+};
+
+/*
+ * Custom init levels could be defined for different situations where a full
+ * initialization of all hardware blocks is not expected. Sample cases are
+ * custom init sequences after resume from S0i3/S3, reset on initialization,
+ * partial reset of blocks, etc. Levels are described in the corresponding
+ * struct definitions - amdgpu_init_default, amdgpu_init_minimal_xgmi.
+ */
+enum amdgpu_init_lvl_id {
+ AMDGPU_INIT_LEVEL_DEFAULT,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+};
+
+struct amdgpu_init_level {
+ enum amdgpu_init_lvl_id level;
+ uint32_t hwini_ip_block_mask;
+};
+
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
struct amdgpu_device {
struct device *dev;
@@ -854,6 +970,7 @@ struct amdgpu_device {
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
+ struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct debugfs_blob_wrapper debugfs_discovery_blob;
@@ -1033,9 +1150,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1053,6 +1167,13 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
/* df */
struct amdgpu_df df;
@@ -1063,6 +1184,9 @@ struct amdgpu_device {
/* ACA */
struct amdgpu_aca aca;
+ /* CPER */
+ struct amdgpu_cper cper;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1092,8 +1216,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
- /* indicate amdgpu suspension status */
- bool suspend_complete;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1124,11 +1247,14 @@ struct amdgpu_device {
struct ratelimit_state throttling_logging_rs;
uint32_t ras_hw_enabled;
uint32_t ras_enabled;
+ bool ras_default_ecc_enabled;
bool no_hw_access;
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1150,7 +1276,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- bool job_hang;
bool dc_enabled;
/* Mask of active clusters */
uint32_t aid_mask;
@@ -1162,10 +1287,36 @@ struct amdgpu_device {
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
bool debug_exp_resets;
+ bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
- bool enforce_isolation[MAX_XCP];
- /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+ /* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
+ struct amdgpu_isolation {
+ void *owner;
+ struct dma_fence *spearhead;
+ struct amdgpu_sync active;
+ struct amdgpu_sync prev;
+ } isolation[MAX_XCP];
+
+ struct amdgpu_init_level *init_lvl;
+
+ /* This flag is used to determine how VRAM allocations are handled for APUs
+ * in KFD: VRAM or GTT.
+ */
+ bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+ * Must be last --ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1199,6 +1350,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1250,7 +1406,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1261,6 +1418,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context);
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
+
int emu_soc_asic_init(struct amdgpu_device *adev);
/*
@@ -1418,16 +1577,17 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1442,7 +1602,12 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1450,23 +1615,15 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
-bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
-static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
-#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void);
-#else
-static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
-#endif
-
/*
* KMS
*/
@@ -1482,6 +1639,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1532,7 +1690,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1563,19 +1722,24 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
+#endif
+
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1619,4 +1783,26 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
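To make the new amdgpu_wb kernel-doc concrete, the sketch below shows roughly how a writeback slot is obtained and addressed elsewhere in amdgpu; it assumes the existing amdgpu_device_wb_get()/amdgpu_device_wb_free() helpers and trims error handling:

	u32 wb;		/* slot index into adev->wb */
	u64 gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* GPU-visible address of the slot: base plus 4 bytes per slot. */
	gpu_addr = adev->wb.gpu_addr + wb * 4;

	/* CPU-side access simply indexes the mapped array. */
	adev->wb.wb[wb] = 0;

	amdgpu_device_wb_free(adev, wb);
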
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 2ca127173135..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -30,16 +30,6 @@
typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
-struct aca_banks {
- int nr_banks;
- struct list_head list;
-};
-
-struct aca_hwip {
- int hwid;
- int mcatype;
-};
-
static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
ACA_BANK_HWID(SMU, 0x01, 0x01),
ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
@@ -86,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -111,7 +102,7 @@ static struct aca_regs_dump {
{"STATUS", ACA_REG_IDX_STATUS},
{"ADDR", ACA_REG_IDX_ADDR},
{"MISC", ACA_REG_IDX_MISC0},
- {"CONFIG", ACA_REG_IDX_CONFG},
+ {"CONFIG", ACA_REG_IDX_CONFIG},
{"IPID", ACA_REG_IDX_IPID},
{"SYND", ACA_REG_IDX_SYND},
{"DESTAT", ACA_REG_IDX_DESTAT},
@@ -125,11 +116,40 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
@@ -158,7 +178,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return -EINVAL;
}
- if (start + count >= max_count)
+ if (start + count > max_count)
return -EINVAL;
count = min_t(int, count, max_count);
@@ -168,7 +188,16 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
if (ret)
return ret;
- bank.type = type;
+ bank.smu_err_type = type;
+
+	/*
+	 * Poison can be consumed when a UE is injected while background
+	 * workloads are running; such poison banks are unexpected here, so
+	 * skip any that were not reported by the UMC.
+	 */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
aca_smu_bank_dump(adev, i, count, &bank, qctx);
@@ -180,31 +209,14 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
+	/* Parse all deferred errors with the UMC aca handle */
+ if (ACA_BANK_ERR_IS_DEFFERED(bank))
+ return handle->hwip == ACA_HWIP_TYPE_UMC;
+
if (!aca_bank_hwip_is_matched(bank, handle->hwip))
return false;
@@ -227,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
@@ -394,6 +407,56 @@ static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type
return ret;
}
+static void aca_banks_generate_cper(struct amdgpu_device *adev,
+ enum aca_smu_type type,
+ struct aca_banks *banks,
+ int count)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int r;
+
+ if (!adev->cper.enabled)
+ return;
+
+ if (!banks || !count) {
+ dev_warn(adev->dev, "fail to generate cper records\n");
+ return;
+ }
+
+ /* UEs must be encoded into separate CPER entries */
+ if (type == ACA_SMU_TYPE_UE) {
+ struct aca_banks de_banks;
+
+ aca_banks_init(&de_banks);
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ r = aca_banks_add_bank(&de_banks, bank);
+ if (r)
+ dev_warn(adev->dev, "fail to add de banks, ret = %d\n", r);
+ } else {
+ if (amdgpu_cper_generate_ue_record(adev, bank))
+ dev_warn(adev->dev, "fail to generate ue cper records\n");
+ }
+ }
+
+ if (!list_empty(&de_banks.list)) {
+ if (amdgpu_cper_generate_ce_records(adev, &de_banks, de_banks.nr_banks))
+ dev_warn(adev->dev, "fail to generate de cper records\n");
+ }
+
+ aca_banks_release(&de_banks);
+ } else {
+		/*
+		 * SMU_TYPE_CE banks are combined into one CPER entry;
+		 * they could be CEs, DEs, or both.
+		 */
+ if (amdgpu_cper_generate_ce_records(adev, banks, count))
+ dev_warn(adev->dev, "fail to generate ce cper records\n");
+ }
+}
+
static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
@@ -431,6 +494,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
if (ret)
goto err_release_banks;
+ aca_banks_generate_cper(adev, type, &banks, count);
+
err_release_banks:
aca_banks_release(&banks);
@@ -516,6 +581,10 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h
if (ret)
return ret;
+	/* DEs may be contained in CEs or UEs */
+ if (type != ACA_ERROR_TYPE_DEFERRED)
+ aca_log_aca_error(handle, ACA_ERROR_TYPE_DEFERRED, err_data);
+
return aca_log_aca_error(handle, type, err_data);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 5ef6b745f222..38c88897e1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -71,12 +71,21 @@ struct ras_query_context;
#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
+#define ACA_BANK_ERR_IS_DEFFERED(bank) \
+ (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
+ ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
ACA_REG_IDX_ADDR = 2,
ACA_REG_IDX_MISC0 = 3,
- ACA_REG_IDX_CONFG = 4,
+ ACA_REG_IDX_CONFIG = 4,
ACA_REG_IDX_IPID = 5,
ACA_REG_IDX_SYND = 6,
ACA_REG_IDX_DESTAT = 8,
@@ -103,13 +112,20 @@ enum aca_error_type {
};
enum aca_smu_type {
+ ACA_SMU_TYPE_INVALID = -1,
ACA_SMU_TYPE_UE = 0,
ACA_SMU_TYPE_CE,
ACA_SMU_TYPE_COUNT,
};
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
struct aca_bank {
- enum aca_smu_type type;
+ enum aca_error_type aca_err_type;
+ enum aca_smu_type smu_err_type;
u64 regs[ACA_MAX_REGS_COUNT];
};
@@ -118,6 +134,11 @@ struct aca_bank_node {
struct list_head node;
};
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
struct aca_bank_info {
int die_id;
int socket_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index bf6c4a0d0525..4926996f94da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -98,9 +98,9 @@ enum {
ACP_TILE_DSP2,
};
-static int acp_sw_init(void *handle)
+static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->acp.parent = adev->dev;
@@ -112,9 +112,9 @@ static int acp_sw_init(void *handle)
return 0;
}
-static int acp_sw_fini(void *handle)
+static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->acp.cgs_device)
amdgpu_cgs_destroy_device(adev->acp.cgs_device);
@@ -140,7 +140,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -157,7 +157,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = {
/**
* acp_hw_init - start and test ACP block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_init(void *handle)
+static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
u64 acp_base;
@@ -230,19 +230,13 @@ static int acp_hw_init(void *handle)
u32 count = 0;
struct i2s_platform_data *i2s_pdata = NULL;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
-
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
} else if (r) {
return r;
@@ -503,18 +497,18 @@ failure:
/**
* acp_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
u32 val = 0;
u32 count = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -565,67 +559,50 @@ static int acp_hw_fini(void *handle)
return 0;
}
-static int acp_suspend(void *handle)
+static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power up on suspend */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
-static int acp_resume(void *handle)
+static int acp_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power down again on resume */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
- return 0;
-}
-
-static int acp_early_init(void *handle)
-{
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
-static bool acp_is_idle(void *handle)
+static bool acp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int acp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int acp_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int acp_set_clockgating_state(void *handle,
+static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int acp_set_powergating_state(void *handle,
+static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
return 0;
}
static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
- .early_init = acp_early_init,
- .late_init = NULL,
.sw_init = acp_sw_init,
.sw_fini = acp_sw_fini,
.hw_init = acp_hw_init,
@@ -633,12 +610,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.suspend = acp_suspend,
.resume = acp_resume,
.is_idle = acp_is_idle,
- .wait_for_idle = acp_wait_for_idle,
- .soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version acp_ip_block = {
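The amdgpu_acp.c hunks above are representative of the series-wide amd_ip_funcs conversion: every callback now receives the struct amdgpu_ip_block pointer instead of an opaque void *handle, so the device and version information are reached through the block itself. A hypothetical callback in the new shape (amd_example_hw_init() is an invented name standing in for a block's real init routine):

static int example_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * The version is available directly; no more lookup via
	 * amdgpu_device_ip_get_ip_block(), as the acp_hw_init()
	 * conversion above demonstrates.
	 */
	return amd_example_hw_init(adev, ip_block->version->major,
				   ip_block->version->minor);
}
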
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 1f5a296f5ed2..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -172,8 +172,8 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
&buffer);
obj = (union acpi_object *)buffer.pointer;
- /* Fail if calling the method fails and ATIF is supported */
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /* Fail if calling the method fails */
+ if (ACPI_FAILURE(status)) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
kfree(obj);
@@ -394,6 +394,10 @@ static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
characteristics.max_input_signal;
atif->backlight_caps.ac_level = characteristics.ac_level;
atif->backlight_caps.dc_level = characteristics.dc_level;
+ atif->backlight_caps.data_points = characteristics.number_of_points;
+ memcpy(atif->backlight_caps.luminance_data,
+ characteristics.data_points,
+ sizeof(atif->backlight_caps.luminance_data));
out:
kfree(info);
return err;
@@ -800,24 +804,25 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
return -EIO;
}
+ kfree(info);
return 0;
}
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* returns 0 on success,
* otherwise return error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1276,11 +1281,7 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
- caps->caps_valid = atif->backlight_caps.caps_valid;
- caps->min_input_signal = atif->backlight_caps.min_input_signal;
- caps->max_input_signal = atif->backlight_caps.max_input_signal;
- caps->ac_level = atif->backlight_caps.ac_level;
- caps->dc_level = atif->backlight_caps.dc_level;
+ memcpy(caps, &atif->backlight_caps, sizeof(*caps));
}
/**
@@ -1531,23 +1532,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
return true;
#endif /* CONFIG_AMD_PMC */
}
+#endif /* CONFIG_SUSPEND */
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+ { "OMNI5C10" },
+ { }
+};
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
}
-#endif /* CONFIG_SUSPEND */
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
+{
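+	/* A sketch note on the cleanup attribute below: __free(put_device)
+	 * drops the bus_find_device() reference automatically when pdev
+	 * goes out of scope, so no explicit put_device() is needed here.
+	 */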
+ struct device *pdev __free(put_device) = NULL;
+ struct acpi_device *acpi_pdev;
+
+ pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+ isp_match_acpi_device_ids);
+ if (!pdev)
+ return -EINVAL;
+
+ acpi_pdev = ACPI_COMPANION(pdev);
+ if (!acpi_pdev)
+ return -ENODEV;
+
+ *dev = acpi_pdev;
+
+ return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4f08b153cb66..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,42 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
+{
+ if (adev->kfd.dev) {
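+		/* For s0ix just halt the scheduler on all KFD nodes;
+		 * otherwise do a full KFD suspend.
+		 */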
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
+{
+ int r = 0;
+
+ if (adev->kfd.dev) {
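+		/* Mirror the suspend path: for s0ix just restart the
+		 * scheduler, otherwise do a full KFD resume.
+		 */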
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend_process(adev->kfd.dev);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -368,7 +392,10 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(*bo, true);
+ if (!bo || !*bo)
+ return;
+
+ (void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
@@ -459,7 +486,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
else
mem_info->local_mem_size_private =
KFD_XCP_MEMORY_SIZE(adev, xcp->id);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
mem_info->local_mem_size_private = 0;
} else {
@@ -555,48 +582,6 @@ out_put:
return r;
}
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src)
-{
- struct amdgpu_device *peer_adev = src;
- struct amdgpu_device *adev = dst;
- int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
-
- if (ret < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, ret);
- ret = 0;
- }
- return (uint8_t)ret;
-}
-
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min)
-{
- struct amdgpu_device *adev = dst, *peer_adev;
- int num_links;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
- return 0;
-
- if (src)
- peer_adev = src;
-
- /* num links returns 0 for indirect peers since indirect route is unknown. */
- num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
- if (num_links < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, num_links);
- num_links = 0;
- }
-
- /* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
- return (num_links * 16 * 25000)/BITS_PER_BYTE;
-}
-
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
@@ -681,7 +666,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -715,8 +700,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
@@ -724,7 +710,9 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
/* Disable GFXOFF and PG. Temporary workaround
* to fix some compute applications issue on GFX9.
*/
- adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
+ struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (gfx_block != NULL)
+ gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
}
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
@@ -785,12 +773,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
@@ -815,7 +803,7 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
}
do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
return ALIGN_DOWN(tmp, PAGE_SIZE);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
return (ttm_tt_pages_limit() << PAGE_SHIFT);
} else {
return adev->gmc.real_vram_size;
@@ -834,6 +822,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
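+	/* Nothing to unmap if the KIQ ring is not ready or a reset is in
+	 * progress.
+	 */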
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
if (!ring_funcs)
return -ENOMEM;
@@ -858,8 +849,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
- if (kiq_ring->sched.ready && !adev->job_hang)
- r = amdgpu_ring_test_helper(kiq_ring);
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+	 * Ring test will do a basic scratch register change check. Just run
+	 * this to ensure that the unmap queues packet submitted above was
+	 * processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
@@ -889,3 +886,27 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
return kgd2kfd_start_sched(adev->kfd.dev, node_id);
}
+
+/* check if there are KFD queues active */
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return false;
+
+ return kgd2kfd_compute_active(adev->kfd.dev, node_id);
+}
+
+/* Config CGTT_SQ_CLK_CTRL */
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
+{
+ int r;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
+ reg_override_enable, perfmon_override_enable);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f9d119448442..9e120c934cc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -47,6 +47,7 @@ enum TLB_FLUSH_TYPE {
};
struct amdgpu_device;
+struct kfd_process_device;
struct amdgpu_reset_context;
enum kfd_mem_attachment_type {
@@ -106,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+	 * Must be last -- it ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -153,8 +156,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -192,7 +197,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
@@ -212,9 +217,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
}
static inline
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- return 0;
}
static inline
@@ -254,11 +258,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags, int8_t *xcp_id);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
@@ -266,6 +265,10 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
+
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -295,14 +298,10 @@ int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint8_t xcp_id);
@@ -416,18 +415,26 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
+bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault);
+
#else
static inline int kgd2kfd_init(void)
{
@@ -455,11 +462,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -490,12 +506,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
@@ -504,9 +520,31 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return false;
+}
+
+static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ return false;
+}
+
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 8dfdb18197c4..7e9f7a280c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -189,8 +189,9 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.set_address_watch = kgd_gfx_aldebaran_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
.hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 9435af2e6bdc..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -299,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
}
}
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
@@ -415,9 +415,10 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index e2ae714a700f..89a45a9218f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -509,6 +509,17 @@ static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue);
+ uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset);
+ uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset);
+ bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED);
+
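+	/* Only active queues report a doorbell; the byte offset is
+	 * converted to a dword index before being returned.
+	 */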
+ return is_active ? doorbell_off >> 2 : 0;
+}
+
const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@@ -530,8 +541,8 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings =
kgd_gfx_v9_program_trap_handler_settings,
- .build_grace_period_packet_info =
- kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info =
+ kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.enable_debug_trap = kgd_aldebaran_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
@@ -543,5 +554,6 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
.clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 62176d607bef..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -1021,25 +1021,25 @@ void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannont handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
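+	/* A value of 0 leaves the corresponding wait_times field
+	 * unchanged.
+	 */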
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
@@ -1084,6 +1084,12 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1109,8 +1115,9 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 9efd2dd4fdd7..a4c607c88178 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -51,9 +51,10 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -65,3 +66,5 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index c718bedda0ca..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -673,7 +673,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
.validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
@@ -682,5 +682,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index a4ba49cb22db..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -800,6 +800,12 @@ static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -824,5 +830,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_address_watch = kgd_gfx_v11_set_address_watch,
.clear_address_watch = kgd_gfx_v11_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v11_hqd_reset
+ .hqd_reset = kgd_gfx_v11_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 0dfe7093bd8a..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -361,6 +361,12 @@ static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.init_interrupts = init_interrupts_v12,
.hqd_dump = hqd_dump_v12,
@@ -374,4 +380,5 @@ const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v12_set_address_watch,
.clear_address_watch = kgd_gfx_v12_clear_address_watch,
+ .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
lower_32_bits(page_table_base));
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 3bc0cbf45bc5..088d09cc7a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
*
* @adev: Handle of device whose registers are to be read
* @queue_idx: Index of queue in the queue-map bit-field
- * @wave_cnt: Output parameter updated with number of waves in flight
- * @vmid: Output parameter updated with VMID of queue whose wave count
- * is being collected
+ * @queue_cnt: Stores the wave count and doorbell offset for an active queue
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
@@ -1079,25 +1077,25 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
adev->gfx.cu_info.max_waves_per_simd;
}
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannot handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
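+	/* A value of 0 leaves the corresponding wait_times field
+	 * unchanged.
+	 */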
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
@@ -1133,10 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
- if (!adev->debug_exp_resets &&
- !adev->gfx.num_gfx_rings)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1229,6 +1223,13 @@ unlock_out:
return queue_addr;
}
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1254,9 +1255,10 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index b6a91a552aa4..704452ca62f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -97,9 +97,10 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -111,3 +112,5 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa572ba7f9fc..83020963dfde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -197,7 +197,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -213,19 +213,35 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit)
+ kfd_mem_limit.max_system_mem_limit) {
pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+ if (!no_system_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
- if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
- (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
- kfd_mem_limit.max_ttm_mem_limit) ||
- (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
+ if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) {
ret = -ENOMEM;
goto release;
}
+	/* If is_app_apu is false and apu_prefer_gtt is true, it is an APU with
+	 * carveout < gtt. In that case VRAM allocations go to the GTT domain, so
+	 * skip the VRAM check since the ttm_mem_limit check already covers this
+	 * allocation.
+	 */
+
+ if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
+ uint64_t vram_available =
+ vram_size - reserved_for_pt - reserved_for_ras -
+ atomic64_read(&adev->vram_pin_size);
+ if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
@@ -234,7 +250,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
- (adev->flags & AMD_IS_APU) ?
+ adev->apu_prefer_gtt ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -262,7 +278,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -370,40 +386,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO to remove the eviction fences from.
+ *
+ * This function should only be used on release, when all references to the
+ * BO have already been dropped. We remove the eviction fence from the
+ * private copy of the dma_resv object here since that is what is used
+ * during release to determine whether the BO is idle.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *root = bo;
- struct amdgpu_vm_bo_base *vm_bo;
- struct amdgpu_vm *vm;
- struct amdkfd_process_info *info;
- struct amdgpu_amdkfd_fence *ef;
- int ret;
-
- /* we can always get vm_bo from root PD bo.*/
- while (root->parent)
- root = root->parent;
-
- vm_bo = root->vm_bo;
- if (!vm_bo)
- return 0;
-
- vm = vm_bo->vm;
- if (!vm)
- return 0;
-
- info = vm->process_info;
- if (!info || !info->eviction_fence)
- return 0;
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
- ef = container_of(dma_fence_get(&info->eviction_fence->base),
- struct amdgpu_amdkfd_fence, base);
+ dma_resv_assert_held(resv);
- BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
- dma_resv_unlock(bo->tbo.base.resv);
+ stub = dma_fence_get_stub();
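+	/* Swap each KFD eviction fence for the already-signaled stub fence
+	 * so the reservation object no longer blocks on it.
+	 */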
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
- dma_fence_put(&ef->base);
- return ret;
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
@@ -499,10 +507,11 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return amdgpu_sync_fence(sync, vm->last_update);
+ return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -512,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -603,12 +612,6 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
- int ret;
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -730,7 +733,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
return;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
@@ -779,7 +782,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -890,7 +893,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
+ if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -975,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -989,7 +992,7 @@ unwind:
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
- amdgpu_bo_reserve(bo[i], true);
+ (void)amdgpu_bo_reserve(bo[i], true);
if (--attachment[i]->bo_va->ref_count == 0)
amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
@@ -1086,7 +1089,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (ret) {
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
@@ -1100,6 +1103,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -1259,11 +1265,11 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
return -EBUSY;
}
- amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
- amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+ (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
return 0;
}
@@ -1287,7 +1293,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
}
- return amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
@@ -1529,27 +1535,6 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid)
-
-{
- int ret;
-
- /* Free the original amdgpu allocated pasid,
- * will be replaced with kfd allocated pasid.
- */
- if (avm->pasid) {
- amdgpu_pasid_free(avm->pasid);
- amdgpu_vm_set_pasid(adev, avm, 0);
- }
-
- ret = amdgpu_vm_set_pasid(adev, avm, pasid);
- if (ret)
- return ret;
-
- return 0;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
@@ -1607,27 +1592,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv)
-{
- struct amdgpu_vm *avm;
-
- if (WARN_ON(!adev || !drm_priv))
- return;
-
- avm = drm_priv_to_vm(drm_priv);
-
- pr_debug("Releasing process vm %p\n", avm);
-
- /* The original pasid of amdgpu vm has already been
- * released during making a amdgpu vm to a compute vm
- * The current pasid is managed by kfd and will be
- * released on kfd process destroy. Set amdgpu pasid
- * to 0 to avoid duplicate release.
- */
- amdgpu_vm_release_compute(adev, avm);
-}
-
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1682,13 +1646,17 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
- vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
- - adev->kfd.vram_used_aligned[xcp_id]
- - atomic64_read(&adev->vram_pin_size)
- - reserved_for_pt
- - reserved_for_ras;
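+	/* For carveout APUs only KFD's own usage is subtracted; pin size
+	 * and the PT/RAS reservations are not applied here.
+	 */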
+ if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id];
+ else
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
+ - atomic64_read(&adev->vram_pin_size)
+ - reserved_for_pt
+ - reserved_for_ras;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1736,7 +1704,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1987,7 +1955,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- ((adev->flags & AMD_IS_APU) &&
+ (adev->apu_prefer_gtt &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@@ -2352,7 +2320,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
- amdgpu_bo_reserve(bo, true);
+ (void)amdgpu_bo_reserve(bo, true);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
@@ -2414,7 +2382,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
- !(adev->flags & AMD_IS_APU) ?
+ !adev->apu_prefer_gtt ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
@@ -2524,11 +2492,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
/* First eviction, stop the queues */
r = kgd2kfd_quiesce_mm(mni->mm,
KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
- if (r)
+
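+			/* -ESRCH: the process is already going away, so skip
+			 * the error print and don't queue a restore.
+			 */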
+ if (r && r != -ESRCH)
pr_err("Failed to quiesce KFD\n");
- queue_delayed_work(system_freezable_wq,
- &process_info->restore_userptr_work,
- msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+
+ if (r != -ESRCH)
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
+ msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
mutex_unlock(&process_info->notifier_lock);
@@ -2597,8 +2568,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@@ -2612,9 +2582,28 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ // Send GPU VM fault to user space
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
+ }
+
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2966,7 +2955,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
- ret = amdgpu_sync_fence(&sync_obj, fence);
+ ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
@@ -3010,9 +2999,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 0c8975ac5af9..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -36,13 +36,6 @@
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"
-static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
- ATOM_GPIO_I2C_ASSIGMENT *gpio,
- u8 index)
-{
-
-}
-
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct amdgpu_i2c_bus_rec i2c;
@@ -108,9 +101,6 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
-
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
if (gpio->sucI2cId.ucAccess == id) {
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
break;
@@ -142,8 +132,6 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
if (i2c.valid) {
@@ -156,6 +144,38 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
}
}
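+/* Scan the vBIOS GPIO_I2C_Info table for the bus matching i2c_id and
+ * register it as an "OEM" i2c adapter.
+ */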
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct amdgpu_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ gpio = &i2c_info->asGPIO_Info[0];
+ for (i = 0; i < num_indices; i++) {
+ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+
+ if (i2c.valid && i2c.i2c_id == i2c_id) {
+ sprintf(stmp, "OEM 0x%x", i2c.i2c_id);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
+ break;
+ }
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+ }
+ }
+}
+
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
u8 id)
@@ -686,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1145,8 +1164,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock)
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
@@ -1161,8 +1180,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ return amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
@@ -1796,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
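
The is_visible hook is the standard sysfs idiom for hiding attributes whose backing data may be absent: return 0 to hide the attribute, or its declared mode to show it. A generic sketch with illustrative names:

	/* Sketch: hide dev_attr_optional when nothing backs it,
	 * mirroring the vbios_build handling above. */
	static umode_t example_attrs_visible(struct kobject *kobj,
					     struct attribute *attr, int idx)
	{
		if (attr == &dev_attr_optional.attr && !example_have_data(kobj))
			return 0;
		return attr->mode;
	}
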
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0811474e8fd3..867bc5c5ce67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -136,6 +136,7 @@ amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id);
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
@@ -163,8 +164,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock);
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock);
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f873dd3cae16..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
@@ -549,9 +552,10 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
u16 data_offset, size;
union umc_info *umc_info;
u8 frev, crev;
- bool ecc_default_enabled = false;
+ bool mem_ecc_enabled = false;
u8 umc_config;
u32 umc_config1;
+ adev->ras_default_ecc_enabled = false;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
@@ -563,20 +567,22 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 2:
umc_config = le32_to_cpu(umc_info->v32.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 3:
umc_config = le32_to_cpu(umc_info->v33.umc_config);
umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -585,9 +591,12 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
} else if (frev == 4) {
switch (crev) {
case 0:
+ umc_config = le32_to_cpu(umc_info->v40.umc_config);
umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -599,7 +608,7 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
}
}
- return ecc_default_enabled;
+ return mem_ecc_enabled;
}
/*
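
The decoding above separates two independent bits: ECC-capable versus ECC-enabled-by-default. A minimal sketch of the resulting predicate, using the mask names from the atomfirmware headers:

	/* Sketch: a part can be ECC-capable (config1) without shipping
	 * with ECC enabled by default (config). */
	static bool example_mem_ecc_supported(u32 umc_config, u32 umc_config1)
	{
		return (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
		       (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE);
	}
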
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 375f02002579..3893e6fc2f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void)
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void)
-{
- return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
-}
-
-#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void)
-{
- return amdgpu_atpx_priv.dhandle;
-}
-#endif
-
/**
* amdgpu_atpx_call - call an ATPX method
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 45affc02548c..00e96419fcda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -47,41 +47,50 @@
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
-static bool check_atom_bios(uint8_t *bios, size_t size)
+static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
{
uint16_t tmp, bios_header_start;
+ uint8_t *bios = adev->bios;
if (!bios || size < 0x49) {
- DRM_INFO("vbios mem is null or mem size is wrong\n");
+ dev_dbg(adev->dev, "VBIOS mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
- DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+ dev_dbg(adev->dev, "VBIOS signature incorrect %x %x\n", bios[0],
+ bios[1]);
return false;
}
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
- DRM_INFO("Can't locate bios header\n");
+ dev_dbg(adev->dev, "Can't locate VBIOS header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
- DRM_INFO("BIOS header is broken\n");
+ dev_dbg(adev->dev, "VBIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
- DRM_DEBUG("ATOMBIOS detected\n");
+ dev_dbg(adev->dev, "ATOMBIOS detected\n");
return true;
}
return false;
}
+void amdgpu_bios_release(struct amdgpu_device *adev)
+{
+ kfree(adev->bios);
+ adev->bios = NULL;
+ adev->bios_size = 0;
+}
+
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
@@ -118,8 +127,8 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -146,8 +155,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -186,8 +195,8 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
- if (!check_atom_bios(adev->bios, len)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, len)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -216,14 +225,15 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, romlen);
iounmap(bios);
- if (!check_atom_bios(adev->bios, romlen))
+ if (!check_atom_bios(adev, romlen))
goto free_bios;
adev->bios_size = romlen;
return true;
free_bios:
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
+
return false;
}
@@ -324,8 +334,8 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = size;
@@ -389,8 +399,8 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
vhdr->ImageLength,
GFP_KERNEL);
- if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, vhdr->ImageLength)) {
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = vhdr->ImageLength;
@@ -437,6 +447,13 @@ success:
return true;
}
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+ struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+ return (res->flags & IORESOURCE_ROM_SHADOW);
+}
+
static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
@@ -455,14 +472,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
+ if (amdgpu_prefer_rom_resource(adev)) {
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
- if (amdgpu_read_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
- goto success;
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ } else {
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
}
if (amdgpu_read_bios_from_rom(adev)) {
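
The reordering keys the fetch order off the shadow flag; condensed, the policy is:

	/* Sketch of the VBIOS source ordering above: try the ROM BAR
	 * first when the platform shadows the ROM, otherwise prefer
	 * the platform (ACPI) image. */
	static bool example_fetch_vbios(struct amdgpu_device *adev)
	{
		if (amdgpu_prefer_rom_resource(adev))
			return amdgpu_read_bios(adev) ||
			       amdgpu_read_platform_bios(adev);
		return amdgpu_read_platform_bios(adev) ||
		       amdgpu_read_bios(adev);
	}
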
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
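
vmemdup_array_user() collapses the allocate-then-copy_from_user sequence, checks the count*size multiplication for overflow, and returns an ERR_PTR on failure; a minimal usage sketch:

	/* Sketch: duplicate a userspace array in one call; the result
	 * is kvfree()-able, or an ERR_PTR on failure. */
	static struct drm_amdgpu_bo_list_entry *
	example_dup_entries(u64 user_ptr, u32 count)
	{
		return vmemdup_array_user(u64_to_user_ptr(user_ptr), count,
					  sizeof(struct drm_amdgpu_bo_list_entry));
	}
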
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..a716c9886c74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,7 +38,6 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
struct hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 16153d275d7a..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,83 +277,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw,
+ AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
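
The strcpy-to-strscpy conversions rely on the two-argument strscpy() form, which infers the bound from the destination array type and guarantees NUL termination; a minimal sketch:

	/* Sketch: strscpy() bounds the copy by sizeof(fw_name) and
	 * returns -E2BIG if the source had to be truncated. */
	char fw_name[64];

	if (strscpy(fw_name, "amdgpu/vega20_smc.bin") < 0)
		pr_warn("firmware name truncated\n");
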
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 344e0a9ee08a..47e9bfba0642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
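
Carrying explicit names keeps the userspace-visible mode strings stable even where CVT naming would differ; per iteration, the loop above reduces to:

	/* Sketch: generate a CVT timing but present it under the
	 * table's fixed name. */
	mode = drm_cvt_mode(dev, 1920, 1080, 60, false, false, false);
	if (mode) {
		strscpy(mode->name, "1920x1080", DRM_DISPLAY_MODE_LEN);
		drm_mode_probed_add(connector, mode);
	}
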
@@ -674,7 +671,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -839,7 +836,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -1195,29 +1192,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
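
As a worked example of the limits above: a 3840x2160@60 mode needs a ~594000 KHz pixel clock, so on an HDMI sink it passes the 600000 KHz cap on Polaris and newer but exceeds both the 300000 KHz Tonga-era cap and the 297000 KHz cap on older parts, returning MODE_CLOCK_HIGH.
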
@@ -1464,7 +1501,7 @@ out:
}
static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
new file mode 100644
index 000000000000..ef996493115f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/list.h>
+#include "amdgpu.h"
+
+static const guid_t MCE = CPER_NOTIFY_MCE;
+static const guid_t CMC = CPER_NOTIFY_CMC;
+static const guid_t BOOT = BOOT_TYPE;
+
+static const guid_t CRASHDUMP = AMD_CRASHDUMP;
+static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
+
+static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
+{
+ hdr->record_length += size;
+}
+
+static void amdgpu_cper_get_timestamp(struct cper_timestamp *timestamp)
+{
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
+
+ time64_to_tm(now, 0, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = 1 + tm.tm_mon;
+ timestamp->year = (1900 + tm.tm_year) % 100;
+ timestamp->century = (1900 + tm.tm_year) / 100;
+}
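
For example, time64_to_tm() for a date in 2025 yields tm_year = 125, which the split above stores as century = 20 and year = 25, matching the CPER timestamp convention.
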
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev)
+{
+ char record_id[16];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR_REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = sev;
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ amdgpu_cper_get_timestamp(&hdr->timestamp);
+
+ snprintf(record_id, 9, "%d:%X",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0,
+ atomic_inc_return(&adev->cper.unique_id));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ adev->pdev->vendor, adev->pdev->device);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ hdr->notify_type = MCE;
+ break;
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ if (sev == CPER_SEV_NON_FATAL_CORRECTED)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type\n");
+ break;
+ }
+
+ __inc_entry_length(hdr, HDR_LEN);
+}
+
+static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
+ struct cper_sec_desc *section_desc,
+ bool bp_threshold,
+ bool poison,
+ enum cper_error_severity sev,
+ guid_t sec_type,
+ uint32_t section_length,
+ uint32_t section_offset)
+{
+ section_desc->revision_minor = CPER_SEC_MINOR_REV_1;
+ section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
+ section_desc->sec_offset = section_offset;
+ section_desc->sec_length = section_length;
+ section_desc->valid_bits.fru_text = 1;
+ section_desc->flag_bits.primary = 1;
+ section_desc->severity = sev;
+ section_desc->sec_type = sec_type;
+
+ snprintf(section_desc->fru_text, 20, "OAM%d",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0);
+
+ if (bp_threshold)
+ section_desc->flag_bits.exceed_err_threshold = 1;
+ if (poison)
+ section_desc->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_crashdump_fatal *section;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, false,
+ CPER_SEV_FATAL, CRASHDUMP, FATAL_SEC_LEN,
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->body.reg_arr_size = sizeof(reg_data);
+ section->body.data = reg_data;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + FATAL_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ bool poison;
+
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, poison,
+ sev, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ reg_count = umin(reg_count, CPER_ACA_REG_COUNT);
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ /* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count)
+{
+ struct cper_hdr *hdr;
+ uint32_t size = 0;
+
+ size += HDR_LEN;
+ size += (SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ size += (NONSTD_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ size += (FATAL_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_BOOT:
+ size += (BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type!\n");
+ return NULL;
+ }
+
+ hdr = kzalloc(size, GFP_KERNEL);
+ if (!hdr)
+ return NULL;
+
+ /* Save this early */
+ hdr->sec_cnt = section_count;
+
+ return hdr;
+}
+
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank)
+{
+ struct cper_hdr *fatal = NULL;
+ struct cper_sec_crashdump_reg_data reg_data = { 0 };
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ fatal = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
+ if (!fatal) {
+ dev_err(adev->dev, "fail to alloc cper entry for ue record\n");
+ return -ENOMEM;
+ }
+
+ reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.addr_lo = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.addr_hi = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.ipid_lo = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.ipid_hi = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.synd_lo = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data.synd_hi = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ amdgpu_cper_entry_fill_hdr(adev, fatal, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_fatal_section(adev, fatal, 0, reg_data);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
+ kfree(fatal);
+
+ return 0;
+}
+
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
+{
+ struct cper_hdr *bp_threshold = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
+ if (!bp_threshold) {
+ dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
+ return -ENOMEM;
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+ kfree(bp_threshold);
+
+ return 0;
+}
+
+static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
+ enum aca_error_type aca_err_type)
+{
+ switch (aca_err_type) {
+ case ACA_ERROR_TYPE_UE:
+ return CPER_SEV_FATAL;
+ case ACA_ERROR_TYPE_CE:
+ return CPER_SEV_NON_FATAL_CORRECTED;
+ case ACA_ERROR_TYPE_DEFERRED:
+ return CPER_SEV_NON_FATAL_UNCORRECTED;
+ default:
+ dev_err(adev->dev, "Unknown ACA error type!\n");
+ return CPER_SEV_FATAL;
+ }
+}
+
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count)
+{
+ struct cper_hdr *corrected = NULL;
+ enum cper_error_severity sev = CPER_SEV_NON_FATAL_CORRECTED;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t reg_data[CPER_ACA_REG_COUNT] = { 0 };
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ uint32_t i = 0;
+ int ret;
+
+ corrected = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_RUNTIME, bank_count);
+ if (!corrected) {
+ dev_err(adev->dev, "fail to allocate cper entry for ce records\n");
+ return -ENOMEM;
+ }
+
+ /* Raise severity if any DE is detected in the ACA bank list */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ sev = CPER_SEV_NON_FATAL_UNCORRECTED;
+ break;
+ }
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);
+
+ /* Combine CE and DE in cper record */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_CTL_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_ADDR_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_ADDR_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_MISC0_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_MISC0_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_IPID_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_IPID_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_SYND_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data[CPER_ACA_REG_SYND_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ ret = amdgpu_cper_entry_fill_runtime_section(adev, corrected, i++,
+ amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
+ reg_data, CPER_ACA_REG_COUNT);
+ if (ret)
+ return ret;
+ }
+
+ amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
+ kfree(corrected);
+
+ return 0;
+}
+
+static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ return strcmp(chdr->signature, "CPER") ? false : true;
+}
+
+static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+ u64 p;
+ u32 chunk, rec_len = 0;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ chunk = ring->ring_size - (pos << 2);
+
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = chdr->record_length;
+ goto calc;
+ }
+
+ /* ring buffer is not full, no cper data after ring->wptr */
+ if (ring->count_dw)
+ goto calc;
+
+ for (p = pos + 1; p <= ring->buf_mask; p++) {
+ chdr = (struct cper_hdr *)&(ring->ring[p]);
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = (p - pos) << 2;
+ goto calc;
+ }
+ }
+
+calc:
+ if (!rec_len)
+ return chunk;
+ else
+ return umin(rec_len, chunk);
+}
+
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
+{
+ u64 pos, wptr_old, rptr;
+ int rec_cnt_dw = count >> 2;
+ u32 chunk, ent_sz;
+ u8 *s = (u8 *)src;
+
+ if (count >= ring->ring_size - 4) {
+ dev_err(ring->adev->dev,
+ "CPER data size(%d) is larger than ring size(%d)\n",
+ count, ring->ring_size - 4);
+
+ return;
+ }
+
+ mutex_lock(&ring->adev->cper.ring_lock);
+
+ wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+
+ while (count) {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
+ chunk = umin(ent_sz, count);
+
+ memcpy(&ring->ring[ring->wptr], s, chunk);
+
+ ring->wptr += (chunk >> 2);
+ ring->wptr &= ring->ptr_mask;
+ count -= chunk;
+ s += chunk;
+ }
+
+ if (ring->count_dw < rec_cnt_dw)
+ ring->count_dw = 0;
+
+ /* the buffer has overflowed, adjust rptr */
+ if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
+ ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
+ ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
+ pos = (ring->wptr + 1) & ring->ptr_mask;
+
+ do {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);
+
+ rptr += (ent_sz >> 2);
+ rptr &= ring->ptr_mask;
+ *ring->rptr_cpu_addr = rptr;
+
+ pos = rptr;
+ } while (!amdgpu_cper_is_hdr(ring, rptr));
+ }
+
+ if (ring->count_dw >= rec_cnt_dw)
+ ring->count_dw -= rec_cnt_dw;
+ mutex_unlock(&ring->adev->cper.ring_lock);
+}
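
The three-way condition above is the standard circular-buffer test for "the write overran the reader", covering every wrap combination of old wptr, new wptr and rptr; isolated, it is:

	/* Sketch: did a write moving wptr_old -> wptr (mod ring size)
	 * pass rptr? True in all three wrap arrangements. */
	static bool example_overran(u64 wptr_old, u64 wptr, u64 rptr)
	{
		return (wptr_old < rptr && rptr <= wptr) ||
		       (wptr < wptr_old && wptr_old < rptr) ||
		       (rptr <= wptr && wptr < wptr_old);
	}
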
+
+static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return *(ring->rptr_cpu_addr);
+}
+
+static u64 amdgpu_cper_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ return ring->wptr;
+}
+
+static const struct amdgpu_ring_funcs cper_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_CPER,
+ .align_mask = 0xff,
+ .support_64bit_ptrs = false,
+ .get_rptr = amdgpu_cper_ring_get_rptr,
+ .get_wptr = amdgpu_cper_ring_get_wptr,
+};
+
+static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &(adev->cper.ring_buf);
+
+ mutex_init(&adev->cper.ring_lock);
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->no_scheduler = true;
+ ring->funcs = &cper_ring_funcs;
+
+ sprintf(ring->name, "cper");
+ return amdgpu_ring_init(adev, ring, CPER_MAX_RING_SIZE, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+}
+
+int amdgpu_cper_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ r = amdgpu_cper_ring_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
+ return r;
+ }
+
+ mutex_init(&adev->cper.cper_lock);
+
+ adev->cper.enabled = true;
+ adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
+
+ return 0;
+}
+
+int amdgpu_cper_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ adev->cper.enabled = false;
+
+ amdgpu_ring_fini(&(adev->cper.ring_buf));
+ adev->cper.count = 0;
+ adev->cper.wptr = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
new file mode 100644
index 000000000000..bcb97d245673
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_CPER_H__
+#define __AMDGPU_CPER_H__
+
+#include "amd_cper.h"
+#include "amdgpu_aca.h"
+
+#define CPER_MAX_ALLOWED_COUNT 0x1000
+#define CPER_MAX_RING_SIZE 0x100000
+#define HDR_LEN (sizeof(struct cper_hdr))
+#define SEC_DESC_LEN (sizeof(struct cper_sec_desc))
+
+#define BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot))
+#define FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal))
+#define NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err))
+
+#define SEC_DESC_OFFSET(idx) (HDR_LEN + (SEC_DESC_LEN * idx))
+
+#define BOOT_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (BOOT_SEC_LEN * idx))
+#define FATAL_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (FATAL_SEC_LEN * idx))
+#define NONSTD_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (NONSTD_SEC_LEN * idx))
+
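
The offset macros encode a fixed record layout: one header, then all section descriptors, then the section bodies. The total size of, say, a runtime record therefore matches amdgpu_cper_alloc_entry()'s arithmetic:

	/* Sketch: bytes needed for a runtime CPER record with n
	 * nonstandard-error sections. */
	static inline u32 example_runtime_record_size(u16 n)
	{
		return HDR_LEN + n * (SEC_DESC_LEN + NONSTD_SEC_LEN);
	}
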
+enum amdgpu_cper_type {
+ AMDGPU_CPER_TYPE_RUNTIME,
+ AMDGPU_CPER_TYPE_FATAL,
+ AMDGPU_CPER_TYPE_BOOT,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+};
+
+struct amdgpu_cper {
+ bool enabled;
+
+ atomic_t unique_id;
+ struct mutex cper_lock;
+
+ /* Lifetime CPERs generated */
+ uint32_t count;
+ uint32_t max_count;
+
+ uint32_t wptr;
+
+ void *ring[CPER_MAX_ALLOWED_COUNT];
+ struct amdgpu_ring ring_buf;
+ struct mutex ring_lock;
+};
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev);
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data);
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count);
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t section_idx);
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count);
+/* UEs must be encoded into separate cper entries, one UE per cper */
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank);
+/* CEs and DEs are combined into 1 cper entry */
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count);
+/* Bad page threshold is encoded into a separate cper entry */
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev);
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
+ void *src, int count);
+int amdgpu_cper_init(struct amdgpu_device *adev);
+int amdgpu_cper_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d891ab779ca7..9cd7741d2254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -29,6 +29,7 @@
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
+#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,17 +274,36 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
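
In short, the mapping above is: DISABLE clears both flags, ENABLE sets both, and the legacy and no-cleaner-shader modes enforce isolation while skipping the cleaner shader.
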
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +356,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
@@ -373,7 +384,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -428,7 +439,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -445,12 +456,12 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
return r;
}
@@ -873,26 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- int i;
-
- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -936,7 +934,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -945,11 +943,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
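
With the per-entry user_pages shadow array gone, the invalidation check compares the TTM page array directly against the hmm_range PFNs, saving one allocation and copy per userptr BO on every submission.
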
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -960,7 +955,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -991,11 +986,7 @@ out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->bo;
- if (!e->user_pages)
- continue;
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1038,13 +1029,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1105,17 +1096,20 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
* We can't use gang submit with reserved VMIDs when the VM changes
* can't be invalidated by more than one engine at the same time.
*/
- if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
return -EINVAL;
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -1124,7 +1118,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
@@ -1135,7 +1130,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1154,7 +1150,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1167,7 +1164,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
if (r)
return r;
@@ -1189,7 +1186,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
}
}
@@ -1209,7 +1206,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1248,7 +1245,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
continue;
}
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
+ GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -1421,7 +1419,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1436,9 +1434,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1737,30 +1735,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
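The hunk above collapses the open-coded kmalloc_array()/copy_from_user() pair into memdup_array_user(), which performs the overflow-checked allocation and the copy in one call and returns an ERR_PTR() on failure, so the separate error label goes away. A minimal sketch of the pattern, assuming illustrative names (struct foo, consume_foo_array()) that are not part of this patch:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo {
	u32 value;
};

static int consume_foo_array(const void __user *uptr, u32 count)
{
	struct foo *arr;

	arr = memdup_array_user(uptr, count, sizeof(*arr));
	if (IS_ERR(arr))
		return PTR_ERR(arr);	/* -EFAULT, -ENOMEM or -EOVERFLOW */

	/* ... operate on arr[0..count-1] ... */

	kfree(arr);
	return 0;
}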
@@ -1801,13 +1790,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
+ /* Make sure VRAM is allocated contiguously */
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
- for (i = 0; i < (*bo)->placement.num_placement; i++)
- (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
- if (r)
- return r;
+ if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
+ !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
+
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
+ }
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c43d1b6e5d66..f5d5c45ddc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
return timeout;
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -944,24 +944,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
drm_sched_entity_fini(entity);
}
}
+ kref_put(&ctx->refcount, amdgpu_ctx_fini);
}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
- struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id;
-
amdgpu_ctx_mgr_entity_fini(mgr);
-
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
- DRM_ERROR("ctx %p is still alive\n", ctx);
- }
-
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
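The refactor above relies on the standard kref idiom: the final kref_put() invokes the release callback, so once amdgpu_ctx_mgr_entity_fini() drops each context's reference there is nothing left for amdgpu_ctx_mgr_fini() to sweep, and the old "still alive" check becomes dead code. A self-contained sketch of that idiom, with all names illustrative:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_ctx {
	struct kref refcount;
};

static struct example_ctx *example_ctx_alloc(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		kref_init(&ctx->refcount);	/* reference count starts at 1 */
	return ctx;
}

static void example_ctx_release(struct kref *ref)
{
	/* Called exactly once, by whichever kref_put() drops the last ref */
	kfree(container_of(ref, struct example_ctx, refcount));
}

static void example_ctx_put(struct example_ctx *ctx)
{
	kref_put(&ctx->refcount, example_ctx_release);
}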
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 85376baaa92f..090dfe86f75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cbef720de779..a70651050acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
int r;
uint32_t *data, x;
- if (size & 0x3 || *pos & 0x3)
+ if (size > 4096 || size & 0x3 || *pos & 0x3)
return -EINVAL;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | 0444, root,
+ S_IFREG | 0400, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -1902,7 +1902,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence.base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -1990,7 +1990,7 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
uint32_t max_freq, min_freq;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return -EINVAL;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -2095,16 +2095,23 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+ amdgpu_debugfs_vcn_sched_mask_init(adev);
+ amdgpu_debugfs_jpeg_sched_mask_init(adev);
+ amdgpu_debugfs_gfx_sched_mask_init(adev);
+ amdgpu_debugfs_compute_sched_mask_init(adev);
+ amdgpu_debugfs_sdma_sched_mask_init(adev);
+
amdgpu_ras_debugfs_create_all(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
- debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
- debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
&amdgpu_evict_gtt_fops);
- debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+ debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
&amdgpu_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
&amdgpu_debugfs_vm_info_fops);
@@ -2124,6 +2131,61 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ adev = drm_to_adev(file->minor->dev);
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2133,4 +2195,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
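amdgpu_debugfs_vm_init() hooks the new file into the per-client DRM debugfs directory (file->debugfs_client). A hedged userspace sketch for reading it; the path below is an assumption for illustration, since the real location depends on where debugfs is mounted and on the DRM minor and client id:

#include <stdio.h>

int main(void)
{
	/* Illustrative path: <debugfs>/dri/<minor>/clients/<id>/vm_pagetable_info */
	FILE *f = fopen("/sys/kernel/debug/dri/0/clients/1/vm_pagetable_info", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* pd_address, max_pfn, num_level, ... */
	fclose(f);
	return 0;
}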
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 5ac59b62020c..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
+ struct amdgpu_ip_block *ip_block;
int ver;
iter.data = buffer;
@@ -219,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
coredump->reset_time.tv_nsec);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
@@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
/* dump the ip state for each ip */
drm_printf(&p, "IP Dump\n");
for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
- if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
- drm_printf(&p, "IP: %s\n",
- coredump->adev->ip_blocks[i]
- .version->funcs->name);
- coredump->adev->ip_blocks[i]
- .version->funcs->print_ip_state(
- (void *)coredump->adev, &p);
+ ip_block = &coredump->adev->ip_blocks[i];
+ if (ip_block->version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name);
+ ip_block->version->funcs->print_ip_state(ip_block, &p);
drm_printf(&p, "\n");
}
}
@@ -345,11 +343,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (job && job->vm) {
- struct amdgpu_vm *vm = job->vm;
+ if (job && job->pasid) {
struct amdgpu_task_info *ti;
- ti = amdgpu_vm_get_task_info_vm(vm);
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
if (ti) {
coredump->reset_task_info = *ti;
amdgpu_vm_put_task_info(ti);
@@ -367,5 +364,9 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+ drm_info(dev, "AMDGPU device coredump file has been created\n");
+ drm_info(dev, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ dev->primary->index);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c2394c8b4d6b..7a899fb4de29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,8 @@
* Alex Deucher
* Jerome Glisse
*/
+
+#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -35,10 +37,9 @@
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
@@ -84,6 +85,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -93,6 +95,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -101,6 +104,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
+#define AMDGPU_VBIOS_SKIP (1U << 0)
+#define AMDGPU_VBIOS_OPTIONAL (1U << 1)
+
static const struct drm_driver amdgpu_kms_driver;
const char *amdgpu_asic_name[] = {
@@ -144,15 +150,72 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+ .level = AMDGPU_INIT_LEVEL_DEFAULT,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+struct amdgpu_init_level amdgpu_init_recovery = {
+ .level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal blocks needed to be initialized before an XGMI hive can be reset. This
+ * is used for cases like reset on initialization where the entire hive needs to
+ * be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+ .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ .hwini_ip_block_mask =
+ BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+ BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+ BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+ enum amd_ip_block_type block)
+{
+ return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl)
+{
+ switch (lvl) {
+ case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+ adev->init_lvl = &amdgpu_init_minimal_xgmi;
+ break;
+ case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
+ adev->init_lvl = &amdgpu_init_recovery;
+ break;
+ case AMDGPU_INIT_LEVEL_DEFAULT:
+ fallthrough;
+ default:
+ adev->init_lvl = &amdgpu_init_default;
+ break;
+ }
+}
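A hypothetical caller sketch of how the levels compose: a reset-on-init path would drop to the minimal-XGMI level around the hive reset, so that amdgpu_ip_member_of_hwini() limits hw_init to the GMC/SMC/COMMON/IH/PSP blocks, then restore the default level for normal bring-up. example_reset_hive_on_init() is illustrative and not a function added by this patch:

static void example_reset_hive_on_init(struct amdgpu_device *adev)
{
	/* Only the minimal block set passes amdgpu_ip_member_of_hwini() now */
	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_MINIMAL_XGMI);

	/* ... minimal hw init and the whole-hive reset would run here ... */

	/* Restore full initialization for normal use */
	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
}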
+
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data);
/**
* DOC: pcie_replay_count
*
* The amdgpu driver provides a sysfs API for reporting the total number
- * of PCIe replays (NAKs)
+ * of PCIe replays (NAKs).
* The file pcie_replay_count is used for this and returns the total
- * number of replays as a sum of the NAKs generated and NAKs received
+ * number of replays as a sum of the NAKs generated and NAKs received.
*/
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
@@ -168,8 +231,26 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ ret = sysfs_create_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ sysfs_remove_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+}
+
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -205,8 +286,8 @@ static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
return bytes_read;
}
-BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
- AMDGPU_SYS_REG_STATE_END);
+static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
@@ -227,6 +308,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->suspend) {
+ r = ip_block->version->funcs->suspend(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = false;
+ return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->resume) {
+ r = ip_block->version->funcs->resume(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = true;
+ return 0;
+}
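With these two wrappers, caller-side loops no longer carry their own error prints or status.hw bookkeeping; compare the amdgpu_device_fw_loading() hunk later in this patch, which switches to amdgpu_ip_block_resume(). A hedged sketch of what a resume loop reduces to (example_resume_all() is illustrative):

static int example_resume_all(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* Wrapper logs any failure and sets status.hw on success */
		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
		if (r)
			return r;
	}
	return 0;
}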
+
/**
* DOC: board_info
*
@@ -297,19 +414,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -318,14 +432,15 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ return false;
if (adev->has_pr3 ||
((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
@@ -336,29 +451,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
- * 1 if the device supporte BACO;
- * 3 if the device support MACO (only works if BACO is supported)
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -378,10 +488,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -395,12 +507,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
+ if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ if (!amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
@@ -429,14 +542,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -487,7 +600,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_aper_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -578,7 +691,7 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
* here is that the GPU reset is not running on another thread in parallel.
*
* For this we trylock the read side of the reset semaphore, if that succeeds
- * we know that the reset is not running in paralell.
+ * we know that the reset is not running in parallel.
*
* If the trylock fails we assert that we are either already holding the read
* side of the lock or are the reset thread itself and hold the write side of
@@ -1170,14 +1283,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1194,15 +1307,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1218,14 +1333,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1242,15 +1358,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1268,8 +1386,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1289,11 +1408,23 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
+static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
+{
+ if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
+ return AMDGPU_VBIOS_SKIP;
+
+ if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
+ return AMDGPU_VBIOS_OPTIONAL;
+
+ return 0;
+}
+
/**
* amdgpu_device_asic_init - Wrapper for atom asic_init
*
@@ -1303,17 +1434,28 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ uint32_t flags;
+ bool optional;
int ret;
amdgpu_asic_pre_asic_init(adev);
+ flags = amdgpu_device_get_vbios_flags(adev);
+ optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
+ if (optional && !adev->bios)
+ return 0;
+
ret = amdgpu_atomfirmware_asic_init(adev, true);
return ret;
} else {
+ if (optional && !adev->bios)
+ return 0;
+
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
@@ -1542,9 +1684,21 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1584,9 +1738,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
r = pci_resize_resource(adev->pdev, 0, rbar_size);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
pci_assign_unassigned_bus_resources(adev->pdev->bus);
@@ -1602,14 +1757,6 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
-{
- if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
- return false;
-
- return true;
-}
-
/*
* GPU helpers function.
*/
@@ -1624,12 +1771,15 @@ static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
*/
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
- uint32_t reg;
+ uint32_t reg, flags;
if (amdgpu_sriov_vf(adev))
return false;
- if (!amdgpu_device_read_bios(adev))
+ flags = amdgpu_device_get_vbios_flags(adev);
+ if (flags & AMDGPU_VBIOS_SKIP)
+ return false;
+ if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
return false;
if (amdgpu_passthrough(adev)) {
@@ -1643,7 +1793,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
uint32_t fw_ver;
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
- /* force vPost if error occured */
+ /* force vPost if error occurred */
if (err)
return true;
@@ -1655,7 +1805,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
/* Don't post if we need to reset whole hive on init */
- if (adev->gmc.xgmi.pending_reset)
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
return false;
if (adev->has_hw_reset) {
@@ -1693,8 +1843,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -1730,6 +1880,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1754,7 +1933,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -1841,7 +2020,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -1856,7 +2035,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -1864,7 +2043,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -1972,8 +2151,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -1993,7 +2195,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2005,12 +2208,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2075,10 +2279,11 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2109,10 +2314,11 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2137,7 +2343,8 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
- adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+ adev->ip_blocks[i].version->funcs->get_clockgating_state(
+ &adev->ip_blocks[i], flags);
}
}
@@ -2159,9 +2366,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
- if (r)
- return r;
+ if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+ &adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
break;
}
}
@@ -2170,26 +2380,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_idle - is the hardware IP idle
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
- * Check if the hardware IP is idle or not.
- * Returns true if it the IP is idle, false if not.
+ * Check if the hardware IP is enabled or not.
+ * Returns true if the IP is enabled, false if not.
*/
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
if (adev->ip_blocks[i].version->type == block_type)
- return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+ return adev->ip_blocks[i].status.valid;
}
- return true;
+ return false;
}
@@ -2240,6 +2448,33 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2268,8 +2503,15 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
- ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
+
+ adev->ip_blocks[adev->num_ip_blocks].adev = adev;
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
@@ -2285,7 +2527,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
* the module parameter virtual_display. This feature provides a virtual
* display hardware on headless boards or in virtualized environments.
* This function parses and validates the configuration string specified by
- * the user and configues the virtual display configuration (number of
+ * the user and configures the virtual display configuration (number of
* virtual connectors, crtcs, etc.) specified.
*/
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
@@ -2324,9 +2566,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2337,8 +2581,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2348,7 +2593,7 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Parses the asic configuration parameters specified in the gpu info
- * firmware and makes them availale to the driver for use in configuring
+ * firmware and makes them available to the driver for use in configuring
* the asic.
* Returns 0 on success, -EINVAL on failure.
*/
@@ -2360,9 +2605,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2384,11 +2626,17 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
@@ -2408,7 +2656,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
/*
- * Should be droped when DAL no longer needs it.
+ * Should be dropped when DAL no longer needs it.
*/
if (adev->asic_type == CHIP_NAVI12)
goto parse_soc_bounding_box;
@@ -2464,6 +2712,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2478,8 +2744,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
+ bool total, skip_bios;
+ uint32_t bios_flags;
int i, r;
- bool total;
amdgpu_device_enable_virtual_display(adev);
@@ -2543,6 +2810,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2555,7 +2829,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -2564,27 +2837,35 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ ip_block = &adev->ip_blocks[i];
+
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
- } else {
- if (adev->ip_blocks[i].version->funcs->early_init) {
- r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
- if (r == -ENOENT) {
- adev->ip_blocks[i].status.valid = false;
- } else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- total = false;
- } else {
- adev->ip_blocks[i].status.valid = true;
- }
+ } else if (ip_block->version->funcs->early_init) {
+ r = ip_block->version->funcs->early_init(ip_block);
+ if (r == -ENOENT) {
+ adev->ip_blocks[i].status.valid = false;
+ } else if (r) {
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
+ } else {
+ adev->ip_blocks[i].status.valid = true;
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
@@ -2592,16 +2873,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ bios_flags = amdgpu_device_get_vbios_flags(adev);
+ skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
/* Read BIOS */
- if (amdgpu_device_read_bios(adev)) {
- if (!amdgpu_get_bios(adev))
+ if (!skip_bios) {
+ bool optional =
+ !!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
+ if (!amdgpu_get_bios(adev) && !optional)
return -EINVAL;
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
- return r;
+ if (optional && !adev->bios)
+ dev_info(
+ adev->dev,
+ "VBIOS image optional, proceeding without VBIOS image");
+
+ if (adev->bios) {
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(
+ adev,
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
+ 0, 0);
+ return r;
+ }
}
}
@@ -2614,6 +2910,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
+ if (adev->gmc.xgmi.supported)
+ amdgpu_xgmi_early_init(adev);
+
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -2633,13 +2934,18 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
(amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2658,10 +2964,14 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2681,6 +2991,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
+ if (!amdgpu_ip_member_of_hwini(adev,
+ AMD_IP_BLOCK_TYPE_PSP))
+ break;
+
if (!adev->ip_blocks[i].status.sw)
continue;
@@ -2689,22 +3003,21 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
break;
if (amdgpu_in_reset(adev) || adev->in_suspend) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
} else {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
-
- adev->ip_blocks[i].status.hw = true;
break;
}
}
@@ -2717,6 +3030,12 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
{
+ struct drm_sched_init_args args = {
+ .ops = &amdgpu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .timeout_wq = adev->reset_domain->wq,
+ .dev = adev->dev,
+ };
long timeout;
int r, i;
@@ -2742,32 +3061,36 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- ring->num_hw_submission, 0,
- timeout, adev->reset_domain->wq,
- ring->sched_score, ring->name,
- adev->dev);
+ args.timeout = timeout;
+ args.credit_limit = ring->num_hw_submission;
+ args.score = ring->sched_score;
+ args.name = ring->name;
+
+ r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
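The scheduler bring-up now uses the struct-based drm_sched_init() API: fields common to every ring are set once in the struct drm_sched_init_args initializer, and the per-ring values are filled in just before each call. A reduced sketch of the call-site shape, folded into one illustrative helper (example_init_ring_sched() is not part of this patch):

static int example_init_ring_sched(struct amdgpu_device *adev,
				   struct amdgpu_ring *ring, long timeout)
{
	struct drm_sched_init_args args = {
		/* common for all rings */
		.ops = &amdgpu_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.timeout_wq = adev->reset_domain->wq,
		.dev = adev->dev,
		/* per-ring values */
		.timeout = timeout,
		.credit_limit = ring->num_hw_submission,
		.score = ring->sched_score,
		.name = ring->name,
	};

	return drm_sched_init(&ring->sched, &args);
}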
@@ -2786,6 +3109,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
+ bool init_badpage;
int i, r;
r = amdgpu_ras_init(adev);
@@ -2795,19 +3119,28 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
- if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- goto init_failed;
+ if (adev->ip_blocks[i].version->funcs->sw_init) {
+ r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ goto init_failed;
+ }
}
adev->ip_blocks[i].status.sw = true;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
/* need to do common hw init early so everything is set up for gmc */
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -2819,17 +3152,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -2841,14 +3178,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -2895,7 +3234,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
* Note: theoretically, this should be called before all vram allocations
* to protect retired page from abusing
*/
- r = amdgpu_ras_recovery_init(adev);
+ init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+ r = amdgpu_ras_recovery_init(adev, init_badpage);
if (r)
goto init_failed;
@@ -2935,13 +3275,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
/* Don't init kfd if whole hive need to be reset during init */
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
}
amdgpu_fru_get_product_info(adev);
+ if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
+ r = amdgpu_cper_init(adev);
+
init_failed:
return r;
@@ -2954,7 +3297,7 @@ init_failed:
*
* Writes a reset magic value to the gart pointer in VRAM. The driver calls
* this function before a GPU reset. If the value is retained after a
- * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents.
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
*/
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
@@ -2985,6 +3328,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3030,11 +3375,13 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3067,11 +3414,13 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
- r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3098,7 +3447,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3135,10 +3484,12 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->funcs->late_init) {
- r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3147,11 +3498,11 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
- if (!amdgpu_in_reset(adev))
+ if (!amdgpu_reset_in_recovery(adev))
amdgpu_ras_set_error_query_ready(adev, true);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -3161,7 +3512,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
if (amdgpu_passthrough(adev) &&
@@ -3194,7 +3545,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3206,6 +3559,27 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ if (!ip_block->version->funcs->hw_fini) {
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
+ } else {
+ r = ip_block->version->funcs->hw_fini(ip_block);
+ /* XXX handle errors */
+ if (r) {
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ }
+ }
+
+ ip_block->status.hw = false;
+}
+
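The helper above folds the repeated hw_fini call-site pattern into one place: tolerate a missing callback, log (but do not propagate) a failure, and unconditionally mark the block as powered off. A minimal user-space model of that pattern, with hypothetical types standing in for the driver's structures (a sketch, not driver code):

    #include <stdio.h>

    struct ip_block {
            const char *name;
            int (*hw_fini)(struct ip_block *blk);
            int hw;                         /* mirrors status.hw */
    };

    static void ip_block_hw_fini(struct ip_block *blk)
    {
            if (!blk->hw_fini)
                    fprintf(stderr, "hw_fini of IP block <%s> not defined\n", blk->name);
            else if (blk->hw_fini(blk))
                    fprintf(stderr, "hw_fini of IP block <%s> failed\n", blk->name);
            blk->hw = 0;                    /* powered off either way */
    }

The two later hunks that switch the SMC early-fini and the main teardown loop over to the helper then reduce to a single call per block.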
/**
* amdgpu_device_smu_fini_early - smu hw_fini wrapper
*
@@ -3215,7 +3589,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
*/
static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
return;
@@ -3224,13 +3598,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
break;
}
}
@@ -3244,38 +3612,34 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
- r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
+ amdgpu_userq_suspend(adev);
- /* Workaroud for ASICs need to disable SMC first */
+ /* Workaround for ASICs that need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
-
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
}
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
}
return 0;
@@ -3296,6 +3660,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_cper_fini(adev);
+
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
@@ -3315,13 +3681,17 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
+ amdgpu_doorbell_fini(adev);
}
-
- r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ if (adev->ip_blocks[i].version->funcs->sw_fini) {
+ r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+ /* XXX handle errors */
+ if (r) {
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ }
}
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
@@ -3331,11 +3701,12 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
- adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -3353,7 +3724,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3364,7 +3735,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
}
@@ -3403,15 +3774,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
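amdgpu_ip_block_suspend() itself is not shown in this hunk; judging from the call-site code it replaces, it presumably wraps the block's suspend callback, logs a failure, and clears status.hw on success, roughly as below (a reconstruction from the removed lines, not the actual body):

    static int amdgpu_ip_block_suspend_sketch(struct amdgpu_ip_block *ip_block)
    {
            struct amdgpu_device *adev = ip_block->adev;
            int r = ip_block->version->funcs->suspend(ip_block);

            if (r) {
                    dev_err(adev->dev, "suspend of IP block <%s> failed %d\n",
                            ip_block->version->funcs->name, r);
                    return r;
            }
            ip_block->status.hw = false;
            return 0;
    }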
@@ -3449,15 +3814,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
/* skip unnecessary suspend if we do not initialize them yet */
- if (adev->gmc.xgmi.pending_reset &&
- !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
- adev->ip_blocks[i].status.hw = false;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
continue;
- }
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
@@ -3490,20 +3857,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.hw = false;
+
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
return r;
}
}
@@ -3570,10 +3934,12 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
!block->status.valid)
continue;
- r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
- if (r)
+ r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+ block->version->funcs->name);
return r;
+ }
block->status.hw = true;
}
}
@@ -3583,7 +3949,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
- int i, r;
+ struct amdgpu_ip_block *block;
+ int i, r = 0;
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
@@ -3598,30 +3965,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
- int j;
- struct amdgpu_ip_block *block;
-
- for (j = 0; j < adev->num_ip_blocks; j++) {
- block = &adev->ip_blocks[j];
+ block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
- if (block->version->type != ip_order[i] ||
- !block->status.valid ||
- block->status.hw)
- continue;
+ if (!block)
+ continue;
- if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
- r = block->version->funcs->resume(adev);
- else
- r = block->version->funcs->hw_init(adev);
+ if (block->status.valid && !block->status.hw) {
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_ip_block_resume(block);
+ } else {
+ r = block->version->funcs->hw_init(block);
+ }
- DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
- if (r)
- return r;
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-late: %s failed\n",
+ block->version->funcs->name);
+ break;
+ }
block->status.hw = true;
}
}
- return 0;
+ return r;
}
/**
@@ -3648,13 +4013,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = true;
}
}
@@ -3666,7 +4027,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
- * First resume function for hardware IPs. The list of all the hardware
+ * Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@@ -3684,15 +4045,42 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run
+ * only for DCE. resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary. This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
- adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -3727,6 +4115,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ if (r)
+ return r;
+
+ amdgpu_fence_driver_hw_init(adev);
+
+ r = amdgpu_device_ip_resume_phase3(adev);
+
return r;
}
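Taken together, the resume path now runs in three phases with the fence driver brought up in between; a condensed sketch of the resulting order (the real function also restores the buffer funcs between the phases, and error handling is elided here):

    static int ip_resume_flow_sketch(struct amdgpu_device *adev)
    {
            int r;

            r = amdgpu_device_ip_resume_phase1(adev);       /* COMMON, GMC, IH */
            if (r)
                    return r;
            r = amdgpu_device_ip_resume_phase2(adev);       /* the rest, minus DCE/PSP */
            if (r)
                    return r;
            amdgpu_fence_driver_hw_init(adev);              /* fences up before display */
            return amdgpu_device_ip_resume_phase3(adev);    /* DCE last */
    }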
@@ -3756,12 +4151,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev: pci device context
* @asic_type: AMD asic type
*
 * Check if there is DC (new modesetting infrastructure) support for an asic.
* returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -3804,7 +4201,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -3823,7 +4222,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -3845,13 +4244,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -3865,7 +4264,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -3879,18 +4279,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;
/*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
+ * By default, the timeout for jobs is 10 seconds.
*/
- adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
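The strsep() loop that begins above consumes the comma-separated timeout module parameter; a small user-space model of the same parsing (hypothetical values and per-engine ordering, glibc strsep):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "1000,2000,3000,4000";     /* per-engine timeouts, ms */
            char *input = buf, *tok;
            long timeout[4] = { 10000, 10000, 10000, 10000 };   /* 10 sec default */
            int i = 0;

            while ((tok = strsep(&input, ",")) && i < 4)
                    timeout[i++] = strtol(tok, NULL, 10);

            for (i = 0; i < 4; i++)
                    printf("timeout[%d] = %ld ms\n", i, timeout[i]);
            return 0;
    }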
@@ -3978,11 +4370,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
}
#endif
-static const struct attribute *amdgpu_dev_attributes[] = {
- &dev_attr_pcie_replay_count.attr,
- NULL
-};
-
static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
@@ -3994,7 +4381,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
}
/**
@@ -4010,7 +4397,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4062,9 +4448,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4079,7 +4467,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
- mutex_init(&adev->virt.rlcg_reg_lock);
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
@@ -4088,7 +4475,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.reset_sem_mutex);
/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
mutex_init(&adev->enforce_isolation_mutex);
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ for (i = 0; i < MAX_XCP; ++i) {
+ adev->isolation[i].spearhead = dma_fence_get_stub();
+ amdgpu_sync_create(&adev->isolation[i].active);
+ amdgpu_sync_create(&adev->isolation[i].prev);
+ }
+ mutex_init(&adev->gfx.userq_sch_mutex);
+ mutex_init(&adev->gfx.workload_profile_mutex);
+ mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4105,14 +4500,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4149,6 +4549,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* for throttling interrupt) = 60 seconds.
*/
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
/* Registers mapping */
@@ -4168,12 +4569,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
- * (if any) and intitialized to use reset sem and in_gpu reset flag
+ * (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
@@ -4181,7 +4584,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return -ENOMEM;
/* detect hw virtualization here */
- amdgpu_detect_virtualization(adev);
+ amdgpu_virt_init(adev);
amdgpu_device_get_pcie_info(adev);
@@ -4193,15 +4596,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_set_mcbp(adev);
+ /*
+ * By default, use the default init level, where all blocks are expected
+ * to be initialized. At present, 'swinit' of all blocks must complete
+ * before the need for a different level can be detected.
+ */
+ amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
- /* Get rid of things like offb */
- r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
- if (r)
- return r;
+ /*
+ * No need to remove conflicting FBs for non-display class devices.
+ * This prevents the sysfb from being freed accidentally.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ /* Get rid of things like offb */
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+ if (r)
+ return r;
+ }
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4265,20 +4681,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
if (adev->gmc.xgmi.num_physical_nodes) {
dev_info(adev->dev, "Pending hive reset.\n");
- adev->gmc.xgmi.pending_reset = true;
- /* Only need to init necessary block for SMU to handle the reset */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
- if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
- DRM_DEBUG("IP %s disabled for hw_init.\n",
- adev->ip_blocks[i].version->funcs->name);
- adev->ip_blocks[i].status.hw = true;
- }
- }
+ amdgpu_set_init_level(adev,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev)) {
r = psp_gpu_reset(adev);
@@ -4305,7 +4709,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4331,8 +4735,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_atombios_i2c_init(adev);
+ amdgpu_i2c_init(adev);
}
}
@@ -4386,7 +4789,7 @@ fence_driver_init:
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
r = amdgpu_device_ip_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
@@ -4416,16 +4819,16 @@ fence_driver_init:
r = amdgpu_pm_sysfs_init(adev);
if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
} else
adev->ucode_sysfs_en = true;
- r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ r = amdgpu_device_attr_sysfs_init(adev);
if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -4436,6 +4839,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4453,7 +4857,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4463,12 +4867,16 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->gmc.xgmi.pending_reset)
- queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
amdgpu_device_check_iommu_direct_map(adev);
+ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+ r = register_pm_notifier(&adev->pm_nb);
+ if (r)
+ goto failed;
+
return 0;
release_ras_con:
@@ -4533,6 +4941,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
+ unregister_pm_notifier(&adev->pm_nb);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
@@ -4555,10 +4965,11 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ amdgpu_device_attr_sysfs_fini(adev);
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4581,31 +4992,38 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- int idx;
+ int i, idx;
bool px;
- amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
+ amdgpu_fence_driver_sw_fini(adev);
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+ for (i = 0; i < MAX_XCP; ++i) {
+ dma_fence_put(adev->isolation[i].spearhead);
+ amdgpu_sync_free(&adev->isolation[i].active);
+ amdgpu_sync_free(&adev->isolation[i].prev);
+ }
amdgpu_reset_fini(adev);
/* free i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_i2c_fini(adev);
-
- if (amdgpu_emu_mode != 1)
- amdgpu_atombios_fini(adev);
+ amdgpu_i2c_fini(adev);
- kfree(adev->bios);
- adev->bios = NULL;
+ if (adev->bios) {
+ if (amdgpu_emu_mode != 1)
+ amdgpu_atombios_fini(adev);
+ amdgpu_bios_release(adev);
+ }
kfree(adev->fru_info);
adev->fru_info = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ kfree(adev->xcp_mgr);
+ adev->xcp_mgr = NULL;
+
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4621,7 +5039,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
drm_dev_exit(idx);
}
@@ -4634,7 +5051,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -4650,13 +5068,25 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
int ret;
- /* No need to evict vram on APUs for suspend to ram or s2idle */
- if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs unless going to S4 */
+ if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
+ return 0;
+
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -4664,6 +5094,33 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: callback data (unused)
+ *
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
+ */
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
+{
+ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ adev->in_s4 = true;
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
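The notifier itself only flips the in_s4 flag; the wiring lives in the two hunks touching amdgpu_device_init() and amdgpu_device_fini_hw() further up, where registration and teardown pair one-for-one:

    /* at init (amdgpu_device_init) */
    adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
    r = register_pm_notifier(&adev->pm_nb);

    /* at teardown (amdgpu_device_fini_hw) */
    unregister_pm_notifier(&adev->pm_nb);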
+/**
* amdgpu_device_prepare - prepare for device suspend
*
* @dev: drm dev pointer
@@ -4677,15 +5134,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4694,30 +5149,47 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
continue;
- r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
+}
-unprepare:
- adev->in_s0ix = adev->in_s3 = false;
+/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- return r;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
}
/**
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
- * @fbcon : notify the fbdev of suspend
+ * @notify_clients: notify in-kernel DRM clients
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@@ -4728,17 +5200,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+ dev_warn(adev->dev, "smart shift update failed\n");
- if (fbcon)
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+ if (notify_clients)
+ drm_client_dev_suspend(adev_to_drm(adev), false);
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -4746,8 +5220,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix)
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -4769,17 +5243,43 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
return 0;
}
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as VF isn't in
+ * exclusive access mode. Exclusive access is enabled now; disable and
+ * re-enable MSIX so that QEMU reprograms the MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
+ return 0;
+}
+
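The base-offset arithmetic above places a node's VRAM aperture at its segment within the hive; a worked example with hypothetical numbers, where a migration to physical node 2 puts the base two segments past the framebuffer offset:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical values, for illustration only */
            uint64_t fb_offset = 0x8000000000ULL;   /* get_mc_fb_offset() result */
            uint64_t seg       = 64ULL << 30;       /* node_segment_size: 64 GiB */
            uint64_t node_id   = 2;                 /* new physical_node_id */

            printf("vram_base_offset = %#llx\n",
                   (unsigned long long)(fb_offset + node_id * seg));
            return 0;   /* prints 0xa000000000 */
    }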
/**
* amdgpu_device_resume - initiate device resume
*
* @dev: drm dev pointer
- * @fbcon : notify the fbdev of resume
+ * @notify_clients: notify in-kernel DRM clients
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@@ -4790,6 +5290,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4809,13 +5315,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
- amdgpu_fence_driver_hw_init(adev);
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
- if (r)
- goto exit;
- }
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
+
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -4827,6 +5334,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -4835,8 +5345,8 @@ exit:
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- if (fbcon)
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev), false);
amdgpu_ras_resume(adev);
@@ -4861,13 +5371,12 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
- adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
+ adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -4898,7 +5407,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
adev->ip_blocks[i].status.hang =
- adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ adev->ip_blocks[i].version->funcs->check_soft_reset(
+ &adev->ip_blocks[i]);
if (adev->ip_blocks[i].status.hang) {
dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
@@ -4927,7 +5437,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -4989,7 +5499,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->soft_reset) {
- r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5018,7 +5528,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->post_soft_reset)
- r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5053,7 +5563,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_ras_set_fed(adev, false);
+ amdgpu_ras_clear_err_state(adev);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5101,22 +5611,28 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
+
+ amdgpu_virt_ras_telemetry_post_reset(adev);
+
return 0;
}
/**
- * amdgpu_device_has_job_running - check if there is any job in mirror list
+ * amdgpu_device_has_job_running - check if there is any unfinished job
*
* @adev: amdgpu_device pointer
*
- * check if there is any job in mirror list
+ * check if there is any job running on the device when guest driver receives
+ * FLR notification from host driver. If there are still jobs running, then
+ * the guest driver will not respond to the FLR reset. Instead, let the job hit
+ * the timeout and the guest driver then issues the reset request.
*/
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
int i;
- struct drm_sched_job *job;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -5124,11 +5640,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.pending_list,
- struct drm_sched_job, list);
- spin_unlock(&ring->sched.job_list_lock);
- if (job)
+ if (amdgpu_fence_count_emitted(ring))
return true;
}
return false;
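The check now keys off emitted-but-unsignalled fences rather than peeking at the scheduler's pending list; the presumed semantics of the helper (a reconstruction, with wraparound handling elided):

    /* fences emitted on the ring that have not signalled yet, roughly: */
    emitted = ring->fence_drv.sync_seq - atomic_read(&ring->fence_drv.last_seq);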
@@ -5190,7 +5702,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
u32 i;
int ret = 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
dev_info(adev->dev, "GPU mode1 reset\n");
@@ -5232,7 +5745,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
goto mode1_reset_failed;
}
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
@@ -5241,6 +5755,29 @@ mode1_reset_failed:
return ret;
}
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!amdgpu_reset_in_dpc(adev))
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5309,7 +5846,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < tmp_adev->num_ip_blocks; i++)
if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
+ ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
}
@@ -5325,76 +5862,36 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-int amdgpu_do_asic_reset(struct list_head *device_list_handle,
- struct amdgpu_reset_context *reset_context)
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
{
- struct amdgpu_device *tmp_adev = NULL;
- bool need_full_reset, skip_hw_reset, vram_lost = false;
- int r = 0;
+ struct list_head *device_list_handle;
+ bool full_reset, vram_lost = false;
+ struct amdgpu_device *tmp_adev;
+ int r, init_level;
- /* Try reset handler method first */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ device_list_handle = reset_context->reset_device_list;
- reset_context->reset_device_list = device_list_handle;
- r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
- /* If reset handler not implemented, continue; otherwise return */
- if (r == -EOPNOTSUPP)
- r = 0;
- else
- return r;
+ if (!device_list_handle)
+ return -EINVAL;
- /* Reset handler not implemented, use the default method */
- need_full_reset =
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+ full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- /*
- * ASIC reset has to be done on all XGMI hive nodes ASAP
- * to allow proper links negotiation in FW (within 1 sec)
+ /*
+ * If it's a reset on init, use the default init level; otherwise keep
+ * the level as the recovery level.
*/
- if (!skip_hw_reset && need_full_reset) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /* For XGMI run all resets in parallel to speed up the process */
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- tmp_adev->gmc.xgmi.pending_reset = false;
- if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
- r = -EALREADY;
- } else
- r = amdgpu_asic_reset(tmp_adev);
-
- if (r) {
- dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
- r, adev_to_drm(tmp_adev)->unique);
- goto out;
- }
- }
-
- /* For XGMI wait for all resets to complete before proceed */
- if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- flush_work(&tmp_adev->xgmi_reset_work);
- r = tmp_adev->asic_reset_res;
- if (r)
- break;
- }
- }
- }
- }
-
- if (!r && amdgpu_ras_intr_triggered()) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
- }
-
- amdgpu_ras_intr_cleared();
- }
+ if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
+ init_level = AMDGPU_INIT_LEVEL_DEFAULT;
+ else
+ init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
+ r = 0;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (need_full_reset) {
+ amdgpu_set_init_level(tmp_adev, init_level);
+ if (full_reset) {
/* post card */
- amdgpu_ras_set_fed(tmp_adev, false);
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
+ amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
@@ -5411,7 +5908,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -5431,6 +5930,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+ r = amdgpu_device_ip_resume_phase3(tmp_adev);
+ if (r)
+ goto out;
+
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
@@ -5448,7 +5951,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
+ drm_client_dev_resume(adev_to_drm(tmp_adev), false);
/*
* The GPU enters bad state once faulty pages
@@ -5478,11 +5981,13 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
out:
if (!r) {
+ /* IP init is complete now, set the level back to default */
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- need_full_reset = true;
r = -EAGAIN;
goto end;
}
@@ -5493,10 +5998,85 @@ out:
}
end:
- if (need_full_reset)
+ return r;
+}
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset, skip_hw_reset;
+ int r = 0;
+
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+
+ reset_context->reset_device_list = device_list_handle;
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -EOPNOTSUPP)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (!skip_hw_reset && need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ goto out;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ reset_list) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ amdgpu_ras_reset_error_count(tmp_adev,
+ AMDGPU_RAS_BLOCK__MMHUB);
+ }
+
+ amdgpu_ras_intr_cleared();
+ }
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r == -EAGAIN)
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
else
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+out:
return r;
}
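The split leaves amdgpu_do_asic_reset() with a simple retry contract: -EAGAIN from the reinit step re-arms AMDGPU_NEED_FULL_RESET, and the caller loops; schematically, as the call site in amdgpu_device_asic_reset() below does:

    r = amdgpu_do_asic_reset(device_list, reset_context);
    if (r && r == -EAGAIN)
            goto retry;     /* redo pre-reset + reset with the full-reset flag set */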
@@ -5505,6 +6085,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5608,105 +6189,76 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
}
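amdgpu_device_bus_status_check() is new in this hunk; from the inline check it replaces, it presumably reads a config-space register and treats an all-ones value as the device having dropped off the bus (a reconstruction from the removed lines):

    static int bus_status_check_sketch(struct amdgpu_device *adev)
    {
            u32 status;

            pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
            if (PCI_POSSIBLE_ERROR(status)) {
                    dev_err(adev->dev, "device lost from bus!");
                    return -ENODEV;
            }
            return 0;
    }

Note that the caller above ORs the per-device return values together, so a single lost device marks the whole list unhealthy.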
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
-
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
-
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
-
- reset_context->job = job;
- reset_context->hive = hive;
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (amdgpu_reset_in_dpc(adev))
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
}
+}
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_device_health_check(device_list_handle);
- if (r)
- goto end_reset;
- }
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i;
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -5720,7 +6272,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -5729,16 +6281,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
/*
- * Mark these ASICs to be reseted as untracked first
+ * Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -5754,24 +6306,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
+}
- if (need_emergency_restart)
- goto skip_sched_resume;
-
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5784,6 +6330,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -5798,12 +6349,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -5813,10 +6364,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -5824,36 +6383,51 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* Bad news: how do we tell userspace?
 * For a RAS error we should report GPU bad status
 * instead of reset failure.
 */
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(tmp_adev->dev,
+ "GPU reset(%d) failed with error %d\n",
+ atomic_read(&tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -5864,18 +6438,120 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
+}
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: which job triggered the hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all ASICs).
+ * Attempt to do a soft reset or full reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "GPU recovery from source: %d yielding to RAS error recovery handling\n",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ dev_warn(adev->dev, "Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin. Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto reset_unlock;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto reset_unlock;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -5886,6 +6562,19 @@ end_reset:
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
atomic_set(&adev->reset_domain->reset_res, r);
+
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
+
return r;
}
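
With recovery now staged into prepare/halt/reset/resume helpers, a caller only has to fill in a reset context. A minimal sketch, assuming the AMDGPU_RESET_SRC_JOB enum value and the flag names used elsewhere in this file:

/* Hypothetical caller, e.g. a job-timeout handler. */
static void example_timeout_handler(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the core choose */
	reset_context.reset_req_dev = adev;
	reset_context.src = AMDGPU_RESET_SRC_JOB;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, job, &reset_context);
}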
@@ -5928,19 +6617,56 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the PCI hierarchy to find the speed and bandwidth capabilities of
+ * the AMD dGPU, which may sit behind a virtual upstream bridge.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
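
The traversal above prefers the caps reported by the dGPU's own internal switch. A simplified, hedged sketch of the same idea (it keeps the last AMD/ATI bridge seen, whereas the code above only walks the chain when the immediate parent is ATI):

static void example_walk_upstream(struct pci_dev *pdev,
				  enum pci_bus_speed *speed,
				  enum pcie_link_width *width)
{
	struct pci_dev *bridge;
	bool found = false;

	/* pci_upstream_bridge() returns NULL once we pass the root */
	for (bridge = pci_upstream_bridge(pdev); bridge;
	     bridge = pci_upstream_bridge(bridge)) {
		if (bridge->vendor != PCI_VENDOR_ID_ATI)
			continue;
		*speed = pcie_get_speed_cap(bridge);
		*width = pcie_get_width_cap(bridge);
		found = true;
	}
	if (!found) {
		/* no AMD-owned switch: use the endpoint's own caps */
		*speed = pcie_get_speed_cap(pdev);
		*width = pcie_get_width_cap(pdev);
	}
}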
+
+/**
* amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
*
* @adev: amdgpu_device pointer
*
- * Fetchs and stores in the driver the PCIE capabilities (gen speed
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
* and lanes) of the slot the device is in. Handles APUs and
* virtualized environments where PCIE config space may not be available.
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- struct pci_dev *pdev;
enum pci_bus_speed speed_cap, platform_speed_cap;
- enum pcie_link_width platform_link_width;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -5962,11 +6688,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
&platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
- pdev = adev->pdev;
- speed_cap = pcie_get_speed_cap(pdev);
if (speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
@@ -6022,51 +6747,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* platform caps */
if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
switch (platform_link_width) {
case PCIE_LNK_X32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
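
Both the ASIC and platform branches encode the same rule: a link trained at xN also supports every narrower width, so each case ORs in all masks up to N. A table-driven sketch of that rule (hypothetical helper; CAIL names as used above):

static u32 example_width_mask(enum pcie_link_width w)
{
	static const struct {
		enum pcie_link_width width;
		u32 cap;
	} tbl[] = {
		{ PCIE_LNK_X1,  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 },
		{ PCIE_LNK_X2,  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 },
		{ PCIE_LNK_X4,  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 },
		{ PCIE_LNK_X8,  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 },
		{ PCIE_LNK_X12, CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 },
		{ PCIE_LNK_X16, CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 },
		{ PCIE_LNK_X32, CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 },
	};
	u32 mask = 0;
	int i;

	/* a xN link implies support for every width <= N */
	for (i = 0; i < ARRAY_SIZE(tbl); i++)
		if (tbl[i].width <= w)
			mask |= tbl[i].cap;
	return mask;
}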
@@ -6092,6 +6869,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
bool p2p_access =
!adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
+ if (!p2p_access)
+ dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
+ pci_name(peer_adev->pdev));
bool is_large_bar = adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
@@ -6112,12 +6892,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6127,13 +6906,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6164,45 +6942,52 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
-
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
- return PCI_ERS_RESULT_DISCONNECT;
- }
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
adev->pci_channel_state = state;
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices should be able to support FW-based
+ * link reset on other devices; if not, return.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set dpc status only if device is part of hive
+ * Non-hive devices should be able to recover after
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- drm_sched_stop(&ring->sched, NULL);
+ mutex_lock(&hive->hive_lock);
}
- atomic_inc(&adev->gpu_reset_counter);
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive)
+ mutex_unlock(&hive->hive_lock);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
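
For context, these callbacks are driven by the PCI core's AER recovery sequence (error_detected, then mmio_enabled and/or slot_reset, then resume). A sketch of the hookup, assumed to mirror the pci_driver registration in amdgpu_drv.c:

static const struct pci_error_handlers example_pci_err_handlers = {
	.error_detected = amdgpu_pci_error_detected,
	.mmio_enabled   = amdgpu_pci_mmio_enabled,
	.slot_reset     = amdgpu_pci_slot_reset,
	.resume         = amdgpu_pci_resume,
};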
@@ -6215,8 +7000,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6240,27 +7027,38 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
+ u32 memsize;
+ u16 status;
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
-
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for the ASIC to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
- /* wait for asic to come out of reset */
- msleep(500);
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6280,26 +7078,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ }
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
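
The vendor-ID poll above works because config reads return all-ones while the device is still in reset. A self-contained sketch of the same probe (hypothetical helper):

static int example_wait_for_vendor(struct pci_dev *pdev, u16 vendor,
				   unsigned int timeout_ms)
{
	u16 id = 0xffff;	/* what a device in reset reads back */

	while (timeout_ms) {
		usleep_range(10000, 10500);	/* ~10 ms per probe */
		pci_read_config_word(pdev, PCI_VENDOR_ID, &id);
		if (id == vendor)
			return 0;
		timeout_ms -= min(timeout_ms, 10U);
	}
	return -ETIME;
}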
@@ -6316,26 +7128,95 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+
+ amdgpu_device_sched_resume(&device_list, NULL, false);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
+
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *swus, *swds;
+ int r;
+
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
+
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
+
+ adev->pcie_reset_ctx.swus = swus;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
- drm_sched_start(&ring->sched);
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
}
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
}
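
The cache/load pair above composes four PCI core primitives. A generic sketch of the pairing, not amdgpu-specific:

static struct pci_saved_state *example_snapshot(struct pci_dev *pdev)
{
	if (pci_save_state(pdev))		/* latch config into pdev */
		return NULL;
	return pci_store_saved_state(pdev);	/* detach a heap copy */
}

static void example_rollback(struct pci_dev *pdev,
			     struct pci_saved_state *state)
{
	if (!pci_load_saved_state(pdev, state))	/* re-arm the saved copy */
		pci_restore_state(pdev);	/* write it to the device */
}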
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
@@ -6344,6 +7225,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
r = pci_save_state(pdev);
if (!r) {
kfree(adev->pci_state);
@@ -6351,14 +7235,16 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -6376,7 +7262,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -6520,22 +7406,117 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
{
struct dma_fence *old = NULL;
+ dma_fence_get(gang);
do {
dma_fence_put(old);
old = amdgpu_device_get_gang(adev);
if (old == gang)
break;
- if (!dma_fence_is_signaled(old))
+ if (!dma_fence_is_signaled(old)) {
+ dma_fence_put(gang);
return old;
+ }
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
old, gang) != old);
+ /*
+ * Drop it once for the exchanged reference in adev and once for the
+ * thread local reference acquired in amdgpu_device_get_gang().
+ */
+ dma_fence_put(old);
dma_fence_put(old);
return NULL;
}
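
The added get/put pair follows the usual refcount discipline for lockless pointer publication. A standalone sketch of the pattern with hypothetical names:

static struct dma_fence *example_publish(struct dma_fence __rcu **slot,
					 struct dma_fence *new)
{
	struct dma_fence *old = NULL;

	dma_fence_get(new);		/* reference that *slot will own */
	do {
		dma_fence_put(old);	/* drop ref from a failed round */
		old = dma_fence_get_rcu_safe(slot);
		if (old && !dma_fence_is_signaled(old)) {
			dma_fence_put(new);	/* publication aborted */
			return old;		/* caller waits and retries */
		}
	} while (cmpxchg((struct dma_fence __force **)slot, old, new) != old);

	/* once for the ref *slot held on old, once for our local ref */
	dma_fence_put(old);
	dma_fence_put(old);
	return NULL;
}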
+/**
+ * amdgpu_device_enforce_isolation - enforce HW isolation
+ * @adev: the amdgpu device pointer
+ * @ring: the HW ring the job is supposed to run on
+ * @job: the job which is about to be pushed to the HW ring
+ *
+ * Makes sure that only one client at a time can use the GFX block.
+ * Returns: The dependency to wait on before the job can be pushed to the HW.
+ * The function is called multiple times until NULL is returned.
+ */
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ struct drm_sched_fence *f = job->base.s_fence;
+ struct dma_fence *dep;
+ void *owner;
+ int r;
+
+ /*
+ * For now enforce isolation only for the GFX block since we only need
+ * the cleaner shader on those rings.
+ */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
+ ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return NULL;
+
+ /*
+ * All submissions where enforce isolation is false are handled as if
+ * they come from a single client. Use ~0l as the owner to distinguish it
+ * from kernel submissions where the owner is NULL.
+ */
+ owner = job->enforce_isolation ? f->owner : (void *)~0l;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ /*
+ * The "spearhead" submission is the first one which changes the
+ * ownership to its client. We always need to wait for it to be
+ * pushed to the HW before proceeding with anything.
+ */
+ if (&f->scheduled != isolation->spearhead &&
+ !dma_fence_is_signaled(isolation->spearhead)) {
+ dep = isolation->spearhead;
+ goto out_grab_ref;
+ }
+
+ if (isolation->owner != owner) {
+
+ /*
+ * Wait for any gang to be assembled before switching to a
+ * different owner or otherwise we could deadlock the
+ * submissions.
+ */
+ if (!job->gang_submit) {
+ dep = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(dep))
+ goto out_return_dep;
+ dma_fence_put(dep);
+ }
+
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(&f->scheduled);
+ amdgpu_sync_move(&isolation->active, &isolation->prev);
+ trace_amdgpu_isolation(isolation->owner, owner);
+ isolation->owner = owner;
+ }
+
+ /*
+ * Specifying the ring here helps to pipeline submissions even when
+ * isolation is enabled. If that is not desired for testing NULL can be
+ * used instead of the ring to enforce a CPU round trip while switching
+ * between clients.
+ */
+ dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
+ r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
+ if (r)
+ dev_warn(adev->dev, "OOM tracking isolation\n");
+
+out_grab_ref:
+ dma_fence_get(dep);
+out_return_dep:
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ return dep;
+}
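
Per the kernel-doc above, the helper is called repeatedly until it returns NULL. A caller-side sketch (hypothetical and blocking for simplicity; the scheduler instead records each returned fence as a job dependency):

static void example_gather_deps(struct amdgpu_device *adev,
				struct amdgpu_ring *ring,
				struct amdgpu_job *job)
{
	struct dma_fence *dep;

	while ((dep = amdgpu_device_enforce_isolation(adev, ring, job))) {
		dma_fence_wait(dep, false);
		dma_fence_put(dep);
	}
}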
+
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -6595,12 +7576,108 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
}
return ret;
}
+
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
+{
+ ssize_t size = 0;
+
+ if (!ring || !ring->adev)
+ return size;
+
+ if (amdgpu_device_should_recover_gpu(ring->adev))
+ size |= AMDGPU_RESET_TYPE_FULL;
+
+ if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
+ !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
+ size |= AMDGPU_RESET_TYPE_SOFT_RESET;
+
+ return size;
+}
+
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
+{
+ ssize_t size = 0;
+
+ if (supported_reset == 0) {
+ size += sysfs_emit_at(buf, size, "unsupported\n");
+ return size;
+ }
+
+ if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
+ size += sysfs_emit_at(buf, size, "soft ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ size += sysfs_emit_at(buf, size, "queue ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
+ size += sysfs_emit_at(buf, size, "pipe ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_FULL)
+ size += sysfs_emit_at(buf, size, "full ");
+
+ size += sysfs_emit_at(buf, size, "\n");
+ return size;
+}
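
A sysfs show() hookup sketch for these helpers; treat the gfx_supported_reset field name as an assumption:

static ssize_t example_reset_mask_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
}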
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 4bd61c169ca8..73401f0aeb34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -104,15 +104,24 @@
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
+#include "jpeg_v5_0_1.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
-MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -261,12 +270,13 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
+ bool sz_valid = true;
uint64_t vram_size;
- u32 msg;
int i, ret = 0;
+ u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+ /* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -274,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -282,9 +292,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
}
- vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ sz_valid = false;
+ else
+ vram_size <<= 20;
- if (vram_size) {
+ if (sz_valid) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->mman.discovery_tmr_size, false);
@@ -292,28 +306,27 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
+ if (ret)
+ dev_err(adev->dev,
+ "failed to read discovery info from memory, vram size read: 0x%llx\n",
+ vram_size);
+
return ret;
}
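
The new validity check guards the megabyte-to-byte shift: 0 and all-ones both mean RCC_CONFIG_MEMSIZE was not readable. Distilled into a hypothetical helper:

static bool example_vram_bytes(u32 memsize_mb, u64 *bytes)
{
	/* 0 and 0xffffffff indicate the register is not (yet) valid */
	if (!memsize_mb || memsize_mb == U32_MAX)
		return false;
	*bytes = (u64)memsize_mb << 20;	/* MB -> bytes */
	return true;
}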
-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
- const char *fw_name;
int r;
- switch (amdgpu_discovery) {
- case 2:
- fw_name = FIRMWARE_IP_DISCOVERY;
- break;
- default:
- dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
- return -EINVAL;
- }
-
- r = request_firmware(&fw, fw_name, adev->dev);
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
- dev_err(adev->dev, "can't load firmware \"%s\"\n",
- fw_name);
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@@ -402,10 +415,39 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
return 0;
}
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+{
+ if (amdgpu_discovery == 2)
+ return "amdgpu/ip_discovery.bin";
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "amdgpu/vega10_ip_discovery.bin";
+ case CHIP_VEGA12:
+ return "amdgpu/vega12_ip_discovery.bin";
+ case CHIP_RAVEN:
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "amdgpu/raven2_ip_discovery.bin";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "amdgpu/picasso_ip_discovery.bin";
+ else
+ return "amdgpu/raven_ip_discovery.bin";
+ case CHIP_VEGA20:
+ return "amdgpu/vega20_ip_discovery.bin";
+ case CHIP_ARCTURUS:
+ return "amdgpu/arcturus_ip_discovery.bin";
+ case CHIP_ALDEBARAN:
+ return "amdgpu/aldebaran_ip_discovery.bin";
+ default:
+ return NULL;
+ }
+}
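
Combined with amdgpu_discovery_init() below, source selection reduces to: use the per-ASIC (or amdgpu_discovery=2 override) file when one is named, else read the table out of VRAM. A sketch:

static int example_load_discovery(struct amdgpu_device *adev, u8 *binary)
{
	const char *fw_name = amdgpu_discovery_get_fw_name(adev);

	if (fw_name)
		return amdgpu_discovery_read_binary_from_file(adev, binary,
							      fw_name);
	return amdgpu_discovery_read_binary_from_mem(adev, binary);
}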
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -417,17 +459,14 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return -ENOMEM;
/* Read from file if it is the preferred option */
- if (amdgpu_discovery == 2) {
- dev_info(adev->dev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
-
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from file\n");
- r = -EINVAL;
+ fw_name = amdgpu_discovery_get_fw_name(adev);
+ if (fw_name != NULL) {
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ if (r)
goto out;
- }
-
} else {
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
@@ -585,16 +624,19 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL;
}
-static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
+ uint8_t instance, uint16_t hw_id)
{
- if (ip->instance_number >= HWIP_MAX_INSTANCE) {
- DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
- ip->instance_number);
+ if (instance >= HWIP_MAX_INSTANCE) {
+ dev_err(adev->dev,
+ "Unexpected instance_number (%d) from ip discovery blob\n",
+ instance);
return -EINVAL;
}
- if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
- DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
- le16_to_cpu(ip->hw_id));
+ if (hw_id >= HW_ID_MAX) {
+ dev_err(adev->dev,
+ "Unexpected hw_id (%d) from ip discovery blob\n",
+ hw_id);
return -EINVAL;
}
@@ -607,8 +649,10 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct ip_v4 *ip;
+ struct ip *ip;
uint16_t die_offset, ip_offset, num_dies, num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int i, j;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -624,16 +668,18 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
-
- if (amdgpu_discovery_validate_ip(ip))
+ ip = (struct ip *)(adev->mman.discovery_bin +
+ ip_offset);
+ inst = ip->number_instance;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
- if (le16_to_cpu(ip->variant) == 1) {
- switch (le16_to_cpu(ip->hw_id)) {
+ if (ip->harvest == 1) {
+ switch (hw_id) {
case VCN_HWID:
(*vcn_harvest_count)++;
- if (ip->instance_number == 0) {
+ if (inst == 0) {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
adev->vcn.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN0;
@@ -655,10 +701,8 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
}
}
next_ip:
- if (ihdr->base_addr_64_bit)
- ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
- else
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
}
}
}
@@ -1017,6 +1061,8 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
bool reg_base_64)
{
int ii, jj, kk, res;
+ uint16_t hw_id;
+ uint8_t inst;
DRM_DEBUG("num_ips:%d", num_ips);
@@ -1032,8 +1078,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_hw_instance *ip_hw_instance;
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip) ||
- le16_to_cpu(ip->hw_id) != ii)
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
+ hw_id != ii)
goto next_ip;
DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
@@ -1279,17 +1327,19 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
uint16_t die_offset;
uint16_t ip_offset;
uint16_t num_dies;
+ uint32_t wafl_ver;
uint16_t num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int hw_ip;
int i, j, k;
int r;
r = amdgpu_discovery_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_discovery_init failed\n");
+ if (r)
return r;
- }
+ wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
@@ -1319,7 +1369,9 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (j = 0; j < num_ips; j++) {
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip))
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
num_base_address = ip->num_base_address;
@@ -1340,7 +1392,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
*/
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
@@ -1388,6 +1440,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->gfx.xcc_mask |=
(1U << ip->instance_number);
+ if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
+ wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
+ ip->revision, 0, 0);
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@@ -1453,22 +1509,32 @@ next_ip:
}
}
+ if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
+ adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
+
return 0;
}
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ struct ip_discovery_header *ihdr;
+ struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
+ uint16_t offset, ihdr_ver;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ offset);
+ ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
* so read harvest bit per IP data structure to set
* harvest configuration.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
+ ihdr_ver <= 2) {
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 ||
adev->pdev->revision == 0xC7)) ||
@@ -1705,7 +1771,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
* so this won't overflow.
*/
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
- adev->vcn.vcn_codec_disable_mask[v] =
+ adev->vcn.inst[v].vcn_codec_disable_mask =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
@@ -1723,45 +1789,85 @@ union nps_info {
struct nps_info_v1_0 v1;
};
+static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
+ union nps_info *nps_data)
+{
+ uint64_t vram_size, pos, offset;
+ struct nps_info_header *nhdr;
+ struct binary_header bhdr;
+ uint16_t checksum;
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
+
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+
+ amdgpu_device_vram_access(adev, (pos + offset), nps_data,
+ sizeof(*nps_data), false);
+
+ nhdr = (struct nps_info_header *)(nps_data);
+ if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
+ le32_to_cpu(nhdr->size_bytes),
+ checksum)) {
+ dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
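
The refresh path re-validates the table against the header checksum. Assuming the additive 16-bit scheme that the verify helper implements, the check is roughly:

static bool example_verify_checksum(const u8 *data, u32 size, u16 expected)
{
	u16 sum = 0;
	u32 i;

	/* simple byte-wise additive checksum (assumed scheme) */
	for (i = 0; i < size; i++)
		sum += data[i];
	return sum == expected;
}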
+
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt)
+ int *range_cnt, bool refresh)
{
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
+ union nps_info nps_data;
u16 offset;
- int i;
+ int i, r;
if (!nps_type || !range_cnt || !ranges)
return -EINVAL;
- if (!adev->mman.discovery_bin) {
- dev_err(adev->dev,
- "fetch mem range failed, ip discovery uninitialized\n");
- return -EINVAL;
- }
+ if (refresh) {
+ r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
+ if (r)
+ return r;
+ nps_info = &nps_data;
+ } else {
+ if (!adev->mman.discovery_bin) {
+ dev_err(adev->dev,
+ "fetch mem range failed, ip discovery uninitialized\n");
+ return -EINVAL;
+ }
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
- if (!offset)
- return -ENOENT;
+ if (!offset)
+ return -ENOENT;
- /* If verification fails, return as if NPS table doesn't exist */
- if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
- return -ENOENT;
+ /* If verification fails, return as if NPS table doesn't exist */
+ if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ return -ENOENT;
- nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info =
+ (union nps_info *)(adev->mman.discovery_bin + offset);
+ }
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
+ mem_ranges = kvcalloc(nps_info->v1.count,
+ sizeof(*mem_ranges),
+ GFP_KERNEL);
+ if (!mem_ranges)
+ return -ENOMEM;
*nps_type = nps_info->v1.nps_type;
*range_cnt = nps_info->v1.count;
- mem_ranges = kvzalloc(
- *range_cnt * sizeof(struct amdgpu_gmc_memrange),
- GFP_KERNEL);
for (i = 0; i < *range_cnt; i++) {
mem_ranges[i].base_address =
nps_info->v1.instance_info[i].base_address;
@@ -1796,6 +1902,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1821,6 +1928,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1850,6 +1958,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1875,6 +1984,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1954,6 +2064,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
break;
case IP_VERSION(11, 0, 8):
@@ -1973,6 +2084,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
@@ -1984,6 +2096,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
break;
default:
@@ -2011,13 +2124,17 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2034,6 +2151,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
@@ -2041,6 +2159,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2092,6 +2211,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 1, 0):
/* TODO: Fix IP version. DC code expects version 4.0.1 */
if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
@@ -2144,6 +2264,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2169,6 +2290,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -2198,6 +2320,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -2223,6 +2346,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
case IP_VERSION(7, 0, 0):
@@ -2321,6 +2445,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
break;
+ case IP_VERSION(5, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2342,6 +2470,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2365,6 +2494,7 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
aqua_vanjaram_init_soc_config(adev);
break;
default:
@@ -2428,6 +2558,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2450,6 +2585,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2472,6 +2612,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2492,6 +2637,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
} else {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
@@ -2508,9 +2654,15 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2534,6 +2686,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2562,6 +2719,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2587,10 +2749,42 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
@@ -2610,6 +2804,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
@@ -2653,6 +2848,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->family = AMDGPU_FAMILY_GC_11_5_0;
break;
case IP_VERSION(12, 0, 0):
@@ -2678,19 +2874,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->flags |= AMD_IS_APU;
break;
default:
break;
}
- if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
- adev->gmc.xgmi.supported = true;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
-
/* set NBIO version */
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 1, 0):
@@ -2711,11 +2901,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
adev->nbio.funcs = &nbio_v7_9_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
case IP_VERSION(7, 11, 3):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
@@ -2843,6 +3035,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 10):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 9):
case IP_VERSION(13, 0, 10):
@@ -2852,6 +3045,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 11):
adev->smuio.funcs = &smuio_v13_0_3_funcs;
if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
adev->flags |= AMD_IS_APU;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index f5d36525ec3e..b44d56465c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt);
+ int *range_cnt, bool refresh);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b119d27271c1..51bab32fd8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -33,6 +33,7 @@
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
#include <asm/div64.h>
#include <linux/pci.h>
@@ -1195,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1296,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1316,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1329,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
@@ -1788,3 +1791,82 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
return 0;
}
+/* panic_abo is set in amdgpu_display_get_scanout_buffer() and only used in
+ * amdgpu_display_set_pixel(); both are called from the panic handler and
+ * protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO to write each pixel to the GPU VRAM.
+ * This is a simplified version of amdgpu_device_mm_access().
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+ unsigned int x,
+ unsigned int y,
+ u32 color)
+{
+ struct amdgpu_res_cursor cursor;
+ unsigned long offset;
+ struct amdgpu_bo *abo = panic_abo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ uint32_t tmp;
+
+ offset = x * 4 + y * sb->pitch[0];
+ amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+ tmp = cursor.start >> 31;
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+ if (tmp != 0xffffffff)
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct amdgpu_bo *abo;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ if (!fb)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!abo)
+ return -EINVAL;
+
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+ sb->format = drm_format_info(fb->format->format);
+ if (!sb->format)
+ return -EINVAL;
+
+ sb->pitch[0] = fb->pitches[0];
+
+ if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+ if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+ drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+ return -EINVAL;
+ }
+ /* Only handle 32-bit formats, to simplify MMIO access */
+ if (fb->format->cpp[0] != 4) {
+ drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+ return -EINVAL;
+ }
+ sb->set_pixel = amdgpu_display_set_pixel;
+ panic_abo = abo;
+ return 0;
+ }
+ if (!abo->kmap.virtual &&
+ ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+ drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+ if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+ return 0;
+}
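
For reference, a minimal userspace sketch (plain C, hypothetical register model — not the kernel API) of how the set_pixel path above turns a pixel coordinate into a linear VRAM byte offset and splits it across the 32-bit MM_INDEX/MM_INDEX_HI pair, where bit 31 of MM_INDEX selects the indirect aperture:

#include <stdio.h>
#include <stdint.h>

static void split_mm_index(uint64_t offset, uint32_t *lo, uint32_t *hi)
{
	*hi = (uint32_t)(offset >> 31);          /* upper offset bits go to MM_INDEX_HI */
	*lo = ((uint32_t)offset) | 0x80000000u;  /* low bits plus the aperture-enable flag */
}

int main(void)
{
	/* pixel (x, y) in a 4-byte-per-pixel framebuffer with a given pitch */
	unsigned int x = 100, y = 50, pitch = 7680;
	uint64_t offset = (uint64_t)x * 4 + (uint64_t)y * pitch;
	uint32_t lo, hi;

	split_mm_index(offset, &lo, &hi);
	printf("offset=0x%llx -> MM_INDEX=0x%08x MM_INDEX_HI=0x%08x\n",
	       (unsigned long long)offset, lo, hi);
	return 0;
}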
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 9d19940f73c8..930c171473b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
+#include <drm/drm_panic.h>
+
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
@@ -42,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
@@ -49,4 +52,7 @@ amdgpu_lookup_format_info(u32 format, uint64_t modifier);
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8e81a83d37d8..8561ad7f6180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -36,12 +36,36 @@
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
+
+/**
+ * dma_buf_attach_adev - Helper to get adev of an attachment
+ *
+ * @attach: attachment
+ *
+ * Returns:
+ * A struct amdgpu_device * if the attaching device is an amdgpu device or
+ * partition, NULL otherwise.
+ */
+static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
+{
+ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ return amdgpu_ttm_adev(bo->tbo.bdev);
+ }
+
+ return NULL;
+}
+
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@@ -53,13 +77,17 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
+ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
+ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ amdgpu_vm_bo_update_shared(bo);
+
return 0;
}
@@ -72,11 +100,35 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->allowed_domains;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P just pin into GTT instead.
+ *
+ * To avoid conflicting pinnings between GPUs and RDMA when move
+ * notifiers are disabled, only allow pinning in VRAM when move
+ * notifiers are enabled.
+ */
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ }
- /* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (WARN_ON(!domains))
+ return -EINVAL;
+
+ return amdgpu_bo_pin(bo, domains);
}
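
The pin logic above can be read as a pure domain-selection rule: start from the BO's allowed domains and strip VRAM whenever move notifiers are compiled out or any attachment lacks P2P. A minimal sketch, assuming simplified flag values and a plain array in place of the kernel's attachment list:

#include <stdbool.h>
#include <stdio.h>

#define DOMAIN_GTT  0x1
#define DOMAIN_VRAM 0x2

static unsigned int pick_pin_domains(unsigned int allowed,
				     bool move_notify_enabled,
				     const bool *peer2peer, int n_attach)
{
	unsigned int domains = allowed;
	int i;

	if (!move_notify_enabled) {
		domains &= ~DOMAIN_VRAM;
	} else {
		for (i = 0; i < n_attach; i++)
			if (!peer2peer[i])
				domains &= ~DOMAIN_VRAM;
	}
	return domains;
}

int main(void)
{
	bool p2p[2] = { true, false };	/* one importer lacks P2P */
	unsigned int d = pick_pin_domains(DOMAIN_GTT | DOMAIN_VRAM, true, p2p, 2);

	printf("pin domains: %s%s\n",
	       (d & DOMAIN_GTT) ? "GTT " : "", (d & DOMAIN_VRAM) ? "VRAM" : "");
	return 0;
}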
/**
@@ -131,9 +183,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
-
- } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
- return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@@ -150,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
+ /* XGMI-accessible memory should never be DMA-mapped */
+ if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
+ dma_buf_attach_adev(attach), bo)))
+ return ERR_PTR(-EINVAL);
+
r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
bo->tbo.base.size, attach->dev,
dir, &sgt);
@@ -181,7 +235,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- if (sgt->sgl->page_link) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -231,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mappable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
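The vmap/vunmap wrappers above pair a pin with the mapping and unwind it on failure. A small userspace sketch of that pattern, with hypothetical stand-ins for amdgpu_bo_pin()/drm_gem_dmabuf_vmap():

#include <stdio.h>

static int pinned;

static int bo_pin(void)       { pinned++; return 0; }
static void bo_unpin(void)    { pinned--; }
static int do_vmap(int fail)  { return fail ? -12 /* -ENOMEM */ : 0; }

static int vmap_wrapper(int fail)
{
	int ret = bo_pin();

	if (ret)
		return ret;
	ret = do_vmap(fail);
	if (ret)
		bo_unpin();	/* unwind the pin on mapping failure */
	return ret;
}

static void vunmap_wrapper(void)
{
	/* unmap would happen here */
	bo_unpin();
}

int main(void)
{
	vmap_wrapper(1);	/* failed map leaves pin count at 0 */
	printf("after failed vmap: pinned=%d\n", pinned);
	if (!vmap_wrapper(0)) {
		printf("mapped: pinned=%d\n", pinned);
		vunmap_wrapper();
	}
	printf("after vunmap: pinned=%d\n", pinned);
	return 0;
}
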
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -240,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
/**
@@ -259,11 +343,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
@@ -345,7 +441,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
/* FIXME: This should be after the "if", but needs a fix to make sure
* DMABuf imports are initialized in the right VM list.
*/
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
@@ -456,7 +552,10 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct drm_gem_object *obj = &bo->tbo.base;
struct drm_gem_object *gobj;
- if (obj->import_attach) {
+ if (!adev)
+ return false;
+
+ if (drm_gem_is_imported(obj)) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 81d9877c8735..bff25ef3e2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,6 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
@@ -50,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -118,9 +121,14 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+ * - 3.61.0 - Contains fix for RV/PCO compute queues
+ * - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
+ * - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -133,6 +141,10 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
+ AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
+ AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -169,7 +181,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
+int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
* migration and restoration of backing memory when handling
@@ -230,8 +243,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
-
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -247,9 +260,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
- .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
- mgpu_info.delayed_reset_work,
- amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
@@ -284,7 +294,8 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
- * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
+ * The default is -1 (Use value specified by TTM).
+ * This parameter is deprecated and will be removed in the future.
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -351,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* The second one is for Compute. The third and fourth ones are
* for SDMA and Video.
*
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout settings), the timeout for all jobs is 10000.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 10000 for all jobs. "
+ "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
/**
@@ -403,7 +414,7 @@ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
* the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
*/
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
/**
* DOC: bapm (int)
@@ -461,7 +472,7 @@ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
* Enable experimental hw support (1 = enable). The default is 0 (disabled).
*/
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
* DOC: dc (int)
@@ -572,14 +583,14 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
* DOC: emu_mode (int)
* Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
*/
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
-module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
/**
* DOC: ras_enable (int)
@@ -734,7 +745,7 @@ module_param_named(noretry, amdgpu_noretry, int, 0644);
*/
MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
/**
* DOC: use_xgmi_p2p (int)
@@ -753,7 +764,7 @@ module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
* assigns queues to HQDs.
*/
int sched_policy = KFD_SCHED_POLICY_HWS;
-module_param(sched_policy, int, 0444);
+module_param_unsafe(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
@@ -803,7 +814,7 @@ MODULE_PARM_DESC(send_sigterm,
* Setting 1 enables halt on hang.
*/
int halt_if_hws_hang;
-module_param(halt_if_hws_hang, int, 0644);
+module_param_unsafe(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
@@ -812,7 +823,7 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
* check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
-module_param(hws_gws_support, bool, 0444);
+module_param_unsafe(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
@@ -845,7 +856,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
*/
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
-module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -853,7 +864,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
*/
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
-module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
/**
* DOC: pcie_p2p (bool)
@@ -875,7 +886,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -892,7 +903,7 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
* the ABM algorithm, with 1 being the least reduction and 4 being the most
* reduction.
*
- * Defaults to -1, or disabled. Userspace can only override this level after
+ * Defaults to -1, or auto. Userspace can only override this level after
* boot if it's set to auto.
*/
int amdgpu_dm_abm_level = -1;
@@ -949,7 +960,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -957,7 +968,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0644);
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold specifies the
* threshold value of faulty pages detected by RAS ECC, which may
@@ -965,7 +976,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0644);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = threshold determined by a formula, 0 < threshold < max records = user-defined threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -1029,11 +1040,20 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
+ */
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
+
+/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
/**
* DOC: seamless (int)
@@ -1051,9 +1071,14 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* limits the VRAM size reported to ROCm applications to the visible
* size, usually 256MB.
* - 0x4: Disable GPU soft recovery, always do a full reset
+ * - 0x8: Use VRAM for firmware loading
+ * - 0x10: Enable ACA based RAS logging
+ * - 0x20: Enable experimental resets
+ * - 0x40: Disable ring resets
+ * - 0x80: Use VRAM for SMU pool
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
-module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
@@ -1080,6 +1105,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -1793,7 +1840,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
};
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1866,8 +1912,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@@ -1950,7 +1994,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
-#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -2129,6 +2172,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2223,6 +2271,24 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable experimental reset features\n");
adev->debug_exp_resets = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET) {
+ pr_info("debug: ring reset disabled\n");
+ adev->debug_disable_gpu_ring_reset = true;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_SMU_POOL) {
+ pr_info("debug: use vram for smu pool\n");
+ adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
}
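
A sketch of how the new debug_mask bits are decoded, mirroring the hunk above; the macro names here are stand-ins, only the bit positions (BIT(6) through BIT(9)) and the log text come from the patch:

#include <stdio.h>

#define DBG_DISABLE_RING_RESET (1u << 6)	/* 0x40 */
#define DBG_SMU_POOL           (1u << 7)	/* 0x80 */
#define DBG_VM_USERPTR         (1u << 8)	/* 0x100 */
#define DBG_DISABLE_RAS_CE_LOG (1u << 9)	/* 0x200 */

int main(void)
{
	unsigned int mask = DBG_SMU_POOL | DBG_VM_USERPTR;	/* e.g. debug_mask=0x180 */

	if (mask & DBG_DISABLE_RING_RESET)
		printf("debug: ring reset disabled\n");
	if (mask & DBG_SMU_POOL)
		printf("debug: use vram for smu pool\n");
	if (mask & DBG_VM_USERPTR)
		printf("debug: VM mode debug for userptr is enabled\n");
	if (mask & DBG_DISABLE_RAS_CE_LOG)
		printf("debug: disable kernel logs of correctable errors\n");
	return 0;
}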
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2250,6 +2316,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
int ret, retry = 0, i;
bool supports_atomic = false;
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+ return -EINVAL;
+ }
+
/* skip devices which are owned by radeon */
for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
if (amdgpu_unsupported_pciidlist[i] == pdev->device)
@@ -2260,7 +2332,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2282,14 +2354,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
+ if (!amdgpu_si_support) {
dev_info(&pdev->dev,
"SI support provided by radeon.\n");
dev_info(&pdev->dev,
@@ -2297,16 +2369,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
);
return -ENODEV;
}
- }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
+ return -ENODEV;
#endif
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
+ if (!amdgpu_cik_support) {
dev_info(&pdev->dev,
"CIK support provided by radeon.\n");
dev_info(&pdev->dev,
@@ -2314,8 +2388,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
);
return -ENODEV;
}
- }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+ return -ENODEV;
#endif
+ default:
+ break;
+ }
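
The restructured probe moves the chip switch outside the Kconfig guards, so a build without SI/CIK support now rejects those chips explicitly instead of silently probing them. A compilable sketch of the shape (SI case only; compile with -DDRM_AMDGPU_SI to emulate CONFIG_DRM_AMDGPU_SI=y):

#include <stdio.h>

enum chip { CHIP_TAHITI, CHIP_KAVERI, CHIP_NAVI10 };

static int probe_check(enum chip chip, int si_support)
{
	switch (chip) {
	case CHIP_TAHITI:
#ifdef DRM_AMDGPU_SI
		if (!si_support) {
			printf("SI support provided by radeon.\n");
			return -19;	/* -ENODEV */
		}
		break;
#else
		printf("amdgpu is built without SI support.\n");
		return -19;
#endif
	default:
		break;
	}
	return 0;	/* continue probing */
}

int main(void)
{
	return probe_check(CHIP_TAHITI, 0) ? 1 : 0;
}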
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
@@ -2365,11 +2445,15 @@ retry_init:
*/
if (adev->mode_info.mode_config_initialized &&
!list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
+ const struct drm_format_info *format;
+
/* select 8 bpp console on low vram cards */
if (adev->gmc.real_vram_size <= (32*1024*1024))
- drm_fbdev_ttm_setup(adev_to_drm(adev), 8);
+ format = drm_format_info(DRM_FORMAT_C8);
else
- drm_fbdev_ttm_setup(adev_to_drm(adev), 32);
+ format = NULL;
+
+ drm_client_setup(adev_to_drm(adev), format);
}
ret = amdgpu_debugfs_init(adev);
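
The fbdev hunk above now hands drm_client_setup() a pixel format instead of a raw bpp value. A sketch of the size-based pick, assuming the same 32 MiB cutoff used in the patch:

#include <stdio.h>
#include <stdint.h>

static const char *pick_fbdev_format(uint64_t vram_bytes)
{
	/* low-VRAM boards get an 8 bpp (C8) console, everything else the driver default */
	return vram_bytes <= 32ull * 1024 * 1024 ? "C8 (8 bpp)" : "driver default";
}

int main(void)
{
	printf("16 MiB card -> %s\n", pick_fbdev_format(16ull << 20));
	printf("8 GiB card  -> %s\n", pick_fbdev_format(8ull << 30));
	return 0;
}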
@@ -2378,10 +2462,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2414,9 +2498,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2433,7 +2517,9 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
+ amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
@@ -2461,6 +2547,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2472,92 +2562,19 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
adev->mp1_state = PP_MP1_STATE_NONE;
}
-/**
- * amdgpu_drv_delayed_reset_work_handler - work handler for reset
- *
- * @work: work_struct.
- */
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
-{
- struct list_head device_list;
- struct amdgpu_device *adev;
- int i, r;
- struct amdgpu_reset_context reset_context;
-
- memset(&reset_context, 0, sizeof(reset_context));
-
- mutex_lock(&mgpu_info.mutex);
- if (mgpu_info.pending_reset == true) {
- mutex_unlock(&mgpu_info.mutex);
- return;
- }
- mgpu_info.pending_reset = true;
- mutex_unlock(&mgpu_info.mutex);
-
- /* Use a common context, just need to make sure full reset is done */
- reset_context.method = AMD_RESET_METHOD_NONE;
- set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- reset_context.reset_req_dev = adev;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- if (r) {
- dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev_to_drm(adev)->unique);
- }
- if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
- r = -EALREADY;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- flush_work(&adev->xgmi_reset_work);
- adev->gmc.xgmi.pending_reset = false;
- }
-
- /* reset function will rebuild the xgmi hive info , clear it now */
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
-
- INIT_LIST_HEAD(&device_list);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
-
- /* unregister the GPU first, reset function will add them back */
- list_for_each_entry(adev, &device_list, reset_list)
- amdgpu_unregister_gpu_instance(adev);
-
- /* Use a common context, just need to make sure full reset is done */
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
-
- if (r) {
- DRM_ERROR("reinit gpus failure");
- return;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete) {
- kgd2kfd_init_zone_device(adev);
- amdgpu_amdkfd_device_init(adev);
- amdgpu_amdkfd_drm_client_create(adev);
- }
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
- }
-}
-
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2572,7 +2589,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2580,13 +2597,28 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* don't allow going deep first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
+#endif
return 0;
+ }
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+#endif
+
return amdgpu_device_suspend(drm_dev, true);
}
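
The suspend hunk caches the state used for the first suspend and refuses a later, different one (e.g. deep followed by s2idle). A sketch of that guard, with plain ints standing in for the kernel's suspend_state_t values:

#include <stdio.h>

#define PM_SUSPEND_ON   0
#define PM_SUSPEND_IDLE 1	/* s2idle */
#define PM_SUSPEND_MEM  3	/* deep */

static int last_state = PM_SUSPEND_ON;

static int suspend(int target)
{
	if (last_state != PM_SUSPEND_ON && last_state != target) {
		printf("Unsupported suspend state %d\n", target);
		return -22;	/* -EINVAL */
	}
	last_state = target;	/* cache the state last used for suspend */
	return 0;
}

int main(void)
{
	printf("deep:   %d\n", suspend(PM_SUSPEND_MEM));
	printf("s2idle: %d\n", suspend(PM_SUSPEND_IDLE));
	return 0;
}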
@@ -2595,7 +2627,6 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
@@ -2629,9 +2660,7 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2644,12 +2673,21 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ /* do not resume the device for normal hibernation */
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ return 0;
+
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2728,6 +2766,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
+}
+
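amdgpu_runtime_idle_check_userq() above bails out with -EBUSY as soon as any manager still owns a user queue, vetoing runtime suspend. A userspace sketch of the same walk, with arrays standing in for the kernel's list + IDR iteration:

#include <stdio.h>

#define EBUSY 16

struct userq_mgr { int nr_queues; };

static int runtime_idle_check_userq(const struct userq_mgr *mgrs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (mgrs[i].nr_queues > 0)
			return -EBUSY;	/* at least one live user queue */
	return 0;
}

int main(void)
{
	struct userq_mgr mgrs[2] = { { 0 }, { 3 } };

	printf("idle check: %d\n", runtime_idle_check_userq(mgrs, 2));
	return 0;
}
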
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2743,6 +2804,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2796,7 +2860,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2837,7 +2901,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -2864,12 +2928,33 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
+
+ if (fpriv && drm_dev_enter(dev, &idx)) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2891,15 +2976,15 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
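
The pm_sleep_ptr() wrappers above let the sleep callbacks compile away to NULL when CONFIG_PM_SLEEP is off. A sketch of the idea (macro simplified; the kernel's PTR_IF-based definition also keeps the symbol referenced so it is not flagged as unused). Compile with -DHAVE_PM_SLEEP to emulate CONFIG_PM_SLEEP=y:

#include <stdio.h>

#ifdef HAVE_PM_SLEEP
#define pm_sleep_ptr(fn) (fn)
#else
#define pm_sleep_ptr(fn) NULL
#endif

struct dev_pm_ops {
	int (*suspend)(void);
};

int my_suspend(void)
{
	printf("suspend callback ran\n");
	return 0;
}

static const struct dev_pm_ops pm_ops = {
	.suspend = pm_sleep_ptr(my_suspend),
};

int main(void)
{
	if (pm_ops.suspend)
		return pm_ops.suspend();
	printf("sleep callbacks compiled out\n");
	return 0;
}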
@@ -2921,7 +3006,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2968,6 +3053,10 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -2982,6 +3071,7 @@ static const struct drm_driver amdgpu_kms_driver = {
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &amdgpu_driver_kms_fops,
.release = &amdgpu_driver_release_kms,
#ifdef CONFIG_PROC_FS
@@ -2992,7 +3082,6 @@ static const struct drm_driver amdgpu_kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -3008,6 +3097,7 @@ const struct drm_driver amdgpu_partition_driver = {
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &amdgpu_driver_kms_fops,
.release = &amdgpu_driver_release_kms,
@@ -3015,7 +3105,6 @@ const struct drm_driver amdgpu_partition_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -3041,7 +3130,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
@@ -3050,14 +3139,11 @@ static int __init amdgpu_init(void)
{
int r;
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
r = amdgpu_sync_init();
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
+ r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3068,6 +3154,12 @@ static int __init amdgpu_init(void)
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
+ if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ pr_crit("Overdrive is enabled, please disable it before "
+ "reporting any bugs unrelated to overdrive.\n");
+ }
+
/* let modprobe override vga console setting */
return pci_register_driver(&amdgpu_kms_pci_driver);
@@ -3085,7 +3177,7 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index 5bc2cb661af7..2d86cc6f7f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -40,7 +40,6 @@
#define DRIVER_NAME "amdgpu"
#define DRIVER_DESC "AMD GPU"
-#define DRIVER_DATE "20150101"
extern const struct drm_driver amdgpu_partition_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
index 35fee3e8cde2..8cd69836dd99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
@@ -200,7 +200,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
dev_err_ratelimited(&i2c_adap->dev,
"maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
eeprom_addr, buf_size,
- read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ str_read_write(read), EEPROM_OFFSET_SIZE);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..23d7d0b0d625
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
+
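The step list documented in amdgpu_eviction_fence_replace_fence() maps onto a publish/attach/release sequence. A refcount sketch of those steps (userspace stand-ins, hypothetical helpers; the locking done by the kernel is elided):

#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; };

static struct fence *fence_create(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	if (f)
		f->refs = 1;
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refs == 0)
		free(f);
}

struct evf_mgr { struct fence *ev_fence; };

static int replace_fence(struct evf_mgr *mgr, int n_bos)
{
	struct fence *new_f = fence_create();
	struct fence *old_f;
	int i;

	if (!new_f)
		return -12;	/* -ENOMEM */

	old_f = mgr->ev_fence;	/* swap happens under a lock in the kernel */
	mgr->ev_fence = new_f;

	for (i = 0; i < n_bos; i++)	/* each locked BO's reservation takes a ref */
		new_f->refs++;

	fence_put(old_f);	/* release the manager's ref on the old fence */
	return 0;
}

int main(void)
{
	struct evf_mgr mgr = { .ev_fence = fence_create() };

	if (replace_fence(&mgr, 3))
		return 1;
	printf("new fence refs: %d\n", mgr.ev_fence->refs);	/* 1 + 3 BOs */
	return 0;
}
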
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_fence_get(&ev_fence->base);
+ else
+ goto unlock;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(&ev_fence->base);
+ return;
+
+unlock:
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
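
The notable design choice in this file is that .enable_signaling doubles as
the eviction trigger: the moment any waiter arms the fence, the suspend
worker is scheduled and the user queues are parked. A minimal sketch of that
trigger path (waiter side; names taken from the code above):

	/* arming the fence for signaling ... */
	dma_fence_enable_sw_signaling(&ev_fence->base);
	/*
	 * ... invokes amdgpu_eviction_fence_enable_signaling(), which
	 * schedules suspend_work; amdgpu_eviction_fence_suspend_worker()
	 * then evicts the user queues, and the userq eviction path finally
	 * signals the fence via amdgpu_eviction_fence_signal().
	 */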
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
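
Taken together, the header implies a simple per-open lifecycle; a rough
sketch (error handling elided, fpriv/bo names assumed from the driver code):

	/* on device file open */
	amdgpu_eviction_fence_init(&fpriv->evf_mgr);

	/* whenever a GEM BO becomes visible in this VM */
	r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, bo);

	/* when the BO is closed again */
	amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);

	/* on fd close, with fd_closing set so no new fence is created */
	amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);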
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index c7df7fa3459f..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -59,20 +60,21 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_mem_stats stats;
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
ktime_t usage[AMDGPU_HW_IP_NUM];
- unsigned int hw_ip;
- int ret;
-
- memset(&stats, 0, sizeof(stats));
-
- ret = amdgpu_bo_reserve(vm->root.bo, false);
- if (ret)
- return;
-
- amdgpu_vm_get_memory(vm, &stats);
- amdgpu_bo_unreserve(vm->root.bo);
+ const char *pl_name[] = {
+ [TTM_PL_VRAM] = "vram",
+ [TTM_PL_TT] = "gtt",
+ [TTM_PL_SYSTEM] = "cpu",
+ [AMDGPU_PL_GDS] = "gds",
+ [AMDGPU_PL_GWS] = "gws",
+ [AMDGPU_PL_OA] = "oa",
+ [AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
+ };
+ unsigned int hw_ip, i;
+ amdgpu_vm_get_memory(vm, stats);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
/*
@@ -82,24 +84,35 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
*/
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
- drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
- drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
- drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
- drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n",
- stats.visible_vram/1024UL);
+
+ for (i = 0; i < ARRAY_SIZE(pl_name); i++) {
+ if (!pl_name[i])
+ continue;
+
+ drm_print_memory_stats(p,
+ &stats[i].drm,
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ pl_name[i]);
+ }
+
+ /* Legacy amdgpu keys, alias to drm-resident-memory-: */
+ drm_printf(p, "drm-memory-vram:\t%llu KiB\n",
+ stats[TTM_PL_VRAM].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-gtt: \t%llu KiB\n",
+ stats[TTM_PL_TT].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-cpu: \t%llu KiB\n",
+ stats[TTM_PL_SYSTEM].drm.resident/1024UL);
+
+ /* Amdgpu specific memory accounting keys: */
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
- stats.evicted_vram/1024UL);
- drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n",
- stats.evicted_visible_vram/1024UL);
+ stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
- stats.requested_vram/1024UL);
- drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n",
- stats.requested_visible_vram/1024UL);
+ (stats[TTM_PL_VRAM].drm.shared +
+ stats[TTM_PL_VRAM].drm.private) / 1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
- stats.requested_gtt/1024UL);
- drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
- drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
- drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+ (stats[TTM_PL_TT].drm.shared +
+ stats[TTM_PL_TT].drm.private) / 1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
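
For reference, an illustrative (values invented) excerpt of the resulting
fdinfo output for one client, showing the generic per-placement keys emitted
by drm_print_memory_stats() alongside the retained legacy aliases:

	drm-total-vram:		262144 KiB
	drm-shared-vram:	0 KiB
	drm-resident-vram:	262144 KiB
	drm-memory-vram:	262144 KiB	(legacy alias of drm-resident-vram)
	amd-evicted-vram:	0 KiB
	amd-requested-vram:	262144 KiB	(shared + private for the placement)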
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2f24a6aa13bf..fd8cca241da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,37 +42,6 @@
#include "amdgpu_reset.h"
/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
-/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
@@ -130,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: optional amdgpu_fence to use for the hw fence; if NULL, a separate fence is allocated
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -146,40 +115,34 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
+ if (!af) {
+ /* create a separate hw fence */
+ am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+ if (!am_fence)
return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
} else {
- /* take use of job-embedded fence */
- fence = &job->hw_fence;
+ am_fence = af;
}
+ fence = &am_fence->base;
+ am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
+ am_fence->seq = seq;
+ if (af) {
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ /* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
} else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(fence);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -280,7 +243,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (del_timer(&ring->fence_drv.fallback_timer) &&
+ if (timer_delete(&ring->fence_drv.fallback_timer) &&
seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
@@ -292,6 +255,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -304,6 +268,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -322,11 +292,13 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
*/
static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = from_timer(ring, t,
- fence_drv.fallback_timer);
+ struct amdgpu_ring *ring = timer_container_of(ring, t,
+ fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -618,7 +590,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- del_timer_sync(&ring->fence_drv.fallback_timer);
+ timer_delete_sync(&ring->fence_drv.fallback_timer);
}
}
@@ -718,7 +690,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
- job = container_of(old, struct amdgpu_job, hw_fence);
+ job = container_of(old, struct amdgpu_job, hw_fence.base);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
@@ -764,6 +736,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/*
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver will save the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue, the queue
+ * contents from the other contexts are re-emitted to the ring so that they
+ * can be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @fence: fence of the ring to signal
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+{
+ dma_fence_set_error(&fence->base, -ETIME);
+ amdgpu_fence_write(fence->ring, fence->seq);
+ amdgpu_fence_process(fence->ring);
+}
+
+void amdgpu_fence_save_wptr(struct dma_fence *fence)
+{
+ struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
+
+ am_fence->wptr = am_fence->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr, i, seqno;
+
+ seqno = amdgpu_fence_read(ring);
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
+ ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ }
+}
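
/*
 * Hypothetical reset flow built on the two helpers above (sketch only; the
 * real entry points live in the per-IP reset code, and example_hw_queue_reset
 * and example_reemit are invented stand-ins):
 */
static int example_reset_queue(struct amdgpu_ring *ring,
			       struct amdgpu_fence *guilty_fence)
{
	int r;

	/* 1) copy all unprocessed, innocent work out of the ring buffer */
	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);

	/* 2) reset the queue itself (IP specific, assumed helper) */
	r = example_hw_queue_reset(ring);
	if (r)
		return r;

	/* 3) complete the guilty fence with -ETIME so waiters see the error */
	if (guilty_fence)
		amdgpu_fence_driver_guilty_force_completion(guilty_fence);

	/* 4) re-emit the backed-up dwords so innocent contexts still run */
	return example_reemit(ring, ring->ring_backup,
			      ring->ring_backup_entries_to_copy);
}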
+
/*
* Common fence implementation
*/
@@ -780,7 +832,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
@@ -810,7 +862,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -830,7 +882,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free fence_slab if it's separated fence*/
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+ kfree(to_amdgpu_fence(f));
}
/**
@@ -845,7 +897,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ceb5163480f4..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -31,6 +31,7 @@
#define FRU_EEPROM_MADDR_6 0x60000
#define FRU_EEPROM_MADDR_8 0x80000
+#define FRU_EEPROM_MADDR_INV 0xFFFFF
static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
@@ -63,10 +64,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
switch (adev->asic_type) {
case CHIP_VEGA20:
/* D161 and D163 are the VG20 server SKUs */
- if (strnstr(atom_ctx->vbios_pn, "D161",
- sizeof(atom_ctx->vbios_pn)) ||
- strnstr(atom_ctx->vbios_pn, "D163",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && (strnstr(atom_ctx->vbios_pn, "D161",
+ sizeof(atom_ctx->vbios_pn)) ||
+ strnstr(atom_ctx->vbios_pn, "D163",
+ sizeof(atom_ctx->vbios_pn)))) {
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -78,8 +79,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
return false;
}
case IP_VERSION(11, 0, 7):
- if (strnstr(atom_ctx->vbios_pn, "D603",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && strnstr(atom_ctx->vbios_pn, "D603",
+ sizeof(atom_ctx->vbios_pn))) {
if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
sizeof(atom_ctx->vbios_pn))) {
return false;
@@ -94,8 +95,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
}
case IP_VERSION(13, 0, 2):
/* All Aldebaran SKUs have an FRU */
- if (!strnstr(atom_ctx->vbios_pn, "D673",
- sizeof(atom_ctx->vbios_pn)))
+ if (atom_ctx && !strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -104,6 +105,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_8;
return true;
+ case IP_VERSION(13, 0, 12):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_INV;
+ return true;
default:
return false;
}
@@ -120,6 +125,10 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
+ /* FRU data available, but no direct EEPROM access */
+ if (fru_addr == FRU_EEPROM_MADDR_INV)
+ return 0;
+
if (!adev->fru_info) {
adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
if (!adev->fru_info)
@@ -135,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -143,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -170,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -188,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
@@ -384,7 +401,7 @@ int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
{
- if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info)
+ if (!adev->fru_info)
return;
sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
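
The validation loops above all rely on the IPMI zero-checksum convention; as
a standalone sketch, the rule is that the byte sum of a header or area,
including its trailing checksum byte, must be zero modulo 256:

	static bool ipmi_area_checksum_ok(const u8 *buf, size_t len)
	{
		u8 csum = 0;
		size_t i;

		for (i = 0; i < len; i++)
			csum += buf[i];

		return csum == 0; /* any non-zero sum means corruption */
	}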
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index bc58dca18035..98f3196599ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -32,7 +32,7 @@ struct amdgpu_fru_info {
char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
char manufacturer_name[32];
- char fru_id[32];
+ char fru_id[50];
};
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 256b95232de5..b2033f8352f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -78,8 +78,9 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
if (adev->dummy_page_addr)
return 0;
- adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
@@ -99,8 +100,9 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
- dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
adev->dummy_page_addr = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1a5df8b94661..b7ebae289bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,12 +36,122 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
+
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ size_mul(sizeof(uint32_t), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
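
/*
 * Userspace consumption of the timeline output could look roughly like this
 * (illustrative, not part of this patch; libdrm wrapper, with the fd, handle
 * and point variables assumed to come from the preceding DRM_AMDGPU_GEM_VA
 * submission):
 */
	uint64_t point = vm_timeline_point;
	int r = drmSyncobjTimelineWait(fd, &vm_timeline_syncobj, &point, 1,
				       INT64_MAX /* absolute timeout */,
				       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);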
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
@@ -87,10 +197,8 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (aobj) {
- amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
- }
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -179,11 +287,21 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
if (r)
return r;
+ amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -199,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
+ if (!drm_gem_is_imported(obj) ||
!dma_buf_is_dynamic(obj->import_attach->dmabuf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
@@ -211,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -247,11 +365,15 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
+ amdgpu_vm_bo_update_shared(bo);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
@@ -320,20 +442,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -348,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -459,8 +572,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
+ r = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (r)
goto release_object;
@@ -468,6 +580,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -637,18 +751,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns resulting fence if freed BO(s) got cleared from the PT.
+ * otherwise stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -664,36 +783,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
-}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
+ return fence;
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -712,8 +803,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -773,6 +866,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -801,12 +900,19 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -818,20 +924,31 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
@@ -839,22 +956,38 @@ error:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -865,29 +998,26 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -899,18 +1029,147 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
- amdgpu_vm_bo_invalidate(adev, robj, true);
+ amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+ * On input, num_entries is the size of the user-allocated array of
+ * drm_amdgpu_gem_vm_entry stored at args->value.
+ * On output, num_entries is the number of mappings the bo has.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries) {
+ r = -ENOMEM;
+ goto out_exec;
+ }
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
- amdgpu_bo_unreserve(robj);
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
+
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array.
+ * On output, num_entries is the number of bos in the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
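
Both AMDGPU_GEM_OP_GET_MAPPING_INFO and the new list-handles ioctl share the
same two-call num_entries protocol; a userspace sketch of the retry loop
(hypothetical, using libdrm's drmCommandWriteRead; the ioctl index name is
assumed):

	struct drm_amdgpu_gem_list_handles args = { 0 };
	struct drm_amdgpu_gem_list_handles_entry *entries = NULL;
	uint32_t n = 0;

	do {
		n = args.num_entries;
		free(entries);
		entries = calloc(n ? n : 1, sizeof(*entries));
		args.entries = (uintptr_t)entries;
		/* DRM_AMDGPU_GEM_LIST_HANDLES: assumed uapi name */
		if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_LIST_HANDLES,
					&args, sizeof(args)))
			break;
		/* retry while the kernel reports more BOs than we allocated */
	} while (args.num_entries > n);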
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f1ffab5a1eae..ebe2b4c68b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,29 +75,20 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
}
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue)
-{
- *queue = bit % adev->gfx.me.num_queue_per_pipe;
- *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
- *me = (bit / adev->gfx.me.num_queue_per_pipe)
- / adev->gfx.me.num_pipe_per_me;
-}
-
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
int me, int pipe, int queue)
{
@@ -157,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -248,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -257,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -268,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -415,7 +408,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
/* prepare MQD backup */
- kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
+ kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
if (!kiq->mqd_backup) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n", ring->name);
@@ -438,7 +431,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.me.mqd_backup[i]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -462,7 +455,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[j]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -525,6 +518,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_compute_rings)) {
@@ -538,20 +534,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}
-
- /**
- * This is workaround: only skip kiq_ring test
- * during ras recovery in suspend stage for gfx9.4.3
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the unmap queues packet submitted above got
+ * processed successfully before returning.
*/
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev)) {
- spin_unlock(&kiq->ring_lock);
- return 0;
- }
+ r = amdgpu_ring_test_helper(kiq_ring);
- if (kiq_ring->sched.ready && !adev->job_hang)
- r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
return r;
@@ -579,8 +570,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- spin_lock(&kiq->ring_lock);
+ if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_gfx_rings)) {
spin_unlock(&kiq->ring_lock);
@@ -593,11 +587,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}
- }
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
- if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
+ /*
+ * Ring test will do a basic scratch register change check.
+ * Just run this to ensure that the unmap queues packet submitted
+ * above got processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&kiq->ring_lock);
+ spin_unlock(&kiq->ring_lock);
+ }
return r;
}
@@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -702,11 +702,17 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
kiq->pmf->kiq_map_queues(kiq_ring,
&adev->gfx.compute_ring[j]);
}
-
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the map queues packet submitted above got
+ * processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -728,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -742,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -753,27 +759,23 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.gfx_ring[j]);
}
}
-
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that map queues that is submitted before got
+ * processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
-/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
- *
- * @adev: amdgpu_device pointer
- * @bool enable true: enable gfx off feature, false: disable gfx off feature
- *
- * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled.
- * 2. other client can send request to disable gfx off feature, the request should be honored.
- * 3. other client can cancel their request of disable gfx off feature
- * 4. other client should not send request to enable gfx off feature before disable gfx off feature.
- */
-
-void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
+ bool no_delay)
{
unsigned long delay = GFX_OFF_DELAY_ENABLE;
@@ -795,9 +797,9 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
- if (adev->in_s0ix) {
+ if (no_delay) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
+ AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
@@ -809,7 +811,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.gfx_off_state &&
- !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {
@@ -827,6 +829,43 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
+ * 2. Other clients can send requests to disable the gfx off feature; the requests should be honored.
+ * 3. Other clients can cancel their request to disable the gfx off feature.
+ * 4. Other clients should not send a request to enable the gfx off feature before having disabled it.
+ *
+ * The gfx off allow will be delayed by GFX_OFF_DELAY_ENABLE ms.
+ */
+void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+{
+ /* If going to s2idle, no need to wait */
+ bool no_delay = adev->in_s0ix;
+
+ amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
+}
+
+/* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
+ * 2. Other clients can send requests to disable the gfx off feature; the requests should be honored.
+ * 3. Other clients can cancel their request to disable the gfx off feature.
+ * 4. Other clients should not send a request to enable the gfx off feature before having disabled it.
+ *
+ * The gfx off allow is issued immediately.
+ */
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_do_off_ctrl(adev, enable, true);
+}
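
/*
 * Typical client usage of this interlock (sketch, not part of this patch):
 * pair a disable request around direct GFX register access and re-enable
 * afterwards; the register name below is invented. The _immediate variant
 * is for paths that cannot tolerate the GFX_OFF_DELAY_ENABLE grace period,
 * e.g. right before entering suspend.
 */
	amdgpu_gfx_off_ctrl(adev, false);		/* req_count++, GFX forced on */
	val = RREG32_SOC15(GC, 0, regSOME_GFX_REG);	/* safe while GFX is awake */
	amdgpu_gfx_off_ctrl(adev, true);		/* req_count--, gfxoff re-allowed */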
+
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
int r = 0;
@@ -895,6 +934,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
if (adev->gfx.cp_ecc_error_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
@@ -988,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -1060,6 +1102,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1129,6 +1174,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -1312,6 +1359,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1355,43 +1406,49 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
return count;
}
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
struct device_attribute *addr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- char *supported_partition;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
- /* TBD */
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- supported_partition = "SPX, DPX, QPX, CPX";
- break;
- case 6:
- supported_partition = "SPX, TPX, CPX";
- break;
- case 4:
- supported_partition = "SPX, DPX, CPX";
- break;
- /* this seems only existing in emulation phase */
- case 2:
- supported_partition = "SPX, CPX";
- break;
- default:
- supported_partition = "Not supported";
- break;
+ if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
}
- return sysfs_emit(buf, "%s\n", supported_partition);
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
}
static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
@@ -1399,9 +1456,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1412,13 +1471,22 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1472,6 +1540,24 @@ static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
return 0;
}
+/**
+ * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * Provides the sysfs interface to manually run a cleaner shader, which is
+ * used to clear the GPU state between different tasks. Writing a value to the
+ * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
+ * The value written corresponds to the partition index on multi-partition
+ * devices. On single-partition devices, the value should be '0'.
+ *
+ * The cleaner shader clears the Local Data Store (LDS) and General Purpose
+ * Registers (GPRs) to ensure data isolation between GPU workloads.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -1487,6 +1573,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1520,6 +1609,20 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
return count;
}
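A minimal userspace sketch of the interface described above, assuming a hypothetical sysfs path; the real node lives in the device's sysfs directory:

	#include <fcntl.h>
	#include <unistd.h>

	/* Trigger the cleaner shader on partition 0 of a single-partition device. */
	int run_cleaner_shader(const char *path)
	{
		int fd = open(path, O_WRONLY);	/* e.g. ".../run_cleaner_shader" */

		if (fd < 0)
			return -1;
		if (write(fd, "0", 1) != 1) {	/* "0" = partition index */
			close(fd);
			return -1;
		}
		return close(fd);
	}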
+/**
+ * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer to store the output data
+ *
+ * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
+ * feature for each GPU partition. Reading from the 'enforce_isolation'
+ * sysfs file returns the isolation settings for all partitions, where '0'
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
+ * and '3' indicates enabled without the cleaner shader.
+ *
+ * Return: The number of bytes read from the sysfs file.
+ */
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1543,6 +1646,23 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
return size;
}
+/**
+ * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * This function allows control over the 'enforce_isolation' feature, which
+ * serializes access to the graphics engine. Writing '0' to disable, '1' to
+ * enable isolation with cleaner shader, '2' to enable legacy isolation without
+ * cleaner shader, or '3' to enable process isolation without submitting the
+ * cleaner shader to the 'enforce_isolation' sysfs file sets the isolation mode
+ * for each partition. The input should specify the setting for all
+ * partitions.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1576,28 +1696,67 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
- /* Going from enabled to disabled */
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
- /* Going from disabled to enabled */
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
}
- adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
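A minimal userspace sketch of the store interface above (the path is an assumption; multi-partition devices take one value per partition, space separated, e.g. "1 1 1 1"):

	#include <stdio.h>

	/* Enable legacy isolation (mode 2) on a single-partition device. */
	int set_enforce_isolation(const char *path)
	{
		FILE *f = fopen(path, "w");	/* e.g. ".../enforce_isolation" */

		if (!f)
			return -1;
		fprintf(f, "2\n");	/* 0=off, 1=on, 2=legacy, 3=no cleaner shader */
		return fclose(f);
	}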
+static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
+}
+
+static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
+}
+
static DEVICE_ATTR(run_cleaner_shader, 0200,
NULL, amdgpu_gfx_set_run_cleaner_shader);
@@ -1611,45 +1770,138 @@ static DEVICE_ATTR(current_compute_partition, 0644,
static DEVICE_ATTR(available_compute_partition, 0444,
amdgpu_gfx_get_available_compute_partition, NULL);
+static DEVICE_ATTR(gfx_reset_mask, 0444,
+ amdgpu_gfx_get_gfx_reset_mask, NULL);
-int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+static DEVICE_ATTR(compute_reset_mask, 0444,
+ amdgpu_gfx_get_compute_reset_mask, NULL);
+
+static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
int r;
+ if (!xcp_mgr)
+ return 0;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
+
+ if (!xcp_switch_supported)
+ dev_attr_current_compute_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+
r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
if (r)
return r;
- r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
+ if (xcp_switch_supported)
+ r = device_create_file(adev->dev,
+ &dev_attr_available_compute_partition);
return r;
}
-void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
+
+ if (!xcp_mgr)
+ return;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
- device_remove_file(adev->dev, &dev_attr_available_compute_partition);
+
+ if (xcp_switch_supported)
+ device_remove_file(adev->dev,
+ &dev_attr_available_compute_partition);
}
-int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;
r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
if (r)
return r;
+ if (adev->gfx.enable_cleaner_shader)
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
- r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
- if (r)
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ if (adev->gfx.enable_cleaner_shader)
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
return r;
- return 0;
+ if (adev->gfx.num_gfx_rings) {
+ r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
+ if (r)
+ return r;
+ }
+
+ if (adev->gfx.num_compute_rings) {
+ r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
}
-void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
- device_remove_file(adev->dev, &dev_attr_enforce_isolation);
- device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
+
+ if (adev->gfx.num_compute_rings)
+ device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
+}
+
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_gfx_sysfs_xcp_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to create xcp sysfs files");
+ return r;
+ }
+
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create isolation sysfs files");
+
+ r = amdgpu_gfx_sysfs_reset_mask_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create reset mask sysfs files");
+
+ return r;
+}
+
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ amdgpu_gfx_sysfs_xcp_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+ amdgpu_gfx_sysfs_reset_mask_fini(adev);
+ }
}
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
@@ -1720,39 +1972,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
- GFX_SLICE_PERIOD);
+ msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -1792,24 +2046,92 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
}
if (fences) {
+ /* we've already had our timeslice, so let's wrap this up */
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
- GFX_SLICE_PERIOD);
+ msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
- amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = false;
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the GPU partition
+ *
+ * When kernel submissions come in, the jobs are given a time slice and once
+ * that time slice is up, if there are KFD user queues active, kernel
+ * submissions are blocked until KFD has had its time slice. Once the KFD time
+ * slice is up, KFD user queues are preempted and kernel submissions are
+ * unblocked and allowed to run again.
+ */
+static void
+amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
+ u32 idx)
+{
+ unsigned long cjiffies;
+ bool wait = false;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
+ /* set the initial values if nothing is set */
+ if (!adev->gfx.enforce_isolation_jiffies[idx]) {
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ }
+ /* Make sure KFD gets a chance to run */
+ if (amdgpu_amdkfd_compute_active(adev, idx)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
+ cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
+ if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
+ /* if our time is up, let KGD work drain before scheduling more */
+ wait = true;
+ /* reset the timer period */
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ } else {
+ /* set the timer period to what's left in our time slice */
+ adev->gfx.enforce_isolation_time[idx] =
+ GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
+ }
+ } else {
+ /* if jiffies wrap around we will just wait a little longer */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ }
+ } else {
+ /* if there is no KFD work, then set the full slice period */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
}
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (wait)
+ msleep(GFX_SLICE_PERIOD_MS);
}
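The remaining-slice computation above reduces to the following sketch (illustrative helper, assuming the jiffies bookkeeping shown in the function): with a slice of, say, 250 ms of which 180 ms have elapsed, the timer is re-armed for the remaining 70 ms; once the slice is fully consumed, the submitter sleeps one slice so KFD can run.

	static unsigned int remaining_slice_ms(unsigned long start_jiffies)
	{
		unsigned int elapsed_ms = jiffies_to_msecs(jiffies - start_jiffies);

		/* 0 means the slice is up and the caller should wait */
		return elapsed_ms >= GFX_SLICE_PERIOD_MS ?
			0 : GFX_SLICE_PERIOD_MS - elapsed_ms;
	}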
+/**
+ * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring begin_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -1822,18 +2144,34 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
if (idx >= MAX_XCP)
return;
+ /* Don't submit more work until KFD has had some time */
+ amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
+
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
+/**
+ * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring end_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -1847,9 +2185,303 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+}
+
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.idle_work.work);
+ enum PP_SMC_POWER_PROFILE profile;
+ u32 i, fences = 0;
+ int r;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+ } else {
+ schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+ }
+}
+
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ enum PP_SMC_POWER_PROFILE profile;
+ int r;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ atomic_inc(&adev->gfx.total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
+ /* We can safely return early here because we've cancelled
+ * the delayed work, so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->gfx.workload_profile_active)
+ return;
+
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (!adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, true);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = true;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+}
+
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ atomic_dec(&ring->adev->gfx.total_submission_cnt);
+
+ schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+}
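A minimal sketch of how a kernel submission path would pair these helpers (illustrative caller, not part of this patch): begin_use bumps the submission count and switches the workload profile on, while end_use drops the count and re-arms the idle work that clears the profile after GFX_PROFILE_IDLE_TIMEOUT of inactivity.

	static void example_submit(struct amdgpu_ring *ring)
	{
		amdgpu_gfx_profile_ring_begin_use(ring);
		/* ... emit and commit the job on @ring ... */
		amdgpu_gfx_profile_ring_end_use(ring);
	}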
+
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble setup.
+ *
+ * Return:
+ * The next free index into @buffer.
+ */
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse CS data
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: Output buffer that receives the packed CS section data.
+ * @count: Index in @buffer at which to start writing.
+ *
+ * Return:
+ * The next free index into @buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble end.
+ * @count: Index in @buffer at which to start writing the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
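A minimal sketch of how the three CSB helpers compose (illustrative caller modeled on the per-IP get_csb_buffer implementations): start the preamble, append the SECT_CONTEXT register extents, then terminate the clear state.

	static void example_fill_csb(struct amdgpu_device *adev, u32 *buffer)
	{
		u32 count;

		count = amdgpu_gfx_csb_preamble_start(buffer);
		count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
		amdgpu_gfx_csb_preamble_end(buffer, count);
	}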
+
+/*
+ * debugfs to enable/disable gfx job submission to specific rings.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (val & (1 << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* make sure the sched.ready flag updates are visible across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
+ amdgpu_debugfs_gfx_sched_mask_get,
+ amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->gfx.num_gfx_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_gfx_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_gfx_sched_mask_fops);
+#endif
+}
+
+/*
+ * debugfs to enable/disable compute job submission to specific rings.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->gfx.num_compute_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+ if (val & (1 << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+
+ /* make sure the sched.ready flag updates are visible across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
+ amdgpu_debugfs_compute_sched_mask_get,
+ amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->gfx.num_compute_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_compute_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_compute_sched_mask_fops);
+#endif
}
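A minimal userspace sketch of the debugfs knob (the path is an assumption based on the DRM primary minor):

	#include <fcntl.h>
	#include <unistd.h>

	/* Restrict gfx submissions to ring 0: bit i of the mask enables ring i. */
	int set_gfx_sched_mask(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "0x1", 3) != 3) {
			close(fd);
			return -1;
		}
		return close(fd);
	}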
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5644e10a86a9..fb5f7a0ee029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -57,6 +57,9 @@ enum amdgpu_gfx_pipe_priority {
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+/* 1 second timeout */
+#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
enum amdgpu_gfx_partition {
AMDGPU_SPX_PARTITION_MODE = 0,
AMDGPU_DPX_PARTITION_MODE = 1,
@@ -167,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * amdgpu_rb_config - Configure a single Render Backend (RB)
+ *
+ * Bad RBs are fused off, and the driver reads a harvest register to
+ * determine which RB(s) are fused off so that it can configure the
+ * hardware state to send nothing to them. There are also user harvest
+ * registers that the driver can program to disable additional RBs, etc.,
+ * for testing purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * The raster configuration state is split across two registers;
+ * this field holds the value for the first one.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * The raster configuration state is split across two registers;
+ * this field holds the value for the second one.
+ */
uint32_t raster_config_1;
};
@@ -218,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -302,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -424,6 +471,8 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
+ uint32_t gfx_supported_reset;
+ uint32_t compute_supported_reset;
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
@@ -469,9 +518,19 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
+ unsigned long enforce_isolation_jiffies[MAX_XCP];
+ unsigned long enforce_isolation_time[MAX_XCP];
+
+ atomic_t total_submission_cnt;
+ struct delayed_work idle_work;
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -491,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -538,13 +597,10 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
@@ -579,12 +635,20 @@ void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
unsigned int cleaner_shader_size,
const void *cleaner_shader_ptr);
-int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
-void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
+
static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
switch (mode) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 17a19d49d30a..9dcf51991b5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+ /* node_segment_size may not be 4GB aligned on SRIOV; align up as needed. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -269,14 +286,13 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
* @mc: memory controller structure holding memory information
* @gart_placement: GART placement policy with respect to VRAM
*
- * Function will place try to place GART before or after VRAM.
+ * Function will try to place GART before or after VRAM.
* If the GART size is bigger than the space left, we adjust the GART size.
* Thus the function will never fail.
*/
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -573,6 +589,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
unsigned i;
unsigned vmhub, inv_eng;
+ struct amdgpu_ring *shared_ring;
/* init the vm inv eng for all vmhubs */
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
@@ -591,7 +608,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
if (ring == &adev->mes.ring[0] ||
ring == &adev->mes.ring[1] ||
- ring == &adev->umsch_mm.ring)
+ ring == &adev->umsch_mm.ring ||
+ ring == &adev->cper.ring_buf)
+ continue;
+
+ /* Skip if the ring is a shared ring */
+ if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
continue;
inv_eng = ffs(vm_inv_engs[vmhub]);
@@ -606,6 +628,21 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->vm_hub);
+ /* SDMA has a special packet which allows it to use the same
+ * invalidation engine for all the rings in one instance.
+ * Therefore, we do not allocate a separate VM invalidation engine
+ * for SDMA page rings. Instead, they share the VM invalidation
+ * engine with the SDMA gfx ring. This change ensures efficient
+ * resource management and avoids the issue of insufficient VM
+ * invalidation engines.
+ */
+ shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
+ if (shared_ring) {
+ shared_ring->vm_inv_eng = ring->vm_inv_eng;
+ dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
+ ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
+ continue;
+ }
}
return 0;
@@ -653,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
@@ -678,12 +715,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -740,10 +775,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else {
+ r = 0;
+ }
}
error_unlock_reset:
@@ -851,6 +897,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
@@ -888,6 +935,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0) ||
gc_ver >= IP_VERSION(10, 3, 0));
if (!amdgpu_sriov_xnack_support(adev))
@@ -1009,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1024,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1065,18 +1116,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}
-/**
- * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
- * from CPU's view
- *
- * @adev: amdgpu_device pointer
- * @bo: amdgpu buffer object
- */
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
-{
- return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
-}
-
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
struct amdgpu_bo *vram_bo = NULL;
@@ -1130,6 +1169,79 @@ release_buffer:
return ret;
}
+static const char *nps_desc[] = {
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+static ssize_t available_memory_partition_show(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int size = 0, mode;
+ char *sep = "";
+
+ for_each_inst(mode, adev->gmc.supported_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t current_memory_partition_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_hive_info *hive;
+ int i;
+
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+ for_each_inst(i, adev->gmc.supported_nps_modes) {
+ if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
+ mode = i;
+ break;
+ }
+ }
+
+ if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
+ return -EINVAL;
+
+ if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
+ dev_info(
+ adev->dev,
+ "requested NPS mode is same as current NPS mode, skipping\n");
+ return count;
+ }
+
+ /* If the device is part of a hive, all devices in the hive should
+ * request the same mode; hence store the requested mode in the hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ atomic_set(&hive->requested_nps_mode, mode);
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ adev->gmc.requested_nps_mode = mode;
+ }
+
+ dev_info(
+ adev->dev,
+ "NPS mode change requested, please remove and reload the driver\n");
+
+ return count;
+}
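A minimal userspace sketch of the NPS store interface (the path is an assumption): the handler matches the written string case-insensitively against the supported mode names, and the change takes effect on the next driver reload.

	#include <stdio.h>

	/* Request NPS4 for the next driver reload. */
	int request_nps4(const char *path)
	{
		FILE *f = fopen(path, "w");	/* e.g. ".../current_memory_partition" */

		if (!f)
			return -1;
		fputs("NPS4\n", f);
		return fclose(f);
	}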
+
static ssize_t current_memory_partition_show(
struct device *dev, struct device_attribute *addr, char *buf)
{
@@ -1137,54 +1249,70 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- switch (mode) {
- case AMDGPU_NPS1_PARTITION_MODE:
- return sysfs_emit(buf, "NPS1\n");
- case AMDGPU_NPS2_PARTITION_MODE:
- return sysfs_emit(buf, "NPS2\n");
- case AMDGPU_NPS3_PARTITION_MODE:
- return sysfs_emit(buf, "NPS3\n");
- case AMDGPU_NPS4_PARTITION_MODE:
- return sysfs_emit(buf, "NPS4\n");
- case AMDGPU_NPS6_PARTITION_MODE:
- return sysfs_emit(buf, "NPS6\n");
- case AMDGPU_NPS8_PARTITION_MODE:
- return sysfs_emit(buf, "NPS8\n");
- default:
+ if ((mode >= ARRAY_SIZE(nps_desc)) ||
+ (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
return sysfs_emit(buf, "UNKNOWN\n");
- }
+
+ return sysfs_emit(buf, "%s\n", nps_desc[mode]);
}
-static DEVICE_ATTR_RO(current_memory_partition);
+static DEVICE_ATTR_RW(current_memory_partition);
+static DEVICE_ATTR_RO(available_memory_partition);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
{
+ bool nps_switch_support;
+ int r = 0;
+
if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
return 0;
+ nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
+ AMDGPU_ALL_NPS_MASK) > 1);
+ if (!nps_switch_support)
+ dev_attr_current_memory_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+ else
+ r = device_create_file(adev->dev,
+ &dev_attr_available_memory_partition);
+
+ if (r)
+ return r;
+
return device_create_file(adev->dev,
&dev_attr_current_memory_partition);
}
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
{
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return;
+
device_remove_file(adev->dev, &dev_attr_current_memory_partition);
+ device_remove_file(adev->dev, &dev_attr_available_memory_partition);
}
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges)
+ uint8_t *exp_ranges)
{
struct amdgpu_gmc_memrange *ranges;
int range_cnt, ret, i, j;
uint32_t nps_type;
+ bool refresh;
- if (!mem_ranges)
+ if (!mem_ranges || !exp_ranges)
return -EINVAL;
+ refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
+ (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
- &range_cnt);
+ &range_cnt, refresh);
if (ret)
return ret;
@@ -1192,16 +1320,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
/* TODO: For now, expect ranges and partition count to be the same.
* Adjust if there are holes expected in any NPS domain.
*/
- if (range_cnt != exp_ranges) {
+ if (*exp_ranges && (range_cnt != *exp_ranges)) {
dev_warn(
adev->dev,
"NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
- exp_ranges, nps_type, range_cnt);
+ *exp_ranges, nps_type, range_cnt);
ret = -EINVAL;
goto err;
}
- for (i = 0; i < exp_ranges; ++i) {
+ for (i = 0; i < range_cnt; ++i) {
if (ranges[i].base_address >= ranges[i].limit_address) {
dev_warn(
adev->dev,
@@ -1242,8 +1370,310 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
ranges[i].limit_address - ranges[i].base_address + 1;
}
+ if (!*exp_ranges)
+ *exp_ranges = range_cnt;
err:
kfree(ranges);
return ret;
}
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode)
+{
+ /* Not supported on VF devices and APUs */
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return -EOPNOTSUPP;
+
+ if (!adev->psp.funcs) {
+ dev_err(adev->dev,
+ "PSP interface not available for nps mode change request");
+ return -EINVAL;
+ }
+
+ return psp_memory_partition(&adev->psp, nps_mode);
+}
+
+static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
+ int req_nps_mode,
+ int cur_nps_mode)
+{
+ return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
+ BIT(req_nps_mode)) &&
+ req_nps_mode != cur_nps_mode);
+}
+
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
+{
+ int req_nps_mode, cur_nps_mode, r;
+ struct amdgpu_hive_info *hive;
+
+ if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
+ !adev->gmc.gmc_funcs->request_mem_partition_mode)
+ return;
+
+ cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ req_nps_mode = atomic_read(&hive->requested_nps_mode);
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
+ cur_nps_mode)) {
+ amdgpu_put_xgmi_hive(hive);
+ return;
+ }
+ r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
+ amdgpu_put_xgmi_hive(hive);
+ goto out;
+ }
+
+ req_nps_mode = adev->gmc.requested_nps_mode;
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
+ return;
+
+ /* even if this fails, we should let driver unload w/o blocking */
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
+out:
+ if (r)
+ dev_err(adev->dev, "NPS mode change request failed\n");
+ else
+ dev_info(
+ adev->dev,
+ "NPS mode change request done, reload driver to complete the change\n");
+}
+
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->gmc.gmc_funcs->need_reset_on_init)
+ return adev->gmc.gmc_funcs->need_reset_on_init(adev);
+
+ return false;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
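The software fallback above splits VRAM evenly in GPU pages; as an illustrative helper (values assumed), 64 GiB in NPS4 on a dGPU yields ((64G + 16M) >> AMDGPU_GPU_PAGE_SHIFT) / 4 pages per range, with the last range then clamped so rounding never overruns real_vram_size.

	static u64 example_pages_per_partition(u64 real_vram_size, u32 parts)
	{
		/* mirrors the fallback: pad by 16M, convert to pages, split evenly */
		return ((real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT) / parts;
	}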
+
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 4d951a1baefa..55097ca10738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "amdgpu_irq.h"
+#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
/* VA hole for 48bit addresses on Vega10 */
@@ -61,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -73,6 +77,15 @@ enum amdgpu_memory_partition {
AMDGPU_NPS8_PARTITION_MODE = 8,
};
+#define AMDGPU_ALL_NPS_MASK \
+ (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE))
+
+#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+
+#define AMDGPU_MAX_MEM_RANGES 8
+
/*
* GMC page fault information
*/
@@ -141,15 +154,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -161,29 +174,10 @@ struct amdgpu_gmc_funcs {
enum amdgpu_memory_partition (*query_mem_partition_mode)(
struct amdgpu_device *adev);
-};
-
-struct amdgpu_xgmi_ras {
- struct amdgpu_ras_block_object ras_block;
-};
-
-struct amdgpu_xgmi {
- /* from psp */
- u64 node_id;
- u64 hive_id;
- /* fixed per family */
- u64 node_segment_size;
- /* physical node (0-3) */
- unsigned physical_node_id;
- /* number of nodes (0-4) */
- unsigned num_physical_nodes;
- /* gpu list in the same hive */
- struct list_head head;
- bool supported;
- struct ras_common_if *ras_if;
- bool connected_to_cpu;
- bool pending_reset;
- struct amdgpu_xgmi_ras *ras;
+ /* Request NPS mode */
+ int (*request_mem_partition_mode)(struct amdgpu_device *adev,
+ int nps_mode);
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
struct amdgpu_mem_partition_info {
@@ -305,10 +299,14 @@ struct amdgpu_gmc {
struct amdgpu_mem_partition_info *mem_partitions;
uint8_t num_mem_partitions;
const struct amdgpu_gmc_funcs *gmc_funcs;
+ enum amdgpu_memory_partition requested_nps_mode;
+ uint32_t supported_nps_modes;
+ uint32_t reset_flags;
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
@@ -358,9 +356,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
@@ -398,6 +397,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -447,13 +447,25 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev);
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev);
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges);
-
+ uint8_t *exp_ranges);
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode);
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..6e02fb9ac2f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+		WREG32((adev->rmmio_remap.reg_offset +
+			KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+		amdgpu_ring_emit_wreg(ring,
+				      (adev->rmmio_remap.reg_offset +
+				       KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+				      0);
+ }
+}
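
The flush takes two paths: a direct MMIO write through the HDP remap window, fenced by a register read back, when no ring (or no emit_wreg hook) is available, and an emitted write packet otherwise. A hedged sketch of a caller; the function name is made up:

    /* Hypothetical caller: flush HDP before the CPU reads GPU-written data. */
    static void example_flush_before_cpu_read(struct amdgpu_device *adev)
    {
    	/* A NULL ring selects the MMIO path; the get_memsize() read back
    	 * inside the helper fences the posted register write. */
    	amdgpu_hdp_generic_flush(adev, NULL);
    }
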
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..4cfd932b7e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,6 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..2c6a6b858112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,13 +167,12 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range)
{
struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
@@ -222,14 +221,6 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
*phmm_range = hmm_range;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..953e1d06de20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -33,7 +33,7 @@
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range);
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 00d6211e0fbf..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
@@ -185,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -216,22 +215,23 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
-/* Add the default buses */
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
- if (amdgpu_hw_i2c)
- DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
-
- amdgpu_atombios_i2c_init(adev);
+ if (!adev->is_atom_fw) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_atombios_i2c_init(adev);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ amdgpu_atombios_oem_i2c_init(adev, 0x97);
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
/* remove all the buses */
@@ -239,28 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
-}
-
-/* Add additional buses */
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name)
-{
- struct drm_device *dev = adev_to_drm(adev);
- int i;
-
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (!adev->i2c_bus[i]) {
- adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
- return;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 63c2ff7499e1..1d3d3806e0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -30,9 +30,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *i2c_bus);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..7d9bcb72e8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,16 +89,14 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/**
* amdgpu_ib_free - free an IB (Indirect Buffer)
*
- * @adev: amdgpu_device pointer
* @ib: IB object to free
 * @f: the fence the SA BO needs to wait on for the IB allocation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f)
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
+ amdgpu_sa_bo_free(&ib->sa_bo, f);
}
/**
@@ -130,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -140,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -156,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = &job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
@@ -163,14 +167,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = NULL;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -193,8 +198,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+	    need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
if (tmp)
@@ -284,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, f, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -299,15 +304,24 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
if (ring->funcs->emit_wave_limit &&
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+ /* Save the wptr associated with this fence.
+ * This must be last for resets to work properly
+ * as we need to save the wptr associated with this
+ * fence so we know what rings contents to backup
+ * after we reset the queue.
+ */
+ amdgpu_fence_save_wptr(*f);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 92d27d32de41..3ef5bc95642c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -209,7 +209,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
+ fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
if (!fences)
return -ENOMEM;
@@ -275,58 +275,45 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush))) {
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->owner != vm->immediate.fence_context ||
+ (!adev->vm_manager.concurrent_flush && needs_flush)) {
struct dma_fence *tmp;
- /* Wait for the gang to be assembled before using a
- * reserved VMID or otherwise the gang could deadlock.
+ /* Don't use per engine and per process VMID at the
+ * same time
*/
- tmp = amdgpu_device_get_gang(adev);
- if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+		/* prevent one context being starved by another */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
*id = NULL;
- *fence = tmp;
+ *fence = dma_fence_get(tmp);
return 0;
}
- dma_fence_put(tmp);
-
- /* Make sure the id is owned by the gang before proceeding */
- if (!job->gang_submit ||
- (*id)->owner != vm->immediate.fence_context) {
-
- /* Don't use per engine and per process VMID at the
- * same time
- */
- if (adev->vm_manager.concurrent_flush)
- ring = NULL;
-
- /* to prevent one context starved by another context */
- (*id)->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
- if (tmp) {
- *id = NULL;
- *fence = dma_fence_get(tmp);
- return 0;
- }
- }
- needs_flush = true;
}
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -342,15 +329,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* @ring: ring we want to submit job to
* @job: job who wants to use the VMID
* @id: resulting VMID
- * @fence: fence to wait for if no id could be grabbed
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_job *job,
- struct amdgpu_vmid **id,
- struct dma_fence **fence)
+ struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
@@ -387,7 +372,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -424,12 +410,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
} else {
- r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id);
if (r)
goto error;
@@ -439,7 +425,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
goto error;
@@ -476,55 +463,71 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
- * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
- return vm->reserved_vmid[vmhub] ||
- (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
- vm->root.bo->xcp_id : 0] &&
- AMDGPU_IS_GFXHUB(vmhub));
+ return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
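
Taken together, the reserved ID now lives on the VM itself and the manager only tracks whether one is outstanding. A hedged usage sketch of the pair; the caller name and error handling are made up, not driver code:

    /* Hypothetical debug path: pin a VMID for SPM, then release it. */
    static int example_reserve_vmid(struct amdgpu_device *adev,
    				    struct amdgpu_vm *vm)
    {
    	int r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));

    	if (r)	/* -ENOENT: another VM already holds the reserved ID */
    		return r;
    	/* ... SPM capture using vm->reserved_vmid[AMDGPU_GFXHUB(0)] ... */
    	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
    	return 0;
    }
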
@@ -591,10 +594,17 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -603,11 +613,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 4012fb2dd08a..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -78,12 +77,11 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3b0aaf3ebc6..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -217,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
-	if (wptr != ih->rptr)
-		goto restart_ih;
+	if (wptr != ih->rptr && !ih->overflow)
+		goto restart_ih;
+
+	if (ih->overflow && amdgpu_sriov_runtime(adev))
+		WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+							&adev->virt.flr_work),
+			  "Failed to queue work! at %s", __func__);
return IRQ_HANDLED;
}
@@ -298,3 +309,9 @@ uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
return dw1 | ((u64)(dw2 & 0xffff) << 32);
}
+
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+{
+ return ih == &adev->irq.ih ? "ih" : ih == &adev->irq.ih_soft ? "sw ih" :
+ ih == &adev->irq.ih1 ? "ih1" : ih == &adev->irq.ih2 ? "ih2" : "unknown";
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 508f02eb0cf8..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
@@ -72,12 +72,16 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
#define amdgpu_ih_ts_after(t1, t2) \
(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
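
These macros order 48-bit timestamps by shifting them into the top of a signed 64-bit value, so wrap-around falls out of two's-complement subtraction. A standalone model of the same arithmetic, with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    /* True if 48-bit timestamp t2 is after t1, tolerating wrap-around. */
    static int ts_after(uint64_t t1, uint64_t t2)
    {
        return ((int64_t)(t2 << 16) - (int64_t)(t1 << 16)) > 0;
    }

    int main(void)
    {
        assert(ts_after(1, 2));               /* simple ordering */
        assert(!ts_after(2, 1));
        /* t2 wrapped past 2^48 back to 5: still "after" a late t1. */
        assert(ts_after(0xFFFFFFFFFFF5ULL, 5));
        return 0;
    }
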
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
@@ -110,4 +114,5 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
signed int offset);
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN as JPEG is only alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+		/* For the rest of the IPs, no lookup is required; assume
+		 * 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
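
The map is built by walking the hardware instance mask with ffs(): the l-th set bit becomes logical instance l. A standalone model of that walk, with an assumed 8-slot table and example mask:

    #include <stdio.h>
    #include <strings.h>

    int main(void)
    {
        unsigned int inst_mask = 0xd;      /* 0b1101: physical instances 0, 2, 3 */
        int dev_inst[8];
        int l = 0;

        while (inst_mask) {
            int i = ffs(inst_mask) - 1;    /* index of the lowest set bit */
            dev_inst[l++] = i;             /* logical l -> physical i */
            inst_mask &= ~(1U << i);
        }
        for (; l < 8; l++)
            dev_inst[l] = -1;              /* unused logical slots */

        for (int k = 0; k < 8; k++)
            printf("logical %d -> physical %d\n", k, dev_inst[k]);
        return 0;
    }

Harvested or partially-populated IPs thus get a dense logical numbering while the table still records the real hardware instance to address.
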
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
new file mode 100644
index 000000000000..2490fd322aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -619,6 +623,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+	/* When the threshold is reached, the interrupt source may not be enabled; return -EINVAL */
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,10 +733,10 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 4766e99dd98f..9cddbf50442a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,33 +33,19 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
-static int isp_sw_init(void *handle)
-{
- return 0;
-}
-
-static int isp_sw_fini(void *handle)
-{
- return 0;
-}
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
/**
* isp_hw_init - start and test isp block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_init(void *handle)
+static int isp_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP);
-
- if (!ip_block)
- return -EINVAL;
-
if (isp->funcs->hw_init != NULL)
return isp->funcs->hw_init(isp);
@@ -69,13 +55,12 @@ static int isp_hw_init(void *handle)
/**
* isp_hw_fini - stop the hardware block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_fini(void *handle)
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_isp *isp = &adev->isp;
+ struct amdgpu_isp *isp = &ip_block->adev->isp;
if (isp->funcs->hw_fini != NULL)
return isp->funcs->hw_fini(isp);
@@ -83,16 +68,6 @@ static int isp_hw_fini(void *handle)
return -ENODEV;
}
-static int isp_suspend(void *handle)
-{
- return 0;
-}
-
-static int isp_resume(void *handle)
-{
- return 0;
-}
-
static int isp_load_fw_by_psp(struct amdgpu_device *adev)
{
const struct common_firmware_header *hdr;
@@ -104,7 +79,8 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
sizeof(ucode_prefix));
/* read isp fw */
- r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
amdgpu_ucode_release(&adev->isp.fw);
return r;
@@ -122,9 +98,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
return r;
}
-static int isp_early_init(void *handle)
+static int isp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
@@ -149,46 +126,202 @@ static int isp_early_init(void *handle)
return 0;
}
-static bool isp_is_idle(void *handle)
+static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int isp_wait_for_idle(void *handle)
+static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
return 0;
}
-static int isp_soft_reset(void *handle)
+static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
return 0;
}
-static int isp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
return 0;
}
-static int isp_set_powergating_state(void *handle,
- enum amd_powergating_state state)
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use.
+ * It performs a GART allocation to generate a GPU address for the BO, making
+ * it accessible to the ISP HW through the GART aperture.
+ *
+ * This function is exported to allow the V4L2 isp device, external to the drm
+ * device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
return 0;
}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs a BO allocated for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
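
A hedged sketch of how a V4L2 isp driver might pair these two exports; the dma-buf acquisition is elided and everything except the two exported functions is made up:

    /* Hypothetical V4L2 side: map a userspace dma-buf for the ISP HW. */
    static int example_map_user_dmabuf(struct device *isp_dev,
    				       struct dma_buf *dbuf)
    {
    	void *bo = NULL;
    	u64 gpu_addr = 0;
    	int ret;

    	ret = isp_user_buffer_alloc(isp_dev, dbuf, &bo, &gpu_addr);
    	if (ret)
    		return ret;
    	/* ... program gpu_addr into the ISP HW, run the capture ... */
    	isp_user_buffer_free(bo);
    	return 0;
    }
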
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of the isp kernel BO
+ * @cpu_addr: CPU addr of the isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
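
The kernel-buffer pair follows the same pattern, with a CPU mapping added; again only the two exported functions are real, the rest is an assumed caller:

    /* Hypothetical V4L2 side: allocate CPU-visible scratch for the isp fw. */
    static int example_alloc_fw_scratch(struct device *isp_dev)
    {
    	void *bo = NULL, *cpu = NULL;
    	u64 gpu_addr = 0;
    	int ret;

    	ret = isp_kernel_buffer_alloc(isp_dev, SZ_64K, &bo, &gpu_addr, &cpu);
    	if (ret)
    		return ret;
    	memset(cpu, 0, SZ_64K);	/* zero the firmware scratch via the CPU map */
    	/* ... hand gpu_addr to the ISP firmware ... */
    	isp_kernel_buffer_free(&bo, &gpu_addr, &cpu);
    	return 0;
    }
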
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
- .late_init = NULL,
- .sw_init = isp_sw_init,
- .sw_fini = isp_sw_fini,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
- .suspend = isp_suspend,
- .resume = isp_resume,
.is_idle = isp_is_idle,
- .wait_for_idle = isp_wait_for_idle,
- .soft_reset = isp_soft_reset,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index b03664c66dd6..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -50,9 +47,11 @@ struct amdgpu_isp {
struct mfd_cell *isp_cell;
struct resource *isp_res;
struct resource *isp_i2c_res;
+ struct resource *isp_gpio_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 16f2605ac50b..d020a890a0ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->funcs->dump_ip_state)
adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)adev);
+ ->dump_ip_state((void *)&adev->ip_blocks[i]);
dev_info(adev->dev, "Dumping IP State Completed\n");
amdgpu_coredump(adev, true, false, job);
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -102,8 +102,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
- adev->job_hang = true;
-
/*
* Do the coredump immediately after a job timeout to get a very
* close dump/snapshot/representation of GPU's current error status
@@ -114,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -126,34 +125,32 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
/* attempt a per ring reset */
- if (amdgpu_gpu_recovery &&
- ring->funcs->reset) {
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
- r = amdgpu_ring_reset(ring, job->vmid);
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "Ring reset disabled by debug mask\n");
+ } else if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -179,14 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
- adev->job_hang = false;
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
if (num_ibs == 0)
return -EINVAL;
@@ -195,11 +193,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
- /*
- * Initialize the scheduler to at least some ring so that we always
- * have a pointer to adev.
- */
- (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -209,17 +202,19 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
@@ -253,20 +248,19 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
/* Check if any fences where initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence.base.ops)
+ f = &job->hw_fence.base;
else
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -278,10 +272,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
/* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -310,10 +304,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
@@ -349,25 +343,33 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int r;
r = drm_sched_entity_error(s_entity);
if (r)
goto error;
- if (!fence && job->gang_submit)
+ if (job->gang_submit) {
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
+
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
- while (!fence && job->vm && !job->vmid) {
+ if (job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
+ return fence;
}
- return fence;
+ return NULL;
error:
dma_fence_set_error(&job->base.s_fence->finished, r);
@@ -411,8 +413,24 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+/*
+ * This is a duplicate of a function from the DRM scheduler's sched_internal.h.
+ * The plan is to remove it when amdgpu_job_stop_all_jobs_on_sched is removed,
+ * the latter being incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
@@ -425,7 +443,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
- while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_signal(&s_fence->scheduled);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..4a6487eb6cb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,11 +44,27 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job ids. (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -78,6 +94,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
@@ -90,11 +107,13 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6df99cb00d9a..6b7d66b6d4cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -33,6 +33,7 @@
#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
@@ -47,7 +48,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
adev->jpeg.indirect_sram = true;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
if (adev->jpeg.indirect_sram) {
@@ -73,7 +74,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
amdgpu_bo_free_kernel(
@@ -85,6 +86,9 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
+ if (adev->jpeg.reg_list)
+ amdgpu_jpeg_reg_dump_fini(adev);
+
mutex_destroy(&adev->jpeg.jpeg_pg_lock);
return 0;
@@ -110,17 +114,19 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
unsigned int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -190,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -342,3 +349,259 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
+/*
+ * debugfs to enable/disable JPEG job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (val & BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ }
+ /* make the sched.ready updates visible to other CPUs before returning */
+ smp_wmb();
+ return 0;
+}
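
For reference, writing a hex bitmask to the new amdgpu_jpeg_sched_mask debugfs file
(registered below) enables or disables scheduling per ring; the setter and getter
agree that ring j of instance i owns bit (i * num_jpeg_rings + j). A standalone,
user-space sketch of that bit layout, with invented instance/ring counts:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int num_inst = 2, num_rings = 10;	/* invented */
	uint64_t mask = 0;
	unsigned int i, j;

	/* enable every other ring, using the same bit layout as above */
	for (i = 0; i < num_inst; i++)
		for (j = 0; j < num_rings; j++)
			if ((i + j) % 2 == 0)
				mask |= 1ULL << (i * num_rings + j);

	/* instance 1, ring 3 lands on bit 1 * 10 + 3 = 13 */
	assert((mask >> (1 * num_rings + 3)) & 1);
	return 0;
}
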
+
+static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->sched.ready)
+ mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j);
+ }
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops,
+ amdgpu_debugfs_jpeg_sched_mask_get,
+ amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->jpeg.num_jpeg_inst <= 1 && adev->jpeg.num_jpeg_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_jpeg_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_jpeg_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset);
+}
+
+static DEVICE_ATTR(jpeg_reset_mask, 0444,
+ amdgpu_get_jpeg_reset_mask, NULL);
+
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->jpeg.num_jpeg_inst) {
+ r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->jpeg.num_jpeg_inst)
+ device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ }
+}
+
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->jpeg.ip_dump) {
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
+ return -ENOMEM;
+ }
+ adev->jpeg.reg_list = reg;
+ adev->jpeg.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->jpeg.ip_dump);
+ adev->jpeg.reg_list = NULL;
+ adev->jpeg.reg_count = 0;
+}
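
amdgpu_jpeg_reg_dump_init() above sizes ip_dump as num_jpeg_inst rows of reg_count
dwords each; the dump code below fills slot 0 of a row from UVD_JPEG_POWER_STATUS
and treats the instance as powered when bit 0 is clear. A standalone sketch of that
layout, with invented values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int num_inst = 2, reg_count = 4;	/* invented */
	uint32_t *dump = calloc(num_inst * reg_count, sizeof(*dump));
	unsigned int i;

	if (!dump)
		return 1;

	dump[0 * reg_count] = 0x0;	/* instance 0: bit 0 clear -> powered */
	dump[1 * reg_count] = 0x1;	/* instance 1: bit 0 set   -> gated   */

	for (i = 0; i < num_inst; i++) {
		unsigned int inst_off = i * reg_count;	/* row offset */
		int is_powered = (dump[inst_off] & 0x1) != 1;

		printf("JPEG%u is %s\n", i, is_powered ? "active" : "inactive");
	}
	free(dump);
	return 0;
}
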
+
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, inst_id, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(JPEG, i);
+ inst_off = i * adev->jpeg.reg_count;
+ /* check power status from UVD_JPEG_POWER_STATUS */
+ adev->jpeg.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[0],
+ inst_id));
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered)
+ for (j = 1; j < adev->jpeg.reg_count; j++)
+ adev->jpeg.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[j],
+ inst_id));
+ }
+}
+
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->jpeg.num_jpeg_inst);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:JPEG%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->jpeg.reg_count;
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered) {
+ drm_printf(p, "Active Instance:JPEG%d\n", i);
+ for (j = 0; j < adev->jpeg.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->jpeg.reg_list[j].reg_name,
+ adev->jpeg.ip_dump[inst_off + j]);
+ } else
+ drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
+ }
+}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
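
The window and the atomic hole carved out of it come from the new defines added to
amdgpu_jpeg.h further down (0x4000-0x41c2, minus 0x4120-0x412A). A standalone copy
of the check with a few spot tests:

#include <assert.h>
#include <stdbool.h>

#define JPEG_REG_RANGE_START	0x4000
#define JPEG_REG_RANGE_END	0x41c2
#define JPEG_ATOMIC_RANGE_START	0x4120
#define JPEG_ATOMIC_RANGE_END	0x412A

static bool jpeg_reg_valid(unsigned int reg)
{
	if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
	    (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
		return false;
	return true;
}

int main(void)
{
	assert(jpeg_reg_valid(0x4000));		/* first allowed register */
	assert(!jpeg_reg_valid(0x3fff));	/* below the window */
	assert(!jpeg_reg_valid(0x4125));	/* inside the atomic hole */
	assert(jpeg_reg_valid(0x412B));		/* just past the hole */
	assert(!jpeg_reg_valid(0x41c3));	/* above the window */
	return 0;
}
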
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index f9cdd873ac9b..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,9 +25,17 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
-#define AMDGPU_MAX_JPEG_RINGS 8
+#define AMDGPU_MAX_JPEG_RINGS 10
+#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -91,6 +99,14 @@
*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
} while (0)
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_jpeg_caps {
+ AMDGPU_JPEG_RRMT_ENABLED,
+};
+
+#define AMDGPU_JPEG_CAPS(caps) BIT(AMDGPU_JPEG_##caps)
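
AMDGPU_JPEG_CAPS() token-pastes the short cap name onto the enum prefix, so a
hypothetical consumer (not part of this patch) would test a cap like this:

	/* hypothetical usage sketch of the caps macro above */
	if (adev->jpeg.caps & AMDGPU_JPEG_CAPS(RRMT_ENABLED))
		dev_info(adev->dev, "JPEG RRMT enabled\n");
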
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -128,6 +144,11 @@ struct amdgpu_jpeg {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool indirect_sram;
+ uint32_t supported_reset;
+ uint32_t caps;
+ u32 *ip_dump;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -149,5 +170,15 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 016a6f6c4267..a9327472c651 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -90,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -160,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -378,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -387,24 +409,45 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -414,7 +457,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +467,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +480,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +493,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -459,8 +506,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +523,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +532,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -539,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
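
The new userq_num_slots accounting treats each set bit of a pipe's HQD mask as one
user-queue slot MES can hand out, via hweight32(). A standalone sketch with an
invented mask (popcount32 stands in for the kernel's hweight32; 0x2 matches the
pre-GFX12 gfx_hqd_mask value set in amdgpu_mes_init() later in this diff, where
only queue 1 of pipe 0 goes to MES):

#include <assert.h>
#include <stdint.h>

static unsigned int popcount32(uint32_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

int main(void)
{
	const uint32_t gfx_hqd_mask[] = { 0x2 };	/* one pipe, queue 1 only */
	unsigned int i, num_slots = 0;

	for (i = 0; i < sizeof(gfx_hqd_mask) / sizeof(gfx_hqd_mask[0]); i++)
		num_slots += popcount32(gfx_hqd_mask[i]);

	assert(num_slots == 1);
	return 0;
}
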
@@ -846,7 +897,7 @@ out:
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
- uint32_t pcie_gen_mask;
+ uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
@@ -888,6 +939,19 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
+ if (amdgpu_passthrough(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+ else if (amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_TOP;
@@ -934,15 +998,18 @@ out:
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
- pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
@@ -966,6 +1033,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
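
Per the CAIL_* layout in amd_pcie.h, the ASIC's supported link speeds sit in the
low 16 bits and the platform's in the high 16, so mask & (mask >> 16) keeps only
the speeds both sides support and fls() then picks the highest common gen; the
width-mask combine above works the same way. A standalone sketch with invented
values (fls32 stands in for the kernel's fls()):

#include <assert.h>
#include <stdint.h>

static int fls32(uint32_t v)
{
	int n = 0;

	for (; v; v >>= 1)	/* 1-based index of the highest set bit */
		n++;
	return n;
}

int main(void)
{
	uint32_t asic = 0x0007;		/* ASIC supports gens 1-3 */
	uint32_t platform = 0x0003;	/* platform supports gens 1-2 */
	uint32_t mask = (platform << 16) | asic;
	uint32_t combined = mask & (mask >> 16);

	assert(combined == 0x0003);	/* common subset: gens 1-2 */
	assert(fls32(combined) == 2);	/* reported PCIe gen: 2 */
	return 0;
}
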
@@ -1281,6 +1350,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1334,13 +1419,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
- if (r)
- goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
- goto error_vm;
+ goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
@@ -1364,6 +1447,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
@@ -1373,10 +1464,8 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 18ee60378727..3ca03b5e0f91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -348,6 +348,24 @@ static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgp
return ret;
}
+static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry)
+{
+ bool ret;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]);
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ default:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set,
struct ras_query_context *qctx)
{
@@ -373,7 +391,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc
amdgpu_mca_bank_set_add_entry(mca_set, &entry);
- amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
+ if (amdgpu_mca_bank_should_dump(adev, type, &entry))
+ amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 7d4b540340e0..5bf9be073cdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -83,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -104,7 +68,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -142,27 +106,54 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX V12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is reserved for the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling;
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is reserved for the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling;
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- /* use only 1st MEC pipes */
- if (i >= adev->gfx.mec.num_pipe_per_mec)
- continue;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ if (i >= num_pipes)
+ break;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
- IP_VERSION(6, 0, 0))
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
- /* zero sdma_hqd_mask for non-existent engine */
- else if (adev->sdma.num_instances == 1)
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
- else
- adev->mes.sdma_hqd_mask[i] = 0xfc;
+ if (i >= num_pipes)
+ break;
+ adev->mes.sdma_hqd_mask[i] = 0xfc;
}
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
@@ -192,17 +183,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
}
- r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
- if (r) {
- dev_err(adev->dev,
- "(%d) read_val_offs alloc failed\n", r);
- goto error;
- }
- adev->mes.read_val_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
- adev->mes.read_val_ptr =
- (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
-
r = amdgpu_mes_doorbell_init(adev);
if (r)
goto error;
@@ -211,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -223,8 +217,6 @@ error:
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
- if (adev->mes.read_val_ptr)
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
@@ -238,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -249,8 +245,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
- if (adev->mes.read_val_ptr)
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
amdgpu_mes_doorbell_free(adev);
@@ -261,244 +255,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -518,7 +274,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -542,306 +298,8 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
-
- return r;
-}
-
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
-
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
+ dev_err(adev->dev, "failed to resume all gangs");
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
return r;
}
@@ -860,9 +318,11 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -883,9 +343,11 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
queue_input.trail_fence_addr = gpu_addr;
queue_input.trail_fence_data = seq;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -895,7 +357,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -905,14 +367,66 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.me_id = ring->me;
queue_input.pipe_id = ring->pipe;
queue_input.queue_id = ring->queue;
- queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+ }
return r;
}
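
Note that the helper above leaves hung entries at their original slot positions in
the caller's array rather than packing them to the front. A hedged, kernel-context
sketch of a hypothetical caller (example_scan_hung_compute_queues is not part of
this patch and the error handling is simplified):

static void example_scan_hung_compute_queues(struct amdgpu_device *adev)
{
	int size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	unsigned int hung_db_num = 0;
	u32 *hung_db_array;
	int r, i;

	hung_db_array = kcalloc(size, sizeof(*hung_db_array), GFP_KERNEL);
	if (!hung_db_array)
		return;

	/* detect_only=true reports hung doorbells without resetting them */
	r = amdgpu_mes_detect_and_reset_hung_queues(adev,
						    AMDGPU_RING_TYPE_COMPUTE,
						    true, &hung_db_num,
						    hung_db_array);
	if (!r && hung_db_num) {
		for (i = 0; i < size; i++)
			if (hung_db_array[i])	/* kcalloc zero-filled the rest */
				dev_info(adev->dev, "hung queue doorbell 0x%x\n",
					 hung_db_array[i]);
	}

	kfree(hung_db_array);
}
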
@@ -921,23 +435,36 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
int r, val = 0;
+ uint32_t addr_offset = 0;
+ uint64_t read_val_gpu_addr;
+ uint32_t *read_val_ptr;
+ if (amdgpu_device_wb_get(adev, &addr_offset)) {
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
+ goto error;
+ }
+ read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
+ read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
op_input.op = MES_MISC_OP_READ_REG;
op_input.read_reg.reg_offset = reg;
- op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
+ op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
- val = *(adev->mes.read_val_ptr);
+ val = *(read_val_ptr);
error:
+ if (addr_offset)
+ amdgpu_device_wb_free(adev, addr_offset);
return val;
}
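
The reworked amdgpu_mes_rreg() above grabs a scratch write-back (WB) dword per
call instead of keeping a device-lifetime slot. The addressing it relies on: slot
index k names the same dword at GPU address wb.gpu_addr + 4 * k and at CPU pointer
&wb.wb[k]. A standalone sketch with invented values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t wb[8] = { 0 };			/* stand-in for adev->wb.wb[] */
	uint64_t wb_gpu_base = 0x100000;	/* hypothetical GPU VA of the WB page */
	unsigned int slot = 3;			/* index handed out by the allocator */

	uint64_t read_val_gpu_addr = wb_gpu_base + slot * 4ULL;
	volatile uint32_t *read_val_ptr = &wb[slot];

	wb[slot] = 0xdeadbeef;			/* MES firmware would write here */
	assert(*read_val_ptr == 0xdeadbeef);
	assert(read_val_gpu_addr == 0x10000C);
	return 0;
}
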
@@ -952,14 +479,16 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -979,39 +508,16 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
- r = -EINVAL;
- goto error;
- }
-
- r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
- if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
-
-error:
- return r;
-}
-
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- struct mes_misc_op_input op_input;
- int r;
-
- op_input.op = MES_MISC_OP_WRM_REG_WAIT;
- op_input.wrm_reg.reg0 = reg;
- op_input.wrm_reg.ref = val;
- op_input.wrm_reg.mask = mask;
-
- if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -1028,7 +534,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1052,7 +559,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -1066,7 +573,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1078,515 +586,19 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
-
- amdgpu_mes_unlock(&adev->mes);
-
- return r;
-}
-
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
-#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
-do { \
- if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].slots[id_offs]); \
- else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ring); \
- else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ib); \
- else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].padding); \
-} while(0)
-
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
-{
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
- break;
- case AMDGPU_RING_TYPE_SDMA:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
- break;
- default:
- break;
- }
-
- WARN_ON(1);
- return -EINVAL;
-}
-
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
-
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
amdgpu_mes_unlock(&adev->mes);
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
return r;
}
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- del_timer_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1594,6 +606,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
char ucode_prefix[30];
char fw_name[50];
bool need_retry = false;
+ u32 *ucode_ptr;
int r;
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
@@ -1613,10 +626,12 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mes.bin", ucode_prefix);
}
@@ -1631,6 +646,10 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
adev->mes.data_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+ ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
+ sizeof(union amdgpu_firmware_header));
+ adev->mes.fw_version[pipe] =
+ le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
int ucode, ucode_data;
@@ -1677,6 +696,49 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
return is_supported;
}
+/* FIXME: node_id will be used to identify the correct MES instance in the future */
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ op_input.op = MES_MISC_OP_CHANGE_CONFIG;
+ op_input.change_config.option.limit_single_process = enable ? 1 : 0;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes change config is not supported!\n");
+ r = -EINVAL;
+ goto error;
+ }
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+		dev_err(adev->dev, "failed to change_config\n");
+
+error:
+ return r;
+}
+
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
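The pair of helpers above pushes the driver's cached per-partition enforce_isolation[] state down to the MES scheduler through the CHANGE_CONFIG misc op. A minimal sketch of a call site, assuming the usual amdgpu init context (the wrapper function itself is hypothetical):

/* hypothetical call site; assumes MES is already up */
static int example_sync_isolation_state(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_mes_update_enforce_isolation(adev);
	if (r)
		dev_warn(adev->dev, "enforce isolation sync failed (%d)\n", r);
	return r;
}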
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 96788c0f42f1..6b506fc72f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -40,6 +40,8 @@
#define AMDGPU_MES_VERSION_MASK 0x00000fff
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
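The mask macros a few lines above carve the single 32-bit version dword that amdgpu_mes_init_microcode() reads out of the ucode image into its three fields. A standalone sketch of the unpacking (the sample dword is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define AMDGPU_MES_VERSION_MASK      0x00000fff
#define AMDGPU_MES_API_VERSION_MASK  0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000

int main(void)
{
	uint32_t raw = 0x04123045;  /* hypothetical version dword */

	printf("fw version:      %u\n", (unsigned)(raw & AMDGPU_MES_VERSION_MASK));
	printf("api version:     %u\n", (unsigned)((raw & AMDGPU_MES_API_VERSION_MASK) >> 12));
	printf("feature version: %u\n", (unsigned)((raw & AMDGPU_MES_FEAT_VERSION_MASK) >> 24));
	return 0;
}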
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -55,7 +57,7 @@ enum amdgpu_mes_priority_level {
struct amdgpu_mes_funcs;
-enum admgpu_mes_pipe {
+enum amdgpu_mes_pipe {
AMDGPU_MES_SCHED_PIPE = 0,
AMDGPU_MES_KIQ_PIPE,
AMDGPU_MAX_MES_PIPES = 2,
@@ -75,6 +77,7 @@ struct amdgpu_mes {
uint32_t sched_version;
uint32_t kiq_version;
+ uint32_t fw_version[AMDGPU_MAX_MES_PIPES];
bool enable_legacy_queue_map;
uint32_t total_max_queue;
@@ -109,8 +112,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -119,9 +122,6 @@ struct amdgpu_mes {
uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
- uint32_t read_val_offs;
- uint64_t read_val_gpu_addr;
- uint32_t *read_val_ptr;
uint32_t saved_flags;
@@ -144,23 +144,14 @@ struct amdgpu_mes {
const struct amdgpu_mes_funcs *funcs;
/* mes resource_1 bo*/
- struct amdgpu_bo *resource_1;
- uint64_t resource_1_gpu_addr;
- void *resource_1_addr;
-
-};
-
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
+ struct amdgpu_bo *resource_1[AMDGPU_MAX_MES_PIPES];
+ uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+
+ int hung_queue_db_array_size;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -249,18 +240,6 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
-};
-
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -292,7 +271,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -302,6 +281,20 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
+};
+
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
};
enum mes_misc_opcode {
@@ -310,6 +303,7 @@ enum mes_misc_opcode {
MES_MISC_OP_WRM_REG_WAIT,
MES_MISC_OP_WRM_REG_WR_WAIT,
MES_MISC_OP_SET_SHADER_DEBUGGER,
+ MES_MISC_OP_CHANGE_CONFIG,
};
struct mes_misc_op_input {
@@ -348,6 +342,21 @@ struct mes_misc_op_input {
uint32_t tcp_watch_cntl[4];
uint32_t trap_en;
} set_shader_debugger;
+
+ struct {
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ };
+ uint32_t all;
+ } option;
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+ } change_config;
};
};
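The new change_config block overlays single-bit flags on one 32-bit word, so firmware sees a packed option dword while callers set named bits. A standalone sketch of the aliasing, with the field names copied from the struct above (the demo wrapper is not driver code):

#include <stdio.h>
#include <stdint.h>

union change_config_option {
	struct {
		uint32_t limit_single_process : 1;
		uint32_t enable_hws_logging_buffer : 1;
		uint32_t reserved : 30;
	};
	uint32_t all;
};

int main(void)
{
	union change_config_option opt = { .all = 0 };

	/* the same bit amdgpu_mes_set_enforce_isolation() sets */
	opt.limit_single_process = 1;
	printf("packed option word: 0x%08x\n", (unsigned)opt.all);  /* 0x00000001 */
	return 0;
}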
@@ -373,42 +382,27 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -420,11 +414,16 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
@@ -436,27 +435,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
@@ -518,4 +500,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
}
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
+
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 5e3faefc5510..20460cfd09bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -496,8 +496,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
@@ -609,6 +607,7 @@ struct amdgpu_i2c_adapter {
struct i2c_adapter base;
struct ddc_service *ddc_service;
+ bool oem;
};
#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
+
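The helper centralizes the support checks so callers probe before reading the counter. A hedged sketch of the intended pattern (the reader function is hypothetical):

/* hypothetical sysfs-style reader */
static u64 example_read_replay_count(struct amdgpu_device *adev)
{
	/* false for VFs or when no callback is wired up */
	if (!amdgpu_nbio_is_replay_cnt_supported(adev))
		return 0;

	return amdgpu_nbio_get_pcie_replay_count(adev);
}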
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index f61d117b0caf..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs {
int (*get_compute_partition_mode)(struct amdgpu_device *adev);
u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
u32 *supp_modes);
+ bool (*is_nps_switch_requested)(struct amdgpu_device *adev);
u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
void (*set_reg_remap)(struct amdgpu_device *adev);
};
@@ -118,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 44819cdba7fb..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -40,6 +41,8 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: amdgpu_object
@@ -60,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -150,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -161,7 +172,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -322,6 +334,9 @@ error_free:
*
* Allocates and pins a BO for kernel internal use.
*
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to create and access the kernel BO.
+ *
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
* Returns:
@@ -347,6 +362,74 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_isp_user - create user BO for isp
+ *
+ * @adev: amdgpu device object
+ * @dma_buf: DMABUF handle for isp buffer
+ * @domain: where to place it
+ * @bo: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ *
+ * Imports the isp DMA-BUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a gpu_addr for the BO, making it
+ * accessible through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device, external to the drm
+ * device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
+		u64 *gpu_addr)
+{
+ struct drm_gem_object *gem_obj;
+ int r;
+
+	gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
+	if (IS_ERR(gem_obj)) {
+		dev_err(adev->dev, "failed to get valid isp user bo\n");
+		return PTR_ERR(gem_obj);
+	}
+
+	*bo = gem_to_amdgpu_bo(gem_obj);
+
+ r = amdgpu_bo_reserve(*bo, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_pin(*bo, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo);
+ goto error_unpin;
+ }
+
+ if (!WARN_ON(!gpu_addr))
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo);
+
+ amdgpu_bo_unreserve(*bo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(*bo);
+error_unreserve:
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+
+ return r;
+}
+
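Together with amdgpu_bo_free_isp_user() added further down, this gives the V4L2 isp driver a complete lifecycle around an imported DMA-BUF. A hedged round-trip sketch (everything outside the two exported helpers is hypothetical):

/* hypothetical isp-side usage */
static int example_isp_map_buffer(struct amdgpu_device *adev,
				  struct dma_buf *dbuf)
{
	struct amdgpu_bo *bo;
	u64 gpu_addr;
	int r;

	r = amdgpu_bo_create_isp_user(adev, dbuf, AMDGPU_GEM_DOMAIN_GTT,
				      &bo, &gpu_addr);
	if (r)
		return r;

	/* ... program the ISP HW with gpu_addr (GART aperture address) ... */

	amdgpu_bo_free_isp_user(bo);  /* unpin + unref when done */
	return 0;
}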
+/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
*
* @adev: amdgpu device object
@@ -421,6 +504,9 @@ error:
* @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* unmaps and unpin a BO for kernel internal use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the kernel BO.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
@@ -446,6 +532,28 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
+/**
+ * amdgpu_bo_free_isp_user - free BO for isp use
+ *
+ * @bo: amdgpu isp user BO to free
+ *
+ * unpin and unref BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the isp user BO.
+ */
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ if (amdgpu_bo_reserve(bo, true) == 0) {
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ }
+ amdgpu_bo_unref(&bo);
+}
+
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
unsigned long size, u32 domain)
@@ -836,7 +944,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -864,7 +972,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -915,7 +1023,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -941,7 +1049,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1148,7 +1257,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
@@ -1156,11 +1264,11 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = ttm_to_amdgpu_bo(bo);
- amdgpu_vm_bo_invalidate(adev, abo, evict);
+ amdgpu_vm_bo_move(abo, new_mem, evict);
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1169,58 +1277,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
old_mem ? old_mem->mem_type : -1);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *res = bo->tbo.resource;
- uint64_t size = amdgpu_bo_size(bo);
- struct drm_gem_object *obj;
- bool shared;
-
- /* Abort if the BO doesn't currently have a backing store */
- if (!res)
- return;
-
- obj = &bo->tbo.base;
- shared = drm_gem_object_is_shared_for_memory_stats(obj);
-
- switch (res->mem_type) {
- case TTM_PL_VRAM:
- stats->vram += size;
- if (amdgpu_res_cpu_visible(adev, res))
- stats->visible_vram += size;
- if (shared)
- stats->vram_shared += size;
- break;
- case TTM_PL_TT:
- stats->gtt += size;
- if (shared)
- stats->gtt_shared += size;
- break;
- case TTM_PL_SYSTEM:
- default:
- stats->cpu += size;
- if (shared)
- stats->cpu_shared += size;
- break;
- }
-
- if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
- stats->requested_vram += size;
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->requested_visible_vram += size;
-
- if (res->mem_type != TTM_PL_VRAM) {
- stats->evicted_vram += size;
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->evicted_visible_vram += size;
- }
- } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
- stats->requested_gtt += size;
- }
-}
-
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1245,28 +1301,37 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
- /* We only remove the fence if the resv has individualized. */
- WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
- && bo->base.resv != &bo->base._resv);
- if (bo->base.resv == &bo->base._resv)
- amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+ /*
+	 * We lock the private dma_resv object here; since the BO is about to
+	 * be released, nobody else should hold a pointer to it.
+	 * If this locking fails, something is wrong with the reference
+	 * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
- return;
+ goto out;
- if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
- return;
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
- r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
- if (!WARN_ON(r)) {
- amdgpu_vram_mgr_set_cleared(bo->resource);
- amdgpu_bo_fence(abo, fence, false);
- dma_fence_put(fence);
- }
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
- dma_resv_unlock(bo->base.resv);
+out:
+ dma_resv_unlock(&bo->base._resv);
}
/**
@@ -1414,6 +1479,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
+
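In effect the helper computes fb_start plus the XGMI node offset plus the BO's page offset within VRAM, then sign-extends the result. A standalone arithmetic sketch; every value below is made up for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fb_start = 0x8000000000ull;         /* hypothetical aperture base */
	uint64_t node_segment_size = 0x400000000ull; /* 16 GiB per XGMI node */
	uint64_t physical_node_id = 1;
	uint64_t bo_start_page = 0x100;              /* tbo.resource->start */

	uint64_t addr = fb_start + physical_node_id * node_segment_size +
			(bo_start_page << 12);       /* assuming PAGE_SHIFT == 12 */

	printf("fb aperture addr: 0x%llx\n", (unsigned long long)addr);
	return 0;
}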
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
@@ -1436,6 +1521,47 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
+ * @bo: the buffer object we should look at
+ *
+ * BO can have multiple preferred placements, to avoid double counting we want
+ * to file it under a single placement for memory stats.
+ * Luckily, if we take the highest set bit in preferred_domains the result is
+ * quite sensible.
+ *
+ * Returns:
+ * Which of the placements should the BO be accounted under.
+ */
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
+{
+ uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
+
+ if (!domain)
+ return TTM_PL_SYSTEM;
+
+ switch (rounddown_pow_of_two(domain)) {
+ case AMDGPU_GEM_DOMAIN_CPU:
+ return TTM_PL_SYSTEM;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ return TTM_PL_TT;
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ return TTM_PL_VRAM;
+ case AMDGPU_GEM_DOMAIN_GDS:
+ return AMDGPU_PL_GDS;
+ case AMDGPU_GEM_DOMAIN_GWS:
+ return AMDGPU_PL_GWS;
+ case AMDGPU_GEM_DOMAIN_OA:
+ return AMDGPU_PL_OA;
+ case AMDGPU_GEM_DOMAIN_DOORBELL:
+ return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
+ default:
+ return TTM_PL_SYSTEM;
+ }
+}
+
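A short worked example of the reduction, assuming the usual amdgpu_drm.h bit values (CPU 0x1, GTT 0x2, VRAM 0x4):

/* illustration only, not driver code */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;  /* 0x6 */

/*
 * rounddown_pow_of_two(0x6) == 0x4 == AMDGPU_GEM_DOMAIN_VRAM, so the
 * switch returns TTM_PL_VRAM and the BO is counted under one placement.
 */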
+/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
@@ -1513,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
@@ -1547,7 +1676,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+	/* Dump the GEM object's reservation fences */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 717e47b46167..656b8a931dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -139,33 +139,6 @@ struct amdgpu_bo_vm {
struct amdgpu_vm_bo_base entries[];
};
-struct amdgpu_mem_stats {
- /* current VRAM usage, includes visible VRAM */
- uint64_t vram;
- /* current shared VRAM usage, includes visible VRAM */
- uint64_t vram_shared;
- /* current visible VRAM usage */
- uint64_t visible_vram;
- /* current GTT usage */
- uint64_t gtt;
- /* current shared GTT usage */
- uint64_t gtt_shared;
- /* current system memory usage */
- uint64_t cpu;
- /* current shared system memory usage */
- uint64_t cpu_shared;
- /* sum of evicted buffers, includes visible VRAM */
- uint64_t evicted_vram;
- /* sum of evicted buffers due to CPU access */
- uint64_t evicted_visible_vram;
- /* how much userspace asked for, includes vis.VRAM */
- uint64_t requested_vram;
- /* how much userspace asked for */
- uint64_t requested_visible_vram;
- /* how much userspace asked for */
- uint64_t requested_gtt;
-};
-
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -194,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
@@ -287,6 +262,10 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
@@ -298,6 +277,7 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -326,9 +306,9 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
@@ -363,8 +343,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct drm_suballoc **sa_bo,
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index e8adfd0a570a..34b5e22b44e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -137,7 +137,8 @@ void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
if (ret)
return;
- device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
+ if (adev->dev->kobj.sd)
+ device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0b28b2cf1517..8c0e5d03de50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -44,7 +44,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
@@ -153,15 +153,18 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
ret = psp_init_cap_microcode(psp, ucode_prefix);
break;
+ case IP_VERSION(13, 0, 12):
+ ret = psp_init_ta_microcode(psp, ucode_prefix);
+ break;
default:
return -EINVAL;
}
return ret;
}
-static int psp_early_init(void *handle)
+static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
psp->autoload_supported = true;
@@ -193,6 +196,7 @@ static int psp_early_init(void *handle)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
@@ -212,6 +216,11 @@ static int psp_early_init(void *handle)
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
+ case IP_VERSION(13, 0, 12):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
@@ -243,7 +252,12 @@ static int psp_early_init(void *handle)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ psp_v14_0_set_psp_funcs(psp);
+ break;
+ case IP_VERSION(14, 0, 5):
psp_v14_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
break;
default:
return -EINVAL;
@@ -359,6 +373,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
int i;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
return false;
@@ -421,9 +436,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_sw_init(void *handle)
+static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
int ret;
struct psp_runtime_boot_cfg_entry boot_cfg_entry;
@@ -433,7 +448,7 @@ static int psp_sw_init(void *handle)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
@@ -527,11 +542,10 @@ failed1:
return ret;
}
-static int psp_sw_fini(void *handle)
+static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
@@ -541,8 +555,8 @@ static int psp_sw_fini(void *handle)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
- kfree(cmd);
- cmd = NULL;
+ kfree(psp->cmd);
+ psp->cmd = NULL;
psp_free_shared_bufs(psp);
@@ -561,9 +575,11 @@ static int psp_sw_fini(void *handle)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -583,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x\n",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
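With the boolean replaced by a flags word, the two common poll shapes look as follows; a hedged sketch, with reg_index, expected, and mask as placeholders:

/* wait for (reg & mask) == expected, report a timeout (default verbosity) */
ret = psp_wait_for(psp, reg_index, expected, mask, 0);

/* poll quietly for the register to change away from expected */
ret = psp_wait_for(psp, reg_index, expected, mask,
		   PSP_WAITREG_CHANGED | PSP_WAITREG_NOVERBOSE);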
@@ -639,6 +660,16 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "AUTOLOAD_RLC";
case GFX_CMD_ID_BOOT_CFG:
return "BOOT_CFG";
+ case GFX_CMD_ID_CONFIG_SQ_PERFMON:
+ return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -850,12 +881,12 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -868,6 +899,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -968,6 +1000,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+		dev_warn(adev->dev, "reserved fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1043,6 +1175,31 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
+int psp_memory_partition(struct psp_context *psp, int mode)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
+ cmd->cmd.cmd_memory_part.mode = mode;
+
+ dev_info(psp->adev->dev,
+		 "Requesting %d memory partition change through PSP\n", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "PSP request failed to change to NPS%d mode\n", mode);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
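A hedged sketch of a caller asking PSP for an NPS mode change (the trigger path is hypothetical; the mode value follows the NPS%d naming in the messages above):

/* hypothetical: request NPS4 memory partitioning */
static int example_request_nps4(struct amdgpu_device *adev)
{
	return psp_memory_partition(&adev->psp, 4);
}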
int psp_spatial_partition(struct psp_context *psp, int mode)
{
struct psp_gfx_cmd_resp *cmd;
@@ -1229,6 +1386,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1756,34 +1918,47 @@ int psp_ras_initialize(struct psp_context *psp)
if (ret)
dev_warn(adev->dev, "PSP get boot config failed\n");
- if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
- if (!boot_cfg) {
- dev_info(adev->dev, "GECC is disabled\n");
- } else {
- /* disable GECC in next boot cycle if ras is
- * disabled by module parameter amdgpu_ras_enable
- * and/or amdgpu_ras_mask, or boot_config_get call
- * is failed
- */
- ret = psp_boot_config_set(adev, 0);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
- }
+ if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
+ dev_warn(adev->dev,
+ "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
} else {
- if (boot_cfg == 1) {
- dev_info(adev->dev, "GECC is enabled\n");
+ if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ if (boot_cfg == 1) {
+ dev_info(adev->dev, "GECC is enabled\n");
+ } else {
+ /* enable GECC in next boot cycle if it is disabled
+ * in boot config, or force enable GECC if failed to
+ * get boot configuration
+ */
+ ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ }
} else {
- /* enable GECC in next boot cycle if it is disabled
- * in boot config, or force enable GECC if failed to
- * get boot configuration
- */
- ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ if (!boot_cfg) {
+ if (!adev->ras_default_ecc_enabled &&
+ amdgpu_ras_enable != 1 &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
+ else
+ dev_info(adev->dev, "GECC is disabled\n");
+ } else {
+ /* disable GECC in next boot cycle if ras is
+ * disabled by module parameter amdgpu_ras_enable
+ * and/or amdgpu_ras_mask, or boot_config_get call
+ * is failed
+ */
+ ret = psp_boot_config_set(adev, 0);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ }
}
}
}
@@ -1807,6 +1982,10 @@ int psp_ras_initialize(struct psp_context *psp)
ras_cmd->ras_in_message.init_flags.xcc_mask =
adev->gfx.xcc_mask;
ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ ras_cmd->ras_in_message.init_flags.nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
ret = psp_ta_load(psp, &psp->ras_context.context);
@@ -2156,7 +2335,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -2172,7 +2352,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
@@ -2234,7 +2414,8 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
- ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
return -EINVAL;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
@@ -2264,11 +2445,40 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
}
}
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return false;
+
+ if (psp->funcs && psp->funcs->is_reload_needed)
+ return psp->funcs->is_reload_needed(psp);
+
+ return false;
+}
+
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2342,6 +2552,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->spdm_drv)) &&
+ (psp->funcs->bootloader_load_spdm_drv != NULL)) {
+ ret = psp_bootloader_load_spdm_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spdm_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2358,6 +2577,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
@@ -2958,16 +3185,13 @@ failed:
return ret;
}
-static int psp_hw_init(void *handle)
+static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
@@ -2987,9 +3211,9 @@ failed:
return -EINVAL;
}
-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (psp->ta_fw) {
@@ -3011,10 +3235,10 @@ static int psp_hw_fini(void *handle)
return 0;
}
-static int psp_suspend(void *handle)
+static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
@@ -3074,10 +3298,10 @@ out:
return ret;
}
-static int psp_resume(void *handle)
+static int psp_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
dev_info(adev->dev, "PSP is resuming...\n");
@@ -3092,6 +3316,10 @@ static int psp_resume(void *handle)
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -3246,7 +3474,8 @@ int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
@@ -3268,7 +3497,8 @@ int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
@@ -3364,6 +3594,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ipkeymgr_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_SPDM_DRV:
+ psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spdm_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
@@ -3431,7 +3667,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3523,6 +3764,36 @@ out:
return err;
}
+static bool is_ta_fw_applicable(struct psp_context *psp,
+ const struct psp_fw_bin_desc *desc)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t fw_version;
+
+ switch (desc->fw_type) {
+ case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
+ /* for now, AUX TA only exists on 13.0.6 ta bin,
+ * from v20.00.0x.14
+ */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 6)) {
+ fw_version = le32_to_cpu(desc->fw_version);
+
+ if (adev->flags & AMD_IS_APU &&
+ (fw_version & 0xff) >= 0x14)
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
+ else
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
static int parse_ta_bin_descriptor(struct psp_context *psp,
const struct psp_fw_bin_desc *desc,
const struct ta_firmware_header_v2_0 *ta_hdr)
@@ -3532,6 +3803,9 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
if (!psp || !desc || !ta_hdr)
return -EINVAL;
+ if (!is_ta_fw_applicable(psp, desc))
+ return 0;
+
ucode_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(desc->offset_bytes) +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
@@ -3544,6 +3818,7 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
@@ -3673,7 +3948,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3708,14 +3988,16 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
- goto out;
+ } else {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
}
- dev_err(adev->dev, "fail to initialize cap microcode\n");
+ goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3736,13 +4018,49 @@ out:
return err;
}
-static int psp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int psp_config_sq_perfmon(struct psp_context *psp,
+ uint32_t xcp_id, bool core_override_enable,
+ bool reg_override_enable, bool perfmon_override_enable)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (xcp_id > MAX_XCP) {
+ dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
+ dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
+ cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
+ cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
+ cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
+ cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
+ xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
+
+ release_psp_cmd_buf(psp);
+ return ret;
+}
+
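For context, a minimal caller sketch of the new helper - hypothetical, not part of this patch - showing how a profiling path might enable all three SQ overrides on partition 0. Per the checks above, the call is a no-op under SR-IOV and returns -EINVAL for an out-of-range xcp_id or a non-13.0.6 MP0 part:

	/* Hypothetical usage sketch for psp_config_sq_perfmon() */
	static int example_enable_sq_perfmon(struct amdgpu_device *adev)
	{
		/* xcp 0; core, reg and perfmon overrides all enabled */
		return psp_config_sq_perfmon(&adev->psp, 0, true, true, true);
	}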
+static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
return 0;
}
-static int psp_set_powergating_state(void *handle,
+static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3754,10 +4072,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
uint32_t fw_ver;
int ret;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3786,8 +4106,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3795,7 +4117,8 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s", buf);
if (ret)
goto fail;
@@ -3856,7 +4179,7 @@ int is_psp_fw_valid(struct psp_bin_desc bin)
}
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3892,7 +4215,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
}
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3944,7 +4267,7 @@ rel_buf:
* Writing to this file will stage an IFWI for update. Reading from this file
* will trigger the update process.
*/
-static struct bin_attribute psp_vbflash_bin_attr = {
+static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
.write = amdgpu_psp_vbflash_write,
@@ -3975,7 +4298,7 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
-static struct bin_attribute *bin_flash_attrs[] = {
+static const struct bin_attribute *const bin_flash_attrs[] = {
&psp_vbflash_bin_attr,
NULL
};
@@ -3999,7 +4322,7 @@ static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribu
}
static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
int idx)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4016,20 +4339,119 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+ /* serialize open() calls on this file */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+ * make sure only one userspace process is dumping at a time so that
+ * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed -
+ * say, when one process tries to open the file while another one has
+ * proceeded to read or release. This also eliminates the need for the
+ * mutex in the read() and release() callbacks.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+ dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
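As a usage illustration (hypothetical, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0), the new node reads like any other debugfs file:

	/* Userspace sketch: copy the dumped SPIROM image to a local file */
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int in = open("/sys/kernel/debug/dri/0/psp_spirom_dump", O_RDONLY);
		int out = open("spirom.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (in < 0 || out < 0)
			return 1;
		while ((n = read(in, buf, sizeof(buf))) > 0)
			if (write(out, buf, n) != n)
				break;
		close(out);
		close(in);
		return 0;
	}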
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
- .late_init = NULL,
.sw_init = psp_sw_init,
.sw_fini = psp_sw_fini,
.hw_init = psp_hw_init,
.hw_fini = psp_hw_fini,
.suspend = psp_suspend,
.resume = psp_resume,
- .is_idle = NULL,
- .check_soft_reset = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e8abbbcb4326..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,29 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Check against
+ * both the status bits and the response state - this helps to detect a
+ * command failure or other unexpected cases such as a dropped device
+ * reading back all 0xFFs.
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
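Sketched for clarity (assuming GFX_FLAG_RESPONSE and the two command masks come from the PSP GFX interface header), the intended check passes only when the response bit is set and the status field is clear, so a dropped device reading back all 0xFFs fails the comparison:

	/* Sketch: evaluating a TOS response register value */
	static bool mbox_tos_resp_ok(uint32_t reg_val)
	{
		return (reg_val & MBOX_TOS_RESP_MASK) == MBOX_TOS_RESP_FLAG;
	}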
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -80,6 +103,7 @@ enum psp_bootloader_cmd {
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
+ PSP_BL__LOAD_SPDMDRV = 0x20000000,
};
enum psp_ring_type {
@@ -106,9 +130,13 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -120,6 +148,7 @@ struct psp_funcs {
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
+ int (*bootloader_load_spdm_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -135,10 +164,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
+ bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -316,6 +349,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -342,6 +383,7 @@ struct psp_context {
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
struct psp_bin_desc ipkeymgr_drv;
+ struct psp_bin_desc spdm_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -402,6 +444,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -433,6 +478,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ipkeymgr_drv(psp) \
((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
(psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
+#define psp_bootloader_load_spdm_drv(psp) \
+ ((psp)->funcs->bootloader_load_spdm_drv ? \
+ (psp)->funcs->bootloader_load_spdm_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
@@ -457,6 +505,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
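Note the two fallback conventions in these wrappers: optional bootloader steps (e.g. psp_bootloader_load_spdm_drv above) fall back to 0, i.e. silent success, when the callback is absent, while capability-style operations such as psp_dump_spirom fall back to -EINVAL so callers can detect unsupported hardware. The shared dispatch shape, distilled into a hypothetical generic form:

	/* Sketch: guarded function-pointer dispatch with a fallback value */
	#define psp_call_or(fallback, psp, fn, ...) \
		((psp)->funcs->fn ? (psp)->funcs->fn((psp), ##__VA_ARGS__) : (fallback))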
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -468,6 +520,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -479,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -546,15 +602,25 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
int psp_spatial_partition(struct psp_context *psp, int mode);
+int psp_memory_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
+
+int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
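Both hunks above collapse the open-coded kzalloc() + copy_from_user() pair into memdup_user(), which returns an ERR_PTR on failure rather than NULL - hence the IS_ERR()/PTR_ERR() handling and the dropped error-unwind labels. The pattern, in general form:

	/* Sketch: memdup_user() replaces allocate-then-copy from userspace */
	buf = memdup_user(user_ptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf ..., then kfree(buf) when done */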
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a1395c5fff1..e0ee21150860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -36,6 +36,7 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
+#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -76,6 +77,7 @@ const char *ras_block_string[] = {
"jpeg",
"ih",
"mpio",
+ "mmsch",
};
const char *ras_mca_block_string[] = {
@@ -120,12 +122,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10 /* retries, 100 ms apart */
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -134,10 +139,14 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
@@ -167,18 +176,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -192,7 +199,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -205,6 +212,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* The input address that needs to be checked is allocated by the
+ * current calling process, so it is necessary to exclude the
+ * calling process itself.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -295,6 +352,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -309,6 +368,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -498,6 +566,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -511,22 +582,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
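Operation 4 extends the existing ras_ctrl grammar with "check_address <address> <flags>", where the flags value selects BYPASS_ALLOCATED_ADDRESS (0x0) or BYPASS_INITIALIZATION_ADDRESS (0x1) as defined earlier. A hypothetical invocation (path assumes DRM minor 0 and a mounted debugfs):

	/* Sketch: ask the driver whether a VRAM address is safe to touch */
	static const char cmd[] = "check_address 0x200000 0x0";
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

	if (fd >= 0) {
		/* write() fails with EACCES/EPERM/EFAULT if the check rejects */
		(void)write(fd, cmd, sizeof(cmd) - 1);
		close(fd);
	}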
@@ -1105,6 +1170,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -1214,6 +1282,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
}
+static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
+ struct ras_query_if *query_if,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ unsigned long new_ue, new_ce, new_de;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
+ const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->evid.event_id;
+
+ new_ce = err_data->ce_count - obj->err_data.ce_count;
+ new_ue = err_data->ue_count - obj->err_data.ue_count;
+ new_de = err_data->de_count - obj->err_data.de_count;
+
+ if (new_ce) {
+ RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
+ "detected in %s block\n",
+ new_ce,
+ blk_name);
+ }
+
+ if (new_ue) {
+ RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
+ "detected in %s block\n",
+ new_ue,
+ blk_name);
+ }
+
+ if (new_de) {
+ RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
+ "detected in %s block\n",
+ new_de,
+ blk_name);
+ }
+}
+
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
struct ras_err_node *err_node;
@@ -1237,6 +1341,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
}
}
+static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
+ struct ras_err_data *err_data)
+{
+ /* Host reports absolute counts */
+ obj->err_data.ue_count = err_data->ue_count;
+ obj->err_data.ce_count = err_data->ce_count;
+ obj->err_data.de_count = err_data->de_count;
+}
+
static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
struct ras_common_if head;
@@ -1253,7 +1366,7 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
struct ras_manager *obj;
/* in resume phase, no need to create aca fs node */
- if (adev->in_suspend || amdgpu_in_reset(adev))
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
return 0;
obj = get_ras_manager(adev, blk);
@@ -1323,7 +1436,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
return -EINVAL;
- if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
+ } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
amdgpu_ras_get_ecc_info(adev, err_data);
} else {
@@ -1405,14 +1520,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
if (ret)
goto out_fini_err_data;
- amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
+ if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
+ amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
+ } else {
+ /* Host provides absolute error counts. First generate the report
+ * using the previous VF internal count against the new host count,
+ * then update the VF internal count.
+ */
+ amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
+ amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
+ }
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
info->de_count = obj->err_data.de_count;
- amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
-
out_fini_err_data:
amdgpu_ras_error_data_fini(&err_data);
@@ -1441,6 +1564,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -1677,7 +1803,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t ppos, size_t count)
{
struct amdgpu_ras *con =
@@ -1808,6 +1934,9 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (!obj || obj->attr_inuse)
return -EINVAL;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
+ return 0;
+
get_obj(obj);
snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
@@ -1960,6 +2089,7 @@ static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = true;
break;
@@ -2008,8 +2138,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
/* debugfs end */
/* ras fs */
-static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
- amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
@@ -2031,7 +2161,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
&con->event_state_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
+ const struct bin_attribute *bin_attrs[] = {
NULL,
NULL,
};
@@ -2057,11 +2187,10 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
if (amdgpu_bad_page_threshold != 0) {
/* add bad_page_features entry */
- bin_attr_gpu_vram_bad_pages.private = NULL;
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
group.bin_attrs = bin_attrs;
- sysfs_bin_attr_init(bin_attrs[0]);
}
r = sysfs_create_group(&adev->dev->kobj, &group);
@@ -2101,6 +2230,16 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
+ /*
+ * If the current interrupt is caused by a non-fatal RAS error, skip
+ * the fatal error check. For fatal errors, the FED status of all
+ * devices in the XGMI hive gets set when the first device gets the
+ * fatal error interrupt. The error gets propagated to other devices
+ * as well, so make sure to ack the interrupt regardless of FED status.
+ */
+ if (!amdgpu_ras_get_fed_status(adev) &&
+ amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
+ return;
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
@@ -2130,6 +2269,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (ret)
return;
+ amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
@@ -2489,18 +2629,26 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
goto out;
}
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
+ *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
if (!*bps) {
ret = -ENOMEM;
goto out;
}
for (; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
(*bps)[i] = (struct ras_badpage){
.bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
+
+ if (amdgpu_ras_check_critical_address(adev,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
@@ -2509,7 +2657,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
- *count = data->count;
+ *count = con->bad_page_num;
out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2561,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2589,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+ /* wait 500ms to ensure pmfw polling mca bank info done */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
@@ -2605,6 +2761,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
reset_context.src = AMDGPU_RESET_SRC_RAS;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
/* Perform full reset in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
@@ -2644,7 +2801,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2661,41 +2818,269 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
return 0;
}
-/* it deal with vram only. */
-int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages)
+static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
+ struct ta_ras_query_address_input addr_in;
+ uint32_t socket = 0;
int ret = 0;
- uint32_t i;
- if (!con || !con->eh_data || !bps || pages <= 0)
- return 0;
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
- mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data)
- goto out;
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.socket_id = socket;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+
+ return ret;
+}
+
+static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t die_id, socket = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* although the die id is derived from the PA in nps1 mode, the id
+ * is suitable for any nps mode
+ */
+ if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
+ die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ else
+ return -EINVAL;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
- for (i = 0; i < pages; i++) {
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = die_id;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+ else
+ return -EINVAL;
+}
+
+static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int count)
+{
+ int j;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
- goto out;
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ return -ENOMEM;
}
- amdgpu_ras_reserve_page(adev, bps[i].retired_page);
+ amdgpu_ras_reserve_page(adev, bps[j].retired_page);
- memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
+ memcpy(&data->bps[data->count], &(bps[j]),
+ sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
+
+ return 0;
+}
+
+static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+
+ /* old asics just have pa in eeprom */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ goto out;
+ }
+
+ for (i = 0; i < adev->umc.retire_unit; i++)
+ bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps) {
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
+ return -EINVAL;
+ }
+ } else {
+ if (bps[0].address == 0) {
+ /* for specific old eeprom data, the mca address is not stored,
+ * so calculate it from the pa
+ */
+ if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+ }
+
+ if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
+ if (nps == AMDGPU_NPS1_PARTITION_MODE)
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ else
+ return -EOPNOTSUPP;
+ }
+ }
+
out:
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
+}
+
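The retired_page field does double duty here: the NPS (memory partition) mode that was active when the physical address was computed is stashed in its high bits. UMC_NPS_SHIFT/UMC_NPS_MASK come from the UMC headers; the decode and strip steps below mirror the code above, while the encode direction is shown only as the natural inverse:

	/* Sketch: NPS mode packed into the high bits of retired_page */
	nps = (rec->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;	/* decode */
	rec->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);		/* strip */
	rec->retired_page |= (uint64_t)nps << UMC_NPS_SHIFT;		/* re-encode */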
+static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
+ } else {
+ if (bps->address) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ } else {
+ /* for specific old eeprom data, the mca address is not stored,
+ * so calculate it from the pa
+ */
+ if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+
+ if (amdgpu_ras_mca2pa(adev, bps, err_data))
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
+ adev->umc.retire_unit);
+}
+
+/* it deal with vram only. */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int pages, bool from_rom)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_data err_data;
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ int ret = 0;
+ uint32_t i = 0;
+
+ if (!con || !con->eh_data || !bps || pages <= 0)
+ return 0;
+
+ if (from_rom) {
+ err_data.err_addr =
+ kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
+ return -ENOMEM;
+ }
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ }
+
+ mutex_lock(&con->recovery_lock);
+
+ if (from_rom) {
+ /* there are no pa recs in V3, so skip pa rec processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ for (; i < pages; i++) {
+ ret = __amdgpu_ras_convert_rec_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
+ }
+
+ con->eh_data->count_saved = con->eh_data->count;
+ } else {
+ ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
+ }
+
+ if (from_rom)
+ kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2712,7 +3097,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -2721,25 +3106,47 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- save_count = data->count - control->ras_num_recs;
+ unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
if (new_cnt)
- *new_cnt = save_count / adev->umc.retire_unit;
+ *new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
- if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
- dev_err(adev->dev, "Failed to save EEPROM table data!");
- return -EIO;
+ if (unit_num > 0) {
+ /* old asics only save pa to eeprom, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[data->count_saved], unit_num)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ } else {
+ for (i = 0; i < unit_num; i++) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[data->count_saved +
+ i * adev->umc.retire_unit], 1)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ }
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -2754,7 +3161,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
- int ret;
+ int ret, i = 0;
/* no bad page record, skip eeprom access */
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
@@ -2765,27 +3172,71 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
return -ENOMEM;
ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
- else
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
+ } else {
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ /* In V3 there are no pa recs, and some cases (when address == 0) may
+ * be parsed as pa recs, so add a version check to avoid it.
+ */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
+ } else {
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
+ break;
+ }
+ }
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
+ }
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
+ ret = amdgpu_ras_eeprom_check(control);
+ if (ret)
+ goto out;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev))
+ ret = -EHWPOISON;
+ }
+out:
kfree(bps);
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
@@ -2793,11 +3244,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -2814,31 +3265,29 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
/*
- * Justification of value bad_page_cnt_threshold in ras structure
- *
- * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
- * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
- * scenarios accordingly.
- *
- * Bad page retirement enablement:
- * - If amdgpu_bad_page_threshold = -2,
- * bad_page_cnt_threshold = typical value by formula.
- *
- * - When the value from user is 0 < amdgpu_bad_page_threshold <
- * max record length in eeprom, use it directly.
- *
- * Bad page retirement disablement:
- * - If amdgpu_bad_page_threshold = 0, bad page retirement
- * functionality is disabled, and bad_page_cnt_threshold will
- * take no effect.
+ * amdgpu_bad_page_threshold is used to configure
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to a default value.
+ * Driver will issue a warning message when the threshold is reached
+ * and continue runtime services.
+ * 0: Disable bad page retirement.
+ * Driver will not retire bad pages,
+ * which is intended for debugging purposes.
+ * -2: Threshold is determined by a formula
+ * that assumes 1 bad page per 100M of local memory.
+ * Driver will continue runtime services when the threshold is reached.
+ * 0 < threshold < max number of bad page records in EEPROM:
+ * A user-defined threshold is set.
+ * Driver will halt runtime services when this custom threshold is reached.
*/
-
- if (amdgpu_bad_page_threshold < 0) {
+ if (amdgpu_bad_page_threshold == -2) {
u64 val = adev->gmc.mc_vram_size;
do_div(val, RAS_BAD_PAGE_COVER);
con->bad_page_cnt_threshold = min(lower_32_bits(val),
max_count);
+ } else if (amdgpu_bad_page_threshold == -1) {
+ con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
} else {
con->bad_page_cnt_threshold = min_t(int, max_count,
amdgpu_bad_page_threshold);
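A worked example of the -2 formula (RAS_BAD_PAGE_COVER is 100 MB, per the definition earlier in this file): a hypothetical 64 GiB VRAM part would get a default threshold of 655 pages, capped at the EEPROM's maximum record count:

	/* Sketch: threshold derived from VRAM size for the -2 case */
	u64 val = 64ULL << 30;		/* 64 GiB of VRAM */
	do_div(val, 100 * 1024 * 1024);	/* RAS_BAD_PAGE_COVER */
	/* val == 655; then min(val, max_count) */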
@@ -2883,7 +3332,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -2903,7 +3352,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -2929,7 +3378,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -2941,13 +3389,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -2958,58 +3402,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
+ consumption_q_count = ecc_log->consumption_q_count;
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ if (de_queried_count && consumption_q_count)
+ break;
- if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
- break;
- }
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ msleep(100);
+ } while (--timeout);
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
-
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3045,6 +3470,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure the poison creation handler completes first,
+ * to set rma if bad pages exceed the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3054,8 +3485,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3085,6 +3514,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3097,7 +3527,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3114,6 +3545,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3138,15 +3570,65 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
+ }
+
+ return 0;
+}
+
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int ret;
+
+ if (!con || amdgpu_sriov_vf(adev))
+ return 0;
+
+ control = &con->eeprom_control;
+ ret = amdgpu_ras_eeprom_init(control);
+ control->is_eeprom_valid = !ret;
+
+ if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
+ control->ras_num_pa_recs = control->ras_num_recs;
+
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
+ if (control->ras_num_recs && control->is_eeprom_valid) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
+
+ amdgpu_dpm_send_hbm_bad_pages_num(
+ adev, control->ras_num_bad_pages);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(
+ adev, control->bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
+
+ /* The format action is only applied to new ASICs */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
+ control->tbl_hdr.version < RAS_TABLE_VER_V3)
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (amdgpu_ras_save_bad_pages(adev, NULL))
+ dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
return 0;
}
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
@@ -3174,38 +3656,19 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
- /* Todo: During test the SMU might fail to read the eeprom through I2C
- * when the GPU is pending on XGMI reset during probe time
- * (Mostly after second bus reset), skip it now
- */
- if (adev->gmc.xgmi.pending_reset)
- return 0;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
- /*
- * This calling fails when is_rma is true or
- * ret != 0.
- */
- if (amdgpu_ras_is_rma(adev) || ret)
- goto free;
-
- if (con->eeprom_control.ras_num_recs) {
- ret = amdgpu_ras_load_bad_pages(adev);
+ if (init_bp_info) {
+ ret = amdgpu_ras_init_badpage_info(adev);
if (ret)
goto free;
-
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
-
- if (con->update_channel_flag == true) {
- amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
- con->update_channel_flag = false;
- }
}
mutex_init(&con->page_rsv_lock);
@@ -3214,6 +3677,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3286,6 +3750,8 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+
return 0;
}
/* recovery end */
@@ -3296,6 +3762,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -3308,7 +3775,9 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 3):
return true;
default:
return false;
@@ -3370,7 +3839,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev
*/
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else
@@ -3438,6 +3908,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (!amdgpu_ras_asic_supported(adev))
return;
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_get_ras_capability(adev))
+ goto init_ras_enabled_flag;
+ }
+
/* query ras capability from psp */
if (amdgpu_psp_get_ras_capability(&adev->psp))
goto init_ras_enabled_flag;
@@ -3466,8 +3941,13 @@ init_ras_enabled_flag:
adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
adev->ras_hw_enabled & amdgpu_ras_mask;
- /* aca is disabled by default */
- adev->aca.is_enabled = false;
+ /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -3535,7 +4015,7 @@ static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
/* init event manager with node 0 on xgmi system */
- if (!amdgpu_in_reset(adev)) {
+ if (!amdgpu_reset_in_recovery(adev)) {
if (!hive || adev->gmc.xgmi.node_id == 0)
ras_event_mgr_init(ras->event_mgr);
}
@@ -3554,8 +4034,11 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
+ break;
case IP_VERSION(13, 0, 14):
- con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
+ con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
break;
default:
break;
@@ -3629,7 +4112,19 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
* check DF RAS */
adev->nbio.ras = &nbio_v4_3_ras;
break;
+ case IP_VERSION(6, 3, 1):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbif v6_3_1 only supports a fatal error interrupt
+ * to inform software that DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in such a case. Instead,
+ * check DF RAS
+ */
+ adev->nbio.ras = &nbif_v6_3_1_ras;
+ break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
if (!adev->gmc.is_app_apu)
adev->nbio.ras = &nbio_v7_9_ras;
break;
@@ -3683,6 +4178,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -3750,7 +4251,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
if (r) {
- if (adev->in_suspend || amdgpu_in_reset(adev)) {
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -3762,7 +4263,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
amdgpu_persistent_edc_harvesting(adev, ras_block);
/* in resume phase, no need to create ras fs node */
- if (adev->in_suspend || amdgpu_in_reset(adev))
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
return 0;
ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
@@ -3892,7 +4393,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
amdgpu_ras_event_mgr_init(adev);
if (amdgpu_ras_aca_is_supported(adev)) {
- if (amdgpu_in_reset(adev)) {
+ if (amdgpu_reset_in_recovery(adev)) {
if (amdgpu_aca_is_enabled(adev))
r = amdgpu_aca_reset(adev);
else
@@ -3910,7 +4411,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
}
/* Guest side doesn't need init ras feature */
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
return 0;
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
@@ -3962,6 +4463,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -4008,7 +4512,7 @@ bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
if (!ras)
return false;
- return atomic_read(&ras->fed);
+ return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
@@ -4016,8 +4520,50 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (status)
+ set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ else
+ clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ }
+}
+
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
+}
+
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
if (ras)
- atomic_set(&ras->fed, !!status);
+ set_bit(block, &ras->ras_err_state);
+}
+
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (block == AMDGPU_RAS_BLOCK__ANY)
+ return (ras->ras_err_state != 0);
+ else
+ return test_bit(block, &ras->ras_err_state) ||
+ test_bit(AMDGPU_RAS_BLOCK__LAST,
+ &ras->ras_err_state);
+ }
+
+ return false;
}
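A compact model of the new ras_err_state semantics introduced above, assuming the same convention as the hunk: per-block bits are set individually, and the __LAST bit doubles as a fatal-error flag that makes every per-block query return true. Enum values and names are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum { BLOCK_UMC, BLOCK_GFX, BLOCK_LAST, BLOCK_ANY = -1 };

static unsigned long err_state;

static bool is_err_state(int block)
{
	if (block == BLOCK_ANY)
		return err_state != 0;
	/* the __LAST bit marks a fatal error affecting every block */
	return ((err_state >> block) & 1UL) ||
	       ((err_state >> BLOCK_LAST) & 1UL);
}

int main(void)
{
	err_state |= 1UL << BLOCK_UMC;		/* per-block poison */
	printf("%d %d\n", is_err_state(BLOCK_UMC), is_err_state(BLOCK_GFX));
	err_state |= 1UL << BLOCK_LAST;		/* fatal: all blocks hit */
	printf("%d\n", is_err_state(BLOCK_GFX));
	return 0;
}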
static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
@@ -4095,8 +4641,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
- if (amdgpu_ras_mark_ras_event(adev, type))
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
+ }
event_id = amdgpu_ras_acquire_event_id(adev, type);
@@ -4294,8 +4843,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
- if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ /* In the case of multiple GPUs, after a GPU has started
+ * resetting all GPUs on the hive, the other GPUs do not need to
+ * trigger GPU reset again.
+ */
+ if (!hive_ras_recovery)
+ amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ else
+ atomic_set(&ras->in_recovery, 0);
+ } else {
+ flush_work(&ras->recovery_work);
amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ }
+
return 0;
}
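The in_recovery guard above is the classic compare-and-swap "first caller wins" pattern; a minimal C11 sketch (illustrative names, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_recovery;

static void request_recovery(void)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&in_recovery, &expected, 1))
		printf("first caller: schedule recovery work\n");
	else
		printf("recovery already owned: flush, then requeue\n");
}

int main(void)
{
	request_recovery();	/* wins the race, in_recovery 0 -> 1 */
	request_recovery();	/* sees in_recovery == 1 */
	return 0;
}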
@@ -4358,11 +4926,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
return false;
}
- if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
+ if (amdgpu_sriov_vf(adev)) {
+ *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
+ } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
*error_query_mode =
(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
- else
+ } else {
*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
+ }
return true;
}
@@ -4759,9 +5330,9 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
"socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
socket_id, aid_id, fw_status);
- if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
dev_info(adev->dev,
- "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
socket_id, aid_id, fw_status);
}
@@ -4803,6 +5374,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -4839,3 +5413,80 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo has already been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
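The lookup above is a linear scan over half-open intervals [start, start + size); a standalone sketch of the same test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, size; };

/* half-open interval membership: [start, start + size) */
static bool in_critical_region(const struct region *r, size_t n,
			       uint64_t addr)
{
	for (size_t i = 0; i < n; i++)
		if (addr >= r[i].start && addr < r[i].start + r[i].size)
			return true;
	return false;
}

int main(void)
{
	struct region regs[] = { { 0x1000, 0x2000 } };

	printf("%d %d\n",
	       in_critical_region(regs, 1, 0x1800),	/* 1: inside */
	       in_critical_region(regs, 1, 0x3000));	/* 0: one past end */
	return 0;
}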
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 669720a9c60a..6cf0dfd38be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -47,7 +47,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
-#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
+#define AMDGPU_RAS_GPU_ERR_GENERIC(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -65,7 +65,7 @@ struct amdgpu_iv_entry;
/* Reserve 8 physical dram row for possible retirement.
* In worst cases, it will lose 8 * 2MB memory in vram domain */
-#define AMDGPU_RAS_RESERVED_VRAM_SIZE (16ULL << 20)
+#define AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
/* The high three bits indicates socketid */
#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
@@ -98,8 +98,10 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__JPEG,
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MMSCH,
- AMDGPU_RAS_BLOCK__LAST
+ AMDGPU_RAS_BLOCK__LAST,
+ AMDGPU_RAS_BLOCK__ANY = -1
};
enum amdgpu_ras_mca_block {
@@ -365,6 +367,7 @@ enum amdgpu_ras_error_query_mode {
AMDGPU_RAS_INVALID_ERROR_QUERY = 0,
AMDGPU_RAS_DIRECT_ERROR_QUERY = 1,
AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2,
+ AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3,
};
/* ras error status reisger fields */
@@ -481,14 +484,23 @@ struct ras_ecc_err {
uint64_t ipid;
uint64_t addr;
uint64_t pa_pfn;
+ /* save global channel index across all UMC instances */
+ uint32_t channel_idx;
struct ras_err_pages err_pages;
};
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
};
struct amdgpu_ras {
@@ -510,6 +522,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -552,19 +565,31 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
struct delayed_work page_retirement_dwork;
- /* Fatal error detected flag */
- atomic_t fed;
+ /* ras errors detected */
+ unsigned long ras_err_state;
/* RAS event manager */
struct ras_event_manager __event_mgr;
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
};
struct ras_fs_data {
@@ -603,6 +628,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -736,8 +762,8 @@ struct amdgpu_ras_block_hw_ops {
* 8: feature disable
*/
-
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev);
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
@@ -749,7 +775,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages);
+ struct eeprom_table_record *bps, int pages, bool from_rom);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt);
@@ -791,6 +817,12 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__VCN;
case AMDGPU_RAS_BLOCK__JPEG:
return TA_RAS_BLOCK__JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return TA_RAS_BLOCK__IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return TA_RAS_BLOCK__MPIO;
+ case AMDGPU_RAS_BLOCK__MMSCH:
+ return TA_RAS_BLOCK__MMSCH;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
@@ -951,6 +983,10 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev);
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block);
u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
@@ -958,6 +994,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f28f6b4ba765..3eb3fb55ccb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -161,6 +161,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
return true;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return (adev->gmc.is_app_apu) ? false : true;
default:
@@ -177,7 +178,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
if (!control)
return false;
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ if (adev->bios && amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
/* The address given by VBIOS is an 8-bit, wire-format
* address, i.e. the most significant byte.
*
@@ -223,6 +224,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
@@ -275,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -321,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -336,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -413,9 +418,12 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(8, 10, 0):
- case IP_VERSION(12, 0, 0):
hdr->version = RAS_TABLE_VER_V2_1;
return;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ hdr->version = RAS_TABLE_VER_V3;
+ return;
default:
hdr->version = RAS_TABLE_VER_V1;
return;
@@ -443,7 +451,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
hdr->header = RAS_TABLE_HDR_VAL;
amdgpu_ras_set_eeprom_table_version(control);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
hdr->first_rec_offset = RAS_RECORD_START_V2_1;
hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE;
@@ -461,7 +469,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
}
csum = __calc_hdr_byte_sum(control);
- if (hdr->version == RAS_TABLE_VER_V2_1)
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
csum += __calc_ras_info_byte_sum(control);
csum = -csum;
hdr->checksum = csum;
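The csum = -csum assignment implements a zero-sum byte checksum: the stored value is chosen so that all table bytes, checksum included, sum to 0 mod 256. A minimal sketch of both the writer and the verifier sides:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* writer: checksum = two's complement of the byte sum */
static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)-sum;
}

/* verifier: valid iff everything, checksum included, sums to zero */
static int verify(const uint8_t *buf, size_t len, uint8_t csum)
{
	uint8_t sum = csum;

	for (size_t i = 0; i < len; i++)
		sum += buf[i];
	return sum == 0;
}

int main(void)
{
	uint8_t tbl[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint8_t c = calc_checksum(tbl, sizeof(tbl));

	printf("csum=0x%02x ok=%d\n", c, verify(tbl, sizeof(tbl), c));
	return 0;
}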
@@ -470,9 +478,12 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
res = __write_table_ras_info(control);
control->ras_num_recs = 0;
+ control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
- amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
control->bad_channel_bitmap = 0;
amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
@@ -557,16 +568,17 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
- if (amdgpu_bad_page_threshold == -1) {
+ if (con->eeprom_control.ras_num_bad_pages > con->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
dev_warn(adev->dev,
- "But GPU can be operated due to bad_page_threshold = -1.\n");
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
return false;
} else {
- dev_warn(adev->dev, "This GPU is in BAD status.");
- dev_warn(adev->dev, "Please retire it or set a larger "
- "threshold value when reloading driver.\n");
+ dev_warn(adev->dev,
+ "Please consider adjusting the customized threshold.\n");
return true;
}
}
@@ -600,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -621,6 +633,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
+ struct amdgpu_device *adev = to_amdgpu_device(control);
u32 a, b, i;
u8 *buf, *pp;
int res;
@@ -723,6 +736,14 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
control->ras_num_recs = 1 + (control->ras_max_record_count + b
- control->ras_fri)
% control->ras_max_record_count;
+
+ /* old ASICs only save the physical address to EEPROM, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12)
+ control->ras_num_pa_recs += num;
+ else
+ control->ras_num_mca_recs += num;
+
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -740,24 +761,29 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_recs >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
- if (amdgpu_bad_page_threshold != -1)
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
+ }
/* ignore the -ENOTSUPP return value */
amdgpu_dpm_send_rma_reason(adev);
}
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -769,8 +795,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -782,12 +809,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -797,10 +823,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
* now calculate gpu health percent
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_recs < ras->bad_page_cnt_threshold)
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
- control->ras_num_recs) * 100) /
+ control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
/* Recalc the checksum.
@@ -810,7 +836,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
csum += *pp;
csum += __calc_hdr_byte_sum(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
csum += __calc_ras_info_byte_sum(control);
/* avoid sign extension when assigning to "checksum" */
csum = -csum;
@@ -841,20 +867,29 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ int res, i;
+ uint64_t nps = AMDGPU_NPS1_PARTITION_MODE;
if (!__is_ras_eeprom_supported(adev))
return 0;
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* set the new channel index flag */
+ for (i = 0; i < num; i++)
+ record[i].retired_page |= (nps << UMC_NPS_SHIFT);
+
mutex_lock(&control->ras_tbl_mutex);
res = amdgpu_ras_eeprom_append_table(control, record, num);
@@ -864,6 +899,11 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
+
+ /* clear the channel index flag; it is only stored in EEPROM */
+ for (i = 0; i < num; i++)
+ record[i].retired_page &= ~(nps << UMC_NPS_SHIFT);
+
return res;
}
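The append path above temporarily ORs the NPS mode into the high bits of retired_page before writing and clears it again afterwards. A hedged sketch of that encode/decode, with a made-up shift value standing in for the driver's UMC_NPS_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define NPS_SHIFT 58	/* illustrative; the real value is in the UMC headers */

static uint64_t encode_nps(uint64_t retired_page, uint64_t nps)
{
	return retired_page | (nps << NPS_SHIFT);
}

static uint64_t decode_page(uint64_t rec)
{
	return rec & ((1ULL << NPS_SHIFT) - 1);	/* strip the flag bits */
}

static uint64_t decode_nps(uint64_t rec)
{
	return rec >> NPS_SHIFT;
}

int main(void)
{
	uint64_t rec = encode_nps(0x123456, 2 /* e.g. NPS2 */);

	printf("page=0x%llx nps=%llu\n",
	       (unsigned long long)decode_page(rec),
	       (unsigned long long)decode_nps(rec));
	return 0;
}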
@@ -893,13 +933,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -933,11 +973,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1014,7 +1054,7 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
/* get available eeprom table version first before eeprom table init */
amdgpu_ras_set_eeprom_table_version(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
return RAS_MAX_RECORD_COUNT_V2_1;
else
return RAS_MAX_RECORD_COUNT;
@@ -1259,7 +1299,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
int buf_size, res;
u8 csum, *buf, *pp;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
buf_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -1269,7 +1309,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1278,7 +1319,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1303,7 +1344,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1315,7 +1357,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1356,28 +1399,76 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+ return 0;
+}
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_recs);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
@@ -1385,28 +1476,32 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
res = __verify_ras_table_checksum(control);
if (res)
- DRM_ERROR("RAS table incorrect checksum or error:%d\n",
- res);
+ dev_err(adev->dev,
+ "RAS table incorrect checksum or error:%d\n",
+ res);
/* Warn if we are at 90% of the threshold or above
*/
- if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
}
res = __verify_ras_table_checksum(control);
- if (res)
- DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
- res);
- if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
+ if (res) {
+ dev_err(adev->dev,
+ "RAS Table incorrect checksum or error:%d\n",
+ res);
+ return -EINVAL;
+ }
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1416,29 +1511,54 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_info(adev->dev,
"records:%d threshold:%d, resetting "
"RAS table header signature",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
- dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
- if (amdgpu_bad_page_threshold == -1) {
- dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
res = 0;
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
} else {
ras->is_rma = true;
- dev_err(adev->dev,
- "RAS records:%d exceed threshold:%d, "
- "GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ dev_warn(adev->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras)
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeed\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+}
\ No newline at end of file
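The nested ifs in amdgpu_ras_eeprom_check_and_recover() chain three steps that each return 0 on success; an equivalent, flattened sketch using short-circuit && (stub functions stand in for the real driver calls):

#include <stdbool.h>
#include <stdio.h>

/* stubs for the three recovery steps; 0 means success, kernel-style */
static int reset_table(void)      { return 0; }
static int save_bad_pages(void)   { return 0; }
static int verify_checksum(void)  { return 0; }

int main(void)
{
	/* stop at the first failing step, exactly like the nested ifs */
	bool recovered = !reset_table() &&
			 !save_bad_pages() &&
			 !verify_checksum();

	printf(recovered ? "RAS table recovery succeeded\n"
			 : "RAS table recovery failed\n");
	return 0;
}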
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index b9ebda577797..ebfca4cb5688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -28,6 +28,7 @@
#define RAS_TABLE_VER_V1 0x00010000
#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
struct amdgpu_device;
@@ -82,6 +83,17 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_recs;
+ /* the bad page count is ras_num_recs or
+ * ras_num_recs * umc.retire_unit
+ */
+ u32 ras_num_bad_pages;
+
+ /* Number of records storing MCA addresses */
+ u32 ras_num_mca_recs;
+
+ /* Number of records storing physical addresses */
+ u32 ras_num_pa_recs;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -102,6 +114,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
};
/*
@@ -145,6 +159,10 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 66c1a868c0e1..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -26,6 +26,156 @@
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"
+static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+
+ /* XXX handle errors */
+ amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ /* VCN FW shared region is in the framebuffer; there are some flags
+ * initialized in that region during sw_init. Make sure the region is
+ * backed up.
+ */
+ amdgpu_vcn_save_vcpu_bo(adev);
+
+ return 0;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev;
+ int r;
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_unregister_gpu_instance(tmp_adev);
+ r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: prepare for reset failed");
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r)
+ return r;
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ if (!tmp_adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(tmp_adev);
+ amdgpu_amdkfd_device_init(tmp_adev);
+ amdgpu_amdkfd_drm_client_create(tmp_adev);
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ dev_dbg(adev->dev, "xgmi roi - hw reset\n");
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset =
+ amdgpu_asic_reset_method(adev);
+ }
+ r = 0;
+ /* Mode1 reset needs to be triggered on all devices together */
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: reset failed with error, %d",
+ r);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *adev;
+ int r;
+
+ if (!reset_device_list || list_empty(reset_device_list) ||
+ list_is_singular(reset_device_list))
+ return -EINVAL;
+
+ adev = list_first_entry(reset_device_list, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ if (r)
+ return r;
+
+ r = amdgpu_reset_perform_reset(adev, reset_context);
+
+ return r;
+}
+
+struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
+ .reset_method = AMD_RESET_METHOD_ON_INIT,
+ .prepare_env = NULL,
+ .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
+ .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
+ .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
+ .restore_env = NULL,
+ .do_reset = NULL,
+};
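The perform_reset handler above fans the mode-1 reset out to every device via queue_work() and then fans back in with flush_work() before checking results. A userspace model of the same fan-out/fan-in shape using pthreads (illustrative only; the driver uses the system_unbound_wq workqueue):

#include <pthread.h>
#include <stdio.h>

#define NDEV 4

static void *reset_one(void *arg)
{
	int id = *(int *)arg;

	printf("device %d: mode-1 reset\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NDEV];
	int ids[NDEV];

	/* fan-out: kick all resets in parallel */
	for (int i = 0; i < NDEV; i++) {
		ids[i] = i;
		pthread_create(&t[i], NULL, reset_one, &ids[i]);
	}
	/* fan-in: wait for every reset before proceeding */
	for (int i = 0; i < NDEV; i++)
		pthread_join(t[i], NULL);
	return 0;
}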
+
int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
@@ -33,6 +183,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_init(adev);
break;
@@ -56,6 +207,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_fini(adev);
break;
@@ -188,7 +340,15 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
}
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev)
+{
+ return (adev->init_lvl->level == AMDGPU_INIT_LEVEL_RESET_RECOVERY);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 1cb920abc2fe..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -153,4 +154,23 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
(handler = (*reset_ctl->reset_handlers)[i]); \
++i)
+
+extern struct amdgpu_reset_handler xgmi_reset_on_init_handler;
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context);
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 690976665cf6..8f6ce948c684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all ASICs).
+ * Doesn't check the max_dw limit, as we may be reemitting
+ * several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
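The (ndw + mask) & ~mask expression rounds the request up to a multiple of mask + 1 (a power of two); a tiny standalone check of the arithmetic:

#include <stdio.h>

/* round n up to a multiple of (mask + 1), where mask = 2^k - 1 */
static unsigned int align_up(unsigned int n, unsigned int mask)
{
	return (n + mask) & ~mask;
}

int main(void)
{
	printf("%u %u %u\n",
	       align_up(5, 7),	/* 8  */
	       align_up(8, 7),	/* 8  */
	       align_up(9, 7));	/* 16 */
	return 0;
}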
+
/** amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -108,10 +131,22 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
*/
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- int i;
+ uint32_t occupied, chunk1, chunk2;
- for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ occupied = ring->wptr & ring->buf_mask;
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count) ? count : chunk1;
+ chunk2 = count - chunk1;
+
+ if (chunk1)
+ memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);
+
+ if (chunk2)
+ memset32(ring->ring, ring->funcs->nop, chunk2);
+
+ ring->wptr += count;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count;
}
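The rewritten NOP insertion replaces the per-word loop with at most two bulk fills: one up to the end of the buffer, one from its start. A userspace model of the same two-chunk wraparound write (plain loops stand in for memset32()):

#include <stdint.h>
#include <stdio.h>

#define RING_DW 8	/* ring size in dwords, power of two */

static void fill_nops(uint32_t *ring, uint32_t *wptr, uint32_t count,
		      uint32_t nop)
{
	uint32_t buf_mask = RING_DW - 1;
	uint32_t occupied = *wptr & buf_mask;
	uint32_t chunk1 = RING_DW - occupied;	/* space before the wrap */

	if (chunk1 > count)
		chunk1 = count;

	for (uint32_t i = 0; i < chunk1; i++)	/* stands in for memset32() */
		ring[occupied + i] = nop;
	for (uint32_t i = 0; i < count - chunk1; i++)
		ring[i] = nop;			/* wrapped remainder */

	*wptr = (*wptr + count) & buf_mask;
}

int main(void)
{
	uint32_t ring[RING_DW] = { 0 }, wptr = 6;

	fill_nops(ring, &wptr, 4, 0xdeadbeef);	/* 2 at the end, 2 at the start */
	for (int i = 0; i < RING_DW; i++)
		printf("%d: 0x%x\n", i, ring[i]);
	return 0;
}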
/**
@@ -141,6 +176,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
uint32_t count;
+ if (ring->count_dw < 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
@@ -172,14 +210,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -228,57 +262,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -309,45 +328,51 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
- if (r) {
- dev_err(adev->dev, "failed initializing fences (%d).\n", r);
- return r;
- }
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+ if (r) {
+ dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+ return r;
+ }
- max_ibs_dw = ring->funcs->emit_frame_size +
- amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
- max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
- if (WARN_ON(max_ibs_dw > max_dw))
- max_dw = max_ibs_dw;
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
- ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ } else {
+ ring->ring_size = roundup_pow_of_two(max_dw * 4);
+ ring->count_dw = (ring->ring_size - 4) >> 2;
+ /* ring buffer is empty now */
+ ring->wptr = *ring->rptr_cpu_addr = 0;
+ }
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
+ /* Initialize cached_rptr to 0 */
+ ring->cached_rptr = 0;
- /* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
- } else if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ /* Allocate ring buffer */
+ if (ring->ring_obj == NULL) {
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -377,32 +402,26 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
-
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
}
/**
@@ -439,6 +458,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
+ bool ret;
if (unlikely(ring->adev->debug_disable_soft_recovery))
return false;
@@ -453,12 +473,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
@@ -478,6 +502,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
uint32_t value, result, early[3];
+ uint64_t p;
loff_t i;
int r;
@@ -487,13 +512,18 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
result = 0;
if (*pos < 12) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_lock(&ring->adev->cper.ring_lock);
+
early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
r = put_user(early[i], (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto out;
+ }
buf += 4;
result += 4;
size -= 4;
@@ -501,86 +531,94 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
}
}
- while (size) {
- if (*pos >= (ring->ring_size + 12))
- return result;
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
- value = ring->ring[(*pos - 12)/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+ } else {
+ p = early[0];
+ if (early[0] <= early[1])
+ size = (early[1] - early[0]);
+ else
+ size = ring->ring_size - (early[0] - early[1]);
+
+ while (size) {
+ if (p == early[1])
+ goto out;
+
+ value = ring->ring[p];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto out;
+ }
+
+ buf += 4;
+ result += 4;
+ size--;
+ p++;
+ p &= ring->ptr_mask;
+ }
}
+out:
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_unlock(&ring->adev->cper.ring_lock);
+
return result;
}
-static const struct file_operations amdgpu_debugfs_ring_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_ring_read,
- .llseek = default_llseek
-};
-
-static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
- volatile u32 *mqd;
- u32 *kbuf;
- int r, i;
- uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto err_free;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ amdgpu_virt_req_ras_cper_dump(ring->adev, false);
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r)
- goto err_unreserve;
+ return amdgpu_debugfs_ring_read(f, buf, size, pos);
+}
- /*
- * Copy to local buffer to avoid put_user(), which might fault
- * and acquire mmap_sem, under reservation_ww_class_mutex.
- */
- for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
- kbuf[i] = mqd[i];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
- amdgpu_bo_kunmap(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
+static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_virt_ring_read,
+ .llseek = default_llseek
+};
- result = 0;
- while (size) {
- if (*pos >= ring->mqd_size)
- break;
+static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
+ void *from = ((u8 *)ring->mqd_ptr) + *pos;
- value = kbuf[*pos/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- goto err_free;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
- }
+ if (*pos > ring->mqd_size)
+ return 0;
- kfree(kbuf);
- return result;
+ if (copy_to_user(buf, from, bytes))
+ return -EFAULT;
-err_unreserve:
- amdgpu_bo_unreserve(ring->mqd_obj);
-err_free:
- kfree(kbuf);
- return r;
+ *pos += bytes;
+ return bytes;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
@@ -611,9 +649,14 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
- debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
- &amdgpu_debugfs_ring_fops,
- ring->ring_size + 12);
+ if (amdgpu_sriov_vf(adev))
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_virt_ring_fops,
+ ring->ring_size + 12);
+ else
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
if (ring->mqd_obj) {
sprintf(name, "amdgpu_mqd_%s", ring->name);
@@ -675,6 +718,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
@@ -746,3 +790,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the fence of the bad job */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
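
To see how the two reset helpers above compose, here is a minimal sketch of what an IP-specific ring ->reset() callback could look like; example_hw_queue_reset() is a hypothetical placeholder for the hardware-specific queue reset, not a real amdgpu function:

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	int r;

	/* stop the scheduler and back up the non-guilty commands */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	/* hypothetical hardware-specific queue reset */
	r = example_hw_queue_reset(ring, vmid);
	if (r)
		return r;

	/* test the ring, force-complete the guilty fence, re-emit the
	 * backed-up commands and restart the scheduler
	 */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}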
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f93f51002201..b6b649179776 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -37,7 +37,7 @@ struct amdgpu_job;
struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 124
+#define AMDGPU_MAX_RINGS 149
#define AMDGPU_MAX_HWIP_RINGS 64
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_SW_GFX_RINGS 2
@@ -82,6 +82,7 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
+ AMDGPU_RING_TYPE_CPER,
};
enum amdgpu_ib_pool_type {
@@ -113,10 +114,11 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -126,11 +128,35 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+ uint32_t seq;
+};
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
+void amdgpu_fence_save_wptr(struct dma_fence *fence);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -140,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -163,13 +189,40 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every amdgpu block has its own no-op instruction (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc.). This field holds the no-op specific to the component that
+ * initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -235,10 +288,14 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
+/**
+ * amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -246,17 +303,68 @@ struct amdgpu_ring {
struct drm_gpu_scheduler sched;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the Ring buffer implementation and represents the
+ * write pointer. The wptr determines where the host has written.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before wptr is updated with a new value, the old value is usually
+ * stored in wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at the ring initialization time, and each IP block can
+ * specify a specific value. Check places that invoke
+ * amdgpu_ring_init() to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of DWords supported by the
+ * ring and is decremented as DWords are written to it.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs support 64-bit pointers and others only 32-bit; this
+ * behavior is component-specific and defined by the field
+ * support_64bit_ptrs. If the IP block supports 64-bit pointers, the
+ * mask 0xffffffffffffffff is set; otherwise this value equals
+ * buf_mask. Note that this field is used to keep wptr within a
+ * valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Mask used to keep wptr within the ring buffer bounds. It is set
+ * during ring buffer initialization and is defined as
+ * (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
@@ -274,39 +382,43 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
- volatile u32 *wptr_cpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * CPU address pointer into the writeback slot, used to commit
+ * changes to the GPU.
+ */
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
- volatile u32 *set_q_mode_ptr;
+ u32 *set_q_mode_ptr;
u64 set_q_mode_token;
unsigned vm_hub;
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
-
+ /* store the cached rptr to restore after reset */
+ uint64_t cached_rptr;
};
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -336,7 +448,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -369,16 +481,11 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
@@ -388,13 +495,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
void *src, int count_dw)
{
unsigned occupied, chunk1, chunk2;
- void *dst;
-
- if (unlikely(ring->count_dw < count_dw))
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
occupied = ring->wptr & ring->buf_mask;
- dst = (void *)&ring->ring[occupied];
chunk1 = ring->buf_mask + 1 - occupied;
chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
chunk2 = count_dw - chunk1;
@@ -402,12 +504,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
chunk2 <<= 2;
if (chunk1)
- memcpy(dst, src, chunk1);
+ memcpy(&ring->ring[occupied], src, chunk1);
if (chunk2) {
src += chunk1;
- dst = (void *)ring->ring;
- memcpy(dst, src, chunk2);
+ memcpy(ring->ring, src, chunk2);
}
ring->wptr += count_dw;
@@ -439,15 +540,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -470,8 +562,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size,
enum amdgpu_ib_pool_type pool,
struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f);
@@ -479,4 +570,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
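
As a rough illustration of how the fields documented above interact during a kernel-queue submission (assuming the usual amdgpu_ring_alloc()/amdgpu_ring_commit() pairing; error handling trimmed):

	/* reserve 16 dwords; this sets count_dw for the writes below */
	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	/* each write stores one dword at wptr & buf_mask, advances wptr
	 * (masked by ptr_mask) and decrements count_dw
	 */
	amdgpu_ring_write(ring, ring->funcs->nop);

	/* pad to the ring's align_mask and publish the new wptr */
	amdgpu_ring_commit(ring);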
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 1c66da1c3fb4..7e7d6c3865bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -124,7 +124,7 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
}
}
- del_timer(&mux->resubmit_timer);
+ timer_delete(&mux->resubmit_timer);
mux->s_resubmit = false;
}
@@ -135,7 +135,8 @@ static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
- struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
+ struct amdgpu_ring_mux *mux = timer_container_of(mux, t,
+ resubmit_timer);
if (!spin_trylock(&mux->lock)) {
amdgpu_ring_mux_schedule_resubmit(mux);
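
The from_timer() to timer_container_of() rename above is mechanical; the helper still just recovers the enclosing structure from the timer_list pointer. A generic sketch of the pattern (struct and function names here are hypothetical):

struct example_ctx {
	struct timer_list resubmit_timer;
	/* ... */
};

static void example_timer_fn(struct timer_list *t)
{
	struct example_ctx *ctx = timer_container_of(ctx, t,
						     resubmit_timer);

	/* ... use ctx ... */
}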
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,7 +237,21 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+ * The adev parameter is used to get the CS data and other gfx info,
+ * and buffer is the RLC CS pointer.
+ *
+ * Sometimes user space puts a request to clear the state into the
+ * command buffer; this function provides the clear state that is put
+ * into the hardware. Note that the driver programs the Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and also provides a pointer to it which the firmware uses to load
+ * the clear state in some cases.
+ */
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -261,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 10df731998b2..39070b2a4c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -93,8 +93,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return 0;
}
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
- struct dma_fence *fence)
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo, struct dma_fence *fence)
{
if (sa_bo == NULL || *sa_bo == NULL) {
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b0a8abc7a8ec..341beec59537 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -35,21 +35,19 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
@@ -57,7 +55,6 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
amdgpu_ctx_priority_override(ctx, priority);
mutex_unlock(&mgr->lock);
- fdput(f);
return 0;
}
@@ -66,31 +63,25 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
unsigned ctx_id,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
ctx = amdgpu_ctx_get(fpriv, ctx_id);
- if (!ctx) {
- fdput(f);
+ if (!ctx)
return -EINVAL;
- }
amdgpu_ctx_priority_override(ctx, priority);
amdgpu_ctx_put(ctx);
- fdput(f);
-
return 0;
}
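
The CLASS(fd, f)(fd) conversions above rely on the kernel's scoped-cleanup machinery: fdput() runs automatically when f goes out of scope, which is what allows all of the explicit fdput() calls on the error paths to be dropped. A condensed sketch of the resulting shape:

static int example_priority_override(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return -EINVAL;	/* fdput() happens automatically */

	/* ... operate on fd_file(f) ... */
	return 0;		/* and here as well */
}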
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 183a976ba29d..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -25,6 +25,9 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -75,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -219,9 +214,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (instance == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s%d.bin", ucode_prefix, instance);
if (err)
goto out;
@@ -261,6 +258,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 2) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 5)) &&
adev->firmware.load_type ==
AMDGPU_FW_LOAD_PSP &&
@@ -343,3 +342,270 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+/*
+ * debugfs interface to enable/disable sdma job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u64 i, num_ring;
+ u64 mask = 0;
+ struct amdgpu_ring *ring, *page = NULL;
+
+ if (!adev)
+ return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ /* Calculate the maximum possible mask value
+ * based on the number of SDMA instances and rings
+ */
+ mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;
+
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+ if (val & BIT_ULL(i * num_ring))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+
+ if (page) {
+ if (val & BIT_ULL(i * num_ring + 1))
+ page->sched.ready = true;
+ else
+ page->sched.ready = false;
+ }
+ }
+	/* make the sched.ready flag updates visible immediately across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u64 i, num_ring;
+ u64 mask = 0;
+ struct amdgpu_ring *ring, *page = NULL;
+
+ if (!adev)
+ return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+
+ if (ring->sched.ready)
+ mask |= BIT_ULL(i * num_ring);
+ else
+ mask &= ~BIT_ULL(i * num_ring);
+
+ if (page) {
+ if (page->sched.ready)
+ mask |= BIT_ULL(i * num_ring + 1);
+ else
+ mask &= ~BIT_ULL(i * num_ring + 1);
+ }
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
+ amdgpu_debugfs_sdma_sched_mask_get,
+ amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->sdma.num_instances > 1))
+ return;
+ sprintf(name, "amdgpu_sdma_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_sdma_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
+}
+
+static DEVICE_ATTR(sdma_reset_mask, 0444,
+ amdgpu_get_sdma_reset_mask, NULL);
+
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
+ return r;
+
+ if (adev->sdma.num_instances) {
+ r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->dev->kobj.sd) {
+ if (adev->sdma.num_instances)
+ device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
+ }
+}
+
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->sdma.has_page_queue &&
+ (ring->me < adev->sdma.num_instances) &&
+ (ring == &adev->sdma.instance[ring->me].ring))
+ return &adev->sdma.instance[ring->me].page;
+ else
+ return NULL;
+}
+
+/**
+ * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
+ * @adev: Pointer to the AMDGPU device structure
+ * @ring: Pointer to the ring structure to check
+ *
+ * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
+ * It returns true if the ring is such an SDMA ring, false otherwise.
+ */
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ int i = ring->me;
+
+ if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
+ return false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ return (ring == &adev->sdma.instance[i].page);
+ else
+ return false;
+}
+
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
+{
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing. Caller
+ * will handle it.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
+{
+ int ret = 0;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
+ struct amdgpu_ring *page_ring = &sdma_instance->page;
+
+ mutex_lock(&sdma_instance->engine_reset_mutex);
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&gfx_ring->sched);
+
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
+ }
+
+ if (sdma_instance->funcs->stop_kernel_queue) {
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
+
+ /* Perform the SDMA reset for the specified instance */
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
+ goto exit;
+ }
+
+ if (sdma_instance->funcs->start_kernel_queue) {
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
+
+exit:
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
+ }
+ }
+ mutex_unlock(&sdma_instance->engine_reset_mutex);
+
+ return ret;
+}
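
A hypothetical caller of amdgpu_sdma_reset_engine(), resetting logical instance 0 and letting the helper handle stopping and restarting the kernel queues itself:

	r = amdgpu_sdma_reset_engine(adev, 0, false);
	if (r)
		dev_err(adev->dev, "SDMA engine reset failed (%d)\n", r);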
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 087ce0f6fa07..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -64,6 +70,11 @@ struct amdgpu_sdma_instance {
struct amdgpu_bo *sdma_fw_obj;
uint64_t sdma_fw_gpu_addr;
uint32_t *sdma_fw_ptr;
+ struct mutex engine_reset_mutex;
+ /* track guilty state of GFX and PAGE queues */
+ bool gfx_guilty;
+ bool page_guilty;
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -102,11 +113,13 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src fence_irq;
struct amdgpu_irq_src ecc_irq;
struct amdgpu_irq_src vm_hole_irq;
struct amdgpu_irq_src doorbell_invalid_irq;
struct amdgpu_irq_src pool_timeout_irq;
struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
int num_instances;
uint32_t sdma_mask;
@@ -116,6 +129,10 @@ struct amdgpu_sdma {
struct ras_common_if *ras_if;
struct amdgpu_sdma_ras *ras;
uint32_t *ip_dump;
+ uint32_t supported_reset;
+ struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -155,6 +172,9 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
+
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
@@ -175,5 +195,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
-
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif
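
The new amdgpu_sdma_funcs table is meant to be filled in per SDMA IP version; a sketch of how an implementation might wire it up during sw_init (the example_* callbacks are hypothetical):

static const struct amdgpu_sdma_funcs example_sdma_funcs = {
	.stop_kernel_queue = &example_stop_kernel_queue,
	.start_kernel_queue = &example_start_kernel_queue,
	.soft_reset_kernel_queue = &example_soft_reset_kernel_queue,
};

	/* in the IP version's sw_init, per instance: */
	adev->sdma.instance[i].funcs = &example_sdma_funcs;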
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
/**
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -133,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
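
With the extended prototype, callers of amdgpu_seq64_alloc() now receive the kernel GPU address of the slot alongside the process VA and CPU pointer; a minimal usage sketch:

	u64 va, gpu_addr;
	u64 *cpu_addr;
	int r;

	r = amdgpu_seq64_alloc(adev, &va, &gpu_addr, &cpu_addr);
	if (r)
		return r;

	*cpu_addr = 0;	/* initialize the 64-bit sequence slot */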
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586ab4c911b..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -135,11 +135,16 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
+ if (dma_fence_is_signaled(e->fence)) {
+ dma_fence_put(e->fence);
+ e->fence = dma_fence_get(f);
+ return true;
+ }
- amdgpu_sync_keep_later(&e->fence, f);
- return true;
+ if (likely(e->fence->context == f->context)) {
+ amdgpu_sync_keep_later(&e->fence, f);
+ return true;
+ }
}
return false;
}
@@ -149,10 +154,12 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
* @sync: sync object to add fence to
* @f: fence to sync to
+ * @flags: memory allocation flags to use when allocating sync entry
*
* Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags)
{
struct amdgpu_sync_entry *e;
@@ -162,7 +169,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
if (amdgpu_sync_add_later(sync, f))
return 0;
- e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, flags);
if (!e)
return -ENOMEM;
@@ -242,14 +249,13 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
dma_fence_put(f);
if (r)
return r;
@@ -281,7 +287,7 @@ int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
continue;
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
if (r)
break;
}
@@ -388,7 +394,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(clone, f);
+ r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
if (r)
return r;
} else {
@@ -400,6 +406,25 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
}
/**
+ * amdgpu_sync_move - move all fences from src to dst
+ *
+ * @src: source of the fences, empty after function
+ * @dst: destination for the fences
+ *
+ * Moves all fences from source to destination. All fences in destination are
+ * freed and source is empty after the function call.
+ */
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst)
+{
+ unsigned int i;
+
+ amdgpu_sync_free(dst);
+
+ for (i = 0; i < HASH_SIZE(src->fences); ++i)
+ hlist_move_list(&src->fences[i], &dst->fences[i]);
+}
+
+/**
* amdgpu_sync_push_to_job - push fences into job
* @sync: sync object to get the fences from
* @job: job to push the fences into
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index e3272dce798d..51eb4382c91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -47,7 +47,8 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
@@ -56,6 +57,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst);
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
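
The gfp_t parameter added to amdgpu_sync_fence() lets callers choose the allocation context; a sketch of the two typical call sites (the fence and sync object names are illustrative):

	/* regular process context */
	r = amdgpu_sync_fence(&sync, fence, GFP_KERNEL);

	/* atomic context, e.g. under a spinlock, where sleeping
	 * allocations are not allowed
	 */
	r = amdgpu_sync_fence(&sync, fence, GFP_ATOMIC);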
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 383fce40d4dd..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -457,6 +453,38 @@ DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
TP_ARGS(pasid)
);
+TRACE_EVENT(amdgpu_isolation,
+ TP_PROTO(void *prev, void *next),
+ TP_ARGS(prev, next),
+ TP_STRUCT__entry(
+ __field(void *, prev)
+ __field(void *, next)
+ ),
+
+ TP_fast_assign(
+ __entry->prev = prev;
+ __entry->next = next;
+ ),
+ TP_printk("prev=%p, next=%p",
+ __entry->prev,
+ __entry->next)
+);
+
+TRACE_EVENT(amdgpu_cleaner_shader,
+ TP_PROTO(struct amdgpu_ring *ring, struct dma_fence *fence),
+ TP_ARGS(ring, fence),
+ TP_STRUCT__entry(
+ __string(ring, ring->name)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ring);
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("ring=%s, seqno=%Lu", __get_str(ring), __entry->seqno)
+);
+
TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo),
@@ -519,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 74adb983ab03..aa9ee5dffa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,7 +61,7 @@
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -226,7 +227,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -299,7 +301,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -309,7 +312,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +343,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
@@ -401,7 +408,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -442,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -533,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -624,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -641,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -690,7 +708,7 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
@@ -727,7 +745,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -779,12 +797,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
}
/*
@@ -812,7 +830,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (r)
- goto release_sg;
+ goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
@@ -820,6 +838,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
return 0;
+release_sg_table:
+ sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
ttm->sg = NULL;
@@ -928,7 +948,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1054,7 +1074,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1348,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@@ -1503,7 +1524,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
@@ -1760,7 +1782,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
if (!adev->bios &&
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
reserve_size = max(reserve_size, (uint32_t)280 << 20);
else if (!reserve_size)
reserve_size = DISCOVERY_TMR_OFFSET;
@@ -1774,7 +1797,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
@@ -1786,7 +1809,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
adev, adev->gmc.real_vram_size - reserve_size,
reserve_size, &adev->mman.fw_reserved_memory, NULL);
if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
NULL, NULL);
return ret;
@@ -1833,6 +1856,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
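+
+/*
+ * Illustrative sketch, not part of this patch: the gating condition used
+ * above could be factored into a predicate like the hypothetical helper
+ * below.
+ */
+static inline bool amdgpu_ttm_mmio_remap_supported_example(struct amdgpu_device *adev)
+{
+	/* Remap base must exist and host pages must fit the 4K GPU page. */
+	return adev->rmmio_remap.bus_addr && PAGE_SIZE <= AMDGPU_GPU_PAGE_SIZE;
+}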
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1849,6 +1925,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
+ dma_set_max_seg_size(adev->dev, UINT_MAX);
/* No other users of the address space, so set it to 0 */
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
@@ -1856,22 +1933,25 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -1950,63 +2030,89 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
* or whatever the user passed on module init.
*/
- if (amdgpu_gtt_size == -1)
- gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
- else
- gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ if (amdgpu_gtt_size != -1) {
+ uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
+
+ drm_warn(&adev->ddev,
+ "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
+ if (gtt_size != configured_size)
+ drm_warn(&adev->ddev,
+			"GTT size has been set to %llu but TTM size has been set to %llu, this is unusual\n",
+ configured_size, gtt_size);
+
+ gtt_size = configured_size;
+ }
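+
+	/*
+	 * Example (illustrative, not part of this patch): the deprecated
+	 * amdgpu.gttsize=8192 (MiB) corresponds to ttm.pages_limit=2097152
+	 * on a 4K-page host (8 GiB / 4 KiB per page).
+	 */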
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
+ if (adev->flags & AMD_IS_APU) {
+ if (adev->gmc.real_vram_size < gtt_size)
+ adev->apu_prefer_gtt = true;
+ }
+
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a doorbell page for kernel usage */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
+	/* Initialize MMIO-remap pool (a single 4K page) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
+ return r;
+ }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2038,12 +2144,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2056,15 +2166,20 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2096,8 +2211,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2105,8 +2221,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
@@ -2136,7 +2253,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2146,7 +2263,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2177,7 +2294,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2185,7 +2303,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2212,7 +2331,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2220,7 +2339,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2233,7 +2353,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2272,7 +2392,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
@@ -2303,7 +2423,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2322,7 +2443,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2331,7 +2453,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2351,7 +2474,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
@@ -2391,7 +2514,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 138d80017f35..0be2728aa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,14 +26,16 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
-#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -85,6 +87,7 @@ struct amdgpu_mman {
uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@@ -118,6 +121,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
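+
+/*
+ * Illustrative use of the helpers above (not part of this patch):
+ * request that write compression be disabled for a copy.
+ *
+ *	u32 copy_flags = AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE, 1);
+ */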
@@ -151,6 +156,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
@@ -177,14 +183,15 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
struct hmm_range *range);
@@ -192,7 +199,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
struct hmm_range **range)
{
return -EPERM;
@@ -208,7 +214,7 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4c7b53648a50..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,11 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -765,8 +770,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -779,8 +786,9 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
- &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
- &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
@@ -1152,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1216,6 +1227,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(11, 0, 13):
return "beige_goby";
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
return "vangogh";
case IP_VERSION(12, 0, 1):
return "green_sardine";
@@ -1383,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
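+
+/*
+ * Illustrative caller sketch, not part of this patch: pick a "kicker"
+ * firmware variant when the device/revision matches kicker_device_list.
+ * The helper name and both firmware file names are hypothetical.
+ */
+static const char *pick_rlc_fw_name_example(struct amdgpu_device *adev)
+{
+	return amdgpu_is_kicker_fw(adev) ? "amdgpu/rlc_kicker.bin"
+					 : "amdgpu/rlc.bin";
+}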
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
@@ -1434,6 +1459,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
+ * @required: whether the firmware is required
* @fmt: firmware name format string
* @...: variable arguments
*
@@ -1442,7 +1468,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...)
+ enum amdgpu_ucode_required required, const char *fmt, ...)
{
char fname[AMDGPU_UCODE_NAME_MAX];
va_list ap;
@@ -1456,16 +1482,24 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
return -EOVERFLOW;
}
- r = request_firmware(fw, fname, adev->dev);
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
if (r)
return -ENODEV;
r = amdgpu_ucode_validate(*fw);
- if (r) {
+ if (r)
+ /*
+	 * amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+	 * regardless of success or failure; amdgpu_ucode_release() takes care
+	 * of releasing the firmware, so avoid a redundant release here.
+ */
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
- release_firmware(*fw);
- *fw = NULL;
- }
return r;
}
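+
+/*
+ * Illustrative usage, not part of this patch (function and firmware names
+ * are hypothetical): optional firmware is requested without a warning when
+ * the file is absent, and the request is paired with amdgpu_ucode_release()
+ * on every path, as the comment above requires.
+ */
+static int request_optional_fw_example(struct amdgpu_device *adev,
+				       const struct firmware **fw)
+{
+	int r;
+
+	r = amdgpu_ucode_request(adev, fw, AMDGPU_UCODE_OPTIONAL,
+				 "amdgpu/%s.bin", "optional_ucode");
+	if (r)
+		amdgpu_ucode_release(fw); /* safe to call even on failure */
+	return r;
+}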
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4e23419b92d4..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -126,6 +128,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
+ PSP_FW_TYPE_PSP_SPDM_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
@@ -163,6 +166,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
TA_FW_TYPE_PSP_SECUREDISPLAY,
+ TA_FW_TYPE_PSP_XGMI_AUX,
TA_FW_TYPE_MAX_INDEX,
};
@@ -550,6 +554,11 @@ enum amdgpu_firmware_load_type {
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO,
};
+enum amdgpu_ucode_required {
+ AMDGPU_UCODE_OPTIONAL,
+ AMDGPU_UCODE_REQUIRED,
+};
+
/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
@@ -593,6 +602,12 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
+};
+
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
@@ -603,9 +618,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-__printf(3, 4)
+__printf(4, 5)
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...);
+ enum amdgpu_ucode_required required, const char *fmt, ...);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
@@ -622,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index bb7b9b2eaac1..2e039fb778ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -78,7 +78,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -166,10 +166,11 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
+ err_data->err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, &err_count);
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev,
+ con->eeprom_control.ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
@@ -251,6 +252,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
@@ -318,6 +320,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
if (r)
@@ -383,6 +388,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
return 0;
}
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+ void *data)
+{
+ uint32_t umc_node_inst;
+ uint32_t node_inst;
+ uint32_t umc_inst;
+ uint32_t ch_inst;
+ int ret;
+
+ /*
+	 * This loop is based on the following:
+	 * umc.active_mask = mask of active umc instances across all nodes
+	 * umc.umc_inst_num = maximum number of umc instances per node
+ * umc.node_inst_num = maximum number of node instances
+ * Channel instances are not assumed to be harvested.
+ */
+	dev_dbg(adev->dev, "active umcs: %lx umc_inst per node: %d",
+ adev->umc.active_mask, adev->umc.umc_inst_num);
+ for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+ adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+ node_inst = umc_node_inst / adev->umc.umc_inst_num;
+ umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+ LOOP_UMC_CH_INST(ch_inst) {
+ dev_dbg(adev->dev,
+ "node_inst :%d umc_inst: %d ch_inst: %d",
+ node_inst, umc_inst, ch_inst);
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev,
+ "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data)
{
@@ -391,6 +435,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
uint32_t ch_inst = 0;
int ret = 0;
+ if (adev->aid_mask)
+ return amdgpu_umc_loop_all_aid(adev, func, data);
+
if (adev->umc.node_inst_num) {
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
ret = func(adev, node_inst, umc_inst, ch_inst, data);
@@ -441,3 +488,101 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr)
+{
+ struct ta_ras_query_address_output addr_out;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ addr_out.pa.pa = pa_addr;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
+ &addr_out, false);
+ else
+ return -EINVAL;
+}
+
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ int i, ret;
+ struct ras_err_data err_data;
+
+ err_data.err_addr = kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
+ return 0;
+ }
+
+ ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ if (i >= len)
+ goto out;
+
+ pfns[i] = err_data.err_addr[i].retired_page;
+ }
+ ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
+
+out:
+ kfree(err_data.err_addr);
+ return ret;
+}
+
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
+ addr_out, dump_addr);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ return 0;
+}
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+ int ret;
+
+	/* nps: the memory partition the pa belongs to */
+ addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+ addr_in.addr_type = TA_RAS_PA_TO_MCA;
+ ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+ if (ret) {
+		dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx\n",
+ pa);
+
+ return ret;
+ }
+
+ *mca = addr_out.ma.err_addr;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ce4179db2a6d..ec203f9e5ffa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,6 +54,41 @@
/* Page retirement tag */
#define UMC_ECC_NEW_DETECTED_TAG 0x1
+/*
+ * a flag to indicate v2 of channel index stored in eeprom
+ *
+ * v1 (legacy way): store channel index within a umc instance in eeprom
+ * range in UMC v12: 0 ~ 7
+ * v2: store global channel index in eeprom
+ * range in UMC v12: 0 ~ 127
+ *
+ * NOTE: it's better to store it in eeprom_table_record.mem_channel,
+ * but there are only 8 bits in mem_channel, and the channel number may
+ * increase in the future, so we save it in
+ * eeprom_table_record.retired_page instead. retired_page is unused in
+ * v2; we rely on eeprom_table_record.address instead of retired_page.
+ * Only 48 bits are saved on eeprom, so use bit 47 here.
+ */
+#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
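+
+/*
+ * Illustrative sketch, not part of this patch (helper name hypothetical):
+ * testing whether a record uses the v2 global channel index format.
+ *
+ *	static inline bool umc_record_is_v2(const struct eeprom_table_record *rec)
+ *	{
+ *		return !!(rec->retired_page & UMC_CHANNEL_IDX_V2);
+ *	}
+ */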
+
+/*
+ * save the nps value in eeprom_table_record.retired_page[47:40];
+ * the channel index flag above will then be retired.
+ */
+#define UMC_NPS_SHIFT 40
+#define UMC_NPS_MASK 0xffULL
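+
+/*
+ * Illustrative pack/extract sketch for the nps field above; both helper
+ * names are hypothetical and not part of this patch.
+ */
+static inline u64 umc_pack_nps_example(u64 retired_page, u64 nps)
+{
+	return retired_page | ((nps & UMC_NPS_MASK) << UMC_NPS_SHIFT);
+}
+
+static inline u64 umc_extract_nps_example(u64 retired_page)
+{
+	return (retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+}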
+
+/* three column bits and one row bit flip in the MCA address
+ * during bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -70,6 +105,14 @@ struct amdgpu_umc_ras {
enum amdgpu_mca_error_type type, void *ras_error_status);
int (*update_ecc_status)(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
+ int (*convert_ras_err_addr)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr);
+ uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -100,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
@@ -134,4 +181,14 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status);
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr);
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len);
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 6162582d0aa2..cd707d70a0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -32,462 +32,7 @@
#include "amdgpu_umsch_mm.h"
#include "umsch_mm_v4_0.h"
-struct umsch_mm_test_ctx_data {
- uint8_t process_csa[PAGE_SIZE];
- uint8_t vpe_ctx_csa[PAGE_SIZE];
- uint8_t vcn_ctx_csa[PAGE_SIZE];
-};
-
-struct umsch_mm_test_mqd_data {
- uint8_t vpe_mqd[PAGE_SIZE];
- uint8_t vcn_mqd[PAGE_SIZE];
-};
-
-struct umsch_mm_test_ring_data {
- uint8_t vpe_ring[PAGE_SIZE];
- uint8_t vpe_ib[PAGE_SIZE];
- uint8_t vcn_ring[PAGE_SIZE];
- uint8_t vcn_ib[PAGE_SIZE];
-};
-
-struct umsch_mm_test_queue_info {
- uint64_t mqd_addr;
- uint64_t csa_addr;
- uint32_t doorbell_offset_0;
- uint32_t doorbell_offset_1;
- enum UMSCH_SWIP_ENGINE_TYPE engine;
-};
-
-struct umsch_mm_test {
- struct amdgpu_bo *ctx_data_obj;
- uint64_t ctx_data_gpu_addr;
- uint32_t *ctx_data_cpu_addr;
-
- struct amdgpu_bo *mqd_data_obj;
- uint64_t mqd_data_gpu_addr;
- uint32_t *mqd_data_cpu_addr;
-
- struct amdgpu_bo *ring_data_obj;
- uint64_t ring_data_gpu_addr;
- uint32_t *ring_data_cpu_addr;
-
-
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- uint32_t pasid;
- uint32_t vm_cntx_cntl;
- uint32_t num_queues;
-};
-
-static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
- uint64_t addr, uint32_t size)
-{
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
- if (!*bo_va) {
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r)
- goto error_del_bo_va;
-
-
- r = amdgpu_vm_bo_update(adev, *bo_va, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, vm->last_update);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
-
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, *bo_va);
- amdgpu_sync_free(&sync);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
- uint64_t addr)
-{
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
-
- r = amdgpu_vm_bo_unmap(adev, bo_va, addr);
- if (r)
- goto out_unlock;
-
- amdgpu_vm_bo_del(adev, bo_va);
-
-out_unlock:
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static void setup_vpe_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint64_t ring_gpu_addr = test->ring_data_gpu_addr;
-
- mqd->rb_base_lo = (ring_gpu_addr >> 8);
- mqd->rb_base_hi = (ring_gpu_addr >> 40);
- mqd->rb_size = PAGE_SIZE / 4;
- mqd->wptr_val = 0;
- mqd->rptr_val = 0;
- mqd->unmapped = 1;
-
- if (adev->vpe.collaborate_mode)
- memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
-
- qinfo->mqd_addr = test->mqd_data_gpu_addr;
- qinfo->csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = 0;
- qinfo->doorbell_offset_1 = 0;
-}
-
-static void setup_vcn_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
-}
-
-static int add_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_add_queue_input queue_input = {};
- int r;
-
- queue_input.process_id = test->pasid;
- queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo);
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
-
- queue_input.process_quantum = 100000; /* 10ms */
- queue_input.process_csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, process_csa);
-
- queue_input.context_quantum = 10000; /* 1ms */
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
-
- queue_input.engine_type = qinfo->engine;
- queue_input.mqd_addr = qinfo->mqd_addr;
- queue_input.vm_context_cntl = test->vm_cntx_cntl;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int remove_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_remove_queue_input queue_input = {};
- int r;
-
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint32_t *ring = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4;
- uint32_t *ib = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4;
- uint64_t ib_gpu_addr = test->ring_data_gpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib);
- uint32_t *fence = ib + 2048 / 4;
- uint64_t fence_gpu_addr = ib_gpu_addr + 2048;
- const uint32_t test_pattern = 0xdeadbeef;
- int i;
-
- ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
- ib[1] = lower_32_bits(fence_gpu_addr);
- ib[2] = upper_32_bits(fence_gpu_addr);
- ib[3] = test_pattern;
-
- ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0);
- ring[1] = (ib_gpu_addr & 0xffffffe0);
- ring[2] = upper_32_bits(ib_gpu_addr);
- ring[3] = 4;
- ring[4] = 0;
- ring[5] = 0;
-
- mqd->wptr_val = (6 << 2);
- if (adev->vpe.collaborate_mode)
- (++mqd)->wptr_val = (6 << 2);
-
- WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (*fence == test_pattern)
- return 0;
- udelay(1);
- }
-
- dev_err(adev->dev, "vpe queue submission timeout\n");
-
- return -ETIMEDOUT;
-}
-
-static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- return 0;
-}
-
-static int setup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
- int r;
-
- test->vm_cntx_cntl = hub->vm_cntx_cntl;
-
- test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL);
- if (!test->vm) {
- r = -ENOMEM;
- return r;
- }
-
- r = amdgpu_vm_init(adev, test->vm, -1);
- if (r)
- goto error_free_vm;
-
- r = amdgpu_pasid_alloc(16);
- if (r < 0)
- goto error_fini_vm;
- test->pasid = r;
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ctx_data_obj,
- &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- if (r)
- goto error_free_pasid;
-
- memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data));
-
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->mqd_data_obj,
- &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- if (r)
- goto error_free_ctx_data_obj;
-
- memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE);
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ring_data_obj,
- NULL,
- (void **)&test->ring_data_cpu_addr);
- if (r)
- goto error_free_mqd_data_obj;
-
- memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
-
- test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
- test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
- if (r)
- goto error_free_ring_data_obj;
-
- return 0;
-
-error_free_ring_data_obj:
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
-error_free_mqd_data_obj:
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
-error_free_ctx_data_obj:
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
-error_free_pasid:
- amdgpu_pasid_free(test->pasid);
-error_fini_vm:
- amdgpu_vm_fini(adev, test->vm);
-error_free_vm:
- kfree(test->vm);
-
- return r;
-}
-
-static void cleanup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- unmap_ring_data(adev, test->vm, test->ring_data_obj,
- test->bo_va, test->ring_data_gpu_addr);
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- amdgpu_pasid_free(test->pasid);
- amdgpu_vm_fini(adev, test->vm);
- kfree(test->vm);
-}
-
-static int setup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- setup_vpe_queue(adev, test, &qinfo[i]);
- else
- setup_vcn_queue(adev, test, &qinfo[i]);
-
- r = add_test_queue(adev, test, &qinfo[i]);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int submit_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- r = submit_vpe_queue(adev, test);
- else
- r = submit_vcn_queue(adev, test);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void cleanup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i;
-
- for (i = 0; i < test->num_queues; i++)
- remove_test_queue(adev, test, &qinfo[i]);
-}
-
-static int umsch_mm_test(struct amdgpu_device *adev)
-{
- struct umsch_mm_test_queue_info qinfo[] = {
- { .engine = UMSCH_SWIP_ENGINE_TYPE_VPE },
- };
- struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) };
- int r;
-
- r = setup_umsch_mm_test(adev, &test);
- if (r)
- return r;
-
- r = setup_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- r = submit_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
-
- return 0;
-
-cleanup:
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
- return r;
-}
+MODULE_FIRMWARE("amdgpu/umsch_mm_4_0_0.bin");
int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
{
@@ -581,13 +126,14 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
case IP_VERSION(4, 0, 6):
- fw_name = "amdgpu/umsch_mm_4_0_0.bin";
+ fw_name = "4_0_0";
break;
default:
- break;
+ return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/umsch_mm_%s.bin", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -765,9 +311,9 @@ static int umsch_mm_init(struct amdgpu_device *adev)
}
-static int umsch_mm_early_init(void *handle)
+static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
@@ -784,19 +330,19 @@ static int umsch_mm_early_init(void *handle)
return 0;
}
-static int umsch_mm_late_init(void *handle)
+static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
- return umsch_mm_test(adev);
+ return 0;
}
-static int umsch_mm_sw_init(void *handle)
+static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_init(adev);
@@ -815,9 +361,9 @@ static int umsch_mm_sw_init(void *handle)
return 0;
}
-static int umsch_mm_sw_fini(void *handle)
+static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -839,9 +385,9 @@ static int umsch_mm_sw_fini(void *handle)
return 0;
}
-static int umsch_mm_hw_init(void *handle)
+static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_load_microcode(&adev->umsch_mm);
@@ -857,9 +403,9 @@ static int umsch_mm_hw_init(void *handle)
return 0;
}
-static int umsch_mm_hw_fini(void *handle)
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
umsch_mm_ring_stop(&adev->umsch_mm);
@@ -873,18 +419,14 @@ static int umsch_mm_hw_fini(void *handle)
return 0;
}
-static int umsch_mm_suspend(void *handle)
+static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_fini(adev);
+ return umsch_mm_hw_fini(ip_block);
}
-static int umsch_mm_resume(void *handle)
+static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_init(adev);
+ return umsch_mm_hw_init(ip_block);
}
void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm)
@@ -997,8 +539,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
.hw_fini = umsch_mm_hw_fini,
.suspend = umsch_mm_suspend,
.resume = umsch_mm_resume,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..1add21160d21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
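+
+/*
+ * Illustrative caller sketch, not part of this patch (helper name
+ * hypothetical): test whether a single HW IP type supports user queues.
+ */
+static bool amdgpu_userq_ip_supported_example(struct amdgpu_device *adev, u32 ip)
+{
+	return !!(amdgpu_userq_get_supported_ip_mask(adev) & (1 << ip));
+}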
+
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+	/* Only validate whether the userq is resident in the VM mapping range */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
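+
+/*
+ * Illustrative usage, not part of this patch (variable names hypothetical):
+ * validate that a one-page user queue ring at ring_va is fully covered by
+ * an existing VM mapping before accepting it.
+ *
+ *	r = amdgpu_userq_input_va_validate(&fpriv->vm, ring_va,
+ *					   AMDGPU_GPU_PAGE_SIZE);
+ */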
+
+static int
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+ return r;
+}
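+
+/*
+ * Queue state machine driven by the four helpers above; a failed
+ * transition parks the queue in AMDGPU_USERQ_STATE_HUNG:
+ *
+ *	UNMAPPED  --map-->      MAPPED
+ *	MAPPED    --preempt-->  PREEMPTED
+ *	PREEMPTED --restore-->  MAPPED
+ *	MAPPED or PREEMPTED --unmap--> UNMAPPED
+ */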
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+ drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ /* TODO: a userq HW unmap error requires a GPU reset */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+ drm_warn(adev_to_drm(uq_mgr->adev), "destroying a userq that is still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
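+/*
+ * Creating a queue at normal priority is open to everyone; high (or
+ * higher) priority requires CAP_SYS_NICE or DRM master status.
+ */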
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ char *queue_name;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+ * There could be a situation where we are creating a new queue while
+ * the other queues under this UQ_mgr are suspended. So if there is any
+ * resume work pending, wait for it to finish.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Validate the userq virtual addresses */
+ if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+ /* Usermode queues are only supported for GFX, SDMA and compute IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+ drm_file_err(filp, "invalidate userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+ drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
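+/*
+ * Main DRM_AMDGPU_USERQ ioctl entry point. A minimal userspace sketch of
+ * queue creation might look roughly like the following (illustrative
+ * only: BO/VA setup, any IP-specific in.mqd payload and the libdrm
+ * boilerplate are assumed to exist elsewhere):
+ *
+ *   union drm_amdgpu_userq args = {};
+ *
+ *   args.in.op = AMDGPU_USERQ_OP_CREATE;
+ *   args.in.ip_type = AMDGPU_HW_IP_GFX;
+ *   args.in.doorbell_handle = doorbell_handle;
+ *   args.in.doorbell_offset = doorbell_offset;
+ *   args.in.queue_va = queue_va;
+ *   args.in.queue_size = queue_size;
+ *   args.in.rptr_va = rptr_va;
+ *   args.in.wptr_va = wptr_va;
+ *   r = drmCommandWriteRead(fd, DRM_AMDGPU_USERQ, &args, sizeof(args));
+ *
+ * On success, args.out.queue_id identifies the new queue for a later
+ * AMDGPU_USERQ_OP_FREE and for the signal/wait ioctls.
+ */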
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
+static int
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
+
+/* Make sure the whole VM is ready to be used */
+static int
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va *bo_va;
+ struct drm_exec exec;
+ int ret;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
+
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Walking the done list like this is safe since everything is
+ * locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
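+/*
+ * Worker for uq_mgr->resume_work: waits for any in-flight eviction fence
+ * suspend work, revalidates the whole VM and then remaps all the queues
+ * of this process.
+ */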
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_vm_validate(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Try to unmap all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
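+/*
+ * Invoked from the eviction fence machinery when this process' BOs are
+ * about to be evicted: wait for outstanding userq fences, preempt all
+ * queues, signal the current eviction fence and, unless the file is
+ * being closed, schedule resume_work to restore the queues afterwards.
+ */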
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
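+/*
+ * Device-level suspend for user queues. In S0ix the queues are only
+ * preempted (and later restored by amdgpu_userq_resume()); a full
+ * suspend unmaps them entirely and maps them again on resume.
+ */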
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
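+/*
+ * Halt user queue scheduling while enforce isolation is active: preempt
+ * every GFX/compute queue on partition @idx and flag the device so that
+ * newly created GFX/compute queues are not mapped until scheduling is
+ * started again.
+ */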
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to start gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..c027dd916672
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
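+/*
+ * Queue lifecycle states: UNMAPPED (not known to the HW scheduler),
+ * MAPPED (runnable), PREEMPTED (temporarily taken off the HW, e.g. for
+ * eviction), HUNG (an unmap failed and the queue needs a reset).
+ */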
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+ struct dentry *debugfs_queue;
+};
+
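+/*
+ * Per-IP callbacks: each IP that supports user queues registers one of
+ * these in adev->userq_funcs[] to create/destroy its MQD and to
+ * map/unmap/preempt/restore queues on the HW scheduler.
+ */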
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
+};
+
+/* Usermode queue manager, one per drm_file */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..761bad98da3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
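+/*
+ * Each user queue owns a fence driver backed by one 64-bit seq64 slot.
+ * The GPU writes the last completed fence value to that slot, and a
+ * fence is considered signaled once the value read back is >= its seqno.
+ */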
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
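+/*
+ * Initialize a preallocated userq fence and queue it on the fence
+ * driver's list. The fence also takes over all fence driver references
+ * collected in userq->fence_drv_xa by the wait IOCTL so they can be
+ * dropped once this fence signals.
+ */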
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from the userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers; the fence signals once
+ * the RPTR value written to seq64 memory reaches (>=) this WPTR.
+ *
+ * Returns 0 on success with the wptr value stored in @wptr, error on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+ }
+}
+
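+/*
+ * DRM_AMDGPU_USERQ_SIGNAL: create a userq fence for the current wptr of
+ * the given queue and attach it to the supplied syncobjs and to the
+ * reservation objects of the read/write BO lists, so that other clients
+ * can synchronize against work submitted from userspace.
+ */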
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+ /* Being here means the UQ is active; make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Add the created fence to syncobj/BO's */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
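+/*
+ * DRM_AMDGPU_USERQ_WAIT: gather the fences attached to the supplied
+ * syncobjs, timeline points and BO lists and return them to userspace as
+ * (seq64 GPU VA, value) pairs the queue can wait on. Typically called
+ * twice: once with num_fences == 0 to query the count, then again with
+ * buffers sized accordingly.
+ */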
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ size_mul(sizeof(u32), num_read_bo_handles));
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ size_mul(sizeof(u32), num_write_bo_handles));
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj));
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count GEM objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+ * userq_fence_info and return the actual number of fences in
+ * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve GEM read objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve GEM write objects fence */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+ * We need to make sure the user queue releases its references
+ * to the fence drivers at some point before queue destruction.
+ * Otherwise, we would accumulate those references until we run
+ * out of space and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store the fence's seq64 GPU VA and the value to wait for */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..d76add2afc77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+ * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal macro: generates the helper functions for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
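As a usage illustration of the X-macro machinery above, the following hypothetical capability class is built entirely from this header; the xgmi name and both capability IDs are invented for the example and do not exist in the driver:

/* Each list entry becomes one enum xgmi_cap_id value; xgmi_COUNT is
 * appended automatically by DECLARE_ATTR_CAP_CLASS. */
#define XGMI_CAP_LIST(X)	\
	X(XGMI_CAP_LINK_STATUS)	\
	X(XGMI_CAP_BANDWIDTH)

DECLARE_ATTR_CAP_CLASS(xgmi, XGMI_CAP_LIST)

static void xgmi_caps_example(void)
{
	struct xgmi_caps caps;
	enum amdgpu_cap_attr attr;

	xgmi_attr_init(&caps);		/* every capability starts INVALID (00) */
	xgmi_attr_set(&caps, XGMI_CAP_LINK_STATUS, AMDGPU_CAP_ATTR_RO);

	if (!xgmi_attr_get(&caps, XGMI_CAP_LINK_STATUS, &attr) &&
	    attr == AMDGPU_CAP_ATTR_RO) {
		/* same check, shorter: xgmi_cap_is_ro(&caps, XGMI_CAP_LINK_STATUS) */
	}
}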
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 31fd30dcd593..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
@@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ if (abo->placements[i].mem_type == TTM_PL_VRAM)
+ abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
}
@@ -1134,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 74fdbf71d95b..ce318f5de047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -214,15 +214,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
drm_sched_entity_destroy(&adev->vce.entity);
- amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
- (void **)&adev->vce.cpu_addr);
-
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
+ amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+ (void **)&adev->vce.cpu_addr);
+
return 0;
}
@@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -503,7 +503,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
- amdgpu_ib_free(ring->adev, &ib_msg, f);
+ amdgpu_ib_free(&ib_msg, f);
if (r)
goto err;
@@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 43f44cc201cb..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,6 +62,7 @@
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
+#define FIRMWARE_VCN5_0_1 "amdgpu/vcn_5_0_1.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -88,46 +89,59 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
char ucode_prefix[25];
- int r, i;
+ int r;
+ adev->vcn.inst[i].adev = adev;
+ adev->vcn.inst[i].inst = i;
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
- else
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
- if (r) {
- amdgpu_ucode_release(&adev->vcn.fw[i]);
- return r;
+
+ if (i != 0 && adev->vcn.per_inst_fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ if (!adev->vcn.inst[0].fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ } else {
+ r = 0;
}
+ adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
}
+
return r;
}
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
unsigned long bo_size;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
- int i, r;
-
- INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
- mutex_init(&adev->vcn.vcn_pg_lock);
- mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
- atomic_set(&adev->vcn.total_submission_cnt, 0);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
- atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+ int r;
+ mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+ mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
+ atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
+ INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
+ adev->vcn.inst[i].indirect_sram = true;
/*
* Some Steam Deck's BIOS versions are incompatible with the
@@ -140,18 +154,19 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
- !strncmp("F7A0114", bios_ver, 7))) {
- adev->vcn.indirect_sram = false;
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.inst[i].indirect_sram = false;
dev_info(adev->dev,
- "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}
/* from vcn4 and above, only unified queue is used */
- adev->vcn.using_unified_queue =
+ adev->vcn.inst[i].using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -169,16 +184,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ dev_info(adev->dev,
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -201,88 +217,87 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (amdgpu_vcnfw_log)
bo_size += AMDGPU_VCNFW_LOG_SIZE;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ return r;
+ }
+
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
+
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ if (adev->vcn.inst[i].indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr,
- &adev->vcn.inst[i].cpu_addr);
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
-
- adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
- bo_size - fw_shared_size;
- adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
- bo_size - fw_shared_size;
-
- adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
-
- if (amdgpu_vcnfw_log) {
- adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.log_offset = log_offset;
- }
-
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr,
- &adev->vcn.inst[i].dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
- return r;
- }
- }
}
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
- int i, j;
+ int j;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- amdgpu_bo_free_kernel(
- &adev->vcn.inst[j].dpg_sram_bo,
- &adev->vcn.inst[j].dpg_sram_gpu_addr,
- (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
- kvfree(adev->vcn.inst[j].saved_bo);
+ kvfree(adev->vcn.inst[i].saved_bo);
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
- &adev->vcn.inst[j].gpu_addr,
- (void **)&adev->vcn.inst[j].cpu_addr);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ (void **)&adev->vcn.inst[i].cpu_addr);
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
- amdgpu_ucode_release(&adev->vcn.fw[j]);
+ if (adev->vcn.per_inst_fw) {
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ adev->vcn.inst[i].fw = NULL;
}
- mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
- mutex_destroy(&adev->vcn.vcn_pg_lock);
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
- return 0;
+ mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
- int vcn_config = adev->vcn.vcn_config[vcn_instance];
+ int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
@@ -294,173 +309,230 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- bool in_ras_intr = amdgpu_ras_intr_triggered();
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- /* err_event_athub will corrupt VCPU buffer, so we need to
- * restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
- return 0;
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return 0;
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
+ }
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ return 0;
+}
- adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.inst[i].saved_bo)
- return -ENOMEM;
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+{
+ int ret, i;
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
- drm_dev_exit(idx);
- }
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+ if (ret)
+ return ret;
}
+
return 0;
}
-int amdgpu_vcn_resume(struct amdgpu_device *adev)
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
+{
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
+ * restore fw data and clear buffer in amdgpu_vcn_resume() */
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
+ return 0;
+
+ return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+}
+
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return -EINVAL;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
- if (adev->vcn.inst[i].saved_bo != NULL) {
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.inst[i].fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
- kvfree(adev->vcn.inst[i].saved_bo);
- adev->vcn.inst[i].saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned int offset;
-
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.fw[i]->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- drm_dev_exit(idx);
- }
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- }
- memset_io(ptr, 0, size);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
+ memset_io(ptr, 0, size);
}
+
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
- unsigned int i, j;
- int r = 0;
-
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ unsigned int i = vcn_inst->inst, j;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
- struct dpg_pause_state new_state;
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
- if (fence[j] ||
- unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.inst[i].using_unified_queue) {
+ struct dpg_pause_state new_state;
- adev->vcn.pause_dpg_mode(adev, j, &new_state);
- }
+ if (fence[i] ||
+ unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
- fences += fence[j];
+ adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
}
- if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
+ fences += fence[i];
+
+ if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
}
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int r = 0;
+ struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- atomic_inc(&adev->vcn.total_submission_cnt);
+ atomic_inc(&vcn_inst->total_submission_cnt);
- if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- }
+ cancel_delayed_work_sync(&vcn_inst->idle_work);
- mutex_lock(&adev->vcn.vcn_pg_lock);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
+ !vcn_inst->using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
- atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+ atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
} else {
unsigned int fences = 0;
unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+ for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
- if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
}
- adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
+ vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
- mutex_unlock(&adev->vcn.vcn_pg_lock);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -470,12 +542,13 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
- !adev->vcn.using_unified_queue)
+ !adev->vcn.inst[ring->me].using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
- atomic_dec(&ring->adev->vcn.total_submission_cnt);
+ atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
+ VCN_IDLE_TIMEOUT);
}
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
@@ -493,7 +566,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -553,19 +626,19 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
ib = &job->ibs[0];
- ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
+ ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
ib->ptr[1] = addr;
- ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
+ ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
ib->ptr[3] = addr >> 32;
- ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
+ ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
ib->ptr[5] = 0;
for (i = 6; i < 16; i += 2) {
- ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
+ ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
ib->ptr[i+1] = 0;
}
ib->length_dw = 16;
@@ -574,7 +647,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -585,7 +658,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -728,12 +801,12 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
uint32_t ib_pack_in_dw;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -741,7 +814,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
- if (adev->vcn.using_unified_queue) {
+ if (adev->vcn.inst[ring->me].using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -760,14 +833,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -778,7 +851,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -858,12 +931,12 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -872,7 +945,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -894,7 +967,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -925,12 +998,12 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -939,7 +1012,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -961,7 +1034,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -1008,7 +1081,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
- amdgpu_ib_free(adev, &ib, fence);
+ amdgpu_ib_free(&ib, fence);
dma_fence_put(fence);
return r;
@@ -1019,7 +1092,8 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
long r;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+ (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
if (r)
goto error;
@@ -1045,34 +1119,32 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
}
}
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
- int i;
unsigned int idx;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
- /* currently only support 2 FW instances */
- if (i >= 2) {
- dev_info(adev->dev, "More then 2 VCN FW instances!\n");
- break;
- }
- idx = AMDGPU_UCODE_ID_VCN + i;
- adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
- break;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
+ && (i > 0))
+ return;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+ dev_info(adev->dev, "More then 2 VCN FW instances!\n");
+ return;
}
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
}
@@ -1085,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1098,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1165,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1277,3 +1349,289 @@ int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
+static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
+}
+
+static DEVICE_ATTR(vcn_reset_mask, 0444,
+ amdgpu_get_vcn_reset_mask, NULL);
+
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->vcn.num_vcn_inst) {
+ r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->vcn.num_vcn_inst)
+ device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
+ }
+}
+
+/*
+ * debugfs to enable/disable vcn job submission to specific core or
+ * instance. It is created only if the queue type is unified.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+	/* publish the sched.ready updates so they are visible across CPUs */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
+ amdgpu_debugfs_vcn_sched_mask_get,
+ amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
+#endif
+
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
+ return;
+ sprintf(name, "amdgpu_vcn_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_vcn_sched_mask_fops);
+#endif
+}
+
+/**
+ * vcn_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ ret |= vinst->set_pg_state(vinst, state);
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
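To make the wiring of the new shared dump helpers concrete, here is a hypothetical per-version sw_init sketch; the vcn_example_* names and the list contents are placeholders, and SOC15_REG_ENTRY_STR is assumed to be the usual soc15 register-entry initializer:

/* Hypothetical sketch: an IP-version file registers its dump list once
 * at sw_init. The first entry must be the power-status register, since
 * amdgpu_vcn_dump_ip_state() reads reg_list[0] to decide whether an
 * instance is powered before dumping the remaining registers.
 */
static const struct amdgpu_hwip_reg_entry vcn_example_reg_list[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
};

static int vcn_example_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return amdgpu_vcn_reg_dump_init(adev, vcn_example_reg_list,
					ARRAY_SIZE(vcn_example_reg_list));
}

The matching teardown is handled for the caller: amdgpu_vcn_sw_fini() invokes amdgpu_vcn_reg_dump_fini() whenever adev->vcn.reg_list is set.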
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 2a1f3dbb14d3..dc8a17bcc3c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
@@ -163,20 +162,30 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
- bool video_range, aon_range; \
+ bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
if (video_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
(VCN_VID_IP_ADDRESS)); \
else if (aon_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
(VCN_AON_IP_ADDRESS)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
else \
internal_reg_offset = (0xFFFFF & addr); \
\
@@ -228,6 +237,14 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_vcn_caps {
+ AMDGPU_VCN_RRMT_ENABLED,
+};
+
+#define AMDGPU_VCN_CAPS(caps) BIT(AMDGPU_VCN_##caps)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -279,6 +296,8 @@ struct amdgpu_vcn_fw_shared {
};
struct amdgpu_vcn_inst {
+ struct amdgpu_device *adev;
+ int inst;
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
@@ -297,6 +316,25 @@ struct amdgpu_vcn_inst {
atomic_t dpg_enc_submission_cnt;
struct amdgpu_vcn_fw_shared fw_shared;
uint8_t aid_id;
+ const struct firmware *fw; /* VCN firmware */
+ uint8_t vcn_config;
+ uint32_t vcn_codec_disable_mask;
+ atomic_t total_submission_cnt;
+ struct mutex vcn_pg_lock;
+ enum amd_powergating_state cur_state;
+ struct delayed_work idle_work;
+ unsigned fw_version;
+ unsigned num_enc_rings;
+ bool indirect_sram;
+ struct amdgpu_vcn_reg internal;
+ struct mutex vcn1_jpeg1_workaround;
+ int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+ int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
+ bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -304,35 +342,30 @@ struct amdgpu_vcn_ras {
};
struct amdgpu_vcn {
- unsigned fw_version;
- struct delayed_work idle_work;
- const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
- unsigned num_enc_rings;
- enum amd_powergating_state cur_state;
- bool indirect_sram;
-
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
- struct mutex vcn_pg_lock;
- struct mutex vcn1_jpeg1_workaround;
- atomic_t total_submission_cnt;
unsigned harvest_config;
- int (*pause_dpg_mode)(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
struct ras_common_if *ras_if;
struct amdgpu_vcn_ras *ras;
uint16_t inst_mask;
uint8_t num_inst_per_aid;
- bool using_unified_queue;
/* IP reg dump */
uint32_t *ip_dump;
+
+ uint32_t supported_reset;
+ uint32_t caps;
+
+ bool per_inst_fw;
+ unsigned fw_version;
+
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -468,7 +501,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -481,11 +514,11 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
-int amdgpu_vcn_early_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_suspend(struct amdgpu_device *adev);
-int amdgpu_vcn_resume(struct amdgpu_device *adev);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
@@ -503,7 +536,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
@@ -518,5 +551,21 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
+
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
#endif
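Since every init/fini/suspend/resume helper now takes an instance index, the instance loop moves out of the common code; below is a minimal hypothetical caller sketch, with the function name invented for illustration:

static int vcn_example_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	/* The per-instance API leaves iteration to the IP-version code. */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}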
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b6397d3229e1..3328ab63376b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->unique_id =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
+ adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
+ adev->virt.ras_telemetry_en_caps.all =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
break;
default:
dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
@@ -611,10 +614,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->decode_usage = 0;
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
- vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
-
- if (adev->mes.resource_1) {
- vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ vf2pf_info->mes_info_addr =
+ (uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
+ vf2pf_info->mes_info_size =
+ adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
}
vf2pf_info->checksum =
amd_sriov_msg_checksum(
@@ -703,6 +707,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -710,6 +716,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -732,7 +740,7 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -768,8 +776,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ return reg;
+}
+
+static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
+{
+ bool is_sriov = false;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
+ is_sriov = true;
+
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
@@ -798,10 +815,42 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
amdgpu_virt_request_init_data(adev);
break;
default: /* other chip doesn't support SRIOV */
+ is_sriov = false;
DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
+
+ return is_sriov;
+}
+
+static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
+{
+ ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
+
+ ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+
+ adev->virt.ras.cper_rptr = 0;
+}
+
+void amdgpu_virt_init(struct amdgpu_device *adev)
+{
+ bool is_sriov = false;
+ uint32_t reg = amdgpu_virt_init_detect_asic(adev);
+
+ is_sriov = amdgpu_virt_init_req_data(adev, reg);
+
+ if (is_sriov)
+ amdgpu_virt_init_ras(adev);
}
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
@@ -1010,6 +1059,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
void *scratch_reg2;
void *scratch_reg3;
void *spare_int;
+ unsigned long flags;
if (!adev->gfx.rlc.rlcg_reg_access_supported) {
dev_err(adev->dev,
@@ -1031,7 +1081,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
- mutex_lock(&adev->virt.rlcg_reg_lock);
+ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
if (reg_access_ctrl->spare_int)
spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
@@ -1090,7 +1140,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
ret = readl(scratch_reg0);
- mutex_unlock(&adev->virt.rlcg_reg_lock);
+ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
return ret;
}
@@ -1144,3 +1194,365 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
return xnack_mode;
}
+
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_sriov_ras_caps_en(adev))
+ return false;
+
+ if (adev->virt.ras_en_caps.bits.block_umc)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
+ if (adev->virt.ras_en_caps.bits.block_sdma)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
+ if (adev->virt.ras_en_caps.bits.block_gfx)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
+ if (adev->virt.ras_en_caps.bits.block_mmhub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
+ if (adev->virt.ras_en_caps.bits.block_athub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
+ if (adev->virt.ras_en_caps.bits.block_pcie_bif)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
+ if (adev->virt.ras_en_caps.bits.block_hdp)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
+ if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ if (adev->virt.ras_en_caps.bits.block_df)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
+ if (adev->virt.ras_en_caps.bits.block_smn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
+ if (adev->virt.ras_en_caps.bits.block_sem)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
+ if (adev->virt.ras_en_caps.bits.block_mp0)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
+ if (adev->virt.ras_en_caps.bits.block_mp1)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
+ if (adev->virt.ras_en_caps.bits.block_fuse)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
+ if (adev->virt.ras_en_caps.bits.block_mca)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
+ if (adev->virt.ras_en_caps.bits.block_vcn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
+ if (adev->virt.ras_en_caps.bits.block_jpeg)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
+ if (adev->virt.ras_en_caps.bits.block_ih)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
+ if (adev->virt.ras_en_caps.bits.block_mpio)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
+
+ if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
+ con->poison_supported = true; /* Poison is handled by host */
+
+ return true;
+}
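
The capability translation above spells out one if per block. A table-driven form is a possible alternative; a sketch under assumed bit positions (the pairs and helper below are placeholders, not the driver's real layout):

struct example_cap_map {
	unsigned int cap_bit;	/* bit index in the host capability word */
	unsigned int ras_block;	/* corresponding AMDGPU_RAS_BLOCK__* value */
};

static unsigned long example_collect_caps(unsigned long caps,
					  const struct example_cap_map *map,
					  unsigned int n)
{
	unsigned long enabled = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (caps & (1UL << map[i].cap_bit))
			enabled |= 1UL << map[i].ras_block;

	return enabled;
}
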
+
+static inline enum amd_sriov_ras_telemetry_gpu_block
+amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block)
+{
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return RAS_TELEMETRY_GPU_BLOCK_UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return RAS_TELEMETRY_GPU_BLOCK_SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return RAS_TELEMETRY_GPU_BLOCK_GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return RAS_TELEMETRY_GPU_BLOCK_HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return RAS_TELEMETRY_GPU_BLOCK_DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return RAS_TELEMETRY_GPU_BLOCK_SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return RAS_TELEMETRY_GPU_BLOCK_SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return RAS_TELEMETRY_GPU_BLOCK_MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return RAS_TELEMETRY_GPU_BLOCK_MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return RAS_TELEMETRY_GPU_BLOCK_FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return RAS_TELEMETRY_GPU_BLOCK_MCA;
+ case AMDGPU_RAS_BLOCK__VCN:
+ return RAS_TELEMETRY_GPU_BLOCK_VCN;
+ case AMDGPU_RAS_BLOCK__JPEG:
+ return RAS_TELEMETRY_GPU_BLOCK_JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return RAS_TELEMETRY_GPU_BLOCK_IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return RAS_TELEMETRY_GPU_BLOCK_MPIO;
+ default:
+ DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ block);
+ return RAS_TELEMETRY_GPU_BLOCK_COUNT;
+ }
+}
+
+static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry)
+{
+ struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ memcpy(&adev->virt.count_cache, tmp,
+ min(used_size, sizeof(adev->virt.count_cache)));
+out:
+ kfree(tmp);
+
+ return 0;
+}
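
amdgpu_virt_cache_host_error_counts() snapshots the shared buffer with kmemdup() before checking the checksum, so the host cannot change the bytes between validation and the copy into the cache. A sketch of that validate-then-copy discipline; example_checksum() stands in for amd_sriov_msg_checksum() and is not the driver's algorithm:

static unsigned int example_checksum(const void *buf, unsigned int len)
{
	const unsigned char *p = buf;
	unsigned int sum = 0;

	while (len--)
		sum += *p++;

	return sum;
}

static int example_validate_copy(void *cache, unsigned int cache_size,
				 const void *snapshot, unsigned int used,
				 unsigned int expected)
{
	if (example_checksum(snapshot, used) != expected)
		return -EINVAL;	/* corrupted snapshot, keep the old cache */

	memcpy(cache, snapshot, min(used, cache_size));
	return 0;
}
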
+
+static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
+ /* Host allows 15 RAS telemetry requests per 60 seconds, after which the
+ * host will ignore incoming guest messages. Rate-limit the guest
+ * messages to prevent the guest from DoSing itself.
+ */
+ if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_err_count(adev))
+ amdgpu_virt_cache_host_error_counts(adev,
+ virt->fw_reserve.ras_telemetry);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return 0;
+}
+
+/* Bypass ACA interface and query ECC counts directly from host */
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return -EOPNOTSUPP;
+
+ /* Host Access may be lost during reset, just return last cached data. */
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, false);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
+ err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
+ err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
+
+ return 0;
+}
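
A hypothetical consumer of the new query path; amdgpu_virt_req_ras_err_count() and struct ras_err_data come from this patch, the wrapper itself is only an illustration:

static void example_log_umc_errors(struct amdgpu_device *adev)
{
	struct ras_err_data err_data = {};

	if (amdgpu_virt_req_ras_err_count(adev, AMDGPU_RAS_BLOCK__UMC,
					  &err_data))
		return;	/* block not covered by host telemetry */

	dev_info(adev->dev, "UMC ue=%lu ce=%lu de=%lu\n",
		 err_data.ue_count, err_data.ce_count, err_data.de_count);
}
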
+
+static int
+amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ u32 *more)
+{
+ struct amd_sriov_ras_cper_dump *cper_dump = NULL;
+ struct cper_hdr *entry = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t checksum, used_size, i;
+ int ret = 0;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return -EINVAL;
+
+ cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
+ if (!cper_dump)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *more = cper_dump->more;
+
+ if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
+ dev_warn(
+ adev->dev,
+ "guest specified rptr that was too high! guest rptr: 0x%llx, host wptr: 0x%llx\n",
+ adev->virt.ras.cper_rptr, cper_dump->wptr);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+ goto out;
+ }
+
+ entry = (struct cper_hdr *)&cper_dump->buf[0];
+
+ for (i = 0; i < cper_dump->count; i++) {
+ amdgpu_cper_ring_write(ring, entry, entry->record_length);
+ entry = (struct cper_hdr *)((char *)entry +
+ entry->record_length);
+ }
+
+ if (cper_dump->overflow_count)
+ dev_warn(adev->dev,
+ "host reported CPER overflow of 0x%llx entries!\n",
+ cper_dump->overflow_count);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+out:
+ kfree(cper_dump);
+
+ return ret;
+}
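
The wptr/rptr handling above implements a one-way producer/consumer contract: the host publishes a write pointer, the guest tracks its own read pointer, and a write pointer behind the read pointer means the host rebuilt its buffer and the guest must resynchronize. The bare logic, sketched without driver types:

/* Returns the guest rptr to use after one dump round (illustrative). */
static u64 example_advance_rptr(u64 rptr, u64 wptr)
{
	if (wptr < rptr)
		return wptr;	/* host restarted: resync, consume nothing */

	/* ... copy the records in [rptr, wptr) into the local ring ... */
	return wptr;
}
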
+
+static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+ uint32_t more = 0;
+
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
+ return -EOPNOTSUPP;
+
+ do {
+ if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
+ ret = amdgpu_virt_write_cpers_to_ring(
+ adev, virt->fw_reserve.ras_telemetry, &more);
+ else
+ ret = 0;
+ } while (more && !ret);
+
+ return ret;
+}
+
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
+ if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ return ret;
+}
+
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
+{
+ unsigned long ue_count, ce_count;
+
+ if (amdgpu_sriov_ras_telemetry_en(adev)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, true);
+ amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
+ }
+
+ return 0;
+}
+
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return false;
+
+ return true;
+}
+
+/**
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device pointer
+ *
+ * Send a command to the GPU hypervisor to write new bad pages into the
+ * shared PF2VF region.
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+ /* Host allows 15 RAS telemetry requests per 60 seconds, after which the
+ * host will ignore incoming guest messages. Rate-limit the guest
+ * messages to prevent the guest from DoSing itself.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
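
A hypothetical caller of the critical-region check. Note the -EPERM default above: a rate-limited request reports as an error rather than a miss, so callers must decide how to treat failures:

static bool example_addr_is_critical(struct amdgpu_device *adev, u64 addr)
{
	bool hit = false;

	/* Illustrative policy: treat unsupported/rate-limited as "not
	 * critical"; a real caller may prefer to retry on -EPERM.
	 */
	if (amdgpu_virt_check_vf_critical_region(adev, addr, &hit))
		return false;

	return hit;
}
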
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b650a2032c42..d1172c8e58c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -95,6 +95,10 @@ struct amdgpu_virt_ops {
void (*ras_poison_handler)(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
+ int (*req_ras_err_count)(struct amdgpu_device *adev);
+ int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -103,6 +107,7 @@ struct amdgpu_virt_ops {
struct amdgpu_virt_fw_reserve {
struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
+ void *ras_telemetry;
unsigned int checksum_key;
};
@@ -136,15 +141,22 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
/* MES info */
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
+ AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
+ AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
+ AMDGIM_FEATURE_RAS_CPER = (1 << 11),
};
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ /* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -238,6 +250,18 @@ struct amdgpu_virt_ras_err_handler_data {
int last_reserved;
};
+struct amdgpu_virt_ras {
+ struct ratelimit_state ras_error_cnt_rs;
+ struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
+ struct mutex ras_telemetry_mutex;
+ uint64_t cper_rptr;
+};
+
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -247,11 +271,16 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -275,7 +304,16 @@ struct amdgpu_virt {
/* the ucode id to signal the autoload */
uint32_t autoload_ucode_id;
- struct mutex rlcg_reg_lock;
+ /* Spinlock to protect access to the RLCG register interface */
+ spinlock_t rlcg_reg_lock;
+
+ union amd_sriov_ras_caps ras_en_caps;
+ union amd_sriov_ras_caps ras_telemetry_en_caps;
+ struct amdgpu_virt_ras ras;
+ struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* hibernate and resume with different VF features on XGMI-enabled systems */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -311,15 +349,35 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
#define amdgpu_sriov_vf_mmio_access_protection(adev) \
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+#define amdgpu_sriov_ras_caps_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)
+
+#define amdgpu_sriov_ras_telemetry_en(adev) \
+(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)
+
+#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
+(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+
+#define amdgpu_sriov_ras_cper_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
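
The feature gates above compose from coarse to fine: the caps flag, then the telemetry flag (which also requires the reserved buffer), then the per-block enable bit. An illustrative combined check; the helper name is an assumption:

static inline bool example_ras_query_allowed(struct amdgpu_device *adev,
					     unsigned int sriov_blk)
{
	return amdgpu_sriov_ras_caps_en(adev) &&
	       amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk);
}
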
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -333,6 +391,8 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_multi_vf_mode(adev) \
+ (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
#define amdgpu_sriov_is_debug(adev) \
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
@@ -343,6 +403,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
@@ -358,7 +422,7 @@ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
-void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+void amdgpu_virt_init(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
@@ -383,4 +447,13 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
u32 acc_flags, u32 hwip,
bool write, u32 *rlcg_flag);
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data);
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d4c2afafbb73..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -188,8 +187,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
@@ -493,10 +492,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int amdgpu_vkms_sw_init(void *handle)
+static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
@@ -536,9 +535,9 @@ static int amdgpu_vkms_sw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_sw_fini(void *handle)
+static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
@@ -555,9 +554,9 @@ static int amdgpu_vkms_sw_fini(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_init(void *handle)
+static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(void *handle)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
@@ -600,55 +592,45 @@ static int amdgpu_vkms_hw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_fini(void *handle)
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int amdgpu_vkms_suspend(void *handle)
+static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = drm_mode_config_helper_suspend(adev_to_drm(adev));
if (r)
return r;
- return amdgpu_vkms_hw_fini(handle);
+
+ return 0;
}
-static int amdgpu_vkms_resume(void *handle)
+static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_vkms_hw_init(handle);
+ r = amdgpu_vkms_hw_init(ip_block);
if (r)
return r;
- return drm_mode_config_helper_resume(adev_to_drm(adev));
+ return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
-static bool amdgpu_vkms_is_idle(void *handle)
+static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int amdgpu_vkms_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int amdgpu_vkms_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int amdgpu_vkms_set_clockgating_state(void *handle,
+static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int amdgpu_vkms_set_powergating_state(void *handle,
+static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -656,8 +638,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle,
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.name = "amdgpu_vkms",
- .early_init = NULL,
- .late_init = NULL,
.sw_init = amdgpu_vkms_sw_init,
.sw_fini = amdgpu_vkms_sw_fini,
.hw_init = amdgpu_vkms_hw_init,
@@ -665,12 +645,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.suspend = amdgpu_vkms_suspend,
.resume = amdgpu_vkms_resume,
.is_idle = amdgpu_vkms_is_idle,
- .wait_for_idle = amdgpu_vkms_wait_for_idle,
- .soft_reset = amdgpu_vkms_soft_reset,
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
.set_powergating_state = amdgpu_vkms_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6005280f5f38..c1a801203949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
+#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
@@ -127,43 +128,14 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
+ * Asserts that the VM root PD is locked.
*/
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
- int r;
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
@@ -180,6 +152,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
@@ -197,6 +170,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
@@ -212,6 +186,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
@@ -259,6 +234,7 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
@@ -278,6 +254,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
@@ -294,10 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -311,6 +291,112 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_update_shared - helper to update shared memory stat
+ * @base: base structure for tracking BO usage in a VM
+ *
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * stat changed (e.g. the buffer was moved), amdgpu_vm_update_stats() needs to
+ * be called as well.
+ */
+static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ uint64_t size = amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+ bool shared;
+
+ dma_resv_assert_held(bo->tbo.base.resv);
+ spin_lock(&vm->status_lock);
+ shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ if (base->shared != shared) {
+ base->shared = shared;
+ if (shared) {
+ vm->stats[bo_memtype].drm.shared += size;
+ vm->stats[bo_memtype].drm.private -= size;
+ } else {
+ vm->stats[bo_memtype].drm.shared -= size;
+ vm->stats[bo_memtype].drm.private += size;
+ }
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
+ * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
+ * @bo: amdgpu buffer object
+ *
+ * Update the per-VM stats for every VM this BO belongs to, moving its size
+ * between the private and shared counters as needed.
+ */
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *base;
+
+ for (base = bo->vm_bo; base; base = base->next)
+ amdgpu_vm_update_shared(base);
+}
+
+/**
+ * amdgpu_vm_update_stats_locked - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * The caller needs to hold the vm status_lock. Useful when multiple updates
+ * need to happen at the same time.
+ */
+static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ int64_t size = sign * amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+
+ /* For drm-total- and drm-shared-, BOs are accounted by their preferred
+ * placement, see also amdgpu_bo_mem_stats_placement.
+ */
+ if (base->shared)
+ vm->stats[bo_memtype].drm.shared += size;
+ else
+ vm->stats[bo_memtype].drm.private += size;
+
+ if (res && res->mem_type < __AMDGPU_PL_NUM) {
+ uint32_t res_memtype = res->mem_type;
+
+ vm->stats[res_memtype].drm.resident += size;
+ /* A BO only counts as purgeable if it is resident,
+ * since otherwise there's nothing to purge.
+ */
+ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ vm->stats[res_memtype].drm.purgeable += size;
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ vm->stats[bo_memtype].evicted += size;
+ }
+}
+
+/**
+ * amdgpu_vm_update_stats - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * Updates the basic memory stat when bo is added/deleted/moved.
+ */
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(base, res, sign);
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -333,6 +419,11 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
+ spin_lock(&vm->status_lock);
+ base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
+ spin_unlock(&vm->status_lock);
+
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -374,6 +465,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ if (unlikely(ret))
+ return ret;
+ }
+ spin_lock(&vm->status_lock);
+ prev = prev->next;
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
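
amdgpu_vm_lock_done_list() walks the list with a classic drop-and-reacquire cursor: the spinlock covers only reading the next pointer, the sleeping drm_exec_prepare_obj() call runs unlocked, and 'prev' pins the position so forward progress survives concurrent list movement. The skeleton of the pattern in isolation (illustrative, assumes <linux/list.h> and <linux/spinlock.h>):

static int example_walk(spinlock_t *lock, struct list_head *head,
			int (*slow_op)(struct list_head *node))
{
	struct list_head *prev = head;
	int ret;

	spin_lock(lock);
	while (prev->next != head) {
		struct list_head *node = prev->next;

		spin_unlock(lock);
		ret = slow_op(node);	/* may sleep; list may mutate */
		if (ret)
			return ret;

		spin_lock(lock);
		prev = prev->next;	/* only trustworthy under the lock */
	}
	spin_unlock(lock);

	return 0;
}
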
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -505,18 +632,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
-
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
-
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
- amdgpu_vm_put_task_info(ti);
- }
-
- return -EINVAL;
- }
+ dma_resv_assert_held(bo->tbo.base.resv);
r = validate(param, bo);
if (r)
@@ -543,22 +659,31 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -643,6 +768,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
@@ -650,8 +776,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool gds_switch_needed = ring->funcs->emit_gds_switch &&
job->gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush;
- struct dma_fence *fence = NULL;
+ bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -674,12 +802,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
- ring->funcs->emit_cleaner_shader(ring);
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader && job->base.s_fence &&
+ &job->base.s_fence->scheduled == isolation->spearhead;
- if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
+ !cleaner_shader_needed)
return 0;
amdgpu_ring_ib_begin(ring);
@@ -690,6 +819,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
+ if (cleaner_shader_needed)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
@@ -701,7 +833,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
@@ -709,10 +841,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
job->oa_size);
}
- if (vm_flush_needed || pasid_mapping_needed) {
+ if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
+ /* this is part of the job's context */
+ af = container_of(fence, struct amdgpu_fence, base);
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
}
if (vm_flush_needed) {
@@ -731,6 +866,18 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
id->pasid_mapping = dma_fence_get(fence);
mutex_unlock(&id_mgr->lock);
}
+
+ /*
+ * Make sure that all other submissions wait for the cleaner shader to
+ * finish before we push them to the HW.
+ */
+ if (cleaner_shader_needed) {
+ trace_amdgpu_cleaner_shader(ring, fence);
+ mutex_lock(&adev->enforce_isolation_mutex);
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(fence);
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
dma_fence_put(fence);
amdgpu_ring_patch_cond_exec(ring, patch);
@@ -822,6 +969,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
LIST_HEAD(relocated);
int r, idx;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
@@ -837,7 +986,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
@@ -1006,7 +1156,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1082,51 +1233,11 @@ error_free:
return r;
}
-static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
- struct amdgpu_mem_stats *stats)
-{
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *bo = bo_va->base.bo;
-
- if (!bo)
- return;
-
- /*
- * For now ignore BOs which are currently locked and potentially
- * changing their location.
- */
- if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
- !dma_resv_trylock(bo->tbo.base.resv))
- return;
-
- amdgpu_bo_get_memory(bo, stats);
- if (!amdgpu_vm_is_bo_always_valid(vm, bo))
- dma_resv_unlock(bo->tbo.base.resv);
-}
-
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats)
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- struct amdgpu_bo_va *bo_va, *tmp;
-
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
}
@@ -1159,7 +1270,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
int r;
amdgpu_sync_create(&sync);
- if (clear || !bo) {
+ if (clear) {
mem = NULL;
/* Implicitly sync to command submissions in the same VM before
@@ -1174,11 +1285,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (r)
goto error_free;
}
+ } else if (!bo) {
+ mem = NULL;
+
+ /* PRT map operations don't need to sync to anything. */
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1235,13 +1350,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* Normally bo_va->flags should only contain the READABLE and WRITEABLE
* bits here, but we filter the flags again just in case
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1259,10 +1375,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
- uint32_t mem_type = bo->tbo.resource->mem_type;
-
- if (!(bo->preferred_domains &
- amdgpu_mem_type_to_domain(mem_type)))
+ if (bo->tbo.resource &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1383,7 +1498,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1539,7 +1654,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -1662,7 +1777,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1722,7 +1837,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1781,7 +1896,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -2069,6 +2184,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (*base != &bo_va->base)
continue;
+ amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
*base = bo_va->base.next;
break;
}
@@ -2137,14 +2253,12 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
- * @adev: amdgpu_device pointer
* @bo: amdgpu buffer object
* @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted)
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
@@ -2170,6 +2284,32 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_move - handle BO move
+ *
+ * @bo: amdgpu buffer object
+ * @new_mem: the new placement of the BO move
+ * @evicted: is the BO evicted
+ *
+ * Update the memory stats for the new placement and mark @bo as invalid.
+ */
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted)
+{
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
+ amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
+ spin_unlock(&vm->status_lock);
+ }
+
+ amdgpu_vm_bo_invalidate(bo, evicted);
+}
+
+/**
* amdgpu_vm_get_block_size - calculate VM page table size as power of two
*
* @vm_size: VM size
@@ -2278,10 +2418,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2292,13 +2433,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
@@ -2330,7 +2469,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2390,11 +2530,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2409,6 +2549,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2416,7 +2557,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2434,8 +2575,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
- INIT_LIST_HEAD(&vm->pt_freed);
- INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
INIT_KFIFO(vm->faults);
r = amdgpu_vm_init_entities(adev, vm);
@@ -2449,8 +2588,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2492,7 +2631,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
+
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2500,6 +2648,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2543,8 +2696,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2574,18 +2727,14 @@ unreserve_bo:
return r;
}
-/**
- * amdgpu_vm_release_compute - release a compute vm
- * @adev: amdgpu_device pointer
- * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
- *
- * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
- * pasid from vm. Compute should stop use of vm after this call.
- */
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+static bool amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
{
- amdgpu_vm_set_pasid(adev, vm, 0);
- vm->is_compute_context = false;
+ for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
+ if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
+ vm->stats[i].evicted == 0))
+ return false;
+ }
+ return true;
}
/**
@@ -2607,12 +2756,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- flush_work(&vm->pt_free_work);
-
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_put_task_info(vm->task_info);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -2622,7 +2772,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2653,13 +2803,20 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+
+ if (!amdgpu_vm_stats_is_zero(vm)) {
+ struct amdgpu_task_info *ti = vm->task_info;
+
+ dev_warn(adev->dev,
+ "VM memory stats for proc %s(%d) task %s(%d) are non-zero at fini\n",
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
+ }
+
+ amdgpu_vm_put_task_info(vm->task_info);
}
/**
@@ -2742,6 +2899,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2750,17 +2908,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
+ amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -2864,7 +3015,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -2898,6 +3049,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
@@ -3038,3 +3191,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 52dd7cdfdc81..cf0ec94e8a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -35,6 +35,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
+#include "amdgpu_ttm.h"
struct drm_exec;
@@ -42,7 +43,6 @@ struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
-struct amdgpu_mem_stats;
/*
* GPUVM handling
@@ -203,9 +203,13 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by spinlock */
+ /* protected by vm status_lock */
struct list_head vm_status;
+ /* if the BO is counted as shared in mem stats,
+ * protected by vm status_lock
+ */
+ bool shared;
+
/* protected by the BO being reserved */
bool moved;
};
@@ -232,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -305,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -322,6 +325,13 @@ struct amdgpu_vm_fault_info {
unsigned int vmhub;
};
+struct amdgpu_mem_stats {
+ struct drm_memory_stats drm;
+
+ /* buffers that requested this placement but are currently evicted */
+ uint64_t evicted;
+};
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -336,12 +346,19 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
+ /* Memory statistics for this vm, protected by status_lock */
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+ * PDs, PTs or per-VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -351,18 +368,28 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+ * have their own dma_resv object and do not depend on the root PD. Their
+ * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
- /* PT BOs scheduled to free and fill with zero if vm_resv is not hold */
- struct list_head pt_freed;
- struct work_struct pt_free_work;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not yet updated in the PTs.
+ */
+ struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_bo_base root;
@@ -385,7 +412,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -473,16 +500,14 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -515,8 +540,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *new_res, int sign);
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
@@ -526,11 +555,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -567,7 +596,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats);
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
@@ -655,4 +684,12 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
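For illustration, the two new iterators are thin wrappers around list_for_each_entry() on the bo_va valids/invalids lists; a walk that was previously open-coded could become (the trace call is just a plausible body):

	struct amdgpu_bo_va_mapping *mapping;

	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
		trace_amdgpu_vm_bo_mapping(mapping);

	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping)
		trace_amdgpu_vm_bo_mapping(mapping);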
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index f78a0434a48f..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
@@ -537,6 +539,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+ amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
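The -1 argument suggests a signed accounting convention for amdgpu_vm_update_stats(); assuming the obvious counterpart (this call is illustrative, not part of the patch), an allocation or validation path would do the inverse on a struct amdgpu_vm_bo_base *base:

	/* Assumed convention: +1 when a BO gains a placement, -1 when it is freed. */
	amdgpu_vm_update_stats(base, bo->tbo.resource, +1);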
@@ -546,27 +549,6 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
amdgpu_bo_unref(&entry->bo);
}
-void amdgpu_vm_pt_free_work(struct work_struct *work)
-{
- struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm;
- LIST_HEAD(pt_freed);
-
- vm = container_of(work, struct amdgpu_vm, pt_free_work);
-
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->pt_freed, &pt_freed);
- spin_unlock(&vm->status_lock);
-
- /* flush_work in amdgpu_vm_fini ensure vm->root.bo is valid. */
- amdgpu_bo_reserve(vm->root.bo, true);
-
- list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
- amdgpu_vm_pt_free(entry);
-
- amdgpu_bo_unreserve(vm->root.bo);
-}
-
/**
* amdgpu_vm_pt_free_list - free PD/PT levels
*
@@ -579,19 +561,15 @@ void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
struct amdgpu_vm_update_params *params)
{
struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm = params->vm;
bool unlocked = params->unlocked;
if (list_empty(&params->tlb_flush_waitlist))
return;
- if (unlocked) {
- spin_lock(&vm->status_lock);
- list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
- spin_unlock(&vm->status_lock);
- schedule_work(&vm->pt_free_work);
- return;
- }
+ /*
+ * Unlocked unmaps only clear page table leaves; they must never free
+ * the page tables themselves, so warn if we get here unlocked.
+ */
+ WARN_ON(unlocked);
list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
amdgpu_vm_pt_free(entry);
@@ -899,7 +877,15 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = ((uint64_t)mask + 1) << shift;
+
+ if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
+ /*
+ * An unlocked unmap from the MMU notifier callback may hit a huge
+ * page whose leaf is a PDE entry; clear only that one entry, then
+ * search again from the next entry for a PDE or PTE leaf.
+ */
+ entry_end = 1ULL << shift;
+ else
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
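A worked example of the new clamp, assuming 9 bits per page-table level (mask = 0x1ff) at the level just above the PTB (shift = 9), with 4 KiB pages:

	/*
	 * default:  entry_end = (0x1ff + 1) << 9 = 262144 pages (all 512 PDEs, 1 GiB)
	 * unlocked: entry_end = 1 << 9          =    512 pages (one 2 MiB PDE)
	 *
	 * so an unlocked huge-page unmap clears a single PDE leaf and the walk
	 * resumes at the next entry, as the comment above describes.
	 */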
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
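Putting the SDMA hunks together, the job id is now threaded from the update_funcs caller down to the IB allocation; roughly (all names taken from the hunks above):

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
	/* -> amdgpu_vm_sdma_prepare(p, sync, k_job_id)
	 *    -> amdgpu_vm_sdma_alloc_job(p, 0, k_job_id)
	 *       -> amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
	 *                                   ndw * 4, pool, &p->job, k_job_id)
	 */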
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
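The switch from dma_fence_init() plus .use_64bit_seqno to dma_fence_init64() moves the 64-bit sequence-number behaviour from the ops flag into the initializer; assuming dma_fence_init64() mirrors the dma_fence_init() signature, the resulting pattern is:

	static const struct dma_fence_ops ops = {
		/* no .use_64bit_seqno flag needed any more */
		.get_driver_name   = amdgpu_tlb_fence_get_driver_name,
		.get_timeline_name = amdgpu_tlb_fence_get_timeline_name,
	};

	dma_fence_init64(&f->base, &ops, &f->lock, vm->tlb_fence_context,
			 atomic64_read(&vm->tlb_seq));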
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 5acd20ff5979..474bfe36c0c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -236,7 +236,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
int ret;
amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
- ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", fw_prefix);
if (ret)
goto out;
@@ -295,9 +296,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_early_init(void *handle)
+static int vpe_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
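These hunks follow the driver-wide conversion of IP-block callbacks from an opaque void *handle to a typed struct amdgpu_ip_block *; the pattern, shown with a hypothetical callback name:

	static int example_hw_init(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;	/* no cast needed */

		/* ... callback body unchanged ... */
		return 0;
	}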
@@ -356,9 +357,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_sw_init(void *handle)
+static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
@@ -377,18 +378,27 @@ static int vpe_sw_init(void *handle)
ret = vpe_init_microcode(vpe);
if (ret)
goto out;
+
+ adev->vpe.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
+ if (ret)
+ goto out;
out:
return ret;
}
-static int vpe_sw_fini(void *handle)
+static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->